File: llvm/include/llvm/CodeGen/SelectionDAGNodes.h
Warning: line 1150, column 10: Called C++ object pointer is null
//===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Custom DAG lowering for SI
//
//===----------------------------------------------------------------------===//

#include "SIISelLowering.h"
#include "AMDGPU.h"
#include "AMDGPUInstrInfo.h"
#include "AMDGPUTargetMachine.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/LegacyDivergenceAnalysis.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsR600.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/KnownBits.h"

using namespace llvm;

#define DEBUG_TYPE "si-lower"

STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<bool> DisableLoopAlignment(
    "amdgpu-disable-loop-alignment",
    cl::desc("Do not align and prefetch loops"),
    cl::init(false));

static cl::opt<bool> VGPRReserveforSGPRSpill(
    "amdgpu-reserve-vgpr-for-sgpr-spill",
    cl::desc("Reserve one VGPR for future SGPR spills"), cl::init(true));

static cl::opt<bool> UseDivergentRegisterIndexing(
    "amdgpu-use-divergent-register-indexing",
    cl::Hidden,
    cl::desc("Use indirect register addressing for divergent indexes"),
    cl::init(false));

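// Helpers for querying the function-level denormal mode; several lowering
// decisions below depend on whether denormals are flushed.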
static bool hasFP32Denormals(const MachineFunction &MF) {
  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  return Info->getMode().allFP32Denormals();
}

static bool hasFP64FP16Denormals(const MachineFunction &MF) {
  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  return Info->getMode().allFP64FP16Denormals();
}

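// Return the first SGPR that the calling-convention state has not yet
// allocated; unreachable if every SGPR_32 register is already taken.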
static unsigned findFirstFreeSGPR(CCState &CCInfo) {
  unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
  for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) {
    if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) {
      return AMDGPU::SGPR0 + Reg;
    }
  }
  llvm_unreachable("Cannot allocate sgpr");
}

SITargetLowering::SITargetLowering(const TargetMachine &TM,
                                   const GCNSubtarget &STI)
    : AMDGPUTargetLowering(TM, STI),
      Subtarget(&STI) {
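  // The register class registered for each type below only biases selection:
  // values that divergence analysis proves uniform may use scalar registers,
  // while divergent values are moved to VGPRs regardless of the default class.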
  addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass);
  addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);

  addRegisterClass(MVT::i32, &AMDGPU::SReg_32RegClass);
  addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass);

  addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass);

  const SIRegisterInfo *TRI = STI.getRegisterInfo();
  const TargetRegisterClass *V64RegClass = TRI->getVGPR64Class();

  addRegisterClass(MVT::f64, V64RegClass);
  addRegisterClass(MVT::v2f32, V64RegClass);

  addRegisterClass(MVT::v3i32, &AMDGPU::SGPR_96RegClass);
  addRegisterClass(MVT::v3f32, TRI->getVGPRClassForBitWidth(96));

  addRegisterClass(MVT::v2i64, &AMDGPU::SGPR_128RegClass);
  addRegisterClass(MVT::v2f64, &AMDGPU::SGPR_128RegClass);

  addRegisterClass(MVT::v4i32, &AMDGPU::SGPR_128RegClass);
  addRegisterClass(MVT::v4f32, TRI->getVGPRClassForBitWidth(128));

  addRegisterClass(MVT::v5i32, &AMDGPU::SGPR_160RegClass);
  addRegisterClass(MVT::v5f32, TRI->getVGPRClassForBitWidth(160));

  addRegisterClass(MVT::v6i32, &AMDGPU::SGPR_192RegClass);
  addRegisterClass(MVT::v6f32, TRI->getVGPRClassForBitWidth(192));

  addRegisterClass(MVT::v3i64, &AMDGPU::SGPR_192RegClass);
  addRegisterClass(MVT::v3f64, TRI->getVGPRClassForBitWidth(192));

  addRegisterClass(MVT::v7i32, &AMDGPU::SGPR_224RegClass);
  addRegisterClass(MVT::v7f32, TRI->getVGPRClassForBitWidth(224));

  addRegisterClass(MVT::v8i32, &AMDGPU::SGPR_256RegClass);
  addRegisterClass(MVT::v8f32, TRI->getVGPRClassForBitWidth(256));

  addRegisterClass(MVT::v4i64, &AMDGPU::SGPR_256RegClass);
  addRegisterClass(MVT::v4f64, TRI->getVGPRClassForBitWidth(256));

  addRegisterClass(MVT::v16i32, &AMDGPU::SGPR_512RegClass);
  addRegisterClass(MVT::v16f32, TRI->getVGPRClassForBitWidth(512));

  addRegisterClass(MVT::v8i64, &AMDGPU::SGPR_512RegClass);
  addRegisterClass(MVT::v8f64, TRI->getVGPRClassForBitWidth(512));

  addRegisterClass(MVT::v16i64, &AMDGPU::SGPR_1024RegClass);
  addRegisterClass(MVT::v16f64, TRI->getVGPRClassForBitWidth(1024));

  if (Subtarget->has16BitInsts()) {
    addRegisterClass(MVT::i16, &AMDGPU::SReg_32RegClass);
    addRegisterClass(MVT::f16, &AMDGPU::SReg_32RegClass);

    // Unless there are also VOP3P operations, not all operations are really
    // legal.
    addRegisterClass(MVT::v2i16, &AMDGPU::SReg_32RegClass);
    addRegisterClass(MVT::v2f16, &AMDGPU::SReg_32RegClass);
    addRegisterClass(MVT::v4i16, &AMDGPU::SReg_64RegClass);
    addRegisterClass(MVT::v4f16, &AMDGPU::SReg_64RegClass);
  }

  addRegisterClass(MVT::v32i32, &AMDGPU::VReg_1024RegClass);
  addRegisterClass(MVT::v32f32, TRI->getVGPRClassForBitWidth(1024));

  computeRegisterProperties(Subtarget->getRegisterInfo());

  // The boolean content concept here is too inflexible. Compares only ever
  // really produce a 1-bit result. Any copy/extend from these will turn into a
  // select, and zext/1 or sext/-1 are equally cheap. Arbitrarily choose 0/1, as
  // it's what most targets use.
  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent);

  // We need to custom lower vector stores from local memory
  setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v3i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v5i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v6i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v7i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v8i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v16i32, Custom);
  setOperationAction(ISD::LOAD, MVT::i1, Custom);
  setOperationAction(ISD::LOAD, MVT::v32i32, Custom);

  setOperationAction(ISD::STORE, MVT::v2i32, Custom);
  setOperationAction(ISD::STORE, MVT::v3i32, Custom);
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);
  setOperationAction(ISD::STORE, MVT::v5i32, Custom);
  setOperationAction(ISD::STORE, MVT::v6i32, Custom);
  setOperationAction(ISD::STORE, MVT::v7i32, Custom);
  setOperationAction(ISD::STORE, MVT::v8i32, Custom);
  setOperationAction(ISD::STORE, MVT::v16i32, Custom);
  setOperationAction(ISD::STORE, MVT::i1, Custom);
  setOperationAction(ISD::STORE, MVT::v32i32, Custom);

  setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
  setTruncStoreAction(MVT::v3i32, MVT::v3i16, Expand);
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);
  setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand);
  setTruncStoreAction(MVT::v32i32, MVT::v32i16, Expand);
  setTruncStoreAction(MVT::v2i32, MVT::v2i8, Expand);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Expand);
  setTruncStoreAction(MVT::v8i32, MVT::v8i8, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i8, Expand);
  setTruncStoreAction(MVT::v32i32, MVT::v32i8, Expand);
  setTruncStoreAction(MVT::v2i16, MVT::v2i8, Expand);
  setTruncStoreAction(MVT::v4i16, MVT::v4i8, Expand);
  setTruncStoreAction(MVT::v8i16, MVT::v8i8, Expand);
  setTruncStoreAction(MVT::v16i16, MVT::v16i8, Expand);
  setTruncStoreAction(MVT::v32i16, MVT::v32i8, Expand);

  setTruncStoreAction(MVT::v3i64, MVT::v3i16, Expand);
  setTruncStoreAction(MVT::v3i64, MVT::v3i32, Expand);
  setTruncStoreAction(MVT::v4i64, MVT::v4i8, Expand);
  setTruncStoreAction(MVT::v8i64, MVT::v8i8, Expand);
  setTruncStoreAction(MVT::v8i64, MVT::v8i16, Expand);
  setTruncStoreAction(MVT::v8i64, MVT::v8i32, Expand);
  setTruncStoreAction(MVT::v16i64, MVT::v16i32, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);

  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  setOperationAction(ISD::SELECT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Promote);
  AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64);

  setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);

  setOperationAction(ISD::SETCC, MVT::i1, Promote);
  setOperationAction(ISD::SETCC, MVT::v2i1, Expand);
  setOperationAction(ISD::SETCC, MVT::v4i1, Expand);
  AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);

  setOperationAction(ISD::TRUNCATE, MVT::v2i32, Expand);
  setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);
  setOperationAction(ISD::TRUNCATE, MVT::v3i32, Expand);
  setOperationAction(ISD::FP_ROUND, MVT::v3f32, Expand);
  setOperationAction(ISD::TRUNCATE, MVT::v4i32, Expand);
  setOperationAction(ISD::FP_ROUND, MVT::v4f32, Expand);
  setOperationAction(ISD::TRUNCATE, MVT::v5i32, Expand);
  setOperationAction(ISD::FP_ROUND, MVT::v5f32, Expand);
  setOperationAction(ISD::TRUNCATE, MVT::v6i32, Expand);
  setOperationAction(ISD::FP_ROUND, MVT::v6f32, Expand);
  setOperationAction(ISD::TRUNCATE, MVT::v7i32, Expand);
  setOperationAction(ISD::FP_ROUND, MVT::v7f32, Expand);
  setOperationAction(ISD::TRUNCATE, MVT::v8i32, Expand);
  setOperationAction(ISD::FP_ROUND, MVT::v8f32, Expand);
  setOperationAction(ISD::TRUNCATE, MVT::v16i32, Expand);
  setOperationAction(ISD::FP_ROUND, MVT::v16f32, Expand);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v3i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::i1, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f64, Expand);

  setOperationAction(ISD::UADDO, MVT::i32, Legal);
  setOperationAction(ISD::USUBO, MVT::i32, Legal);

  setOperationAction(ISD::ADDCARRY, MVT::i32, Legal);
  setOperationAction(ISD::SUBCARRY, MVT::i32, Legal);

  setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);

#if 0
  setOperationAction(ISD::ADDCARRY, MVT::i64, Legal);
  setOperationAction(ISD::SUBCARRY, MVT::i64, Legal);
#endif

  // We only support LOAD/STORE and vector manipulation ops for vectors
  // with > 4 elements.
  for (MVT VT : { MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32,
                  MVT::v2i64, MVT::v2f64, MVT::v4i16, MVT::v4f16,
                  MVT::v3i64, MVT::v3f64, MVT::v6i32, MVT::v6f32,
                  MVT::v4i64, MVT::v4f64, MVT::v8i64, MVT::v8f64,
                  MVT::v16i64, MVT::v16f64, MVT::v32i32, MVT::v32f32 }) {
    for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
      switch (Op) {
      case ISD::LOAD:
      case ISD::STORE:
      case ISD::BUILD_VECTOR:
      case ISD::BITCAST:
      case ISD::EXTRACT_VECTOR_ELT:
      case ISD::INSERT_VECTOR_ELT:
      case ISD::EXTRACT_SUBVECTOR:
      case ISD::SCALAR_TO_VECTOR:
        break;
      case ISD::INSERT_SUBVECTOR:
      case ISD::CONCAT_VECTORS:
        setOperationAction(Op, VT, Custom);
        break;
      default:
        setOperationAction(Op, VT, Expand);
        break;
      }
    }
  }

  setOperationAction(ISD::FP_EXTEND, MVT::v4f32, Expand);

  // TODO: For dynamic 64-bit vector inserts/extracts, should emit a pseudo that
  // is expanded to avoid having two separate loops in case the index is a VGPR.

  // Most operations are naturally 32-bit vector operations. We only support
  // load and store of i64 vectors, so promote v2i64 vector operations to v4i32.
  for (MVT Vec64 : { MVT::v2i64, MVT::v2f64 }) {
    setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v4i32);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v4i32);
  }

  for (MVT Vec64 : { MVT::v3i64, MVT::v3f64 }) {
    setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v6i32);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v6i32);

    setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v6i32);

    setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v6i32);
  }

  for (MVT Vec64 : { MVT::v4i64, MVT::v4f64 }) {
    setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v8i32);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v8i32);

    setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v8i32);

    setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v8i32);
  }

  for (MVT Vec64 : { MVT::v8i64, MVT::v8f64 }) {
    setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v16i32);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v16i32);

    setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v16i32);

    setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v16i32);
  }

  for (MVT Vec64 : { MVT::v16i64, MVT::v16f64 }) {
    setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v32i32);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v32i32);

    setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v32i32);

    setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v32i32);
  }

  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand);

  setOperationAction(ISD::BUILD_VECTOR, MVT::v4f16, Custom);
  setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);

  // Avoid stack access for these.
  // TODO: Generalize to more vector types.
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f16, Custom);

  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i8, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i8, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i8, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i8, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i8, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i8, Custom);

  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f16, Custom);

  // Deal with vec3 vector operations when widened to vec4.
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v3i32, Custom);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v3f32, Custom);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4i32, Custom);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4f32, Custom);

  // Deal with vec5/6/7 vector operations when widened to vec8.
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v5i32, Custom);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v5f32, Custom);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v6i32, Custom);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v6f32, Custom);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v7i32, Custom);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v7f32, Custom);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i32, Custom);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8f32, Custom);

  // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling,
  // and output demarshalling
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);

  // We can't return success/failure, only the old value,
  // let LLVM add the comparison
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Expand);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Expand);

  if (Subtarget->hasFlatAddressSpace()) {
    setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
    setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);
  }

  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);

  // FIXME: This should be narrowed to i32, but that only happens if i64 is
  // illegal.
  // FIXME: Should lower sub-i32 bswaps to bit-ops without v_perm_b32.
  setOperationAction(ISD::BSWAP, MVT::i64, Legal);
  setOperationAction(ISD::BSWAP, MVT::i32, Legal);

  // On SI this is s_memtime; on VI it is s_memrealtime.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
  setOperationAction(ISD::TRAP, MVT::Other, Custom);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Custom);

  if (Subtarget->has16BitInsts()) {
    setOperationAction(ISD::FPOW, MVT::f16, Promote);
    setOperationAction(ISD::FPOWI, MVT::f16, Promote);
    setOperationAction(ISD::FLOG, MVT::f16, Custom);
    setOperationAction(ISD::FEXP, MVT::f16, Custom);
    setOperationAction(ISD::FLOG10, MVT::f16, Custom);
  }

  if (Subtarget->hasMadMacF32Insts())
    setOperationAction(ISD::FMAD, MVT::f32, Legal);

  if (!Subtarget->hasBFI()) {
    // fcopysign can be done in a single instruction with BFI.
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  }

  if (!Subtarget->hasBCNT(32))
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);

  if (!Subtarget->hasBCNT(64))
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);

  if (Subtarget->hasFFBH()) {
    setOperationAction(ISD::CTLZ, MVT::i32, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
  }

  if (Subtarget->hasFFBL()) {
    setOperationAction(ISD::CTTZ, MVT::i32, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom);
  }

  // We only really have 32-bit BFE instructions (and 16-bit on VI).
  //
  // On SI+ there are 64-bit BFEs, but they are scalar only and there isn't any
  // effort to match them now. We want this to be false for i64 cases when the
  // extraction isn't restricted to the upper or lower half. Ideally we would
  // have some pass reduce 64-bit extracts to 32-bit if possible. Extracts that
  // span the midpoint are probably relatively rare, so don't worry about them
  // for now.
  if (Subtarget->hasBFE())
    setHasExtractBitsInsn(true);

  // Clamp modifier on add/sub
  if (Subtarget->hasIntClamp()) {
    setOperationAction(ISD::UADDSAT, MVT::i32, Legal);
    setOperationAction(ISD::USUBSAT, MVT::i32, Legal);
  }

  if (Subtarget->hasAddNoCarry()) {
    setOperationAction(ISD::SADDSAT, MVT::i16, Legal);
    setOperationAction(ISD::SSUBSAT, MVT::i16, Legal);
    setOperationAction(ISD::SADDSAT, MVT::i32, Legal);
    setOperationAction(ISD::SSUBSAT, MVT::i32, Legal);
  }

  setOperationAction(ISD::FMINNUM, MVT::f32, Custom);
  setOperationAction(ISD::FMAXNUM, MVT::f32, Custom);
  setOperationAction(ISD::FMINNUM, MVT::f64, Custom);
  setOperationAction(ISD::FMAXNUM, MVT::f64, Custom);

  // These are really only legal for ieee_mode functions. We should be avoiding
  // them for functions that don't have ieee_mode enabled, so just say they are
  // legal.
  setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal);
  setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal);
  setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal);
  setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal);

  if (Subtarget->haveRoundOpsF64()) {
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FRINT, MVT::f64, Legal);
  } else {
    setOperationAction(ISD::FCEIL, MVT::f64, Custom);
    setOperationAction(ISD::FTRUNC, MVT::f64, Custom);
    setOperationAction(ISD::FRINT, MVT::f64, Custom);
    setOperationAction(ISD::FFLOOR, MVT::f64, Custom);
  }

  setOperationAction(ISD::FFLOOR, MVT::f64, Legal);

  setOperationAction(ISD::FSIN, MVT::f32, Custom);
  setOperationAction(ISD::FCOS, MVT::f32, Custom);
  setOperationAction(ISD::FDIV, MVT::f32, Custom);
  setOperationAction(ISD::FDIV, MVT::f64, Custom);

  if (Subtarget->has16BitInsts()) {
    setOperationAction(ISD::Constant, MVT::i16, Legal);

    setOperationAction(ISD::SMIN, MVT::i16, Legal);
    setOperationAction(ISD::SMAX, MVT::i16, Legal);

    setOperationAction(ISD::UMIN, MVT::i16, Legal);
    setOperationAction(ISD::UMAX, MVT::i16, Legal);

    setOperationAction(ISD::SIGN_EXTEND, MVT::i16, Promote);
    AddPromotedToType(ISD::SIGN_EXTEND, MVT::i16, MVT::i32);

    setOperationAction(ISD::ROTR, MVT::i16, Expand);
    setOperationAction(ISD::ROTL, MVT::i16, Expand);

    setOperationAction(ISD::SDIV, MVT::i16, Promote);
    setOperationAction(ISD::UDIV, MVT::i16, Promote);
    setOperationAction(ISD::SREM, MVT::i16, Promote);
    setOperationAction(ISD::UREM, MVT::i16, Promote);
    setOperationAction(ISD::UADDSAT, MVT::i16, Legal);
    setOperationAction(ISD::USUBSAT, MVT::i16, Legal);

    setOperationAction(ISD::BITREVERSE, MVT::i16, Promote);

    setOperationAction(ISD::CTTZ, MVT::i16, Promote);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Promote);
    setOperationAction(ISD::CTLZ, MVT::i16, Promote);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Promote);
    setOperationAction(ISD::CTPOP, MVT::i16, Promote);

    setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);

    setOperationAction(ISD::BR_CC, MVT::i16, Expand);

    setOperationAction(ISD::LOAD, MVT::i16, Custom);

    setTruncStoreAction(MVT::i64, MVT::i16, Expand);

    setOperationAction(ISD::FP16_TO_FP, MVT::i16, Promote);
    AddPromotedToType(ISD::FP16_TO_FP, MVT::i16, MVT::i32);
    setOperationAction(ISD::FP_TO_FP16, MVT::i16, Promote);
    AddPromotedToType(ISD::FP_TO_FP16, MVT::i16, MVT::i32);

    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i16, Custom);

    // F16 - Constant Actions.
    setOperationAction(ISD::ConstantFP, MVT::f16, Legal);

    // F16 - Load/Store Actions.
    setOperationAction(ISD::LOAD, MVT::f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::f16, MVT::i16);
    setOperationAction(ISD::STORE, MVT::f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::f16, MVT::i16);

    // F16 - VOP1 Actions.
    setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
    setOperationAction(ISD::FCOS, MVT::f16, Custom);
    setOperationAction(ISD::FSIN, MVT::f16, Custom);

    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i16, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::f16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::f16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::f16, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::f16, Promote);
    setOperationAction(ISD::FROUND, MVT::f16, Custom);

    // F16 - VOP2 Actions.
    setOperationAction(ISD::BR_CC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);

    setOperationAction(ISD::FDIV, MVT::f16, Custom);

    // F16 - VOP3 Actions.
    setOperationAction(ISD::FMA, MVT::f16, Legal);
    if (STI.hasMadF16())
      setOperationAction(ISD::FMAD, MVT::f16, Legal);

    for (MVT VT : {MVT::v2i16, MVT::v2f16, MVT::v4i16, MVT::v4f16}) {
      for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
        switch (Op) {
        case ISD::LOAD:
        case ISD::STORE:
        case ISD::BUILD_VECTOR:
        case ISD::BITCAST:
        case ISD::EXTRACT_VECTOR_ELT:
        case ISD::INSERT_VECTOR_ELT:
        case ISD::INSERT_SUBVECTOR:
        case ISD::EXTRACT_SUBVECTOR:
        case ISD::SCALAR_TO_VECTOR:
          break;
        case ISD::CONCAT_VECTORS:
          setOperationAction(Op, VT, Custom);
          break;
        default:
          setOperationAction(Op, VT, Expand);
          break;
        }
      }
    }

    // v_perm_b32 can handle either of these.
    setOperationAction(ISD::BSWAP, MVT::i16, Legal);
    setOperationAction(ISD::BSWAP, MVT::v2i16, Legal);
    setOperationAction(ISD::BSWAP, MVT::v4i16, Custom);

    // XXX - Do these do anything? Vector constants turn into build_vector.
    setOperationAction(ISD::Constant, MVT::v2i16, Legal);
    setOperationAction(ISD::ConstantFP, MVT::v2f16, Legal);

    setOperationAction(ISD::UNDEF, MVT::v2i16, Legal);
    setOperationAction(ISD::UNDEF, MVT::v2f16, Legal);

    setOperationAction(ISD::STORE, MVT::v2i16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::STORE, MVT::v2f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v2f16, MVT::i32);

    setOperationAction(ISD::LOAD, MVT::v2i16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::LOAD, MVT::v2f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v2f16, MVT::i32);

    setOperationAction(ISD::AND, MVT::v2i16, Promote);
    AddPromotedToType(ISD::AND, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::OR, MVT::v2i16, Promote);
    AddPromotedToType(ISD::OR, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::XOR, MVT::v2i16, Promote);
    AddPromotedToType(ISD::XOR, MVT::v2i16, MVT::i32);

    setOperationAction(ISD::LOAD, MVT::v4i16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v4i16, MVT::v2i32);
    setOperationAction(ISD::LOAD, MVT::v4f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v4f16, MVT::v2i32);

    setOperationAction(ISD::STORE, MVT::v4i16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v4i16, MVT::v2i32);
    setOperationAction(ISD::STORE, MVT::v4f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v4f16, MVT::v2i32);

    setOperationAction(ISD::ANY_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Expand);

    setOperationAction(ISD::ANY_EXTEND, MVT::v4i32, Expand);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v4i32, Expand);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v4i32, Expand);

    if (!Subtarget->hasVOP3PInsts()) {
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2i16, Custom);
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2f16, Custom);
    }

    setOperationAction(ISD::FNEG, MVT::v2f16, Legal);
    // This isn't really legal, but this avoids the legalizer unrolling it (and
    // allows matching fneg (fabs x) patterns)
    setOperationAction(ISD::FABS, MVT::v2f16, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::f16, Custom);
    setOperationAction(ISD::FMINNUM, MVT::f16, Custom);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f16, Legal);
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f16, Legal);

    setOperationAction(ISD::FMINNUM_IEEE, MVT::v4f16, Custom);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::v4f16, Custom);

    setOperationAction(ISD::FMINNUM, MVT::v4f16, Expand);
    setOperationAction(ISD::FMAXNUM, MVT::v4f16, Expand);
  }

  if (Subtarget->hasVOP3PInsts()) {
    setOperationAction(ISD::ADD, MVT::v2i16, Legal);
    setOperationAction(ISD::SUB, MVT::v2i16, Legal);
    setOperationAction(ISD::MUL, MVT::v2i16, Legal);
    setOperationAction(ISD::SHL, MVT::v2i16, Legal);
    setOperationAction(ISD::SRL, MVT::v2i16, Legal);
    setOperationAction(ISD::SRA, MVT::v2i16, Legal);
    setOperationAction(ISD::SMIN, MVT::v2i16, Legal);
    setOperationAction(ISD::UMIN, MVT::v2i16, Legal);
    setOperationAction(ISD::SMAX, MVT::v2i16, Legal);
    setOperationAction(ISD::UMAX, MVT::v2i16, Legal);

    setOperationAction(ISD::UADDSAT, MVT::v2i16, Legal);
    setOperationAction(ISD::USUBSAT, MVT::v2i16, Legal);
    setOperationAction(ISD::SADDSAT, MVT::v2i16, Legal);
    setOperationAction(ISD::SSUBSAT, MVT::v2i16, Legal);

    setOperationAction(ISD::FADD, MVT::v2f16, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f16, Legal);
    setOperationAction(ISD::FMA, MVT::v2f16, Legal);

    setOperationAction(ISD::FMINNUM_IEEE, MVT::v2f16, Legal);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::v2f16, Legal);

    setOperationAction(ISD::FCANONICALIZE, MVT::v2f16, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);

    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f16, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);

    setOperationAction(ISD::SHL, MVT::v4i16, Custom);
    setOperationAction(ISD::SRA, MVT::v4i16, Custom);
    setOperationAction(ISD::SRL, MVT::v4i16, Custom);
    setOperationAction(ISD::ADD, MVT::v4i16, Custom);
    setOperationAction(ISD::SUB, MVT::v4i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i16, Custom);

    setOperationAction(ISD::SMIN, MVT::v4i16, Custom);
    setOperationAction(ISD::SMAX, MVT::v4i16, Custom);
    setOperationAction(ISD::UMIN, MVT::v4i16, Custom);
    setOperationAction(ISD::UMAX, MVT::v4i16, Custom);

    setOperationAction(ISD::UADDSAT, MVT::v4i16, Custom);
    setOperationAction(ISD::SADDSAT, MVT::v4i16, Custom);
    setOperationAction(ISD::USUBSAT, MVT::v4i16, Custom);
    setOperationAction(ISD::SSUBSAT, MVT::v4i16, Custom);

    setOperationAction(ISD::FADD, MVT::v4f16, Custom);
    setOperationAction(ISD::FMUL, MVT::v4f16, Custom);
    setOperationAction(ISD::FMA, MVT::v4f16, Custom);

    setOperationAction(ISD::FMAXNUM, MVT::v2f16, Custom);
    setOperationAction(ISD::FMINNUM, MVT::v2f16, Custom);

    setOperationAction(ISD::FMINNUM, MVT::v4f16, Custom);
    setOperationAction(ISD::FMAXNUM, MVT::v4f16, Custom);
    setOperationAction(ISD::FCANONICALIZE, MVT::v4f16, Custom);

    setOperationAction(ISD::FEXP, MVT::v2f16, Custom);
    setOperationAction(ISD::SELECT, MVT::v4i16, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f16, Custom);

    if (Subtarget->hasPackedFP32Ops()) {
      setOperationAction(ISD::FADD, MVT::v2f32, Legal);
      setOperationAction(ISD::FMUL, MVT::v2f32, Legal);
      setOperationAction(ISD::FMA, MVT::v2f32, Legal);
      setOperationAction(ISD::FNEG, MVT::v2f32, Legal);

      for (MVT VT : { MVT::v4f32, MVT::v8f32, MVT::v16f32, MVT::v32f32 }) {
        setOperationAction(ISD::FADD, VT, Custom);
        setOperationAction(ISD::FMUL, VT, Custom);
        setOperationAction(ISD::FMA, VT, Custom);
      }
    }
  }

  setOperationAction(ISD::FNEG, MVT::v4f16, Custom);
  setOperationAction(ISD::FABS, MVT::v4f16, Custom);

  if (Subtarget->has16BitInsts()) {
    setOperationAction(ISD::SELECT, MVT::v2i16, Promote);
    AddPromotedToType(ISD::SELECT, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::SELECT, MVT::v2f16, Promote);
    AddPromotedToType(ISD::SELECT, MVT::v2f16, MVT::i32);
  } else {
    // Legalization hack.
    setOperationAction(ISD::SELECT, MVT::v2i16, Custom);
    setOperationAction(ISD::SELECT, MVT::v2f16, Custom);

    setOperationAction(ISD::FNEG, MVT::v2f16, Custom);
    setOperationAction(ISD::FABS, MVT::v2f16, Custom);
  }

  for (MVT VT : { MVT::v4i16, MVT::v4f16, MVT::v2i8, MVT::v4i8, MVT::v8i8 }) {
    setOperationAction(ISD::SELECT, VT, Custom);
  }

  setOperationAction(ISD::SMULO, MVT::i64, Custom);
  setOperationAction(ISD::UMULO, MVT::i64, Custom);

  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f16, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2i16, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2f16, Custom);

  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v2f16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v2i16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v3f16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v3i16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v4f16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v4i16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v8f16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::f16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v2i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v2f16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v3i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v3f16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v4f16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v4i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::f16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);

  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::ADDCARRY);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::SUBCARRY);
  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::FSUB);
  setTargetDAGCombine(ISD::FMINNUM);
  setTargetDAGCombine(ISD::FMAXNUM);
  setTargetDAGCombine(ISD::FMINNUM_IEEE);
  setTargetDAGCombine(ISD::FMAXNUM_IEEE);
  setTargetDAGCombine(ISD::FMA);
  setTargetDAGCombine(ISD::SMIN);
  setTargetDAGCombine(ISD::SMAX);
  setTargetDAGCombine(ISD::UMIN);
  setTargetDAGCombine(ISD::UMAX);
  setTargetDAGCombine(ISD::SETCC);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::XOR);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::FCANONICALIZE);
  setTargetDAGCombine(ISD::SCALAR_TO_VECTOR);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
  setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);

  // All memory operations. Some folding on the pointer operand is done to help
  // matching the constant offsets in the addressing modes.
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ATOMIC_LOAD);
  setTargetDAGCombine(ISD::ATOMIC_STORE);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
  setTargetDAGCombine(ISD::ATOMIC_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_ADD);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_SUB);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_AND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_OR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_XOR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_NAND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_FADD);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);

  // FIXME: In other contexts we pretend this is a per-function property.
  setStackPointerRegisterToSaveRestore(AMDGPU::SGPR32);

  setSchedulingPreference(Sched::RegPressure);
}

const GCNSubtarget *SITargetLowering::getSubtarget() const {
  return Subtarget;
}

//===----------------------------------------------------------------------===//
// TargetLowering queries
//===----------------------------------------------------------------------===//

// v_mad_mix* support a conversion from f16 to f32.
//
// There is only one special case when denormals are enabled where this is
// still OK to use, but we don't currently handle it.
bool SITargetLowering::isFPExtFoldable(const SelectionDAG &DAG, unsigned Opcode,
                                       EVT DestVT, EVT SrcVT) const {
  return ((Opcode == ISD::FMAD && Subtarget->hasMadMixInsts()) ||
          (Opcode == ISD::FMA && Subtarget->hasFmaMixInsts())) &&
         DestVT.getScalarType() == MVT::f32 &&
         SrcVT.getScalarType() == MVT::f16 &&
         // TODO: This probably only requires no input flushing?
         !hasFP32Denormals(DAG.getMachineFunction());
}

bool SITargetLowering::isShuffleMaskLegal(ArrayRef<int>, EVT) const {
  // SI has some legal vector types, but no legal vector operations. Say no
  // shuffles are legal in order to prefer scalarizing some vector operations.
  return false;
}

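// For calling conventions other than AMDGPU_KERNEL, vectors are passed in
// 32-bit registers (or as packed pairs of 16-bit elements when 16-bit
// instructions are available); AMDGPU_KERNEL falls back to the generic
// TargetLowering behavior.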
MVT SITargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
                                                    CallingConv::ID CC,
                                                    EVT VT) const {
  if (CC == CallingConv::AMDGPU_KERNEL)
    return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);

  if (VT.isVector()) {
    EVT ScalarVT = VT.getScalarType();
    unsigned Size = ScalarVT.getSizeInBits();
    if (Size == 16) {
      if (Subtarget->has16BitInsts())
        return VT.isInteger() ? MVT::v2i16 : MVT::v2f16;
      return VT.isInteger() ? MVT::i32 : MVT::f32;
    }

    if (Size < 16)
      return Subtarget->has16BitInsts() ? MVT::i16 : MVT::i32;
    return Size == 32 ? ScalarVT.getSimpleVT() : MVT::i32;
  }

  if (VT.getSizeInBits() > 32)
    return MVT::i32;

  return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
}

unsigned SITargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
                                                         CallingConv::ID CC,
                                                         EVT VT) const {
  if (CC == CallingConv::AMDGPU_KERNEL)
    return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);

  if (VT.isVector()) {
    unsigned NumElts = VT.getVectorNumElements();
    EVT ScalarVT = VT.getScalarType();
    unsigned Size = ScalarVT.getSizeInBits();

    // FIXME: Should probably promote 8-bit vectors to i16.
    if (Size == 16 && Subtarget->has16BitInsts())
      return (NumElts + 1) / 2;

    if (Size <= 32)
      return NumElts;

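    // Wide elements are split into 32-bit pieces, e.g. one v3i64 argument
    // occupies 3 * 2 = 6 registers.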
    if (Size > 32)
      return NumElts * ((Size + 31) / 32);
  } else if (VT.getSizeInBits() > 32)
    return (VT.getSizeInBits() + 31) / 32;

  return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
}

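// Breakdown used when splitting vector arguments for non-kernel calling
// conventions; it mirrors the register counts computed by
// getNumRegistersForCallingConv above.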
unsigned SITargetLowering::getVectorTypeBreakdownForCallingConv(
    LLVMContext &Context, CallingConv::ID CC,
    EVT VT, EVT &IntermediateVT,
    unsigned &NumIntermediates, MVT &RegisterVT) const {
  if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) {
    unsigned NumElts = VT.getVectorNumElements();
    EVT ScalarVT = VT.getScalarType();
    unsigned Size = ScalarVT.getSizeInBits();
    // FIXME: We should fix the ABI to be the same on targets without 16-bit
    // support, but unless we can properly handle 3-vectors, it will still be
    // inconsistent.
    if (Size == 16 && Subtarget->has16BitInsts()) {
      RegisterVT = VT.isInteger() ? MVT::v2i16 : MVT::v2f16;
      IntermediateVT = RegisterVT;
      NumIntermediates = (NumElts + 1) / 2;
      return NumIntermediates;
    }

    if (Size == 32) {
      RegisterVT = ScalarVT.getSimpleVT();
      IntermediateVT = RegisterVT;
      NumIntermediates = NumElts;
      return NumIntermediates;
    }

    if (Size < 16 && Subtarget->has16BitInsts()) {
      // FIXME: Should probably form v2i16 pieces
      RegisterVT = MVT::i16;
      IntermediateVT = ScalarVT;
      NumIntermediates = NumElts;
      return NumIntermediates;
    }

    if (Size != 16 && Size <= 32) {
      RegisterVT = MVT::i32;
      IntermediateVT = ScalarVT;
      NumIntermediates = NumElts;
      return NumIntermediates;
    }

    if (Size > 32) {
      RegisterVT = MVT::i32;
      IntermediateVT = RegisterVT;
      NumIntermediates = NumElts * ((Size + 31) / 32);
      return NumIntermediates;
    }
  }

  return TargetLowering::getVectorTypeBreakdownForCallingConv(
      Context, CC, VT, IntermediateVT, NumIntermediates, RegisterVT);
}

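// Map the IR result type of an image intrinsic to the type actually read from
// memory: at most DMaskLanes elements of the declared vector type.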
static EVT memVTFromImageData(Type *Ty, unsigned DMaskLanes) {
  assert(DMaskLanes != 0);

  if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
    unsigned NumElts = std::min(DMaskLanes, VT->getNumElements());
    return EVT::getVectorVT(Ty->getContext(),
                            EVT::getEVT(VT->getElementType()),
                            NumElts);
  }

  return EVT::getEVT(Ty);
}

// Peek through TFE struct returns to only use the data size.
static EVT memVTFromImageReturn(Type *Ty, unsigned DMaskLanes) {
  auto *ST = dyn_cast<StructType>(Ty);
  if (!ST)
    return memVTFromImageData(Ty, DMaskLanes);

  // Some intrinsics return an aggregate type - special case to work out the
  // correct memVT.
  //
  // Only limited forms of aggregate type are currently expected.
  if (ST->getNumContainedTypes() != 2 ||
      !ST->getContainedType(1)->isIntegerTy(32))
    return EVT();
  return memVTFromImageData(ST->getContainedType(0), DMaskLanes);
}

bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                          const CallInst &CI,
                                          MachineFunction &MF,
                                          unsigned IntrID) const {
  if (const AMDGPU::RsrcIntrinsic *RsrcIntr =
          AMDGPU::lookupRsrcIntrinsic(IntrID)) {
    AttributeList Attr = Intrinsic::getAttributes(CI.getContext(),
                                                  (Intrinsic::ID)IntrID);
    if (Attr.hasFnAttr(Attribute::ReadNone))
      return false;

    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

    if (RsrcIntr->IsImage) {
      Info.ptrVal =
          MFI->getImagePSV(*MF.getSubtarget<GCNSubtarget>().getInstrInfo());
      Info.align.reset();
    } else {
      Info.ptrVal =
          MFI->getBufferPSV(*MF.getSubtarget<GCNSubtarget>().getInstrInfo());
    }

    Info.flags = MachineMemOperand::MODereferenceable;
    if (Attr.hasFnAttr(Attribute::ReadOnly)) {
      unsigned DMaskLanes = 4;

      if (RsrcIntr->IsImage) {
        const AMDGPU::ImageDimIntrinsicInfo *Intr
            = AMDGPU::getImageDimIntrinsicInfo(IntrID);
        const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
            AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);

        if (!BaseOpcode->Gather4) {
          // If this isn't a gather, we may have excess loaded elements in the
          // IR type. Check the dmask for the real number of elements loaded.
1097 | unsigned DMask | ||||||
1098 | = cast<ConstantInt>(CI.getArgOperand(0))->getZExtValue(); | ||||||
1099 | DMaskLanes = DMask == 0 ? 1 : countPopulation(DMask); | ||||||
1100 | } | ||||||
1101 | |||||||
1102 | Info.memVT = memVTFromImageReturn(CI.getType(), DMaskLanes); | ||||||
1103 | } else | ||||||
1104 | Info.memVT = EVT::getEVT(CI.getType()); | ||||||
1105 | |||||||
1106 | // FIXME: What does alignment mean for an image? | ||||||
1107 | Info.opc = ISD::INTRINSIC_W_CHAIN; | ||||||
1108 | Info.flags |= MachineMemOperand::MOLoad; | ||||||
1109 | } else if (Attr.hasFnAttr(Attribute::WriteOnly)) { | ||||||
1110 | Info.opc = ISD::INTRINSIC_VOID; | ||||||
1111 | |||||||
1112 | Type *DataTy = CI.getArgOperand(0)->getType(); | ||||||
1113 | if (RsrcIntr->IsImage) { | ||||||
1114 | unsigned DMask = cast<ConstantInt>(CI.getArgOperand(1))->getZExtValue(); | ||||||
1115 | unsigned DMaskLanes = DMask == 0 ? 1 : countPopulation(DMask); | ||||||
1116 | Info.memVT = memVTFromImageData(DataTy, DMaskLanes); | ||||||
1117 | } else | ||||||
1118 | Info.memVT = EVT::getEVT(DataTy); | ||||||
1119 | |||||||
1120 | Info.flags |= MachineMemOperand::MOStore; | ||||||
1121 | } else { | ||||||
1122 | // Atomic | ||||||
1123 | Info.opc = CI.getType()->isVoidTy() ? ISD::INTRINSIC_VOID : | ||||||
1124 | ISD::INTRINSIC_W_CHAIN; | ||||||
1125 | Info.memVT = MVT::getVT(CI.getArgOperand(0)->getType()); | ||||||
1126 | Info.flags = MachineMemOperand::MOLoad | | ||||||
1127 | MachineMemOperand::MOStore | | ||||||
1128 | MachineMemOperand::MODereferenceable; | ||||||
1129 | |||||||
1130 | // XXX - Should this be volatile without known ordering? | ||||||
1131 | Info.flags |= MachineMemOperand::MOVolatile; | ||||||
1132 | } | ||||||
1133 | return true; | ||||||
1134 | } | ||||||
1135 | |||||||
1136 | switch (IntrID) { | ||||||
1137 | case Intrinsic::amdgcn_atomic_inc: | ||||||
1138 | case Intrinsic::amdgcn_atomic_dec: | ||||||
1139 | case Intrinsic::amdgcn_ds_ordered_add: | ||||||
1140 | case Intrinsic::amdgcn_ds_ordered_swap: | ||||||
1141 | case Intrinsic::amdgcn_ds_fadd: | ||||||
1142 | case Intrinsic::amdgcn_ds_fmin: | ||||||
1143 | case Intrinsic::amdgcn_ds_fmax: { | ||||||
1144 | Info.opc = ISD::INTRINSIC_W_CHAIN; | ||||||
1145 | Info.memVT = MVT::getVT(CI.getType()); | ||||||
1146 | Info.ptrVal = CI.getOperand(0); | ||||||
1147 | Info.align.reset(); | ||||||
1148 | Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore; | ||||||
1149 | |||||||
1150 | const ConstantInt *Vol = cast<ConstantInt>(CI.getOperand(4)); | ||||||
1151 | if (!Vol->isZero()) | ||||||
1152 | Info.flags |= MachineMemOperand::MOVolatile; | ||||||
1153 | |||||||
1154 | return true; | ||||||
1155 | } | ||||||
1156 | case Intrinsic::amdgcn_buffer_atomic_fadd: { | ||||||
1157 | SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); | ||||||
1158 | |||||||
1159 | Info.opc = ISD::INTRINSIC_W_CHAIN; | ||||||
1160 | Info.memVT = MVT::getVT(CI.getOperand(0)->getType()); | ||||||
1161 | Info.ptrVal = | ||||||
1162 | MFI->getBufferPSV(*MF.getSubtarget<GCNSubtarget>().getInstrInfo()); | ||||||
1163 | Info.align.reset(); | ||||||
1164 | Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore; | ||||||
1165 | |||||||
1166 | const ConstantInt *Vol = dyn_cast<ConstantInt>(CI.getOperand(4)); | ||||||
1167 | if (!Vol || !Vol->isZero()) | ||||||
1168 | Info.flags |= MachineMemOperand::MOVolatile; | ||||||
1169 | |||||||
1170 | return true; | ||||||
1171 | } | ||||||
1172 | case Intrinsic::amdgcn_ds_append: | ||||||
1173 | case Intrinsic::amdgcn_ds_consume: { | ||||||
1174 | Info.opc = ISD::INTRINSIC_W_CHAIN; | ||||||
1175 | Info.memVT = MVT::getVT(CI.getType()); | ||||||
1176 | Info.ptrVal = CI.getOperand(0); | ||||||
1177 | Info.align.reset(); | ||||||
1178 | Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore; | ||||||
1179 | |||||||
1180 | const ConstantInt *Vol = cast<ConstantInt>(CI.getOperand(1)); | ||||||
1181 | if (!Vol->isZero()) | ||||||
1182 | Info.flags |= MachineMemOperand::MOVolatile; | ||||||
1183 | |||||||
1184 | return true; | ||||||
1185 | } | ||||||
1186 | case Intrinsic::amdgcn_global_atomic_csub: { | ||||||
1187 | Info.opc = ISD::INTRINSIC_W_CHAIN; | ||||||
1188 | Info.memVT = MVT::getVT(CI.getType()); | ||||||
1189 | Info.ptrVal = CI.getOperand(0); | ||||||
1190 | Info.align.reset(); | ||||||
1191 | Info.flags = MachineMemOperand::MOLoad | | ||||||
1192 | MachineMemOperand::MOStore | | ||||||
1193 | MachineMemOperand::MOVolatile; | ||||||
1194 | return true; | ||||||
1195 | } | ||||||
1196 | case Intrinsic::amdgcn_image_bvh_intersect_ray: { | ||||||
1197 | SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); | ||||||
1198 | Info.opc = ISD::INTRINSIC_W_CHAIN; | ||||||
1199 | Info.memVT = MVT::getVT(CI.getType()); // XXX: what is correct VT? | ||||||
1200 | Info.ptrVal = | ||||||
1201 | MFI->getImagePSV(*MF.getSubtarget<GCNSubtarget>().getInstrInfo()); | ||||||
1202 | Info.align.reset(); | ||||||
1203 | Info.flags = MachineMemOperand::MOLoad | | ||||||
1204 | MachineMemOperand::MODereferenceable; | ||||||
1205 | return true; | ||||||
1206 | } | ||||||
1207 | case Intrinsic::amdgcn_global_atomic_fadd: | ||||||
1208 | case Intrinsic::amdgcn_global_atomic_fmin: | ||||||
1209 | case Intrinsic::amdgcn_global_atomic_fmax: | ||||||
1210 | case Intrinsic::amdgcn_flat_atomic_fadd: | ||||||
1211 | case Intrinsic::amdgcn_flat_atomic_fmin: | ||||||
1212 | case Intrinsic::amdgcn_flat_atomic_fmax: { | ||||||
1213 | Info.opc = ISD::INTRINSIC_W_CHAIN; | ||||||
1214 | Info.memVT = MVT::getVT(CI.getType()); | ||||||
1215 | Info.ptrVal = CI.getOperand(0); | ||||||
1216 | Info.align.reset(); | ||||||
1217 | Info.flags = MachineMemOperand::MOLoad | | ||||||
1218 | MachineMemOperand::MOStore | | ||||||
1219 | MachineMemOperand::MODereferenceable | | ||||||
1220 | MachineMemOperand::MOVolatile; | ||||||
1221 | return true; | ||||||
1222 | } | ||||||
1223 | case Intrinsic::amdgcn_ds_gws_init: | ||||||
1224 | case Intrinsic::amdgcn_ds_gws_barrier: | ||||||
1225 | case Intrinsic::amdgcn_ds_gws_sema_v: | ||||||
1226 | case Intrinsic::amdgcn_ds_gws_sema_br: | ||||||
1227 | case Intrinsic::amdgcn_ds_gws_sema_p: | ||||||
1228 | case Intrinsic::amdgcn_ds_gws_sema_release_all: { | ||||||
1229 | Info.opc = ISD::INTRINSIC_VOID; | ||||||
1230 | |||||||
1231 | SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); | ||||||
1232 | Info.ptrVal = | ||||||
1233 | MFI->getGWSPSV(*MF.getSubtarget<GCNSubtarget>().getInstrInfo()); | ||||||
1234 | |||||||
1235 | // This is an abstract access, but we need to specify a type and size. | ||||||
1236 | Info.memVT = MVT::i32; | ||||||
1237 | Info.size = 4; | ||||||
1238 | Info.align = Align(4); | ||||||
1239 | |||||||
1240 | Info.flags = MachineMemOperand::MOStore; | ||||||
1241 | if (IntrID == Intrinsic::amdgcn_ds_gws_barrier) | ||||||
1242 | Info.flags = MachineMemOperand::MOLoad; | ||||||
1243 | return true; | ||||||
1244 | } | ||||||
1245 | default: | ||||||
1246 | return false; | ||||||
1247 | } | ||||||
1248 | } | ||||||
1249 | |||||||
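// Report the pointer operand and access type of a memory intrinsic so that
// passes like CodeGenPrepare can fold address arithmetic into the access.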
bool SITargetLowering::getAddrModeArguments(IntrinsicInst *II,
                                            SmallVectorImpl<Value*> &Ops,
                                            Type *&AccessTy) const {
  switch (II->getIntrinsicID()) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_ordered_add:
  case Intrinsic::amdgcn_ds_ordered_swap:
  case Intrinsic::amdgcn_ds_append:
  case Intrinsic::amdgcn_ds_consume:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax:
  case Intrinsic::amdgcn_global_atomic_fadd:
  case Intrinsic::amdgcn_flat_atomic_fadd:
  case Intrinsic::amdgcn_flat_atomic_fmin:
  case Intrinsic::amdgcn_flat_atomic_fmax:
  case Intrinsic::amdgcn_global_atomic_csub: {
    Value *Ptr = II->getArgOperand(0);
    AccessTy = II->getType();
    Ops.push_back(Ptr);
    return true;
  }
  default:
    return false;
  }
}

bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const {
  if (!Subtarget->hasFlatInstOffsets()) {
    // Flat instructions do not have offsets, and only have the register
    // address.
    return AM.BaseOffs == 0 && AM.Scale == 0;
  }

  return AM.Scale == 0 &&
         (AM.BaseOffs == 0 ||
          Subtarget->getInstrInfo()->isLegalFLATOffset(
              AM.BaseOffs, AMDGPUAS::FLAT_ADDRESS, SIInstrFlags::FLAT));
}

bool SITargetLowering::isLegalGlobalAddressingMode(const AddrMode &AM) const {
  if (Subtarget->hasFlatGlobalInsts())
    return AM.Scale == 0 &&
           (AM.BaseOffs == 0 || Subtarget->getInstrInfo()->isLegalFLATOffset(
                                    AM.BaseOffs, AMDGPUAS::GLOBAL_ADDRESS,
                                    SIInstrFlags::FlatGlobal));

  if (!Subtarget->hasAddr64() || Subtarget->useFlatForGlobal()) {
    // Assume that we will use FLAT for all global memory accesses
    // on VI.
    // FIXME: This assumption is currently wrong. On VI we still use
    // MUBUF instructions for the r + i addressing mode. As currently
    // implemented, the MUBUF instructions only work on buffers < 4GB.
    // It may be possible to support > 4GB buffers with MUBUF instructions,
    // by setting the stride value in the resource descriptor which would
    // increase the size limit to (stride * 4GB). However, this is risky,
    // because it has never been validated.
    return isLegalFlatAddressingMode(AM);
  }

  return isLegalMUBUFAddressingMode(AM);
}

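// A rough summary of the scale handling below, assuming the immediate offset
// already fits the MUBUF encoding:
//   r + i   (Scale == 0)              -> legal
//   r + r   (Scale == 1)              -> legal
//   2*r     (Scale == 2, no base reg) -> legal, as r + r (+ i)
//   2*r + r (Scale == 2 with base)    -> rejected
//   n*r     (n > 2)                   -> rejected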
bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const {
  // MUBUF / MTBUF instructions have a 12-bit unsigned byte offset, and
  // additionally can do r + r + i with addr64. 32-bit has more addressing
  // mode options. Depending on the resource constant, it can also do
  // (i64 r0) + (i32 r1) * (i14 i).
  //
  // Private arrays end up using a scratch buffer most of the time, so also
  // assume those use MUBUF instructions. Scratch loads / stores are currently
  // implemented as mubuf instructions with offen bit set, so slightly
  // different than the normal addr64.
  if (!SIInstrInfo::isLegalMUBUFImmOffset(AM.BaseOffs))
    return false;

  // FIXME: Since we can split immediate into soffset and immediate offset,
  // would it make sense to allow any immediate?

  switch (AM.Scale) {
  case 0: // r + i or just i, depending on HasBaseReg.
    return true;
  case 1:
    return true; // We have r + r or r + i.
  case 2:
    if (AM.HasBaseReg) {
      // Reject 2 * r + r.
      return false;
    }

    // Allow 2 * r as r + r
    // Or  2 * r + i is allowed as r + r + i.
    return true;
  default: // Don't allow n * r
    return false;
  }
}

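// Top-level addressing mode legality: dispatch on the address space and defer
// to the flat / global / MUBUF helpers above; SMRD immediate offset limits
// for the constant address space are checked inline per generation.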
bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                             const AddrMode &AM, Type *Ty,
                                             unsigned AS, Instruction *I) const {
  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  if (AS == AMDGPUAS::GLOBAL_ADDRESS)
    return isLegalGlobalAddressingMode(AM);

  if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
      AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
      AS == AMDGPUAS::BUFFER_FAT_POINTER) {
    // If the offset isn't a multiple of 4, it probably isn't going to be
    // correctly aligned.
    // FIXME: Can we get the real alignment here?
    if (AM.BaseOffs % 4 != 0)
      return isLegalMUBUFAddressingMode(AM);

    // There are no SMRD extloads, so if we have to do a small type access we
    // will use a MUBUF load.
    // FIXME?: We also need to do this if unaligned, but we don't know the
    // alignment here.
    if (Ty->isSized() && DL.getTypeStoreSize(Ty) < 4)
      return isLegalGlobalAddressingMode(AM);

    if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS) {
      // SMRD instructions have an 8-bit, dword offset on SI.
      if (!isUInt<8>(AM.BaseOffs / 4))
        return false;
    } else if (Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS) {
      // On CI+, this can also be a 32-bit literal constant offset. If it fits
      // in 8-bits, it can use a smaller encoding.
      if (!isUInt<32>(AM.BaseOffs / 4))
        return false;
    } else if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
      // On VI, these use the SMEM format and the offset is 20-bit in bytes.
      if (!isUInt<20>(AM.BaseOffs))
        return false;
    } else
      llvm_unreachable("unhandled generation");

    if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
      return true;

    if (AM.Scale == 1 && AM.HasBaseReg)
      return true;

    return false;

  } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
    return isLegalMUBUFAddressingMode(AM);
  } else if (AS == AMDGPUAS::LOCAL_ADDRESS ||
             AS == AMDGPUAS::REGION_ADDRESS) {
    // Basic, single offset DS instructions allow a 16-bit unsigned immediate
    // field.
    // XXX - If doing a 4-byte aligned 8-byte type access, we effectively have
    // an 8-bit dword offset but we don't know the alignment here.
    if (!isUInt<16>(AM.BaseOffs))
      return false;

    if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
      return true;

    if (AM.Scale == 1 && AM.HasBaseReg)
      return true;

    return false;
  } else if (AS == AMDGPUAS::FLAT_ADDRESS ||
             AS == AMDGPUAS::UNKNOWN_ADDRESS_SPACE) {
    // For an unknown address space, this usually means that this is for some
    // reason being used for pure arithmetic, and not based on some addressing
    // computation. We don't have instructions that compute pointers with any
    // addressing modes, so treat them as having no offset like flat
    // instructions.
    return isLegalFlatAddressingMode(AM);
  }

  // Assume a user alias of global for unknown address spaces.
  return isLegalGlobalAddressingMode(AM);
}

bool SITargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT,
                                        const MachineFunction &MF) const {
  if (AS == AMDGPUAS::GLOBAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS) {
    return (MemVT.getSizeInBits() <= 4 * 32);
  } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
    unsigned MaxPrivateBits = 8 * getSubtarget()->getMaxPrivateElementSize();
    return (MemVT.getSizeInBits() <= MaxPrivateBits);
  } else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) {
    return (MemVT.getSizeInBits() <= 2 * 32);
  }
  return true;
}

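// Note the two results below are independent: the return value says whether a
// misaligned access of this size is legal at all, while *IsFast additionally
// reports whether it is expected to be fast. For example, with unaligned
// buffer access enabled, a 2-byte-aligned access is allowed but reported as
// slow.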
bool SITargetLowering::allowsMisalignedMemoryAccessesImpl(
    unsigned Size, unsigned AddrSpace, Align Alignment,
    MachineMemOperand::Flags Flags, bool *IsFast) const {
  if (IsFast)
    *IsFast = false;

  if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
      AddrSpace == AMDGPUAS::REGION_ADDRESS) {
    // Check if alignment requirements for ds_read/write instructions are
    // disabled.
    if (Subtarget->hasUnalignedDSAccessEnabled() &&
        !Subtarget->hasLDSMisalignedBug()) {
      if (IsFast)
        *IsFast = Alignment != Align(2);
      return true;
    }

    // Either the alignment requirements are "enabled", or there is an
    // unaligned LDS access related hardware bug even though the alignment
    // requirements are "disabled". In either case, we need to check for
    // proper alignment.
    if (Size == 64) {
      // 8-byte accesses via ds_read/write_b64 require 8-byte alignment, but
      // we can do a 4-byte aligned, 8-byte access in a single operation using
      // ds_read2/write2_b32 with adjacent offsets.
      bool AlignedBy4 = Alignment >= Align(4);
      if (IsFast)
        *IsFast = AlignedBy4;

      return AlignedBy4;
    }
    if (Size == 96) {
      // 12-byte accesses via ds_read/write_b96 require 16-byte alignment on
      // gfx8 and older.
      bool AlignedBy16 = Alignment >= Align(16);
      if (IsFast)
        *IsFast = AlignedBy16;

      return AlignedBy16;
    }
    if (Size == 128) {
      // 16-byte accesses via ds_read/write_b128 require 16-byte alignment on
      // gfx8 and older, but we can do an 8-byte aligned, 16-byte access in a
      // single operation using ds_read2/write2_b64.
      bool AlignedBy8 = Alignment >= Align(8);
      if (IsFast)
        *IsFast = AlignedBy8;

      return AlignedBy8;
    }
  }

  if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS) {
    bool AlignedBy4 = Alignment >= Align(4);
    if (IsFast)
      *IsFast = AlignedBy4;

    return AlignedBy4 ||
           Subtarget->enableFlatScratch() ||
           Subtarget->hasUnalignedScratchAccess();
  }

  // FIXME: We have to be conservative here and assume that flat operations
  // will access scratch. If we had access to the IR function, then we
  // could determine if any private memory was used in the function.
  if (AddrSpace == AMDGPUAS::FLAT_ADDRESS &&
      !Subtarget->hasUnalignedScratchAccess()) {
    bool AlignedBy4 = Alignment >= Align(4);
    if (IsFast)
      *IsFast = AlignedBy4;

    return AlignedBy4;
  }

  if (Subtarget->hasUnalignedBufferAccessEnabled() &&
      !(AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
        AddrSpace == AMDGPUAS::REGION_ADDRESS)) {
    // If we have a uniform constant load, it still requires using a slow
    // buffer instruction if unaligned.
    if (IsFast) {
      // Accesses can really be issued as 1-byte aligned or 4-byte aligned, so
      // 2-byte alignment is worse than 1 unless doing a 2-byte access.
      *IsFast = (AddrSpace == AMDGPUAS::CONSTANT_ADDRESS ||
                 AddrSpace == AMDGPUAS::CONSTANT_ADDRESS_32BIT) ?
        Alignment >= Align(4) : Alignment != Align(2);
    }

    return true;
  }

  // Accesses smaller than a dword must be aligned.
  if (Size < 32)
    return false;

  // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the
  // byte-address are ignored, thus forcing Dword alignment.
  // This applies to private, global, and constant memory.
  if (IsFast)
    *IsFast = true;

  return Size >= 32 && Alignment >= Align(4);
}

bool SITargetLowering::allowsMisalignedMemoryAccesses(
    EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
    bool *IsFast) const {
  if (IsFast)
    *IsFast = false;

  // TODO: I think v3i32 should allow unaligned accesses on CI with DS_READ_B96,
  // which isn't a simple VT.
  // Until MVT is extended to handle this, simply check for the size and
  // rely on the condition below: allow accesses if the size is a multiple of 4.
  if (VT == MVT::Other || (VT != MVT::Other && VT.getSizeInBits() > 1024 &&
                           VT.getStoreSize() > 16)) {
    return false;
  }

  return allowsMisalignedMemoryAccessesImpl(VT.getSizeInBits(), AddrSpace,
                                            Alignment, Flags, IsFast);
}

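// For example, a 32-byte memcpy whose destination is known to be 4-byte
// aligned is expanded in v4i32 chunks rather than the default
// private-pointer-sized pieces.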
EVT SITargetLowering::getOptimalMemOpType(
    const MemOp &Op, const AttributeList &FuncAttributes) const {
  // FIXME: Should account for address space here.

  // The default fallback uses the private pointer size as a guess for a type
  // to use. Make sure we switch these to 64-bit accesses.

  if (Op.size() >= 16 &&
      Op.isDstAligned(Align(4))) // XXX: Should only do for global
    return MVT::v4i32;

  if (Op.size() >= 8 && Op.isDstAligned(Align(4)))
    return MVT::v2i32;

  // Use the default.
  return MVT::Other;
}

bool SITargetLowering::isMemOpHasNoClobberedMemOperand(const SDNode *N) const {
  const MemSDNode *MemNode = cast<MemSDNode>(N);
  const Value *Ptr = MemNode->getMemOperand()->getValue();
  const Instruction *I = dyn_cast_or_null<Instruction>(Ptr);
  return I && I->getMetadata("amdgpu.noclobber");
}

bool SITargetLowering::isNonGlobalAddrSpace(unsigned AS) {
  return AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS ||
         AS == AMDGPUAS::PRIVATE_ADDRESS;
}

bool SITargetLowering::isFreeAddrSpaceCast(unsigned SrcAS,
                                           unsigned DestAS) const {
  // Flat -> private/local is a simple truncate.
  // Flat -> global is no-op
  if (SrcAS == AMDGPUAS::FLAT_ADDRESS)
    return true;

  const GCNTargetMachine &TM =
      static_cast<const GCNTargetMachine &>(getTargetMachine());
  return TM.isNoopAddrSpaceCast(SrcAS, DestAS);
}

bool SITargetLowering::isMemOpUniform(const SDNode *N) const {
  const MemSDNode *MemNode = cast<MemSDNode>(N);

  return AMDGPUInstrInfo::isUniformMMO(MemNode->getMemOperand());
}

TargetLoweringBase::LegalizeTypeAction
SITargetLowering::getPreferredVectorAction(MVT VT) const {
  if (!VT.isScalableVector() && VT.getVectorNumElements() != 1 &&
      VT.getScalarType().bitsLE(MVT::i16))
    return VT.isPow2VectorType() ? TypeSplitVector : TypeWidenVector;
  return TargetLoweringBase::getPreferredVectorAction(VT);
}

bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                         Type *Ty) const {
  // FIXME: Could be smarter if called for vector constants.
  return true;
}

bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const {
  if (Subtarget->has16BitInsts() && VT == MVT::i16) {
    switch (Op) {
    case ISD::LOAD:
    case ISD::STORE:

    // These operations are done with 32-bit instructions anyway.
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR:
    case ISD::SELECT:
      // TODO: Extensions?
      return true;
    default:
      return false;
    }
  }

  // SimplifySetCC uses this function to determine whether or not it should
  // create setcc with i1 operands. We don't have instructions for i1 setcc.
  if (VT == MVT::i1 && Op == ISD::SETCC)
    return false;

  return TargetLowering::isTypeDesirableForOp(Op, VT);
}

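// Build a pointer to a kernel argument: copy the preloaded
// KERNARG_SEGMENT_PTR register and add the byte offset of the argument. If no
// kernarg segment was allocated, the pointer folds to a null constant.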
SDValue SITargetLowering::lowerKernArgParameterPtr(SelectionDAG &DAG,
                                                   const SDLoc &SL,
                                                   SDValue Chain,
                                                   uint64_t Offset) const {
  const DataLayout &DL = DAG.getDataLayout();
  MachineFunction &MF = DAG.getMachineFunction();
  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();

  const ArgDescriptor *InputPtrReg;
  const TargetRegisterClass *RC;
  LLT ArgTy;
  MVT PtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS);

  std::tie(InputPtrReg, RC, ArgTy) =
      Info->getPreloadedValue(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);

  // We may not have the kernarg segment argument if we have no kernel
  // arguments.
  if (!InputPtrReg)
    return DAG.getConstant(0, SL, PtrVT);

  MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
  SDValue BasePtr = DAG.getCopyFromReg(Chain, SL,
    MRI.getLiveInVirtReg(InputPtrReg->getRegister()), PtrVT);

  return DAG.getObjectPtrOffset(SL, BasePtr, TypeSize::Fixed(Offset));
}

SDValue SITargetLowering::getImplicitArgPtr(SelectionDAG &DAG,
                                            const SDLoc &SL) const {
  uint64_t Offset = getImplicitParameterOffset(DAG.getMachineFunction(),
                                               FIRST_IMPLICIT);
  return lowerKernArgParameterPtr(DAG, SL, DAG.getEntryNode(), Offset);
}

SDValue SITargetLowering::convertArgType(SelectionDAG &DAG, EVT VT, EVT MemVT,
                                         const SDLoc &SL, SDValue Val,
                                         bool Signed,
                                         const ISD::InputArg *Arg) const {
  // First, if it is a widened vector, narrow it.
  if (VT.isVector() &&
      VT.getVectorNumElements() != MemVT.getVectorNumElements()) {
    EVT NarrowedVT =
        EVT::getVectorVT(*DAG.getContext(), MemVT.getVectorElementType(),
                         VT.getVectorNumElements());
    Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL, NarrowedVT, Val,
                      DAG.getConstant(0, SL, MVT::i32));
  }

  // Then convert the vector elements or scalar value.
  if (Arg && (Arg->Flags.isSExt() || Arg->Flags.isZExt()) &&
      VT.bitsLT(MemVT)) {
    unsigned Opc = Arg->Flags.isZExt() ? ISD::AssertZext : ISD::AssertSext;
    Val = DAG.getNode(Opc, SL, MemVT, Val, DAG.getValueType(VT));
  }

  if (MemVT.isFloatingPoint())
    Val = getFPExtOrFPRound(DAG, Val, SL, VT);
  else if (Signed)
    Val = DAG.getSExtOrTrunc(Val, SL, VT);
  else
    Val = DAG.getZExtOrTrunc(Val, SL, VT);

  return Val;
}

SDValue SITargetLowering::lowerKernargMemParameter(
    SelectionDAG &DAG, EVT VT, EVT MemVT, const SDLoc &SL, SDValue Chain,
    uint64_t Offset, Align Alignment, bool Signed,
    const ISD::InputArg *Arg) const {
  MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);

  // Try to avoid using an extload by loading earlier than the argument
  // address, and extracting the relevant bits. The load should hopefully be
  // merged with the previous argument.
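  //
  // For example, a 2-byte argument at Offset = 6 becomes an aligned i32 load
  // at offset 4, a right shift by OffsetDiff * 8 = 16 bits, and a truncate to
  // the 16-bit integer memory type.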
  if (MemVT.getStoreSize() < 4 && Alignment < 4) {
    // TODO: Handle align < 4 and size >= 4 (can happen with packed structs).
    int64_t AlignDownOffset = alignDown(Offset, 4);
    int64_t OffsetDiff = Offset - AlignDownOffset;

    EVT IntVT = MemVT.changeTypeToInteger();

    // TODO: If we passed in the base kernel offset we could have a better
    // alignment than 4, but we don't really need it.
    SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, AlignDownOffset);
    SDValue Load = DAG.getLoad(MVT::i32, SL, Chain, Ptr, PtrInfo, Align(4),
                               MachineMemOperand::MODereferenceable |
                                   MachineMemOperand::MOInvariant);

    SDValue ShiftAmt = DAG.getConstant(OffsetDiff * 8, SL, MVT::i32);
    SDValue Extract = DAG.getNode(ISD::SRL, SL, MVT::i32, Load, ShiftAmt);

    SDValue ArgVal = DAG.getNode(ISD::TRUNCATE, SL, IntVT, Extract);
    ArgVal = DAG.getNode(ISD::BITCAST, SL, MemVT, ArgVal);
    ArgVal = convertArgType(DAG, VT, MemVT, SL, ArgVal, Signed, Arg);

    return DAG.getMergeValues({ ArgVal, Load.getValue(1) }, SL);
  }

  SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, Offset);
  SDValue Load = DAG.getLoad(MemVT, SL, Chain, Ptr, PtrInfo, Alignment,
                             MachineMemOperand::MODereferenceable |
                                 MachineMemOperand::MOInvariant);

  SDValue Val = convertArgType(DAG, VT, MemVT, SL, Load, Signed, Arg);
  return DAG.getMergeValues({ Val, Load.getValue(1) }, SL);
}

SDValue SITargetLowering::lowerStackParameter(SelectionDAG &DAG, CCValAssign &VA,
                                              const SDLoc &SL, SDValue Chain,
                                              const ISD::InputArg &Arg) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  if (Arg.Flags.isByVal()) {
    unsigned Size = Arg.Flags.getByValSize();
    int FrameIdx = MFI.CreateFixedObject(Size, VA.getLocMemOffset(), false);
    return DAG.getFrameIndex(FrameIdx, MVT::i32);
  }

  unsigned ArgOffset = VA.getLocMemOffset();
  unsigned ArgSize = VA.getValVT().getStoreSize();

  int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, true);

  // Create load nodes to retrieve arguments from the stack.
  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
  SDValue ArgValue;

  // For NON_EXTLOAD, generic code in getLoad asserts that ValVT == MemVT.
  ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
  MVT MemVT = VA.getValVT();

  switch (VA.getLocInfo()) {
  default:
    break;
  case CCValAssign::BCvt:
    MemVT = VA.getLocVT();
    break;
  case CCValAssign::SExt:
    ExtType = ISD::SEXTLOAD;
    break;
  case CCValAssign::ZExt:
    ExtType = ISD::ZEXTLOAD;
    break;
  case CCValAssign::AExt:
    ExtType = ISD::EXTLOAD;
    break;
  }

  ArgValue = DAG.getExtLoad(
      ExtType, SL, VA.getLocVT(), Chain, FIN,
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
      MemVT);
  return ArgValue;
}

SDValue SITargetLowering::getPreloadedValue(SelectionDAG &DAG,
  const SIMachineFunctionInfo &MFI,
  EVT VT,
  AMDGPUFunctionArgInfo::PreloadedValue PVID) const {
  const ArgDescriptor *Reg;
  const TargetRegisterClass *RC;
  LLT Ty;

  std::tie(Reg, RC, Ty) = MFI.getPreloadedValue(PVID);
  if (!Reg) {
    // It's possible for a kernarg intrinsic call to appear in a kernel with no
    // allocated segment, in which case we do not add the user sgpr argument, so
    // just return null.
    assert(PVID == AMDGPUFunctionArgInfo::PreloadedValue::KERNARG_SEGMENT_PTR);
    return DAG.getConstant(0, SDLoc(), VT);
  }

  return CreateLiveInRegister(DAG, RC, Reg->getRegister(), VT);
}

static void processPSInputArgs(SmallVectorImpl<ISD::InputArg> &Splits,
                               CallingConv::ID CallConv,
                               ArrayRef<ISD::InputArg> Ins, BitVector &Skipped,
                               FunctionType *FType,
                               SIMachineFunctionInfo *Info) {
  for (unsigned I = 0, E = Ins.size(), PSInputNum = 0; I != E; ++I) {
    const ISD::InputArg *Arg = &Ins[I];

    assert((!Arg->VT.isVector() || Arg->VT.getScalarSizeInBits() == 16) &&
           "vector type argument should have been split");

    // First check if it's a PS input addr.
    if (CallConv == CallingConv::AMDGPU_PS &&
        !Arg->Flags.isInReg() && PSInputNum <= 15) {
      bool SkipArg = !Arg->Used && !Info->isPSInputAllocated(PSInputNum);

      // Inconveniently only the first part of the split is marked as isSplit,
      // so skip to the end. We only want to increment PSInputNum once for the
      // entire split argument.
      if (Arg->Flags.isSplit()) {
        while (!Arg->Flags.isSplitEnd()) {
          assert((!Arg->VT.isVector() ||
                  Arg->VT.getScalarSizeInBits() == 16) &&
                 "unexpected vector split in ps argument type");
          if (!SkipArg)
            Splits.push_back(*Arg);
          Arg = &Ins[++I];
        }
      }

      if (SkipArg) {
        // We can safely skip PS inputs.
        Skipped.set(Arg->getOrigArgIndex());
        ++PSInputNum;
        continue;
      }

      Info->markPSInputAllocated(PSInputNum);
      if (Arg->Used)
        Info->markPSInputEnabled(PSInputNum);

      ++PSInputNum;
    }

    Splits.push_back(*Arg);
  }
}

// Allocate special inputs passed in VGPRs.
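// On subtargets with packed TIDs all three workitem IDs share VGPR0, with X
// in bits [9:0], Y in bits [19:10] and Z in bits [29:20]; otherwise X, Y and
// Z arrive in VGPR0, VGPR1 and VGPR2 respectively.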
void SITargetLowering::allocateSpecialEntryInputVGPRs(CCState &CCInfo,
                                                      MachineFunction &MF,
                                                      const SIRegisterInfo &TRI,
                                                      SIMachineFunctionInfo &Info) const {
  const LLT S32 = LLT::scalar(32);
  MachineRegisterInfo &MRI = MF.getRegInfo();

  if (Info.hasWorkItemIDX()) {
    Register Reg = AMDGPU::VGPR0;
    MRI.setType(MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass), S32);

    CCInfo.AllocateReg(Reg);
    unsigned Mask = (Subtarget->hasPackedTID() &&
                     Info.hasWorkItemIDY()) ? 0x3ff : ~0u;
    Info.setWorkItemIDX(ArgDescriptor::createRegister(Reg, Mask));
  }

  if (Info.hasWorkItemIDY()) {
    assert(Info.hasWorkItemIDX());
    if (Subtarget->hasPackedTID()) {
      Info.setWorkItemIDY(ArgDescriptor::createRegister(AMDGPU::VGPR0,
                                                        0x3ff << 10));
    } else {
      unsigned Reg = AMDGPU::VGPR1;
      MRI.setType(MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass), S32);

      CCInfo.AllocateReg(Reg);
      Info.setWorkItemIDY(ArgDescriptor::createRegister(Reg));
    }
  }

  if (Info.hasWorkItemIDZ()) {
    assert(Info.hasWorkItemIDX() && Info.hasWorkItemIDY());
    if (Subtarget->hasPackedTID()) {
      Info.setWorkItemIDZ(ArgDescriptor::createRegister(AMDGPU::VGPR0,
                                                        0x3ff << 20));
    } else {
      unsigned Reg = AMDGPU::VGPR2;
      MRI.setType(MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass), S32);

      CCInfo.AllocateReg(Reg);
      Info.setWorkItemIDZ(ArgDescriptor::createRegister(Reg));
    }
  }
}

// Try to allocate a VGPR at the end of the argument list, or if no argument
// VGPRs are left, allocate a stack slot instead.
// If \p Mask is given, it indicates the bitfield position in the register.
// If \p Arg is given, use it with the new \p Mask instead of allocating a new
// argument.
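// This is how allocateSpecialInputVGPRs() below packs the three workitem IDs
// into one register: it calls this with Mask = 0x3ff, 0x3ff << 10 and
// 0x3ff << 20, threading the previously returned descriptor through \p Arg so
// the later IDs reuse the same VGPR.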
static ArgDescriptor allocateVGPR32Input(CCState &CCInfo, unsigned Mask = ~0u,
                                         ArgDescriptor Arg = ArgDescriptor()) {
  if (Arg.isSet())
    return ArgDescriptor::createArg(Arg, Mask);

  ArrayRef<MCPhysReg> ArgVGPRs
    = makeArrayRef(AMDGPU::VGPR_32RegClass.begin(), 32);
  unsigned RegIdx = CCInfo.getFirstUnallocated(ArgVGPRs);
  if (RegIdx == ArgVGPRs.size()) {
    // Spill to stack required.
    int64_t Offset = CCInfo.AllocateStack(4, Align(4));

    return ArgDescriptor::createStack(Offset, Mask);
  }

  unsigned Reg = ArgVGPRs[RegIdx];
  Reg = CCInfo.AllocateReg(Reg);
  assert(Reg != AMDGPU::NoRegister);

  MachineFunction &MF = CCInfo.getMachineFunction();
  Register LiveInVReg = MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
  MF.getRegInfo().setType(LiveInVReg, LLT::scalar(32));
  return ArgDescriptor::createRegister(Reg, Mask);
}

static ArgDescriptor allocateSGPR32InputImpl(CCState &CCInfo,
                                             const TargetRegisterClass *RC,
                                             unsigned NumArgRegs) {
  ArrayRef<MCPhysReg> ArgSGPRs = makeArrayRef(RC->begin(), 32);
  unsigned RegIdx = CCInfo.getFirstUnallocated(ArgSGPRs);
  if (RegIdx == ArgSGPRs.size())
    report_fatal_error("ran out of SGPRs for arguments");

  unsigned Reg = ArgSGPRs[RegIdx];
  Reg = CCInfo.AllocateReg(Reg);
  assert(Reg != AMDGPU::NoRegister);

  MachineFunction &MF = CCInfo.getMachineFunction();
  MF.addLiveIn(Reg, RC);
  return ArgDescriptor::createRegister(Reg);
}

// If this has a fixed position, we still should allocate the register in the
// CCInfo state. Technically we could get away with this for values passed
// outside of the normal argument range.
static void allocateFixedSGPRInputImpl(CCState &CCInfo,
                                       const TargetRegisterClass *RC,
                                       MCRegister Reg) {
  Reg = CCInfo.AllocateReg(Reg);
  assert(Reg != AMDGPU::NoRegister);
  MachineFunction &MF = CCInfo.getMachineFunction();
  MF.addLiveIn(Reg, RC);
}

static void allocateSGPR32Input(CCState &CCInfo, ArgDescriptor &Arg) {
  if (Arg) {
    allocateFixedSGPRInputImpl(CCInfo, &AMDGPU::SGPR_32RegClass,
                               Arg.getRegister());
  } else
    Arg = allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_32RegClass, 32);
}

static void allocateSGPR64Input(CCState &CCInfo, ArgDescriptor &Arg) {
  if (Arg) {
    allocateFixedSGPRInputImpl(CCInfo, &AMDGPU::SGPR_64RegClass,
                               Arg.getRegister());
  } else
    Arg = allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_64RegClass, 16);
}

/// Allocate implicit function VGPR arguments at the end of allocated user
/// arguments.
void SITargetLowering::allocateSpecialInputVGPRs(
    CCState &CCInfo, MachineFunction &MF,
    const SIRegisterInfo &TRI, SIMachineFunctionInfo &Info) const {
  const unsigned Mask = 0x3ff;
  ArgDescriptor Arg;

  if (Info.hasWorkItemIDX()) {
    Arg = allocateVGPR32Input(CCInfo, Mask);
    Info.setWorkItemIDX(Arg);
  }

  if (Info.hasWorkItemIDY()) {
    Arg = allocateVGPR32Input(CCInfo, Mask << 10, Arg);
    Info.setWorkItemIDY(Arg);
  }

  if (Info.hasWorkItemIDZ())
    Info.setWorkItemIDZ(allocateVGPR32Input(CCInfo, Mask << 20, Arg));
}

/// Allocate implicit function VGPR arguments in fixed registers.
void SITargetLowering::allocateSpecialInputVGPRsFixed(
    CCState &CCInfo, MachineFunction &MF,
    const SIRegisterInfo &TRI, SIMachineFunctionInfo &Info) const {
  Register Reg = CCInfo.AllocateReg(AMDGPU::VGPR31);
  if (!Reg)
    report_fatal_error("failed to allocate VGPR for implicit arguments");

  const unsigned Mask = 0x3ff;
  Info.setWorkItemIDX(ArgDescriptor::createRegister(Reg, Mask));
  Info.setWorkItemIDY(ArgDescriptor::createRegister(Reg, Mask << 10));
  Info.setWorkItemIDZ(ArgDescriptor::createRegister(Reg, Mask << 20));
}

void SITargetLowering::allocateSpecialInputSGPRs(
    CCState &CCInfo,
    MachineFunction &MF,
    const SIRegisterInfo &TRI,
    SIMachineFunctionInfo &Info) const {
  auto &ArgInfo = Info.getArgInfo();

  // TODO: Unify handling with private memory pointers.

  if (Info.hasDispatchPtr())
    allocateSGPR64Input(CCInfo, ArgInfo.DispatchPtr);

  if (Info.hasQueuePtr())
    allocateSGPR64Input(CCInfo, ArgInfo.QueuePtr);

  // Implicit arg ptr takes the place of the kernarg segment pointer. This is a
  // constant offset from the kernarg segment.
  if (Info.hasImplicitArgPtr())
    allocateSGPR64Input(CCInfo, ArgInfo.ImplicitArgPtr);

  if (Info.hasDispatchID())
    allocateSGPR64Input(CCInfo, ArgInfo.DispatchID);

  // flat_scratch_init is not applicable for non-kernel functions.

  if (Info.hasWorkGroupIDX())
    allocateSGPR32Input(CCInfo, ArgInfo.WorkGroupIDX);

  if (Info.hasWorkGroupIDY())
    allocateSGPR32Input(CCInfo, ArgInfo.WorkGroupIDY);

  if (Info.hasWorkGroupIDZ())
    allocateSGPR32Input(CCInfo, ArgInfo.WorkGroupIDZ);
}

// Allocate special inputs passed in user SGPRs.
void SITargetLowering::allocateHSAUserSGPRs(CCState &CCInfo,
                                            MachineFunction &MF,
                                            const SIRegisterInfo &TRI,
                                            SIMachineFunctionInfo &Info) const {
  if (Info.hasImplicitBufferPtr()) {
    Register ImplicitBufferPtrReg = Info.addImplicitBufferPtr(TRI);
    MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(ImplicitBufferPtrReg);
  }

  // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
  if (Info.hasPrivateSegmentBuffer()) {
    Register PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
    MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
    CCInfo.AllocateReg(PrivateSegmentBufferReg);
  }

  if (Info.hasDispatchPtr()) {
    Register DispatchPtrReg = Info.addDispatchPtr(TRI);
    MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchPtrReg);
  }

  if (Info.hasQueuePtr()) {
    Register QueuePtrReg = Info.addQueuePtr(TRI);
    MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(QueuePtrReg);
  }

  if (Info.hasKernargSegmentPtr()) {
    MachineRegisterInfo &MRI = MF.getRegInfo();
    Register InputPtrReg = Info.addKernargSegmentPtr(TRI);
    CCInfo.AllocateReg(InputPtrReg);

    Register VReg = MF.addLiveIn(InputPtrReg, &AMDGPU::SGPR_64RegClass);
    MRI.setType(VReg, LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
  }

  if (Info.hasDispatchID()) {
    Register DispatchIDReg = Info.addDispatchID(TRI);
    MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchIDReg);
  }

  if (Info.hasFlatScratchInit() && !getSubtarget()->isAmdPalOS()) {
    Register FlatScratchInitReg = Info.addFlatScratchInit(TRI);
    MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(FlatScratchInitReg);
  }

  // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we
  // read these from the dispatch pointer.
}

// Allocate special input registers that are initialized per-wave.
void SITargetLowering::allocateSystemSGPRs(CCState &CCInfo,
                                           MachineFunction &MF,
                                           SIMachineFunctionInfo &Info,
                                           CallingConv::ID CallConv,
                                           bool IsShader) const {
  if (Info.hasWorkGroupIDX()) {
    Register Reg = Info.addWorkGroupIDX();
    MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info.hasWorkGroupIDY()) {
    Register Reg = Info.addWorkGroupIDY();
    MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info.hasWorkGroupIDZ()) {
    Register Reg = Info.addWorkGroupIDZ();
    MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info.hasWorkGroupInfo()) {
    Register Reg = Info.addWorkGroupInfo();
    MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info.hasPrivateSegmentWaveByteOffset()) {
    // Scratch wave offset passed in system SGPR.
    unsigned PrivateSegmentWaveByteOffsetReg;

    if (IsShader) {
      PrivateSegmentWaveByteOffsetReg =
          Info.getPrivateSegmentWaveByteOffsetSystemSGPR();

      // This is true if the scratch wave byte offset doesn't have a fixed
      // location.
      if (PrivateSegmentWaveByteOffsetReg == AMDGPU::NoRegister) {
        PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo);
        Info.setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg);
      }
    } else
      PrivateSegmentWaveByteOffsetReg = Info.addPrivateSegmentWaveByteOffset();

    MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass);
    CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg);
  }
}

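// Choose the physical registers that will hold the scratch buffer resource
// and the stack pointer for this function, reserving preloaded argument SGPRs
// directly when stack access is known to be required.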
2180 | static void reservePrivateMemoryRegs(const TargetMachine &TM, | ||||||
2181 | MachineFunction &MF, | ||||||
2182 | const SIRegisterInfo &TRI, | ||||||
2183 | SIMachineFunctionInfo &Info) { | ||||||
2184 | // Now that we've figured out where the scratch register inputs are, see if | ||||||
2185 | // should reserve the arguments and use them directly. | ||||||
2186 | MachineFrameInfo &MFI = MF.getFrameInfo(); | ||||||
2187 | bool HasStackObjects = MFI.hasStackObjects(); | ||||||
2188 | const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); | ||||||
2189 | |||||||
2190 | // Record that we know we have non-spill stack objects so we don't need to | ||||||
2191 | // check all stack objects later. | ||||||
2192 | if (HasStackObjects) | ||||||
2193 | Info.setHasNonSpillStackObjects(true); | ||||||
2194 | |||||||
2195 | // Everything live out of a block is spilled with fast regalloc, so it's | ||||||
2196 | // almost certain that spilling will be required. | ||||||
2197 | if (TM.getOptLevel() == CodeGenOpt::None) | ||||||
2198 | HasStackObjects = true; | ||||||
2199 | |||||||
2200 | // For now assume stack access is needed in any callee functions, so we need | ||||||
2201 | // the scratch registers to pass in. | ||||||
2202 | bool RequiresStackAccess = HasStackObjects || MFI.hasCalls(); | ||||||
2203 | |||||||
2204 | if (!ST.enableFlatScratch()) { | ||||||
2205 | if (RequiresStackAccess && ST.isAmdHsaOrMesa(MF.getFunction())) { | ||||||
2206 | // If we have stack objects, we unquestionably need the private buffer | ||||||
2207 | // resource. For the Code Object V2 ABI, this will be the first 4 user | ||||||
2208 | // SGPR inputs. We can reserve those and use them directly. | ||||||
2209 | |||||||
2210 | Register PrivateSegmentBufferReg = | ||||||
2211 | Info.getPreloadedReg(AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER); | ||||||
2212 | Info.setScratchRSrcReg(PrivateSegmentBufferReg); | ||||||
2213 | } else { | ||||||
2214 | unsigned ReservedBufferReg = TRI.reservedPrivateSegmentBufferReg(MF); | ||||||
2215 | // We tentatively reserve the last registers (skipping the last registers | ||||||
2216 | // which may contain VCC, FLAT_SCR, and XNACK). After register allocation, | ||||||
2217 | // we'll replace these with the ones immediately after those which were | ||||||
2218 | // really allocated. In the prologue copies will be inserted from the | ||||||
2219 | // argument to these reserved registers. | ||||||
2220 | |||||||
2221 | // Without HSA, relocations are used for the scratch pointer and the | ||||||
2222 | // buffer resource setup is always inserted in the prologue. Scratch wave | ||||||
2223 | // offset is still in an input SGPR. | ||||||
2224 | Info.setScratchRSrcReg(ReservedBufferReg); | ||||||
2225 | } | ||||||
2226 | } | ||||||

  MachineRegisterInfo &MRI = MF.getRegInfo();

  // For entry functions we have to set up the stack pointer if we use it,
  // whereas non-entry functions get this "for free". This means there is no
  // intrinsic advantage to using S32 over S34 in cases where we do not have
  // calls but do need a frame pointer (i.e. if we are requested to have one
  // because frame pointer elimination is disabled). To keep things simple we
  // only ever use S32 as the call ABI stack pointer, and so using it does not
  // imply we need a separate frame pointer.
  //
  // Try to use s32 as the SP, but move it if it would interfere with input
  // arguments. This won't work with calls though.
  //
  // FIXME: Move SP to avoid any possible inputs, or find a way to spill input
  // registers.
  if (!MRI.isLiveIn(AMDGPU::SGPR32)) {
    Info.setStackPtrOffsetReg(AMDGPU::SGPR32);
  } else {
    assert(AMDGPU::isShader(MF.getFunction().getCallingConv()));

    if (MFI.hasCalls())
      report_fatal_error("call in graphics shader with too many input SGPRs");

    for (unsigned Reg : AMDGPU::SGPR_32RegClass) {
      if (!MRI.isLiveIn(Reg)) {
        Info.setStackPtrOffsetReg(Reg);
        break;
      }
    }

    if (Info.getStackPtrOffsetReg() == AMDGPU::SP_REG)
      report_fatal_error("failed to find register for SP");
  }

  // hasFP should be accurate for entry functions even before the frame is
  // finalized, because it does not rely on the known stack size, only
  // properties like whether variable sized objects are present.
  if (ST.getFrameLowering()->hasFP(MF)) {
    Info.setFrameOffsetReg(AMDGPU::SGPR33);
  }
}

bool SITargetLowering::supportSplitCSR(MachineFunction *MF) const {
  const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
  return !Info->isEntryFunction();
}

void SITargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {

}

void SITargetLowering::insertCopiesSplitCSR(
  MachineBasicBlock *Entry,
  const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
  const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();

  const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
  if (!IStart)
    return;

  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
  MachineBasicBlock::iterator MBBI = Entry->begin();
  for (const MCPhysReg *I = IStart; *I; ++I) {
    const TargetRegisterClass *RC = nullptr;
    if (AMDGPU::SReg_64RegClass.contains(*I))
      RC = &AMDGPU::SGPR_64RegClass;
    else if (AMDGPU::SReg_32RegClass.contains(*I))
      RC = &AMDGPU::SGPR_32RegClass;
    else
      llvm_unreachable("Unexpected register class in CSRsViaCopy!");

    Register NewVR = MRI->createVirtualRegister(RC);
    // Create copy from CSR to a virtual register.
    Entry->addLiveIn(*I);
    BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
        .addReg(*I);

    // Insert the copy-back instructions right before the terminator.
    for (auto *Exit : Exits)
      BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
              TII->get(TargetOpcode::COPY), *I)
          .addReg(NewVR);
  }
}
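
// Taken together, the copies above keep each callee-saved register in a
// virtual register for the whole function body: one COPY out of the CSR at
// entry, and one COPY back before every return. This leaves the register
// allocator free to shrink or drop the save/restore on paths that don't
// actually disturb the register.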

SDValue SITargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();

  MachineFunction &MF = DAG.getMachineFunction();
  const Function &Fn = MF.getFunction();
  FunctionType *FType = MF.getFunction().getFunctionType();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();

  if (Subtarget->isAmdHsaOS() && AMDGPU::isGraphics(CallConv)) {
    DiagnosticInfoUnsupported NoGraphicsHSA(
        Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc());
    DAG.getContext()->diagnose(NoGraphicsHSA);
    return DAG.getEntryNode();
  }

  Info->allocateModuleLDSGlobal(Fn.getParent());

  SmallVector<ISD::InputArg, 16> Splits;
  SmallVector<CCValAssign, 16> ArgLocs;
  BitVector Skipped(Ins.size());
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  bool IsGraphics = AMDGPU::isGraphics(CallConv);
  bool IsKernel = AMDGPU::isKernel(CallConv);
  bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CallConv);

  if (IsGraphics) {
    assert(!Info->hasDispatchPtr() && !Info->hasKernargSegmentPtr() &&
           (!Info->hasFlatScratchInit() || Subtarget->enableFlatScratch()) &&
           !Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() &&
           !Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() &&
           !Info->hasWorkItemIDX() && !Info->hasWorkItemIDY() &&
           !Info->hasWorkItemIDZ());
  }

  if (CallConv == CallingConv::AMDGPU_PS) {
    processPSInputArgs(Splits, CallConv, Ins, Skipped, FType, Info);

    // At least one interpolation mode must be enabled or else the GPU will
    // hang.
    //
    // Check PSInputAddr instead of PSInputEnable. The idea is that if the user
    // set PSInputAddr, the user wants to enable some bits after the
    // compilation based on run-time states. Since we can't know what the final
    // PSInputEna will look like, we shouldn't do anything here; the user
    // should take responsibility for the correct programming.
    //
    // Otherwise, the following restrictions apply:
    // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
    // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
    //   enabled too.
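    //
    // A worked example with hypothetical values: PSInputAddr = 0x800 (only
    // POS_W_FLOAT, bit 11) fails the first check below, since
    // (0x800 & 0x7F) == 0, so input 0 (a PERSP_* mode) is force-allocated
    // and enabled, and VGPR0/VGPR1 are reserved to keep the hardware fed.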
    if ((Info->getPSInputAddr() & 0x7F) == 0 ||
        ((Info->getPSInputAddr() & 0xF) == 0 && Info->isPSInputAllocated(11))) {
      CCInfo.AllocateReg(AMDGPU::VGPR0);
      CCInfo.AllocateReg(AMDGPU::VGPR1);
      Info->markPSInputAllocated(0);
      Info->markPSInputEnabled(0);
    }
    if (Subtarget->isAmdPalOS()) {
      // For isAmdPalOS, the user does not enable some bits after compilation
      // based on run-time states; the register values being generated here are
      // the final ones set in hardware. Therefore we need to apply the
      // workaround to PSInputAddr and PSInputEnable together. (The case where
      // a bit is set in PSInputAddr but not PSInputEnable is where the
      // frontend set up an input arg for a particular interpolation mode, but
      // nothing uses that input arg. Really we should have an earlier pass
      // that removes such an arg.)
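      //
      // Hypothetical example: PSInputAddr = 0x81 with PSInputEnable = 0x80
      // yields PsInputBits = 0x80; since (0x80 & 0x7F) == 0, the workaround
      // enables the lowest bit set in PSInputAddr (bit 0) via
      // countTrailingZeros.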
      unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable();
      if ((PsInputBits & 0x7F) == 0 ||
          ((PsInputBits & 0xF) == 0 && (PsInputBits >> 11 & 1)))
        Info->markPSInputEnabled(
            countTrailingZeros(Info->getPSInputAddr(), ZB_Undefined));
    }
  } else if (IsKernel) {
    assert(Info->hasWorkGroupIDX() && Info->hasWorkItemIDX());
  } else {
    Splits.append(Ins.begin(), Ins.end());
  }

  if (IsEntryFunc) {
    allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info);
    allocateHSAUserSGPRs(CCInfo, MF, *TRI, *Info);
  } else {
    // For the fixed ABI, pass workitem IDs in the last argument register.
    if (AMDGPUTargetMachine::EnableFixedFunctionABI)
      allocateSpecialInputVGPRsFixed(CCInfo, MF, *TRI, *Info);
  }

  if (IsKernel) {
    analyzeFormalArgumentsCompute(CCInfo, Ins);
  } else {
    CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, isVarArg);
    CCInfo.AnalyzeFormalArguments(Splits, AssignFn);
  }

  SmallVector<SDValue, 16> Chains;

  // FIXME: This is the minimum kernel argument alignment. We should improve
  // this to the maximum alignment of the arguments.
  //
  // FIXME: Alignment of explicit arguments totally broken with non-0 explicit
  // kern arg offset.
  const Align KernelArgBaseAlign = Align(16);
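
  // The per-argument alignment below follows from the offset: e.g.
  // commonAlignment(Align(16), Offset) yields Align(16) at Offset 0,
  // Align(4) at Offset 4, and Align(8) at Offset 24, i.e. the largest
  // power of two dividing the offset, capped at the 16-byte base alignment.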

  for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) {
    const ISD::InputArg &Arg = Ins[i];
    if (Arg.isOrigArg() && Skipped[Arg.getOrigArgIndex()]) {
      InVals.push_back(DAG.getUNDEF(Arg.VT));
      continue;
    }

    CCValAssign &VA = ArgLocs[ArgIdx++];
    MVT VT = VA.getLocVT();

    if (IsEntryFunc && VA.isMemLoc()) {
      VT = Ins[i].VT;
      EVT MemVT = VA.getLocVT();

      const uint64_t Offset = VA.getLocMemOffset();
      Align Alignment = commonAlignment(KernelArgBaseAlign, Offset);

      if (Arg.Flags.isByRef()) {
        SDValue Ptr = lowerKernArgParameterPtr(DAG, DL, Chain, Offset);

        const GCNTargetMachine &TM =
            static_cast<const GCNTargetMachine &>(getTargetMachine());
        if (!TM.isNoopAddrSpaceCast(AMDGPUAS::CONSTANT_ADDRESS,
                                    Arg.Flags.getPointerAddrSpace())) {
          Ptr = DAG.getAddrSpaceCast(DL, VT, Ptr, AMDGPUAS::CONSTANT_ADDRESS,
                                     Arg.Flags.getPointerAddrSpace());
        }

        InVals.push_back(Ptr);
        continue;
      }

      SDValue Arg = lowerKernargMemParameter(
          DAG, VT, MemVT, DL, Chain, Offset, Alignment, Ins[i].Flags.isSExt(),
          &Ins[i]);
      Chains.push_back(Arg.getValue(1));

      auto *ParamTy =
          dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex()));
      if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
          ParamTy && (ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
                      ParamTy->getAddressSpace() == AMDGPUAS::REGION_ADDRESS)) {
        // On SI local pointers are just offsets into LDS, so they are always
        // less than 16-bits. On CI and newer they could potentially be
        // real pointers, so we can't guarantee their size.
        Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg,
                          DAG.getValueType(MVT::i16));
      }
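
      // The AssertZext records that bits [31:16] are known zero, so later
      // known-bits driven combines can, e.g., fold away an AND of this
      // argument with 0xFFFF.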

      InVals.push_back(Arg);
      continue;
    } else if (!IsEntryFunc && VA.isMemLoc()) {
      SDValue Val = lowerStackParameter(DAG, VA, DL, Chain, Arg);
      InVals.push_back(Val);
      if (!Arg.Flags.isByVal())
        Chains.push_back(Val.getValue(1));
      continue;
    }

    assert(VA.isRegLoc() && "Parameter must be in a register!");

    Register Reg = VA.getLocReg();
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
    EVT ValVT = VA.getValVT();

    Reg = MF.addLiveIn(Reg, RC);
    SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT);

    if (Arg.Flags.isSRet()) {
      // The return object should be reasonably addressable.

      // FIXME: This helps when the return is a real sret. If it is an
      // automatically inserted sret (i.e. CanLowerReturn returns false), an
      // extra copy is inserted in SelectionDAGBuilder which obscures this.
      unsigned NumBits
        = 32 - getSubtarget()->getKnownHighZeroBitsForFrameIndex();
      Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
        DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), NumBits)));
    }

    // If this is an 8 or 16-bit value, it is really passed promoted
    // to 32 bits. Insert an assert[sz]ext to capture this, then
    // truncate to the right size.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full:
      break;
    case CCValAssign::BCvt:
      Val = DAG.getNode(ISD::BITCAST, DL, ValVT, Val);
      break;
    case CCValAssign::SExt:
      Val = DAG.getNode(ISD::AssertSext, DL, VT, Val,
                        DAG.getValueType(ValVT));
      Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
      break;
    case CCValAssign::ZExt:
      Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
                        DAG.getValueType(ValVT));
      Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
      break;
    case CCValAssign::AExt:
      Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
      break;
    default:
      llvm_unreachable("Unknown loc info!");
    }
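
    // E.g. an i16 argument that arrives zero-extended in a 32-bit register
    // becomes AssertZext(copy, i16) followed by a TRUNCATE to i16, keeping
    // the known-zero upper half visible to later combines.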

    InVals.push_back(Val);
  }

  if (!IsEntryFunc && !AMDGPUTargetMachine::EnableFixedFunctionABI) {
    // Special inputs come after user arguments.
    allocateSpecialInputVGPRs(CCInfo, MF, *TRI, *Info);
  }

  // Start adding system SGPRs.
  if (IsEntryFunc) {
    allocateSystemSGPRs(CCInfo, MF, *Info, CallConv, IsGraphics);
  } else {
    CCInfo.AllocateReg(Info->getScratchRSrcReg());
    allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info);
  }

  auto &ArgUsageInfo =
      DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
  ArgUsageInfo.setFuncArgInfo(Fn, Info->getArgInfo());

  unsigned StackArgSize = CCInfo.getNextStackOffset();
  Info->setBytesInStackArgArea(StackArgSize);

  return Chains.empty() ? Chain :
    DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
}

// TODO: If return values can't fit in registers, we should return as many as
// possible in registers before passing on stack.
bool SITargetLowering::CanLowerReturn(
    CallingConv::ID CallConv,
    MachineFunction &MF, bool IsVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    LLVMContext &Context) const {
  // Replacing returns with sret/stack usage doesn't make sense for shaders.
  // FIXME: Also sort of a workaround for custom vector splitting in LowerReturn
  // for shaders. Vector types should be explicitly handled by CC.
  if (AMDGPU::isEntryFunctionCC(CallConv))
    return true;

  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, IsVarArg));
}

SDValue
SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                              bool isVarArg,
                              const SmallVectorImpl<ISD::OutputArg> &Outs,
                              const SmallVectorImpl<SDValue> &OutVals,
                              const SDLoc &DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();

  if (AMDGPU::isKernel(CallConv)) {
    return AMDGPUTargetLowering::LowerReturn(Chain, CallConv, isVarArg, Outs,
                                             OutVals, DL, DAG);
  }

  bool IsShader = AMDGPU::isShader(CallConv);

  Info->setIfReturnsVoid(Outs.empty());
  bool IsWaveEnd = Info->returnsVoid() && IsShader;

  // CCValAssign - represent the assignment of the return value to a location.
  SmallVector<CCValAssign, 48> RVLocs;
  SmallVector<ISD::OutputArg, 48> Splits;

  // CCState - Info about the registers and stack slots.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze outgoing return values.
  CCInfo.AnalyzeReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg));

  SDValue Flag;
  SmallVector<SDValue, 48> RetOps;
  RetOps.push_back(Chain); // Operand #0 = Chain (updated below)

  // Add return address for callable functions.
  if (!Info->isEntryFunction()) {
    const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
    SDValue ReturnAddrReg = CreateLiveInRegister(
      DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);

    SDValue ReturnAddrVirtualReg = DAG.getRegister(
        MF.getRegInfo().createVirtualRegister(&AMDGPU::CCR_SGPR_64RegClass),
        MVT::i64);
    Chain =
        DAG.getCopyToReg(Chain, DL, ReturnAddrVirtualReg, ReturnAddrReg, Flag);
    Flag = Chain.getValue(1);
    RetOps.push_back(ReturnAddrVirtualReg);
  }

  // Copy the result values into the output registers.
  for (unsigned I = 0, RealRVLocIdx = 0, E = RVLocs.size(); I != E;
       ++I, ++RealRVLocIdx) {
    CCValAssign &VA = RVLocs[I];
    assert(VA.isRegLoc() && "Can only return in registers!");
    // TODO: Partially return in registers if return values don't fit.
    SDValue Arg = OutVals[RealRVLocIdx];

    // Copied from other backends.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full:
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    default:
      llvm_unreachable("Unknown loc info!");
    }

    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  // FIXME: Does sret work properly?
  if (!Info->isEntryFunction()) {
    const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
    const MCPhysReg *I =
      TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
    if (I) {
      for (; *I; ++I) {
        if (AMDGPU::SReg_64RegClass.contains(*I))
          RetOps.push_back(DAG.getRegister(*I, MVT::i64));
        else if (AMDGPU::SReg_32RegClass.contains(*I))
          RetOps.push_back(DAG.getRegister(*I, MVT::i32));
        else
          llvm_unreachable("Unexpected register class in CSRsViaCopy!");
      }
    }
  }

  // Update chain and glue.
  RetOps[0] = Chain;
  if (Flag.getNode())
    RetOps.push_back(Flag);

  unsigned Opc = AMDGPUISD::ENDPGM;
  if (!IsWaveEnd)
    Opc = IsShader ? AMDGPUISD::RETURN_TO_EPILOG : AMDGPUISD::RET_FLAG;
  return DAG.getNode(Opc, DL, MVT::Other, RetOps);
}

SDValue SITargetLowering::LowerCallResult(
    SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool IsThisReturn,
    SDValue ThisVal) const {
  CCAssignFn *RetCC = CCAssignFnForReturn(CallConv, IsVarArg);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallResult(Ins, RetCC);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign VA = RVLocs[i];
    SDValue Val;

    if (VA.isRegLoc()) {
      Val = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), InFlag);
      Chain = Val.getValue(1);
      InFlag = Val.getValue(2);
    } else if (VA.isMemLoc()) {
      report_fatal_error("TODO: return values in memory");
    } else
      llvm_unreachable("unknown argument location type");

    switch (VA.getLocInfo()) {
    case CCValAssign::Full:
      break;
    case CCValAssign::BCvt:
      Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
      break;
    case CCValAssign::ZExt:
      Val = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Val,
                        DAG.getValueType(VA.getValVT()));
      Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
      break;
    case CCValAssign::SExt:
      Val = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Val,
                        DAG.getValueType(VA.getValVT()));
      Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
      break;
    case CCValAssign::AExt:
      Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
      break;
    default:
      llvm_unreachable("Unknown loc info!");
    }

    InVals.push_back(Val);
  }

  return Chain;
}

// Add code to pass the special input registers required by features in use,
// separate from the explicit user arguments present in the IR.
void SITargetLowering::passSpecialInputs(
    CallLoweringInfo &CLI,
    CCState &CCInfo,
    const SIMachineFunctionInfo &Info,
    SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass,
    SmallVectorImpl<SDValue> &MemOpChains,
    SDValue Chain) const {
  // If we don't have a call site, this was a call inserted by
  // legalization. These can never use special inputs.
  if (!CLI.CB)
    return;

  SelectionDAG &DAG = CLI.DAG;
  const SDLoc &DL = CLI.DL;

  const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const AMDGPUFunctionArgInfo &CallerArgInfo = Info.getArgInfo();

  const AMDGPUFunctionArgInfo *CalleeArgInfo
    = &AMDGPUArgumentUsageInfo::FixedABIFunctionInfo;
  if (const Function *CalleeFunc = CLI.CB->getCalledFunction()) {
    auto &ArgUsageInfo =
      DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
    CalleeArgInfo = &ArgUsageInfo.lookupFuncArgInfo(*CalleeFunc);
  }

  // TODO: Unify with private memory register handling. This is complicated by
  // the fact that at least in kernels, the input argument is not necessarily
  // in the same location as the input.
  AMDGPUFunctionArgInfo::PreloadedValue InputRegs[] = {
    AMDGPUFunctionArgInfo::DISPATCH_PTR,
    AMDGPUFunctionArgInfo::QUEUE_PTR,
    AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR,
    AMDGPUFunctionArgInfo::DISPATCH_ID,
    AMDGPUFunctionArgInfo::WORKGROUP_ID_X,
    AMDGPUFunctionArgInfo::WORKGROUP_ID_Y,
    AMDGPUFunctionArgInfo::WORKGROUP_ID_Z
  };

  for (auto InputID : InputRegs) {
    const ArgDescriptor *OutgoingArg;
    const TargetRegisterClass *ArgRC;
    LLT ArgTy;

    std::tie(OutgoingArg, ArgRC, ArgTy) =
        CalleeArgInfo->getPreloadedValue(InputID);
    if (!OutgoingArg)
      continue;

    const ArgDescriptor *IncomingArg;
    const TargetRegisterClass *IncomingArgRC;
    LLT Ty;
    std::tie(IncomingArg, IncomingArgRC, Ty) =
        CallerArgInfo.getPreloadedValue(InputID);
    assert(IncomingArgRC == ArgRC);

    // All special arguments are ints for now.
    EVT ArgVT = TRI->getSpillSize(*ArgRC) == 8 ? MVT::i64 : MVT::i32;
    SDValue InputReg;

    if (IncomingArg) {
      InputReg = loadInputValue(DAG, ArgRC, ArgVT, DL, *IncomingArg);
    } else {
      // The implicit arg ptr is special because it doesn't have a corresponding
      // input for kernels, and is computed from the kernarg segment pointer.
      assert(InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
      InputReg = getImplicitArgPtr(DAG, DL);
    }

    if (OutgoingArg->isRegister()) {
      RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg);
      if (!CCInfo.AllocateReg(OutgoingArg->getRegister()))
        report_fatal_error("failed to allocate implicit input argument");
    } else {
      unsigned SpecialArgOffset =
          CCInfo.AllocateStack(ArgVT.getStoreSize(), Align(4));
      SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, InputReg,
                                              SpecialArgOffset);
      MemOpChains.push_back(ArgStore);
    }
  }

  // Pack workitem IDs into a single register, or pass them as-is if they are
  // already packed.
  const ArgDescriptor *OutgoingArg;
  const TargetRegisterClass *ArgRC;
  LLT Ty;

  std::tie(OutgoingArg, ArgRC, Ty) =
      CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X);
  if (!OutgoingArg)
    std::tie(OutgoingArg, ArgRC, Ty) =
        CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y);
  if (!OutgoingArg)
    std::tie(OutgoingArg, ArgRC, Ty) =
        CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z);
  if (!OutgoingArg)
    return;

  const ArgDescriptor *IncomingArgX = std::get<0>(
      CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X));
  const ArgDescriptor *IncomingArgY = std::get<0>(
      CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y));
  const ArgDescriptor *IncomingArgZ = std::get<0>(
      CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z));

  SDValue InputReg;
  SDLoc SL;

  // If the incoming IDs are not packed, we need to pack them.
  if (IncomingArgX && !IncomingArgX->isMasked() && CalleeArgInfo->WorkItemIDX)
    InputReg = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgX);

  if (IncomingArgY && !IncomingArgY->isMasked() && CalleeArgInfo->WorkItemIDY) {
    SDValue Y = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgY);
    Y = DAG.getNode(ISD::SHL, SL, MVT::i32, Y,
                    DAG.getShiftAmountConstant(10, MVT::i32, SL));
    InputReg = InputReg.getNode() ?
                 DAG.getNode(ISD::OR, SL, MVT::i32, InputReg, Y) : Y;
  }

  if (IncomingArgZ && !IncomingArgZ->isMasked() && CalleeArgInfo->WorkItemIDZ) {
    SDValue Z = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgZ);
    Z = DAG.getNode(ISD::SHL, SL, MVT::i32, Z,
                    DAG.getShiftAmountConstant(20, MVT::i32, SL));
    InputReg = InputReg.getNode() ?
                 DAG.getNode(ISD::OR, SL, MVT::i32, InputReg, Z) : Z;
  }
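
  // The packed layout places X in bits [9:0], Y in [19:10], and Z in
  // [29:20]. With hypothetical IDs X=5, Y=3, Z=1, the packed value is
  // (1 << 20) | (3 << 10) | 5 == 0x100C05.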

  if (!InputReg.getNode()) {
    // Workitem IDs are already packed; any of the present incoming arguments
    // will carry all the required fields.
    ArgDescriptor IncomingArg = ArgDescriptor::createArg(
      IncomingArgX ? *IncomingArgX :
      IncomingArgY ? *IncomingArgY :
      *IncomingArgZ, ~0u);
    InputReg = loadInputValue(DAG, ArgRC, MVT::i32, DL, IncomingArg);
  }

  if (OutgoingArg->isRegister()) {
    RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg);
    CCInfo.AllocateReg(OutgoingArg->getRegister());
  } else {
    unsigned SpecialArgOffset = CCInfo.AllocateStack(4, Align(4));
    SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, InputReg,
                                            SpecialArgOffset);
    MemOpChains.push_back(ArgStore);
  }
}

static bool canGuaranteeTCO(CallingConv::ID CC) {
  return CC == CallingConv::Fast;
}

/// Return true if we might ever do TCO for calls with this calling convention.
static bool mayTailCallThisCC(CallingConv::ID CC) {
  switch (CC) {
  case CallingConv::C:
  case CallingConv::AMDGPU_Gfx:
    return true;
  default:
    return canGuaranteeTCO(CC);
  }
}
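
// In other words: only fastcc calls can be *guaranteed* tail calls (under
// GuaranteedTailCallOpt), while C and amdgpu_gfx calls may still be lowered
// as sibling calls when the eligibility checks below succeed.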

bool SITargetLowering::isEligibleForTailCallOptimization(
    SDValue Callee, CallingConv::ID CalleeCC, bool IsVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
  if (!mayTailCallThisCC(CalleeCC))
    return false;

  // For a divergent call target, we need to do a waterfall loop over the
  // possible callees which precludes us from using a simple jump.
  if (Callee->isDivergent())
    return false;

  MachineFunction &MF = DAG.getMachineFunction();
  const Function &CallerF = MF.getFunction();
  CallingConv::ID CallerCC = CallerF.getCallingConv();
  const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);

  // Kernels aren't callable, and don't have a live-in return address, so it
  // doesn't make sense to do a tail call with entry functions.
  if (!CallerPreserved)
    return false;

  bool CCMatch = CallerCC == CalleeCC;

  if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
    if (canGuaranteeTCO(CalleeCC) && CCMatch)
      return true;
    return false;
  }

  // TODO: Can we handle var args?
  if (IsVarArg)
    return false;

  for (const Argument &Arg : CallerF.args()) {
    if (Arg.hasByValAttr())
      return false;
  }

  LLVMContext &Ctx = *DAG.getContext();

  // Check that the call results are passed in the same way.
  if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, Ctx, Ins,
                                  CCAssignFnForCall(CalleeCC, IsVarArg),
                                  CCAssignFnForCall(CallerCC, IsVarArg)))
    return false;

  // The callee has to preserve all registers the caller needs to preserve.
  if (!CCMatch) {
    const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
    if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
      return false;
  }

  // Nothing more to check if the callee is taking no arguments.
  if (Outs.empty())
    return true;

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, Ctx);

  CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, IsVarArg));

  const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  // If the stack arguments for this call do not fit into our own save area,
  // then the call cannot be made a tail call.
  // TODO: Is this really necessary?
  if (CCInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea())
    return false;

  const MachineRegisterInfo &MRI = MF.getRegInfo();
  return parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals);
}

bool SITargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  if (!CI->isTailCall())
    return false;

  const Function *ParentFn = CI->getParent()->getParent();
  if (AMDGPU::isEntryFunctionCC(ParentFn->getCallingConv()))
    return false;
  return true;
}

// The wave scratch offset register is used as the global base pointer.
SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
                                    SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  const SDLoc &DL = CLI.DL;
  SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
  SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
  SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  bool IsSibCall = false;
  bool IsThisReturn = false;
  MachineFunction &MF = DAG.getMachineFunction();

  if (Callee.isUndef() || isNullConstant(Callee)) {
    if (!CLI.IsTailCall) {
      for (unsigned I = 0, E = CLI.Ins.size(); I != E; ++I)
        InVals.push_back(DAG.getUNDEF(CLI.Ins[I].VT));
    }

    return Chain;
  }

  if (IsVarArg) {
    return lowerUnhandledCall(CLI, InVals,
                              "unsupported call to variadic function ");
  }

  if (!CLI.CB)
    report_fatal_error("unsupported libcall legalization");

  if (IsTailCall && MF.getTarget().Options.GuaranteedTailCallOpt) {
    return lowerUnhandledCall(CLI, InVals,
                              "unsupported required tail call to function ");
  }

  if (AMDGPU::isShader(CallConv)) {
    // Note the issue is with the CC of the called function, not of the call
    // itself.
    return lowerUnhandledCall(CLI, InVals,
                              "unsupported call to a shader function ");
  }

  if (AMDGPU::isShader(MF.getFunction().getCallingConv()) &&
      CallConv != CallingConv::AMDGPU_Gfx) {
    // Only allow calls with specific calling conventions.
    return lowerUnhandledCall(CLI, InVals,
                              "unsupported calling convention for call from "
                              "graphics shader of function ");
  }

  if (IsTailCall) {
    IsTailCall = isEligibleForTailCallOptimization(
      Callee, CallConv, IsVarArg, Outs, OutVals, Ins, DAG);
    if (!IsTailCall && CLI.CB && CLI.CB->isMustTailCall()) {
      report_fatal_error("failed to perform tail call elimination on a call "
                         "site marked musttail");
    }

    bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;

    // A sibling call is one where we're under the usual C ABI and not planning
    // to change that but can still do a tail call:
    if (!TailCallOpt && IsTailCall)
      IsSibCall = true;

    if (IsTailCall)
      ++NumTailCalls;
  }

  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, IsVarArg);

  if (AMDGPUTargetMachine::EnableFixedFunctionABI &&
      CallConv != CallingConv::AMDGPU_Gfx) {
    // With a fixed ABI, allocate fixed registers before user arguments.
    passSpecialInputs(CLI, CCInfo, *Info, RegsToPass, MemOpChains, Chain);
  }

  CCInfo.AnalyzeCallOperands(Outs, AssignFn);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  if (IsSibCall) {
    // Since we're not changing the ABI to make this a tail call, the memory
    // operands are already available in the caller's incoming argument space.
    NumBytes = 0;
  }

  // FPDiff is the byte offset of the call's argument area from the callee's.
  // Stores to callee stack arguments will be placed in FixedStackSlots offset
  // by this amount for a tail call. In a sibling call it must be 0 because the
  // caller will deallocate the entire stack and the callee still expects its
  // arguments to begin at SP+0. Completely unused for non-tail calls.
  int32_t FPDiff = 0;
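
  // Note that in this lowering, guaranteed tail calls were already rejected
  // above (lowerUnhandledCall under GuaranteedTailCallOpt), so every tail
  // call that reaches this point is a sibling call and FPDiff stays 0: the
  // callee finds its arguments at SP+0, exactly where the caller's incoming
  // arguments live.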
3096 | MachineFrameInfo &MFI = MF.getFrameInfo(); | ||||||
3097 | |||||||
3098 | // Adjust the stack pointer for the new arguments... | ||||||
3099 | // These operations are automatically eliminated by the prolog/epilog pass | ||||||
3100 | if (!IsSibCall) { | ||||||
3101 | Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL); | ||||||
3102 | |||||||
3103 | if (!Subtarget->enableFlatScratch()) { | ||||||
3104 | SmallVector<SDValue, 4> CopyFromChains; | ||||||
3105 | |||||||
3106 | // In the HSA case, this should be an identity copy. | ||||||
3107 | SDValue ScratchRSrcReg | ||||||
3108 | = DAG.getCopyFromReg(Chain, DL, Info->getScratchRSrcReg(), MVT::v4i32); | ||||||
3109 | RegsToPass.emplace_back(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, ScratchRSrcReg); | ||||||
3110 | CopyFromChains.push_back(ScratchRSrcReg.getValue(1)); | ||||||
3111 | Chain = DAG.getTokenFactor(DL, CopyFromChains); | ||||||
3112 | } | ||||||
3113 | } | ||||||
3114 | |||||||
3115 | MVT PtrVT = MVT::i32; | ||||||
3116 | |||||||
3117 | // Walk the register/memloc assignments, inserting copies/loads. | ||||||
3118 | for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { | ||||||
3119 | CCValAssign &VA = ArgLocs[i]; | ||||||
3120 | SDValue Arg = OutVals[i]; | ||||||
3121 | |||||||
3122 | // Promote the value if needed. | ||||||
3123 | switch (VA.getLocInfo()) { | ||||||
3124 | case CCValAssign::Full: | ||||||
3125 | break; | ||||||
3126 | case CCValAssign::BCvt: | ||||||
3127 | Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg); | ||||||
3128 | break; | ||||||
3129 | case CCValAssign::ZExt: | ||||||
3130 | Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg); | ||||||
3131 | break; | ||||||
3132 | case CCValAssign::SExt: | ||||||
3133 | Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg); | ||||||
3134 | break; | ||||||
3135 | case CCValAssign::AExt: | ||||||
3136 | Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg); | ||||||
3137 | break; | ||||||
3138 | case CCValAssign::FPExt: | ||||||
3139 | Arg = DAG.getNode(ISD::FP_EXTEND, DL, VA.getLocVT(), Arg); | ||||||
3140 | break; | ||||||
3141 | default: | ||||||
3142 | llvm_unreachable("Unknown loc info!")::llvm::llvm_unreachable_internal("Unknown loc info!", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AMDGPU/SIISelLowering.cpp" , 3142); | ||||||
3143 | } | ||||||
3144 | |||||||
3145 | if (VA.isRegLoc()) { | ||||||
3146 | RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); | ||||||
3147 | } else { | ||||||
3148 | assert(VA.isMemLoc())(static_cast <bool> (VA.isMemLoc()) ? void (0) : __assert_fail ("VA.isMemLoc()", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AMDGPU/SIISelLowering.cpp" , 3148, __extension__ __PRETTY_FUNCTION__)); | ||||||
3149 | |||||||
3150 | SDValue DstAddr; | ||||||
3151 | MachinePointerInfo DstInfo; | ||||||
3152 | |||||||
3153 | unsigned LocMemOffset = VA.getLocMemOffset(); | ||||||
3154 | int32_t Offset = LocMemOffset; | ||||||
3155 | |||||||
3156 | SDValue PtrOff = DAG.getConstant(Offset, DL, PtrVT); | ||||||
3157 | MaybeAlign Alignment; | ||||||
3158 | |||||||
3159 | if (IsTailCall) { | ||||||
3160 | ISD::ArgFlagsTy Flags = Outs[i].Flags; | ||||||
3161 | unsigned OpSize = Flags.isByVal() ? | ||||||
3162 | Flags.getByValSize() : VA.getValVT().getStoreSize(); | ||||||
3163 | |||||||
3164 | // FIXME: We can have better than the minimum byval required alignment. | ||||||
3165 | Alignment = | ||||||
3166 | Flags.isByVal() | ||||||
3167 | ? Flags.getNonZeroByValAlign() | ||||||
3168 | : commonAlignment(Subtarget->getStackAlignment(), Offset); | ||||||
3169 | |||||||
3170 | Offset = Offset + FPDiff; | ||||||
3171 | int FI = MFI.CreateFixedObject(OpSize, Offset, true); | ||||||
3172 | |||||||
3173 | DstAddr = DAG.getFrameIndex(FI, PtrVT); | ||||||
3174 | DstInfo = MachinePointerInfo::getFixedStack(MF, FI); | ||||||
3175 | |||||||
3176 | // Make sure any stack arguments overlapping with where we're storing | ||||||
3177 | // are loaded before this eventual operation. Otherwise they'll be | ||||||
3178 | // clobbered. | ||||||
3179 | |||||||
3180 | // FIXME: Why is this really necessary? This seems to just result in a | ||||||
3181 | // lot of code to copy the stack and write them back to the same | ||||||
3182 | // locations, which are supposed to be immutable? | ||||||
3183 | Chain = addTokenForArgument(Chain, DAG, MFI, FI); | ||||||
3184 | } else { | ||||||
3185 | // Stores to the argument stack area are relative to the stack pointer. | ||||||
3186 | SDValue SP = DAG.getCopyFromReg(Chain, DL, Info->getStackPtrOffsetReg(), | ||||||
3187 | MVT::i32); | ||||||
3188 | DstAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, SP, PtrOff); | ||||||
3189 | DstInfo = MachinePointerInfo::getStack(MF, LocMemOffset); | ||||||
3190 | Alignment = | ||||||
3191 | commonAlignment(Subtarget->getStackAlignment(), LocMemOffset); | ||||||
3192 | } | ||||||
3193 | |||||||
3194 | if (Outs[i].Flags.isByVal()) { | ||||||
3195 | SDValue SizeNode = | ||||||
3196 | DAG.getConstant(Outs[i].Flags.getByValSize(), DL, MVT::i32); | ||||||
3197 | SDValue Cpy = | ||||||
3198 | DAG.getMemcpy(Chain, DL, DstAddr, Arg, SizeNode, | ||||||
3199 | Outs[i].Flags.getNonZeroByValAlign(), | ||||||
3200 | /*isVol = */ false, /*AlwaysInline = */ true, | ||||||
3201 | /*isTailCall = */ false, DstInfo, | ||||||
3202 | MachinePointerInfo(AMDGPUAS::PRIVATE_ADDRESS)); | ||||||
3203 | |||||||
3204 | MemOpChains.push_back(Cpy); | ||||||
3205 | } else { | ||||||
3206 | SDValue Store = | ||||||
3207 | DAG.getStore(Chain, DL, Arg, DstAddr, DstInfo, Alignment); | ||||||
3208 | MemOpChains.push_back(Store); | ||||||
3209 | } | ||||||
3210 | } | ||||||
3211 | } | ||||||
3212 | |||||||
3213 | if (!AMDGPUTargetMachine::EnableFixedFunctionABI && | ||||||
3214 | CallConv != CallingConv::AMDGPU_Gfx) { | ||||||
3215 | // Copy special input registers after user input arguments. | ||||||
3216 | passSpecialInputs(CLI, CCInfo, *Info, RegsToPass, MemOpChains, Chain); | ||||||
3217 | } | ||||||
3218 | |||||||
3219 | if (!MemOpChains.empty()) | ||||||
3220 | Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains); | ||||||
3221 | |||||||
3222 | // Build a sequence of copy-to-reg nodes chained together with token chain | ||||||
3223 | // and flag operands which copy the outgoing args into the appropriate regs. | ||||||
3224 | SDValue InFlag; | ||||||
3225 | for (auto &RegToPass : RegsToPass) { | ||||||
3226 | Chain = DAG.getCopyToReg(Chain, DL, RegToPass.first, | ||||||
3227 | RegToPass.second, InFlag); | ||||||
3228 | InFlag = Chain.getValue(1); | ||||||
3229 | } | ||||||
3230 | |||||||
3231 | |||||||
3232 | SDValue PhysReturnAddrReg; | ||||||
3233 | if (IsTailCall) { | ||||||
3234 | // Since the return is being combined with the call, we need to pass on the | ||||||
3235 | // return address. | ||||||
3236 | |||||||
3237 | const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); | ||||||
3238 | SDValue ReturnAddrReg = CreateLiveInRegister( | ||||||
3239 | DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64); | ||||||
3240 | |||||||
3241 | PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF), | ||||||
3242 | MVT::i64); | ||||||
3243 | Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, InFlag); | ||||||
3244 | InFlag = Chain.getValue(1); | ||||||
3245 | } | ||||||
3246 | |||||||
3247 | // We don't usually want to end the call-sequence here because we would tidy | ||||||
3248 | // the frame up *after* the call. However, in the ABI-changing tail-call case | ||||||
3249 | // we've carefully laid out the parameters so that when sp is reset they'll be | ||||||
3250 | // in the correct location. | ||||||
3251 | if (IsTailCall && !IsSibCall) { | ||||||
3252 | Chain = DAG.getCALLSEQ_END(Chain, | ||||||
3253 | DAG.getTargetConstant(NumBytes, DL, MVT::i32), | ||||||
3254 | DAG.getTargetConstant(0, DL, MVT::i32), | ||||||
3255 | InFlag, DL); | ||||||
3256 | InFlag = Chain.getValue(1); | ||||||
3257 | } | ||||||
3258 | |||||||
3259 | std::vector<SDValue> Ops; | ||||||
3260 | Ops.push_back(Chain); | ||||||
3261 | Ops.push_back(Callee); | ||||||
3262 | // Add a redundant copy of the callee global which will not be legalized, as | ||||||
3263 | // we need direct access to the callee later. | ||||||
3264 | if (GlobalAddressSDNode *GSD = dyn_cast<GlobalAddressSDNode>(Callee)) { | ||||||
3265 | const GlobalValue *GV = GSD->getGlobal(); | ||||||
3266 | Ops.push_back(DAG.getTargetGlobalAddress(GV, DL, MVT::i64)); | ||||||
3267 | } else { | ||||||
3268 | Ops.push_back(DAG.getTargetConstant(0, DL, MVT::i64)); | ||||||
3269 | } | ||||||
3270 | |||||||
3271 | if (IsTailCall) { | ||||||
3272 | // Each tail call may have to adjust the stack by a different amount, so | ||||||
3273 | // this information must travel along with the operation for eventual | ||||||
3274 | // consumption by emitEpilogue. | ||||||
3275 | Ops.push_back(DAG.getTargetConstant(FPDiff, DL, MVT::i32)); | ||||||
3276 | |||||||
3277 | Ops.push_back(PhysReturnAddrReg); | ||||||
3278 | } | ||||||
3279 | |||||||
3280 | // Add argument registers to the end of the list so that they are known live | ||||||
3281 | // into the call. | ||||||
3282 | for (auto &RegToPass : RegsToPass) { | ||||||
3283 | Ops.push_back(DAG.getRegister(RegToPass.first, | ||||||
3284 | RegToPass.second.getValueType())); | ||||||
3285 | } | ||||||
3286 | |||||||
3287 | // Add a register mask operand representing the call-preserved registers. | ||||||
3288 | |||||||
3289 | auto *TRI = static_cast<const SIRegisterInfo*>(Subtarget->getRegisterInfo()); | ||||||
3290 | const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv); | ||||||
3291 | assert(Mask && "Missing call preserved mask for calling convention"); | ||||||
3292 | Ops.push_back(DAG.getRegisterMask(Mask)); | ||||||
3293 | |||||||
3294 | if (InFlag.getNode()) | ||||||
3295 | Ops.push_back(InFlag); | ||||||
3296 | |||||||
3297 | SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); | ||||||
3298 | |||||||
3299 | // If we're doing a tail call, use a TC_RETURN here rather than an | ||||||
3300 | // actual call instruction. | ||||||
3301 | if (IsTailCall) { | ||||||
3302 | MFI.setHasTailCall(); | ||||||
3303 | return DAG.getNode(AMDGPUISD::TC_RETURN, DL, NodeTys, Ops); | ||||||
3304 | } | ||||||
3305 | |||||||
3306 | // Returns a chain and a flag for retval copy to use. | ||||||
3307 | SDValue Call = DAG.getNode(AMDGPUISD::CALL, DL, NodeTys, Ops); | ||||||
3308 | Chain = Call.getValue(0); | ||||||
3309 | InFlag = Call.getValue(1); | ||||||
3310 | |||||||
3311 | uint64_t CalleePopBytes = NumBytes; | ||||||
3312 | Chain = DAG.getCALLSEQ_END(Chain, DAG.getTargetConstant(0, DL, MVT::i32), | ||||||
3313 | DAG.getTargetConstant(CalleePopBytes, DL, MVT::i32), | ||||||
3314 | InFlag, DL); | ||||||
3315 | if (!Ins.empty()) | ||||||
3316 | InFlag = Chain.getValue(1); | ||||||
3317 | |||||||
3318 | // Handle result values, copying them out of physregs into vregs that we | ||||||
3319 | // return. | ||||||
3320 | return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG, | ||||||
3321 | InVals, IsThisReturn, | ||||||
3322 | IsThisReturn ? OutVals[0] : SDValue()); | ||||||
3323 | } | ||||||
3324 | |||||||
3325 | // This is identical to the default implementation in ExpandDYNAMIC_STACKALLOC, | ||||||
3326 | // except for applying the wave size scale to the increment amount. | ||||||
3327 | SDValue SITargetLowering::lowerDYNAMIC_STACKALLOCImpl( | ||||||
3328 | SDValue Op, SelectionDAG &DAG) const { | ||||||
3329 | const MachineFunction &MF = DAG.getMachineFunction(); | ||||||
3330 | const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); | ||||||
3331 | |||||||
3332 | SDLoc dl(Op); | ||||||
3333 | EVT VT = Op.getValueType(); | ||||||
3334 | SDValue Tmp1 = Op; | ||||||
3335 | SDValue Tmp2 = Op.getValue(1); | ||||||
3336 | SDValue Tmp3 = Op.getOperand(2); | ||||||
3337 | SDValue Chain = Tmp1.getOperand(0); | ||||||
3338 | |||||||
3339 | Register SPReg = Info->getStackPtrOffsetReg(); | ||||||
3340 | |||||||
3341 | // Chain the dynamic stack allocation so that it doesn't modify the stack | ||||||
3342 | // pointer when other instructions are using the stack. | ||||||
3343 | Chain = DAG.getCALLSEQ_START(Chain, 0, 0, dl); | ||||||
3344 | |||||||
3345 | SDValue Size = Tmp2.getOperand(1); | ||||||
3346 | SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT); | ||||||
3347 | Chain = SP.getValue(1); | ||||||
3348 | MaybeAlign Alignment = cast<ConstantSDNode>(Tmp3)->getMaybeAlignValue(); | ||||||
3349 | const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); | ||||||
3350 | const TargetFrameLowering *TFL = ST.getFrameLowering(); | ||||||
3351 | unsigned Opc = | ||||||
3352 | TFL->getStackGrowthDirection() == TargetFrameLowering::StackGrowsUp ? | ||||||
3353 | ISD::ADD : ISD::SUB; | ||||||
3354 | |||||||
3355 | SDValue ScaledSize = DAG.getNode( | ||||||
3356 | ISD::SHL, dl, VT, Size, | ||||||
3357 | DAG.getConstant(ST.getWavefrontSizeLog2(), dl, MVT::i32)); | ||||||
3358 | |||||||
3359 | Align StackAlign = TFL->getStackAlign(); | ||||||
3360 | Tmp1 = DAG.getNode(Opc, dl, VT, SP, ScaledSize); // Value | ||||||
3361 | if (Alignment && *Alignment > StackAlign) { | ||||||
3362 | Tmp1 = DAG.getNode(ISD::AND, dl, VT, Tmp1, | ||||||
3363 | DAG.getConstant(-(uint64_t)Alignment->value() | ||||||
3364 | << ST.getWavefrontSizeLog2(), | ||||||
3365 | dl, VT)); | ||||||
3366 | } | ||||||
3367 | |||||||
3368 | Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1); // Output chain | ||||||
3369 | Tmp2 = DAG.getCALLSEQ_END( | ||||||
3370 | Chain, DAG.getIntPtrConstant(0, dl, true), | ||||||
3371 | DAG.getIntPtrConstant(0, dl, true), SDValue(), dl); | ||||||
3372 | |||||||
3373 | return DAG.getMergeValues({Tmp1, Tmp2}, dl); | ||||||
3374 | } | ||||||
3375 | |||||||
3376 | SDValue SITargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, | ||||||
3377 | SelectionDAG &DAG) const { | ||||||
3378 | // We only handle constant sizes here to allow non-entry block, static sized | ||||||
3379 | // allocas. A truly dynamic value is more difficult to support because we | ||||||
3380 | // don't know if the size value is uniform or not. If the size isn't uniform, | ||||||
3381 | // we would need to do a wave reduction to get the maximum size to know how | ||||||
3382 | // much to increment the uniform stack pointer. | ||||||
3383 | SDValue Size = Op.getOperand(1); | ||||||
3384 | if (isa<ConstantSDNode>(Size)) | ||||||
3385 | return lowerDYNAMIC_STACKALLOCImpl(Op, DAG); // Use "generic" expansion. | ||||||
3386 | |||||||
3387 | return AMDGPUTargetLowering::LowerDYNAMIC_STACKALLOC(Op, DAG); | ||||||
3388 | } | ||||||
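// [Editor's sketch — not part of this file] The wave-size scaling above is
// the whole difference from the generic expansion: scratch is allocated
// per-wave, so a per-lane byte count and any extra alignment must both be
// scaled by the wave size before bumping the wave's stack pointer. A minimal
// standalone model (names illustrative; AlignBytes must be a power of two):
#include <cstdint>
static uint64_t bumpWaveSP(uint64_t SP, uint64_t PerLaneSize,
                           unsigned WaveSizeLog2, uint64_t AlignBytes) {
  uint64_t ScaledSize = PerLaneSize << WaveSizeLog2; // bytes for all lanes
  uint64_t NewSP = SP + ScaledSize;                  // SI's stack grows up
  if (AlignBytes > 1)                                // scaled align mask, as
    NewSP &= ~((AlignBytes << WaveSizeLog2) - 1);    // in the AND node above
  return NewSP;
}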
3389 | |||||||
3390 | Register SITargetLowering::getRegisterByName(const char* RegName, LLT VT, | ||||||
3391 | const MachineFunction &MF) const { | ||||||
3392 | Register Reg = StringSwitch<Register>(RegName) | ||||||
3393 | .Case("m0", AMDGPU::M0) | ||||||
3394 | .Case("exec", AMDGPU::EXEC) | ||||||
3395 | .Case("exec_lo", AMDGPU::EXEC_LO) | ||||||
3396 | .Case("exec_hi", AMDGPU::EXEC_HI) | ||||||
3397 | .Case("flat_scratch", AMDGPU::FLAT_SCR) | ||||||
3398 | .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO) | ||||||
3399 | .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI) | ||||||
3400 | .Default(Register()); | ||||||
3401 | |||||||
3402 | if (Reg == AMDGPU::NoRegister) { | ||||||
3403 | report_fatal_error(Twine("invalid register name \"" | ||||||
3404 | + StringRef(RegName) + "\".")); | ||||||
3405 | |||||||
3406 | } | ||||||
3407 | |||||||
3408 | if (!Subtarget->hasFlatScrRegister() && | ||||||
3409 | Subtarget->getRegisterInfo()->regsOverlap(Reg, AMDGPU::FLAT_SCR)) { | ||||||
3410 | report_fatal_error(Twine("invalid register \"" | ||||||
3411 | + StringRef(RegName) + "\" for subtarget.")); | ||||||
3412 | } | ||||||
3413 | |||||||
3414 | switch (Reg) { | ||||||
3415 | case AMDGPU::M0: | ||||||
3416 | case AMDGPU::EXEC_LO: | ||||||
3417 | case AMDGPU::EXEC_HI: | ||||||
3418 | case AMDGPU::FLAT_SCR_LO: | ||||||
3419 | case AMDGPU::FLAT_SCR_HI: | ||||||
3420 | if (VT.getSizeInBits() == 32) | ||||||
3421 | return Reg; | ||||||
3422 | break; | ||||||
3423 | case AMDGPU::EXEC: | ||||||
3424 | case AMDGPU::FLAT_SCR: | ||||||
3425 | if (VT.getSizeInBits() == 64) | ||||||
3426 | return Reg; | ||||||
3427 | break; | ||||||
3428 | default: | ||||||
3429 | llvm_unreachable("missing register type checking"); | ||||||
3430 | } | ||||||
3431 | |||||||
3432 | report_fatal_error(Twine("invalid type for register \"" | ||||||
3433 | + StringRef(RegName) + "\".")); | ||||||
3434 | } | ||||||
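// [Editor's illustration — assumed context, not in this file] This hook backs
// the llvm.read_register / llvm.write_register intrinsics, which carry the
// register name as metadata. Emitting such a read with the C++ API might look
// like this:
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
static llvm::Value *emitReadM0(llvm::IRBuilder<> &Builder, llvm::Module &M) {
  llvm::LLVMContext &Ctx = M.getContext();
  llvm::Value *RegName = llvm::MetadataAsValue::get(
      Ctx, llvm::MDNode::get(Ctx, {llvm::MDString::get(Ctx, "m0")}));
  llvm::Function *ReadReg = llvm::Intrinsic::getDeclaration(
      &M, llvm::Intrinsic::read_register, {Builder.getInt32Ty()});
  // The named register is resolved by getRegisterByName during lowering.
  return Builder.CreateCall(ReadReg, {RegName});
}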
3435 | |||||||
3436 | // If kill is not the last instruction, split the block so kill is always a | ||||||
3437 | // proper terminator. | ||||||
3438 | MachineBasicBlock * | ||||||
3439 | SITargetLowering::splitKillBlock(MachineInstr &MI, | ||||||
3440 | MachineBasicBlock *BB) const { | ||||||
3441 | MachineBasicBlock *SplitBB = BB->splitAt(MI, false /*UpdateLiveIns*/); | ||||||
3442 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); | ||||||
3443 | MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode())); | ||||||
3444 | return SplitBB; | ||||||
3445 | } | ||||||
3446 | |||||||
3447 | // Split block \p MBB at \p MI, so as to insert a loop. If \p InstInLoop is true, | ||||||
3448 | // \p MI will be the only instruction in the loop body block. Otherwise, it will | ||||||
3449 | // be the first instruction in the remainder block. | ||||||
3450 | // | ||||||
3451 | /// \returns { LoopBody, Remainder } | ||||||
3452 | static std::pair<MachineBasicBlock *, MachineBasicBlock *> | ||||||
3453 | splitBlockForLoop(MachineInstr &MI, MachineBasicBlock &MBB, bool InstInLoop) { | ||||||
3454 | MachineFunction *MF = MBB.getParent(); | ||||||
3455 | MachineBasicBlock::iterator I(&MI); | ||||||
3456 | |||||||
3457 | // To insert the loop we need to split the block. Move everything after this | ||||||
3458 | // point to a new block, and insert a new empty block between the two. | ||||||
3459 | MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock(); | ||||||
3460 | MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock(); | ||||||
3461 | MachineFunction::iterator MBBI(MBB); | ||||||
3462 | ++MBBI; | ||||||
3463 | |||||||
3464 | MF->insert(MBBI, LoopBB); | ||||||
3465 | MF->insert(MBBI, RemainderBB); | ||||||
3466 | |||||||
3467 | LoopBB->addSuccessor(LoopBB); | ||||||
3468 | LoopBB->addSuccessor(RemainderBB); | ||||||
3469 | |||||||
3470 | // Move the rest of the block into a new block. | ||||||
3471 | RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB); | ||||||
3472 | |||||||
3473 | if (InstInLoop) { | ||||||
3474 | auto Next = std::next(I); | ||||||
3475 | |||||||
3476 | // Move instruction to loop body. | ||||||
3477 | LoopBB->splice(LoopBB->begin(), &MBB, I, Next); | ||||||
3478 | |||||||
3479 | // Move the rest of the block. | ||||||
3480 | RemainderBB->splice(RemainderBB->begin(), &MBB, Next, MBB.end()); | ||||||
3481 | } else { | ||||||
3482 | RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end()); | ||||||
3483 | } | ||||||
3484 | |||||||
3485 | MBB.addSuccessor(LoopBB); | ||||||
3486 | |||||||
3487 | return std::make_pair(LoopBB, RemainderBB); | ||||||
3488 | } | ||||||
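// [Editor's note] After splitBlockForLoop the CFG is
//   MBB -> LoopBB -> RemainderBB, plus a LoopBB -> LoopBB self-edge
// (the caller is expected to emit the actual backwards branch), with \p MI
// either the sole instruction of LoopBB (InstInLoop) or the first
// instruction of RemainderBB.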
3489 | |||||||
3490 | /// Insert \p MI into a BUNDLE with an S_WAITCNT 0 immediately following it. | ||||||
3491 | void SITargetLowering::bundleInstWithWaitcnt(MachineInstr &MI) const { | ||||||
3492 | MachineBasicBlock *MBB = MI.getParent(); | ||||||
3493 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); | ||||||
3494 | auto I = MI.getIterator(); | ||||||
3495 | auto E = std::next(I); | ||||||
3496 | |||||||
3497 | BuildMI(*MBB, E, MI.getDebugLoc(), TII->get(AMDGPU::S_WAITCNT)) | ||||||
3498 | .addImm(0); | ||||||
3499 | |||||||
3500 | MIBundleBuilder Bundler(*MBB, I, E); | ||||||
3501 | finalizeBundle(*MBB, Bundler.begin()); | ||||||
3502 | } | ||||||
3503 | |||||||
3504 | MachineBasicBlock * | ||||||
3505 | SITargetLowering::emitGWSMemViolTestLoop(MachineInstr &MI, | ||||||
3506 | MachineBasicBlock *BB) const { | ||||||
3507 | const DebugLoc &DL = MI.getDebugLoc(); | ||||||
3508 | |||||||
3509 | MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); | ||||||
3510 | |||||||
3511 | MachineBasicBlock *LoopBB; | ||||||
3512 | MachineBasicBlock *RemainderBB; | ||||||
3513 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); | ||||||
3514 | |||||||
3515 | // Apparently kill flags are only valid if the def is in the same block? | ||||||
3516 | if (MachineOperand *Src = TII->getNamedOperand(MI, AMDGPU::OpName::data0)) | ||||||
3517 | Src->setIsKill(false); | ||||||
3518 | |||||||
3519 | std::tie(LoopBB, RemainderBB) = splitBlockForLoop(MI, *BB, true); | ||||||
3520 | |||||||
3521 | MachineBasicBlock::iterator I = LoopBB->end(); | ||||||
3522 | |||||||
3523 | const unsigned EncodedReg = AMDGPU::Hwreg::encodeHwreg( | ||||||
3524 | AMDGPU::Hwreg::ID_TRAPSTS, AMDGPU::Hwreg::OFFSET_MEM_VIOL, 1); | ||||||
3525 | |||||||
3526 | // Clear TRAP_STS.MEM_VIOL | ||||||
3527 | BuildMI(*LoopBB, LoopBB->begin(), DL, TII->get(AMDGPU::S_SETREG_IMM32_B32)) | ||||||
3528 | .addImm(0) | ||||||
3529 | .addImm(EncodedReg); | ||||||
3530 | |||||||
3531 | bundleInstWithWaitcnt(MI); | ||||||
3532 | |||||||
3533 | Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); | ||||||
3534 | |||||||
3535 | // Load and check TRAP_STS.MEM_VIOL | ||||||
3536 | BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_GETREG_B32), Reg) | ||||||
3537 | .addImm(EncodedReg); | ||||||
3538 | |||||||
3539 | // FIXME: Do we need to use an isel pseudo that may clobber scc? | ||||||
3540 | BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_CMP_LG_U32)) | ||||||
3541 | .addReg(Reg, RegState::Kill) | ||||||
3542 | .addImm(0); | ||||||
3543 | BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_SCC1)) | ||||||
3544 | .addMBB(LoopBB); | ||||||
3545 | |||||||
3546 | return RemainderBB; | ||||||
3547 | } | ||||||
3548 | |||||||
3549 | // Do a v_movrels_b32 or v_movreld_b32 for each unique value of \p IdxReg in the | ||||||
3550 | // wavefront. If the value is uniform and just happens to be in a VGPR, this | ||||||
3551 | // will only do one iteration. In the worst case, this will loop 64 times. | ||||||
3552 | // | ||||||
3553 | // TODO: Just use v_readlane_b32 if we know the VGPR has a uniform value. | ||||||
3554 | static MachineBasicBlock::iterator | ||||||
3555 | emitLoadM0FromVGPRLoop(const SIInstrInfo *TII, MachineRegisterInfo &MRI, | ||||||
3556 | MachineBasicBlock &OrigBB, MachineBasicBlock &LoopBB, | ||||||
3557 | const DebugLoc &DL, const MachineOperand &Idx, | ||||||
3558 | unsigned InitReg, unsigned ResultReg, unsigned PhiReg, | ||||||
3559 | unsigned InitSaveExecReg, int Offset, bool UseGPRIdxMode, | ||||||
3560 | Register &SGPRIdxReg) { | ||||||
3561 | |||||||
3562 | MachineFunction *MF = OrigBB.getParent(); | ||||||
3563 | const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); | ||||||
3564 | const SIRegisterInfo *TRI = ST.getRegisterInfo(); | ||||||
3565 | MachineBasicBlock::iterator I = LoopBB.begin(); | ||||||
3566 | |||||||
3567 | const TargetRegisterClass *BoolRC = TRI->getBoolRC(); | ||||||
3568 | Register PhiExec = MRI.createVirtualRegister(BoolRC); | ||||||
3569 | Register NewExec = MRI.createVirtualRegister(BoolRC); | ||||||
3570 | Register CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); | ||||||
3571 | Register CondReg = MRI.createVirtualRegister(BoolRC); | ||||||
3572 | |||||||
3573 | BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg) | ||||||
3574 | .addReg(InitReg) | ||||||
3575 | .addMBB(&OrigBB) | ||||||
3576 | .addReg(ResultReg) | ||||||
3577 | .addMBB(&LoopBB); | ||||||
3578 | |||||||
3579 | BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiExec) | ||||||
3580 | .addReg(InitSaveExecReg) | ||||||
3581 | .addMBB(&OrigBB) | ||||||
3582 | .addReg(NewExec) | ||||||
3583 | .addMBB(&LoopBB); | ||||||
3584 | |||||||
3585 | // Read the next variant <- also loop target. | ||||||
3586 | BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentIdxReg) | ||||||
3587 | .addReg(Idx.getReg(), getUndefRegState(Idx.isUndef())); | ||||||
3588 | |||||||
3589 | // Compare the just read M0 value to all possible Idx values. | ||||||
3590 | BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e64), CondReg) | ||||||
3591 | .addReg(CurrentIdxReg) | ||||||
3592 | .addReg(Idx.getReg(), 0, Idx.getSubReg()); | ||||||
3593 | |||||||
3594 | // Update EXEC, save the original EXEC value to VCC. | ||||||
3595 | BuildMI(LoopBB, I, DL, TII->get(ST.isWave32() ? AMDGPU::S_AND_SAVEEXEC_B32 | ||||||
3596 | : AMDGPU::S_AND_SAVEEXEC_B64), | ||||||
3597 | NewExec) | ||||||
3598 | .addReg(CondReg, RegState::Kill); | ||||||
3599 | |||||||
3600 | MRI.setSimpleHint(NewExec, CondReg); | ||||||
3601 | |||||||
3602 | if (UseGPRIdxMode) { | ||||||
3603 | if (Offset == 0) { | ||||||
3604 | SGPRIdxReg = CurrentIdxReg; | ||||||
3605 | } else { | ||||||
3606 | SGPRIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); | ||||||
3607 | BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), SGPRIdxReg) | ||||||
3608 | .addReg(CurrentIdxReg, RegState::Kill) | ||||||
3609 | .addImm(Offset); | ||||||
3610 | } | ||||||
3611 | } else { | ||||||
3612 | // Move index from VCC into M0 | ||||||
3613 | if (Offset == 0) { | ||||||
3614 | BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0) | ||||||
3615 | .addReg(CurrentIdxReg, RegState::Kill); | ||||||
3616 | } else { | ||||||
3617 | BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0) | ||||||
3618 | .addReg(CurrentIdxReg, RegState::Kill) | ||||||
3619 | .addImm(Offset); | ||||||
3620 | } | ||||||
3621 | } | ||||||
3622 | |||||||
3623 | // Update EXEC, switch all done bits to 0 and all todo bits to 1. | ||||||
3624 | unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; | ||||||
3625 | MachineInstr *InsertPt = | ||||||
3626 | BuildMI(LoopBB, I, DL, TII->get(ST.isWave32() ? AMDGPU::S_XOR_B32_term | ||||||
3627 | : AMDGPU::S_XOR_B64_term), Exec) | ||||||
3628 | .addReg(Exec) | ||||||
3629 | .addReg(NewExec); | ||||||
3630 | |||||||
3631 | // XXX - s_xor_b64 sets scc to 1 if the result is nonzero, so can we use | ||||||
3632 | // s_cbranch_scc0? | ||||||
3633 | |||||||
3634 | // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover. | ||||||
3635 | BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ)) | ||||||
3636 | .addMBB(&LoopBB); | ||||||
3637 | |||||||
3638 | return InsertPt->getIterator(); | ||||||
3639 | } | ||||||
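// [Editor's sketch — not part of this file] A scalar model of the waterfall
// loop emitted above. Each trip reads the index of the first still-active
// lane, runs every lane sharing that index under a restricted exec mask, and
// then clears those lanes, so the trip count is the number of distinct index
// values (at most the wave size):
#include <cstdint>
static void waterfall(const uint32_t Idx[64], uint64_t Exec,
                      void (*Body)(uint32_t UniformIdx, uint64_t LaneMask)) {
  while (Exec) {                                      // s_cbranch_execnz
    unsigned FirstLane = __builtin_ctzll(Exec);       // v_readfirstlane_b32
    uint32_t Cur = Idx[FirstLane];
    uint64_t Match = 0;
    for (unsigned L = 0; L != 64; ++L)                // v_cmp_eq_u32
      if (((Exec >> L) & 1) && Idx[L] == Cur)
        Match |= 1ull << L;
    Body(Cur, Match);     // body runs with exec = Match (s_and_saveexec)
    Exec ^= Match;        // s_xor_b64 exec: retire the lanes just handled
  }
}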
3640 | |||||||
3641 | // This has slightly sub-optimal regalloc when the source vector is killed by | ||||||
3642 | // the read. The register allocator does not understand that the kill is | ||||||
3643 | // per-workitem, so the vector is kept alive for the whole loop; we thus fail | ||||||
3644 | // to re-use a subregister from it and use 1 more VGPR than necessary. This | ||||||
3645 | // extra VGPR was avoided when this was expanded after register allocation. | ||||||
3646 | static MachineBasicBlock::iterator | ||||||
3647 | loadM0FromVGPR(const SIInstrInfo *TII, MachineBasicBlock &MBB, MachineInstr &MI, | ||||||
3648 | unsigned InitResultReg, unsigned PhiReg, int Offset, | ||||||
3649 | bool UseGPRIdxMode, Register &SGPRIdxReg) { | ||||||
3650 | MachineFunction *MF = MBB.getParent(); | ||||||
3651 | const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); | ||||||
3652 | const SIRegisterInfo *TRI = ST.getRegisterInfo(); | ||||||
3653 | MachineRegisterInfo &MRI = MF->getRegInfo(); | ||||||
3654 | const DebugLoc &DL = MI.getDebugLoc(); | ||||||
3655 | MachineBasicBlock::iterator I(&MI); | ||||||
3656 | |||||||
3657 | const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); | ||||||
3658 | Register DstReg = MI.getOperand(0).getReg(); | ||||||
3659 | Register SaveExec = MRI.createVirtualRegister(BoolXExecRC); | ||||||
3660 | Register TmpExec = MRI.createVirtualRegister(BoolXExecRC); | ||||||
3661 | unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; | ||||||
3662 | unsigned MovExecOpc = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64; | ||||||
3663 | |||||||
3664 | BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), TmpExec); | ||||||
3665 | |||||||
3666 | // Save the EXEC mask | ||||||
3667 | BuildMI(MBB, I, DL, TII->get(MovExecOpc), SaveExec) | ||||||
3668 | .addReg(Exec); | ||||||
3669 | |||||||
3670 | MachineBasicBlock *LoopBB; | ||||||
3671 | MachineBasicBlock *RemainderBB; | ||||||
3672 | std::tie(LoopBB, RemainderBB) = splitBlockForLoop(MI, MBB, false); | ||||||
3673 | |||||||
3674 | const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); | ||||||
3675 | |||||||
3676 | auto InsPt = emitLoadM0FromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, *Idx, | ||||||
3677 | InitResultReg, DstReg, PhiReg, TmpExec, | ||||||
3678 | Offset, UseGPRIdxMode, SGPRIdxReg); | ||||||
3679 | |||||||
3680 | MachineBasicBlock* LandingPad = MF->CreateMachineBasicBlock(); | ||||||
3681 | MachineFunction::iterator MBBI(LoopBB); | ||||||
3682 | ++MBBI; | ||||||
3683 | MF->insert(MBBI, LandingPad); | ||||||
3684 | LoopBB->removeSuccessor(RemainderBB); | ||||||
3685 | LandingPad->addSuccessor(RemainderBB); | ||||||
3686 | LoopBB->addSuccessor(LandingPad); | ||||||
3687 | MachineBasicBlock::iterator First = LandingPad->begin(); | ||||||
3688 | BuildMI(*LandingPad, First, DL, TII->get(MovExecOpc), Exec) | ||||||
3689 | .addReg(SaveExec); | ||||||
3690 | |||||||
3691 | return InsPt; | ||||||
3692 | } | ||||||
3693 | |||||||
3694 | // Returns subreg index, offset | ||||||
3695 | static std::pair<unsigned, int> | ||||||
3696 | computeIndirectRegAndOffset(const SIRegisterInfo &TRI, | ||||||
3697 | const TargetRegisterClass *SuperRC, | ||||||
3698 | unsigned VecReg, | ||||||
3699 | int Offset) { | ||||||
3700 | int NumElts = TRI.getRegSizeInBits(*SuperRC) / 32; | ||||||
3701 | |||||||
3702 | // Skip out of bounds offsets, or else we would end up using an undefined | ||||||
3703 | // register. | ||||||
3704 | if (Offset >= NumElts || Offset < 0) | ||||||
3705 | return std::make_pair(AMDGPU::sub0, Offset); | ||||||
3706 | |||||||
3707 | return std::make_pair(SIRegisterInfo::getSubRegFromChannel(Offset), 0); | ||||||
3708 | } | ||||||
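// [Editor's note] E.g. for a 128-bit super-register class (NumElts == 4), a
// constant offset of 2 yields {sub2, 0}; an out-of-range offset such as 7 is
// returned unchanged as {sub0, 7} instead of naming a nonexistent
// subregister.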
3709 | |||||||
3710 | static void setM0ToIndexFromSGPR(const SIInstrInfo *TII, | ||||||
3711 | MachineRegisterInfo &MRI, MachineInstr &MI, | ||||||
3712 | int Offset) { | ||||||
3713 | MachineBasicBlock *MBB = MI.getParent(); | ||||||
3714 | const DebugLoc &DL = MI.getDebugLoc(); | ||||||
3715 | MachineBasicBlock::iterator I(&MI); | ||||||
3716 | |||||||
3717 | const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); | ||||||
3718 | |||||||
3719 | assert(Idx->getReg() != AMDGPU::NoRegister); | ||||||
3720 | |||||||
3721 | if (Offset == 0) { | ||||||
3722 | BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0).add(*Idx); | ||||||
3723 | } else { | ||||||
3724 | BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0) | ||||||
3725 | .add(*Idx) | ||||||
3726 | .addImm(Offset); | ||||||
3727 | } | ||||||
3728 | } | ||||||
3729 | |||||||
3730 | static Register getIndirectSGPRIdx(const SIInstrInfo *TII, | ||||||
3731 | MachineRegisterInfo &MRI, MachineInstr &MI, | ||||||
3732 | int Offset) { | ||||||
3733 | MachineBasicBlock *MBB = MI.getParent(); | ||||||
3734 | const DebugLoc &DL = MI.getDebugLoc(); | ||||||
3735 | MachineBasicBlock::iterator I(&MI); | ||||||
3736 | |||||||
3737 | const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); | ||||||
3738 | |||||||
3739 | if (Offset == 0) | ||||||
3740 | return Idx->getReg(); | ||||||
3741 | |||||||
3742 | Register Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); | ||||||
3743 | BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp) | ||||||
3744 | .add(*Idx) | ||||||
3745 | .addImm(Offset); | ||||||
3746 | return Tmp; | ||||||
3747 | } | ||||||
3748 | |||||||
3749 | static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI, | ||||||
3750 | MachineBasicBlock &MBB, | ||||||
3751 | const GCNSubtarget &ST) { | ||||||
3752 | const SIInstrInfo *TII = ST.getInstrInfo(); | ||||||
3753 | const SIRegisterInfo &TRI = TII->getRegisterInfo(); | ||||||
3754 | MachineFunction *MF = MBB.getParent(); | ||||||
3755 | MachineRegisterInfo &MRI = MF->getRegInfo(); | ||||||
3756 | |||||||
3757 | Register Dst = MI.getOperand(0).getReg(); | ||||||
3758 | const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); | ||||||
3759 | Register SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg(); | ||||||
3760 | int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm(); | ||||||
3761 | |||||||
3762 | const TargetRegisterClass *VecRC = MRI.getRegClass(SrcReg); | ||||||
3763 | const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg()); | ||||||
3764 | |||||||
3765 | unsigned SubReg; | ||||||
3766 | std::tie(SubReg, Offset) | ||||||
3767 | = computeIndirectRegAndOffset(TRI, VecRC, SrcReg, Offset); | ||||||
3768 | |||||||
3769 | const bool UseGPRIdxMode = ST.useVGPRIndexMode(); | ||||||
3770 | |||||||
3771 | // Check for a SGPR index. | ||||||
3772 | if (TII->getRegisterInfo().isSGPRClass(IdxRC)) { | ||||||
3773 | MachineBasicBlock::iterator I(&MI); | ||||||
3774 | const DebugLoc &DL = MI.getDebugLoc(); | ||||||
3775 | |||||||
3776 | if (UseGPRIdxMode) { | ||||||
3777 | // TODO: Look at the uses to avoid the copy. This may require rescheduling | ||||||
3778 | // to avoid interfering with other uses, so probably requires a new | ||||||
3779 | // optimization pass. | ||||||
3780 | Register Idx = getIndirectSGPRIdx(TII, MRI, MI, Offset); | ||||||
3781 | |||||||
3782 | const MCInstrDesc &GPRIDXDesc = | ||||||
3783 | TII->getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), true); | ||||||
3784 | BuildMI(MBB, I, DL, GPRIDXDesc, Dst) | ||||||
3785 | .addReg(SrcReg) | ||||||
3786 | .addReg(Idx) | ||||||
3787 | .addImm(SubReg); | ||||||
3788 | } else { | ||||||
3789 | setM0ToIndexFromSGPR(TII, MRI, MI, Offset); | ||||||
3790 | |||||||
3791 | BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst) | ||||||
3792 | .addReg(SrcReg, 0, SubReg) | ||||||
3793 | .addReg(SrcReg, RegState::Implicit); | ||||||
3794 | } | ||||||
3795 | |||||||
3796 | MI.eraseFromParent(); | ||||||
3797 | |||||||
3798 | return &MBB; | ||||||
3799 | } | ||||||
3800 | |||||||
3801 | // Control flow needs to be inserted if indexing with a VGPR. | ||||||
3802 | const DebugLoc &DL = MI.getDebugLoc(); | ||||||
3803 | MachineBasicBlock::iterator I(&MI); | ||||||
3804 | |||||||
3805 | Register PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); | ||||||
3806 | Register InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); | ||||||
3807 | |||||||
3808 | BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), InitReg); | ||||||
3809 | |||||||
3810 | Register SGPRIdxReg; | ||||||
3811 | auto InsPt = loadM0FromVGPR(TII, MBB, MI, InitReg, PhiReg, Offset, | ||||||
3812 | UseGPRIdxMode, SGPRIdxReg); | ||||||
3813 | |||||||
3814 | MachineBasicBlock *LoopBB = InsPt->getParent(); | ||||||
3815 | |||||||
3816 | if (UseGPRIdxMode) { | ||||||
3817 | const MCInstrDesc &GPRIDXDesc = | ||||||
3818 | TII->getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), true); | ||||||
3819 | |||||||
3820 | BuildMI(*LoopBB, InsPt, DL, GPRIDXDesc, Dst) | ||||||
3821 | .addReg(SrcReg) | ||||||
3822 | .addReg(SGPRIdxReg) | ||||||
3823 | .addImm(SubReg); | ||||||
3824 | } else { | ||||||
3825 | BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst) | ||||||
3826 | .addReg(SrcReg, 0, SubReg) | ||||||
3827 | .addReg(SrcReg, RegState::Implicit); | ||||||
3828 | } | ||||||
3829 | |||||||
3830 | MI.eraseFromParent(); | ||||||
3831 | |||||||
3832 | return LoopBB; | ||||||
3833 | } | ||||||
3834 | |||||||
3835 | static MachineBasicBlock *emitIndirectDst(MachineInstr &MI, | ||||||
3836 | MachineBasicBlock &MBB, | ||||||
3837 | const GCNSubtarget &ST) { | ||||||
3838 | const SIInstrInfo *TII = ST.getInstrInfo(); | ||||||
3839 | const SIRegisterInfo &TRI = TII->getRegisterInfo(); | ||||||
3840 | MachineFunction *MF = MBB.getParent(); | ||||||
3841 | MachineRegisterInfo &MRI = MF->getRegInfo(); | ||||||
3842 | |||||||
3843 | Register Dst = MI.getOperand(0).getReg(); | ||||||
3844 | const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src); | ||||||
3845 | const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); | ||||||
3846 | const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val); | ||||||
3847 | int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm(); | ||||||
3848 | const TargetRegisterClass *VecRC = MRI.getRegClass(SrcVec->getReg()); | ||||||
3849 | const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg()); | ||||||
3850 | |||||||
3851 | // This can be an immediate, but will be folded later. | ||||||
3852 | assert(Val->getReg()); | ||||||
3853 | |||||||
3854 | unsigned SubReg; | ||||||
3855 | std::tie(SubReg, Offset) = computeIndirectRegAndOffset(TRI, VecRC, | ||||||
3856 | SrcVec->getReg(), | ||||||
3857 | Offset); | ||||||
3858 | const bool UseGPRIdxMode = ST.useVGPRIndexMode(); | ||||||
3859 | |||||||
3860 | if (Idx->getReg() == AMDGPU::NoRegister) { | ||||||
3861 | MachineBasicBlock::iterator I(&MI); | ||||||
3862 | const DebugLoc &DL = MI.getDebugLoc(); | ||||||
3863 | |||||||
3864 | assert(Offset == 0); | ||||||
3865 | |||||||
3866 | BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dst) | ||||||
3867 | .add(*SrcVec) | ||||||
3868 | .add(*Val) | ||||||
3869 | .addImm(SubReg); | ||||||
3870 | |||||||
3871 | MI.eraseFromParent(); | ||||||
3872 | return &MBB; | ||||||
3873 | } | ||||||
3874 | |||||||
3875 | // Check for a SGPR index. | ||||||
3876 | if (TII->getRegisterInfo().isSGPRClass(IdxRC)) { | ||||||
3877 | MachineBasicBlock::iterator I(&MI); | ||||||
3878 | const DebugLoc &DL = MI.getDebugLoc(); | ||||||
3879 | |||||||
3880 | if (UseGPRIdxMode) { | ||||||
3881 | Register Idx = getIndirectSGPRIdx(TII, MRI, MI, Offset); | ||||||
3882 | |||||||
3883 | const MCInstrDesc &GPRIDXDesc = | ||||||
3884 | TII->getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), false); | ||||||
3885 | BuildMI(MBB, I, DL, GPRIDXDesc, Dst) | ||||||
3886 | .addReg(SrcVec->getReg()) | ||||||
3887 | .add(*Val) | ||||||
3888 | .addReg(Idx) | ||||||
3889 | .addImm(SubReg); | ||||||
3890 | } else { | ||||||
3891 | setM0ToIndexFromSGPR(TII, MRI, MI, Offset); | ||||||
3892 | |||||||
3893 | const MCInstrDesc &MovRelDesc = TII->getIndirectRegWriteMovRelPseudo( | ||||||
3894 | TRI.getRegSizeInBits(*VecRC), 32, false); | ||||||
3895 | BuildMI(MBB, I, DL, MovRelDesc, Dst) | ||||||
3896 | .addReg(SrcVec->getReg()) | ||||||
3897 | .add(*Val) | ||||||
3898 | .addImm(SubReg); | ||||||
3899 | } | ||||||
3900 | MI.eraseFromParent(); | ||||||
3901 | return &MBB; | ||||||
3902 | } | ||||||
3903 | |||||||
3904 | // Control flow needs to be inserted if indexing with a VGPR. | ||||||
3905 | if (Val->isReg()) | ||||||
3906 | MRI.clearKillFlags(Val->getReg()); | ||||||
3907 | |||||||
3908 | const DebugLoc &DL = MI.getDebugLoc(); | ||||||
3909 | |||||||
3910 | Register PhiReg = MRI.createVirtualRegister(VecRC); | ||||||
3911 | |||||||
3912 | Register SGPRIdxReg; | ||||||
3913 | auto InsPt = loadM0FromVGPR(TII, MBB, MI, SrcVec->getReg(), PhiReg, Offset, | ||||||
3914 | UseGPRIdxMode, SGPRIdxReg); | ||||||
3915 | MachineBasicBlock *LoopBB = InsPt->getParent(); | ||||||
3916 | |||||||
3917 | if (UseGPRIdxMode) { | ||||||
3918 | const MCInstrDesc &GPRIDXDesc = | ||||||
3919 | TII->getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), false); | ||||||
3920 | |||||||
3921 | BuildMI(*LoopBB, InsPt, DL, GPRIDXDesc, Dst) | ||||||
3922 | .addReg(PhiReg) | ||||||
3923 | .add(*Val) | ||||||
3924 | .addReg(SGPRIdxReg) | ||||||
3925 | .addImm(AMDGPU::sub0); | ||||||
3926 | } else { | ||||||
3927 | const MCInstrDesc &MovRelDesc = TII->getIndirectRegWriteMovRelPseudo( | ||||||
3928 | TRI.getRegSizeInBits(*VecRC), 32, false); | ||||||
3929 | BuildMI(*LoopBB, InsPt, DL, MovRelDesc, Dst) | ||||||
3930 | .addReg(PhiReg) | ||||||
3931 | .add(*Val) | ||||||
3932 | .addImm(AMDGPU::sub0); | ||||||
3933 | } | ||||||
3934 | |||||||
3935 | MI.eraseFromParent(); | ||||||
3936 | return LoopBB; | ||||||
3937 | } | ||||||
3938 | |||||||
3939 | MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter( | ||||||
3940 | MachineInstr &MI, MachineBasicBlock *BB) const { | ||||||
3941 | |||||||
3942 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); | ||||||
3943 | MachineFunction *MF = BB->getParent(); | ||||||
3944 | SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>(); | ||||||
3945 | |||||||
3946 | switch (MI.getOpcode()) { | ||||||
3947 | case AMDGPU::S_UADDO_PSEUDO: | ||||||
3948 | case AMDGPU::S_USUBO_PSEUDO: { | ||||||
3949 | const DebugLoc &DL = MI.getDebugLoc(); | ||||||
3950 | MachineOperand &Dest0 = MI.getOperand(0); | ||||||
3951 | MachineOperand &Dest1 = MI.getOperand(1); | ||||||
3952 | MachineOperand &Src0 = MI.getOperand(2); | ||||||
3953 | MachineOperand &Src1 = MI.getOperand(3); | ||||||
3954 | |||||||
3955 | unsigned Opc = (MI.getOpcode() == AMDGPU::S_UADDO_PSEUDO) | ||||||
3956 | ? AMDGPU::S_ADD_I32 | ||||||
3957 | : AMDGPU::S_SUB_I32; | ||||||
3958 | BuildMI(*BB, MI, DL, TII->get(Opc), Dest0.getReg()).add(Src0).add(Src1); | ||||||
3959 | |||||||
3960 | BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CSELECT_B64), Dest1.getReg()) | ||||||
3961 | .addImm(1) | ||||||
3962 | .addImm(0); | ||||||
3963 | |||||||
3964 | MI.eraseFromParent(); | ||||||
3965 | return BB; | ||||||
3966 | } | ||||||
3967 | case AMDGPU::S_ADD_U64_PSEUDO: | ||||||
3968 | case AMDGPU::S_SUB_U64_PSEUDO: { | ||||||
3969 | MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); | ||||||
3970 | const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); | ||||||
3971 | const SIRegisterInfo *TRI = ST.getRegisterInfo(); | ||||||
3972 | const TargetRegisterClass *BoolRC = TRI->getBoolRC(); | ||||||
3973 | const DebugLoc &DL = MI.getDebugLoc(); | ||||||
3974 | |||||||
3975 | MachineOperand &Dest = MI.getOperand(0); | ||||||
3976 | MachineOperand &Src0 = MI.getOperand(1); | ||||||
3977 | MachineOperand &Src1 = MI.getOperand(2); | ||||||
3978 | |||||||
3979 | Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); | ||||||
3980 | Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); | ||||||
3981 | |||||||
3982 | MachineOperand Src0Sub0 = TII->buildExtractSubRegOrImm( | ||||||
3983 | MI, MRI, Src0, BoolRC, AMDGPU::sub0, &AMDGPU::SReg_32RegClass); | ||||||
3984 | MachineOperand Src0Sub1 = TII->buildExtractSubRegOrImm( | ||||||
3985 | MI, MRI, Src0, BoolRC, AMDGPU::sub1, &AMDGPU::SReg_32RegClass); | ||||||
3986 | |||||||
3987 | MachineOperand Src1Sub0 = TII->buildExtractSubRegOrImm( | ||||||
3988 | MI, MRI, Src1, BoolRC, AMDGPU::sub0, &AMDGPU::SReg_32RegClass); | ||||||
3989 | MachineOperand Src1Sub1 = TII->buildExtractSubRegOrImm( | ||||||
3990 | MI, MRI, Src1, BoolRC, AMDGPU::sub1, &AMDGPU::SReg_32RegClass); | ||||||
3991 | |||||||
3992 | bool IsAdd = (MI.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO); | ||||||
3993 | |||||||
3994 | unsigned LoOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32; | ||||||
3995 | unsigned HiOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32; | ||||||
3996 | BuildMI(*BB, MI, DL, TII->get(LoOpc), DestSub0).add(Src0Sub0).add(Src1Sub0); | ||||||
3997 | BuildMI(*BB, MI, DL, TII->get(HiOpc), DestSub1).add(Src0Sub1).add(Src1Sub1); | ||||||
3998 | BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg()) | ||||||
3999 | .addReg(DestSub0) | ||||||
4000 | .addImm(AMDGPU::sub0) | ||||||
4001 | .addReg(DestSub1) | ||||||
4002 | .addImm(AMDGPU::sub1); | ||||||
4003 | MI.eraseFromParent(); | ||||||
4004 | return BB; | ||||||
4005 | } | ||||||
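// [Editor's sketch — not part of this file] A standalone model of the scalar
// 64-bit expansion above: the low half produces the carry/borrow in SCC
// (s_add_u32 / s_sub_u32) and the high half consumes it (s_addc_u32 /
// s_subb_u32), with REG_SEQUENCE gluing the halves back together:
#include <cstdint>
static uint64_t add64ViaHalves(uint64_t A, uint64_t B) {
  uint32_t Lo = (uint32_t)A + (uint32_t)B;    // s_add_u32, sets SCC on carry
  uint32_t Carry = Lo < (uint32_t)A;          // SCC = unsigned overflow
  uint32_t Hi = (uint32_t)(A >> 32) + (uint32_t)(B >> 32) + Carry; // s_addc_u32
  return ((uint64_t)Hi << 32) | Lo;           // REG_SEQUENCE sub0/sub1
}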
4006 | case AMDGPU::V_ADD_U64_PSEUDO: | ||||||
4007 | case AMDGPU::V_SUB_U64_PSEUDO: { | ||||||
4008 | MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); | ||||||
4009 | const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); | ||||||
4010 | const SIRegisterInfo *TRI = ST.getRegisterInfo(); | ||||||
4011 | const DebugLoc &DL = MI.getDebugLoc(); | ||||||
4012 | |||||||
4013 | bool IsAdd = (MI.getOpcode() == AMDGPU::V_ADD_U64_PSEUDO); | ||||||
4014 | |||||||
4015 | const auto *CarryRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); | ||||||
4016 | |||||||
4017 | Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); | ||||||
4018 | Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); | ||||||
4019 | |||||||
4020 | Register CarryReg = MRI.createVirtualRegister(CarryRC); | ||||||
4021 | Register DeadCarryReg = MRI.createVirtualRegister(CarryRC); | ||||||
4022 | |||||||
4023 | MachineOperand &Dest = MI.getOperand(0); | ||||||
4024 | MachineOperand &Src0 = MI.getOperand(1); | ||||||
4025 | MachineOperand &Src1 = MI.getOperand(2); | ||||||
4026 | |||||||
4027 | const TargetRegisterClass *Src0RC = Src0.isReg() | ||||||
4028 | ? MRI.getRegClass(Src0.getReg()) | ||||||
4029 | : &AMDGPU::VReg_64RegClass; | ||||||
4030 | const TargetRegisterClass *Src1RC = Src1.isReg() | ||||||
4031 | ? MRI.getRegClass(Src1.getReg()) | ||||||
4032 | : &AMDGPU::VReg_64RegClass; | ||||||
4033 | |||||||
4034 | const TargetRegisterClass *Src0SubRC = | ||||||
4035 | TRI->getSubRegClass(Src0RC, AMDGPU::sub0); | ||||||
4036 | const TargetRegisterClass *Src1SubRC = | ||||||
4037 | TRI->getSubRegClass(Src1RC, AMDGPU::sub1); | ||||||
4038 | |||||||
4039 | MachineOperand SrcReg0Sub0 = TII->buildExtractSubRegOrImm( | ||||||
4040 | MI, MRI, Src0, Src0RC, AMDGPU::sub0, Src0SubRC); | ||||||
4041 | MachineOperand SrcReg1Sub0 = TII->buildExtractSubRegOrImm( | ||||||
4042 | MI, MRI, Src1, Src1RC, AMDGPU::sub0, Src1SubRC); | ||||||
4043 | |||||||
4044 | MachineOperand SrcReg0Sub1 = TII->buildExtractSubRegOrImm( | ||||||
4045 | MI, MRI, Src0, Src0RC, AMDGPU::sub1, Src0SubRC); | ||||||
4046 | MachineOperand SrcReg1Sub1 = TII->buildExtractSubRegOrImm( | ||||||
4047 | MI, MRI, Src1, Src1RC, AMDGPU::sub1, Src1SubRC); | ||||||
4048 | |||||||
4049 | unsigned LoOpc = IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64; | ||||||
4050 | MachineInstr *LoHalf = BuildMI(*BB, MI, DL, TII->get(LoOpc), DestSub0) | ||||||
4051 | .addReg(CarryReg, RegState::Define) | ||||||
4052 | .add(SrcReg0Sub0) | ||||||
4053 | .add(SrcReg1Sub0) | ||||||
4054 | .addImm(0); // clamp bit | ||||||
4055 | |||||||
4056 | unsigned HiOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64; | ||||||
4057 | MachineInstr *HiHalf = | ||||||
4058 | BuildMI(*BB, MI, DL, TII->get(HiOpc), DestSub1) | ||||||
4059 | .addReg(DeadCarryReg, RegState::Define | RegState::Dead) | ||||||
4060 | .add(SrcReg0Sub1) | ||||||
4061 | .add(SrcReg1Sub1) | ||||||
4062 | .addReg(CarryReg, RegState::Kill) | ||||||
4063 | .addImm(0); // clamp bit | ||||||
4064 | |||||||
4065 | BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg()) | ||||||
4066 | .addReg(DestSub0) | ||||||
4067 | .addImm(AMDGPU::sub0) | ||||||
4068 | .addReg(DestSub1) | ||||||
4069 | .addImm(AMDGPU::sub1); | ||||||
4070 | TII->legalizeOperands(*LoHalf); | ||||||
4071 | TII->legalizeOperands(*HiHalf); | ||||||
4072 | MI.eraseFromParent(); | ||||||
4073 | return BB; | ||||||
4074 | } | ||||||
4075 | case AMDGPU::S_ADD_CO_PSEUDO: | ||||||
4076 | case AMDGPU::S_SUB_CO_PSEUDO: { | ||||||
4077 | // This pseudo can be selected only from a uniform | ||||||
4078 | // add/subcarry node, so all of its VGPR operands | ||||||
4079 | // are assumed to be splat vectors. | ||||||
4080 | MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); | ||||||
4081 | const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); | ||||||
4082 | const SIRegisterInfo *TRI = ST.getRegisterInfo(); | ||||||
4083 | MachineBasicBlock::iterator MII = MI; | ||||||
4084 | const DebugLoc &DL = MI.getDebugLoc(); | ||||||
4085 | MachineOperand &Dest = MI.getOperand(0); | ||||||
4086 | MachineOperand &CarryDest = MI.getOperand(1); | ||||||
4087 | MachineOperand &Src0 = MI.getOperand(2); | ||||||
4088 | MachineOperand &Src1 = MI.getOperand(3); | ||||||
4089 | MachineOperand &Src2 = MI.getOperand(4); | ||||||
4090 | unsigned Opc = (MI.getOpcode() == AMDGPU::S_ADD_CO_PSEUDO) | ||||||
4091 | ? AMDGPU::S_ADDC_U32 | ||||||
4092 | : AMDGPU::S_SUBB_U32; | ||||||
4093 | if (Src0.isReg() && TRI->isVectorRegister(MRI, Src0.getReg())) { | ||||||
4094 | Register RegOp0 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); | ||||||
4095 | BuildMI(*BB, MII, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), RegOp0) | ||||||
4096 | .addReg(Src0.getReg()); | ||||||
4097 | Src0.setReg(RegOp0); | ||||||
4098 | } | ||||||
4099 | if (Src1.isReg() && TRI->isVectorRegister(MRI, Src1.getReg())) { | ||||||
4100 | Register RegOp1 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); | ||||||
4101 | BuildMI(*BB, MII, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), RegOp1) | ||||||
4102 | .addReg(Src1.getReg()); | ||||||
4103 | Src1.setReg(RegOp1); | ||||||
4104 | } | ||||||
4105 | Register RegOp2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); | ||||||
4106 | if (TRI->isVectorRegister(MRI, Src2.getReg())) { | ||||||
4107 | BuildMI(*BB, MII, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), RegOp2) | ||||||
4108 | .addReg(Src2.getReg()); | ||||||
4109 | Src2.setReg(RegOp2); | ||||||
4110 | } | ||||||
4111 | |||||||
4112 | const TargetRegisterClass *Src2RC = MRI.getRegClass(Src2.getReg()); | ||||||
4113 | if (TRI->getRegSizeInBits(*Src2RC) == 64) { | ||||||
4114 | if (ST.hasScalarCompareEq64()) { | ||||||
4115 | BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_CMP_LG_U64)) | ||||||
4116 | .addReg(Src2.getReg()) | ||||||
4117 | .addImm(0); | ||||||
4118 | } else { | ||||||
4119 | const TargetRegisterClass *SubRC = | ||||||
4120 | TRI->getSubRegClass(Src2RC, AMDGPU::sub0); | ||||||
4121 | MachineOperand Src2Sub0 = TII->buildExtractSubRegOrImm( | ||||||
4122 | MII, MRI, Src2, Src2RC, AMDGPU::sub0, SubRC); | ||||||
4123 | MachineOperand Src2Sub1 = TII->buildExtractSubRegOrImm( | ||||||
4124 | MII, MRI, Src2, Src2RC, AMDGPU::sub1, SubRC); | ||||||
4125 | Register Src2_32 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); | ||||||
4126 | |||||||
4127 | BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_OR_B32), Src2_32) | ||||||
4128 | .add(Src2Sub0) | ||||||
4129 | .add(Src2Sub1); | ||||||
4130 | |||||||
4131 | BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_CMP_LG_U32)) | ||||||
4132 | .addReg(Src2_32, RegState::Kill) | ||||||
4133 | .addImm(0); | ||||||
4134 | } | ||||||
4135 | } else { | ||||||
4136 | BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_CMPK_LG_U32)) | ||||||
4137 | .addReg(Src2.getReg()) | ||||||
4138 | .addImm(0); | ||||||
4139 | } | ||||||
4140 | |||||||
4141 | BuildMI(*BB, MII, DL, TII->get(Opc), Dest.getReg()).add(Src0).add(Src1); | ||||||
4142 | |||||||
4143 | BuildMI(*BB, MII, DL, TII->get(AMDGPU::COPY), CarryDest.getReg()) | ||||||
4144 | .addReg(AMDGPU::SCC); | ||||||
4145 | MI.eraseFromParent(); | ||||||
4146 | return BB; | ||||||
4147 | } | ||||||
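// [Editor's sketch — not part of this file] The fallback above materializes
// "carry-in != 0" without a 64-bit scalar compare by OR-ing the halves and
// comparing the 32-bit result with zero, which is what sets SCC for the
// s_addc/s_subb emitted afterwards:
#include <cstdint>
static bool isNonZero64(uint64_t V) {
  uint32_t Or = (uint32_t)V | (uint32_t)(V >> 32); // s_or_b32
  return Or != 0;                                  // s_cmp_lg_u32 -> SCC
}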
4148 | case AMDGPU::SI_INIT_M0: { | ||||||
4149 | BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(), | ||||||
4150 | TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0) | ||||||
4151 | .add(MI.getOperand(0)); | ||||||
4152 | MI.eraseFromParent(); | ||||||
4153 | return BB; | ||||||
4154 | } | ||||||
4155 | case AMDGPU::GET_GROUPSTATICSIZE: { | ||||||
4156 | assert(getTargetMachine().getTargetTriple().getOS() == Triple::AMDHSA || | ||||||
4157 | getTargetMachine().getTargetTriple().getOS() == Triple::AMDPAL); | ||||||
4158 | DebugLoc DL = MI.getDebugLoc(); | ||||||
4159 | BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32)) | ||||||
4160 | .add(MI.getOperand(0)) | ||||||
4161 | .addImm(MFI->getLDSSize()); | ||||||
4162 | MI.eraseFromParent(); | ||||||
4163 | return BB; | ||||||
4164 | } | ||||||
4165 | case AMDGPU::SI_INDIRECT_SRC_V1: | ||||||
4166 | case AMDGPU::SI_INDIRECT_SRC_V2: | ||||||
4167 | case AMDGPU::SI_INDIRECT_SRC_V4: | ||||||
4168 | case AMDGPU::SI_INDIRECT_SRC_V8: | ||||||
4169 | case AMDGPU::SI_INDIRECT_SRC_V16: | ||||||
4170 | case AMDGPU::SI_INDIRECT_SRC_V32: | ||||||
4171 | return emitIndirectSrc(MI, *BB, *getSubtarget()); | ||||||
4172 | case AMDGPU::SI_INDIRECT_DST_V1: | ||||||
4173 | case AMDGPU::SI_INDIRECT_DST_V2: | ||||||
4174 | case AMDGPU::SI_INDIRECT_DST_V4: | ||||||
4175 | case AMDGPU::SI_INDIRECT_DST_V8: | ||||||
4176 | case AMDGPU::SI_INDIRECT_DST_V16: | ||||||
4177 | case AMDGPU::SI_INDIRECT_DST_V32: | ||||||
4178 | return emitIndirectDst(MI, *BB, *getSubtarget()); | ||||||
4179 | case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO: | ||||||
4180 | case AMDGPU::SI_KILL_I1_PSEUDO: | ||||||
4181 | return splitKillBlock(MI, BB); | ||||||
4182 | case AMDGPU::V_CNDMASK_B64_PSEUDO: { | ||||||
4183 | MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); | ||||||
4184 | const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); | ||||||
4185 | const SIRegisterInfo *TRI = ST.getRegisterInfo(); | ||||||
4186 | |||||||
4187 | Register Dst = MI.getOperand(0).getReg(); | ||||||
4188 | Register Src0 = MI.getOperand(1).getReg(); | ||||||
4189 | Register Src1 = MI.getOperand(2).getReg(); | ||||||
4190 | const DebugLoc &DL = MI.getDebugLoc(); | ||||||
4191 | Register SrcCond = MI.getOperand(3).getReg(); | ||||||
4192 | |||||||
4193 | Register DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); | ||||||
4194 | Register DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); | ||||||
4195 | const auto *CondRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); | ||||||
4196 | Register SrcCondCopy = MRI.createVirtualRegister(CondRC); | ||||||
4197 | |||||||
4198 | BuildMI(*BB, MI, DL, TII->get(AMDGPU::COPY), SrcCondCopy) | ||||||
4199 | .addReg(SrcCond); | ||||||
4200 | BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstLo) | ||||||
4201 | .addImm(0) | ||||||
4202 | .addReg(Src0, 0, AMDGPU::sub0) | ||||||
4203 | .addImm(0) | ||||||
4204 | .addReg(Src1, 0, AMDGPU::sub0) | ||||||
4205 | .addReg(SrcCondCopy); | ||||||
4206 | BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstHi) | ||||||
4207 | .addImm(0) | ||||||
4208 | .addReg(Src0, 0, AMDGPU::sub1) | ||||||
4209 | .addImm(0) | ||||||
4210 | .addReg(Src1, 0, AMDGPU::sub1) | ||||||
4211 | .addReg(SrcCondCopy); | ||||||
4212 | |||||||
4213 | BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), Dst) | ||||||
4214 | .addReg(DstLo) | ||||||
4215 | .addImm(AMDGPU::sub0) | ||||||
4216 | .addReg(DstHi) | ||||||
4217 | .addImm(AMDGPU::sub1); | ||||||
4218 | MI.eraseFromParent(); | ||||||
4219 | return BB; | ||||||
4220 | } | ||||||
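// [Editor's sketch — not part of this file] V_CNDMASK operates on 32-bit
// values only, so the 64-bit select above is two independent 32-bit selects
// on the same per-lane condition, reassembled with REG_SEQUENCE:
#include <cstdint>
static uint64_t select64(bool Cond, uint64_t A, uint64_t B) {
  uint32_t Lo = Cond ? (uint32_t)A : (uint32_t)B;                 // sub0 half
  uint32_t Hi = Cond ? (uint32_t)(A >> 32) : (uint32_t)(B >> 32); // sub1 half
  return ((uint64_t)Hi << 32) | Lo;
}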
4221 | case AMDGPU::SI_BR_UNDEF: { | ||||||
4222 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); | ||||||
4223 | const DebugLoc &DL = MI.getDebugLoc(); | ||||||
4224 | MachineInstr *Br = BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC1)) | ||||||
4225 | .add(MI.getOperand(0)); | ||||||
4226 | Br->getOperand(1).setIsUndef(true); // read undef SCC | ||||||
4227 | MI.eraseFromParent(); | ||||||
4228 | return BB; | ||||||
4229 | } | ||||||
4230 | case AMDGPU::ADJCALLSTACKUP: | ||||||
4231 | case AMDGPU::ADJCALLSTACKDOWN: { | ||||||
4232 | const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>(); | ||||||
4233 | MachineInstrBuilder MIB(*MF, &MI); | ||||||
    MIB.addReg(Info->getStackPtrOffsetReg(), RegState::ImplicitDefine)
        .addReg(Info->getStackPtrOffsetReg(), RegState::Implicit);
    return BB;
  }
  case AMDGPU::SI_CALL_ISEL: {
    const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
    const DebugLoc &DL = MI.getDebugLoc();

    unsigned ReturnAddrReg = TII->getRegisterInfo().getReturnAddressReg(*MF);

    MachineInstrBuilder MIB;
    MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_CALL), ReturnAddrReg);

    for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I)
      MIB.add(MI.getOperand(I));

    MIB.cloneMemRefs(MI);
    MI.eraseFromParent();
    return BB;
  }
  case AMDGPU::V_ADD_CO_U32_e32:
  case AMDGPU::V_SUB_CO_U32_e32:
  case AMDGPU::V_SUBREV_CO_U32_e32: {
    // TODO: Define distinct V_*_I32_Pseudo instructions instead.
    const DebugLoc &DL = MI.getDebugLoc();
    unsigned Opc = MI.getOpcode();

    bool NeedClampOperand = false;
    if (TII->pseudoToMCOpcode(Opc) == -1) {
      Opc = AMDGPU::getVOPe64(Opc);
      NeedClampOperand = true;
    }

    auto I = BuildMI(*BB, MI, DL, TII->get(Opc), MI.getOperand(0).getReg());
    if (TII->isVOP3(*I)) {
      const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
      const SIRegisterInfo *TRI = ST.getRegisterInfo();
      I.addReg(TRI->getVCC(), RegState::Define);
    }
    I.add(MI.getOperand(1))
        .add(MI.getOperand(2));
    if (NeedClampOperand)
      I.addImm(0); // clamp bit for e64 encoding

    TII->legalizeOperands(*I);

    MI.eraseFromParent();
    return BB;
  }
  case AMDGPU::V_ADDC_U32_e32:
  case AMDGPU::V_SUBB_U32_e32:
  case AMDGPU::V_SUBBREV_U32_e32:
    // These instructions have an implicit use of vcc which counts towards
    // the constant bus limit.
    TII->legalizeOperands(MI);
    return BB;
  case AMDGPU::DS_GWS_INIT:
  case AMDGPU::DS_GWS_SEMA_BR:
  case AMDGPU::DS_GWS_BARRIER:
    if (Subtarget->needsAlignedVGPRs()) {
      // Add an implicit aligned super-reg to force alignment on the data
      // operand.
      const DebugLoc &DL = MI.getDebugLoc();
      MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
      const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
      MachineOperand *Op = TII->getNamedOperand(MI, AMDGPU::OpName::data0);
      Register DataReg = Op->getReg();
      bool IsAGPR = TRI->isAGPR(MRI, DataReg);
      Register Undef = MRI.createVirtualRegister(
          IsAGPR ? &AMDGPU::AGPR_32RegClass : &AMDGPU::VGPR_32RegClass);
      BuildMI(*BB, MI, DL, TII->get(AMDGPU::IMPLICIT_DEF), Undef);
      Register NewVR =
          MRI.createVirtualRegister(IsAGPR ? &AMDGPU::AReg_64_Align2RegClass
                                           : &AMDGPU::VReg_64_Align2RegClass);
      BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), NewVR)
          .addReg(DataReg, 0, Op->getSubReg())
          .addImm(AMDGPU::sub0)
          .addReg(Undef)
          .addImm(AMDGPU::sub1);
      Op->setReg(NewVR);
      Op->setSubReg(AMDGPU::sub0);
      MI.addOperand(MachineOperand::CreateReg(NewVR, false, true));
    }
    LLVM_FALLTHROUGH;
  case AMDGPU::DS_GWS_SEMA_V:
  case AMDGPU::DS_GWS_SEMA_P:
  case AMDGPU::DS_GWS_SEMA_RELEASE_ALL:
    // An s_waitcnt 0 is required to be the instruction immediately
    // following.
    if (getSubtarget()->hasGWSAutoReplay()) {
      bundleInstWithWaitcnt(MI);
      return BB;
    }

    return emitGWSMemViolTestLoop(MI, BB);
  case AMDGPU::S_SETREG_B32: {
    // Try to optimize cases that only set the denormal mode or rounding mode.
    //
    // If the s_setreg_b32 fully sets all of the bits in the rounding mode or
    // denormal mode to a constant, we can use s_round_mode or s_denorm_mode
    // instead.
    //
    // FIXME: This could be predicated on the immediate, but tablegen doesn't
    // allow you to have a no-side-effect instruction in the output of a
    // side-effecting pattern.
    unsigned ID, Offset, Width;
    AMDGPU::Hwreg::decodeHwreg(MI.getOperand(1).getImm(), ID, Offset, Width);
    if (ID != AMDGPU::Hwreg::ID_MODE)
      return BB;

    const unsigned WidthMask = maskTrailingOnes<unsigned>(Width);
    const unsigned SetMask = WidthMask << Offset;
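
    // A worked example of the mask, assuming the usual hwreg operand
    // syntax: s_setreg_b32 hwreg(HW_REG_MODE, 0, 4), x decodes to
    // Offset = 0 and Width = 4, so SetMask == 0xf, which is exactly
    // FP_ROUND_MASK (the four rounding-mode bits that ImmVal & 0xf
    // extracts below).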

    if (getSubtarget()->hasDenormModeInst()) {
      unsigned SetDenormOp = 0;
      unsigned SetRoundOp = 0;

      // The dedicated instructions can only set the whole denorm or round
      // mode at once, not a subset of bits in either.
      if (SetMask ==
          (AMDGPU::Hwreg::FP_ROUND_MASK | AMDGPU::Hwreg::FP_DENORM_MASK)) {
        // If this fully sets both the round and denorm mode, emit the two
        // dedicated instructions for these.
        SetRoundOp = AMDGPU::S_ROUND_MODE;
        SetDenormOp = AMDGPU::S_DENORM_MODE;
      } else if (SetMask == AMDGPU::Hwreg::FP_ROUND_MASK) {
        SetRoundOp = AMDGPU::S_ROUND_MODE;
      } else if (SetMask == AMDGPU::Hwreg::FP_DENORM_MASK) {
        SetDenormOp = AMDGPU::S_DENORM_MODE;
      }

      if (SetRoundOp || SetDenormOp) {
        MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
        MachineInstr *Def = MRI.getVRegDef(MI.getOperand(0).getReg());
        if (Def && Def->isMoveImmediate() && Def->getOperand(1).isImm()) {
          unsigned ImmVal = Def->getOperand(1).getImm();
          if (SetRoundOp) {
            BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(SetRoundOp))
                .addImm(ImmVal & 0xf);

            // If we also have the denorm mode, get just the denorm mode bits.
            ImmVal >>= 4;
          }

          if (SetDenormOp) {
            BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(SetDenormOp))
                .addImm(ImmVal & 0xf);
          }

          MI.eraseFromParent();
          return BB;
        }
      }
    }

    // If only FP bits are touched, use the no-side-effects pseudo.
    if ((SetMask & (AMDGPU::Hwreg::FP_ROUND_MASK |
                    AMDGPU::Hwreg::FP_DENORM_MASK)) == SetMask)
      MI.setDesc(TII->get(AMDGPU::S_SETREG_B32_mode));

    return BB;
  }
  default:
    return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
  }
}

bool SITargetLowering::hasBitPreservingFPLogic(EVT VT) const {
  return isTypeLegal(VT.getScalarType());
}

bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const {
  // This currently forces unfolding various combinations of fsub into fma
  // with free fneg'd operands. As long as we have fast FMA (controlled by
  // isFMAFasterThanFMulAndFAdd), we should perform these.

  // When fma is quarter rate, for f64 where add / sub are at best half rate,
  // most of these combines appear to be cycle neutral but save on
  // instruction count / code size.
  return true;
}

EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx,
                                         EVT VT) const {
  if (!VT.isVector()) {
    return MVT::i1;
  }
  return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements());
}

MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT VT) const {
  // TODO: Should i16 be used always if legal? For now it would force VALU
  // shifts.
  return (VT == MVT::i16) ? MVT::i16 : MVT::i32;
}

LLT SITargetLowering::getPreferredShiftAmountTy(LLT Ty) const {
  return (Ty.getScalarSizeInBits() <= 16 && Subtarget->has16BitInsts())
             ? Ty.changeElementSize(16)
             : Ty.changeElementSize(32);
}

// Answering this is somewhat tricky and depends on the specific device,
// since different devices have different rates for fma and other f64
// operations.
//
// v_fma_f64 and v_mul_f64 always take the same number of cycles as each
// other regardless of which device (although the number of cycles differs
// between devices), so it is always profitable for f64.
//
// v_fma_f32 takes 4 or 16 cycles depending on the device, so it is
// profitable only on full rate devices. Normally, we should prefer selecting
// v_mad_f32, which we can always do even without fused FP ops since it
// returns the same result as the separate operations and since it is always
// full rate. Therefore, we lie and report that it is not faster for f32.
// v_mad_f32, however, does not support denormals, so we do report fma as
// faster if we have a fast fma device and require denormals.
//
bool SITargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                                  EVT VT) const {
  VT = VT.getScalarType();

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32: {
    // If mad is not available this depends only on if f32 fma is full rate.
    if (!Subtarget->hasMadMacF32Insts())
      return Subtarget->hasFastFMAF32();

    // Otherwise f32 mad is always full rate and returns the same result as
    // the separate operations, so it should be preferred over fma.
    // However, it does not support denormals.
    if (hasFP32Denormals(MF))
      return Subtarget->hasFastFMAF32() || Subtarget->hasDLInsts();

    // If the subtarget has v_fmac_f32, that's just as good as v_mac_f32.
    return Subtarget->hasFastFMAF32() && Subtarget->hasDLInsts();
  }
  case MVT::f64:
    return true;
  case MVT::f16:
    return Subtarget->has16BitInsts() && hasFP64FP16Denormals(MF);
  default:
    break;
  }

  return false;
}
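
// A usage sketch of the f32 decision above (device traits, not specific
// chips, are what matter): with f32 denormals enabled, v_mad_f32 is
// unusable, so fma is reported faster whenever a fast v_fma_f32 or a
// v_fmac_f32 (hasDLInsts) is available; with denormals disabled, mad stays
// preferred and fma is only reported faster when both are present.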

bool SITargetLowering::isFMADLegal(const SelectionDAG &DAG,
                                   const SDNode *N) const {
  // TODO: Check future ftz flag
  // v_mad_f32/v_mac_f32 do not support denormals.
  EVT VT = N->getValueType(0);
  if (VT == MVT::f32)
    return Subtarget->hasMadMacF32Insts() &&
           !hasFP32Denormals(DAG.getMachineFunction());
  if (VT == MVT::f16) {
    return Subtarget->hasMadF16() &&
           !hasFP64FP16Denormals(DAG.getMachineFunction());
  }

  return false;
}

//===----------------------------------------------------------------------===//
// Custom DAG Lowering Operations
//===----------------------------------------------------------------------===//

// Work around LegalizeDAG doing the wrong thing and fully scalarizing if the
// wider vector type is legal.
SDValue SITargetLowering::splitUnaryVectorOp(SDValue Op,
                                             SelectionDAG &DAG) const {
  unsigned Opc = Op.getOpcode();
  EVT VT = Op.getValueType();
  assert(VT == MVT::v4f16 || VT == MVT::v4i16);

  SDValue Lo, Hi;
  std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);

  SDLoc SL(Op);
  SDValue OpLo = DAG.getNode(Opc, SL, Lo.getValueType(), Lo,
                             Op->getFlags());
  SDValue OpHi = DAG.getNode(Opc, SL, Hi.getValueType(), Hi,
                             Op->getFlags());

  return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
}
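
// For example (a sketch of the transform this performs), lowering
// (fneg v4f16:%x) with this helper yields
//   (v4f16 (concat_vectors (fneg v2f16:%lo), (fneg v2f16:%hi)))
// where %lo and %hi are the two halves produced by SplitVectorOperand.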

// Work around LegalizeDAG doing the wrong thing and fully scalarizing if the
// wider vector type is legal.
SDValue SITargetLowering::splitBinaryVectorOp(SDValue Op,
                                              SelectionDAG &DAG) const {
  unsigned Opc = Op.getOpcode();
  EVT VT = Op.getValueType();
  assert(VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4f32 ||
         VT == MVT::v8f32 || VT == MVT::v16f32 || VT == MVT::v32f32);

  SDValue Lo0, Hi0;
  std::tie(Lo0, Hi0) = DAG.SplitVectorOperand(Op.getNode(), 0);
  SDValue Lo1, Hi1;
  std::tie(Lo1, Hi1) = DAG.SplitVectorOperand(Op.getNode(), 1);

  SDLoc SL(Op);

  SDValue OpLo = DAG.getNode(Opc, SL, Lo0.getValueType(), Lo0, Lo1,
                             Op->getFlags());
  SDValue OpHi = DAG.getNode(Opc, SL, Hi0.getValueType(), Hi0, Hi1,
                             Op->getFlags());

  return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
}

SDValue SITargetLowering::splitTernaryVectorOp(SDValue Op,
                                               SelectionDAG &DAG) const {
  unsigned Opc = Op.getOpcode();
  EVT VT = Op.getValueType();
  assert(VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4f32 ||
         VT == MVT::v8f32 || VT == MVT::v16f32 || VT == MVT::v32f32);

  SDValue Lo0, Hi0;
  std::tie(Lo0, Hi0) = DAG.SplitVectorOperand(Op.getNode(), 0);
  SDValue Lo1, Hi1;
  std::tie(Lo1, Hi1) = DAG.SplitVectorOperand(Op.getNode(), 1);
  SDValue Lo2, Hi2;
  std::tie(Lo2, Hi2) = DAG.SplitVectorOperand(Op.getNode(), 2);

  SDLoc SL(Op);

  SDValue OpLo = DAG.getNode(Opc, SL, Lo0.getValueType(), Lo0, Lo1, Lo2,
                             Op->getFlags());
  SDValue OpHi = DAG.getNode(Opc, SL, Hi0.getValueType(), Hi0, Hi1, Hi2,
                             Op->getFlags());

  return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
}

SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
  case ISD::BRCOND: return LowerBRCOND(Op, DAG);
  case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
  case ISD::LOAD: {
    SDValue Result = LowerLOAD(Op, DAG);
    assert((!Result.getNode() ||
            Result.getNode()->getNumValues() == 2) &&
           "Load should return a value and a chain");
    return Result;
  }

  case ISD::FSIN:
  case ISD::FCOS:
    return LowerTrig(Op, DAG);
  case ISD::SELECT: return LowerSELECT(Op, DAG);
  case ISD::FDIV: return LowerFDIV(Op, DAG);
  case ISD::ATOMIC_CMP_SWAP: return LowerATOMIC_CMP_SWAP(Op, DAG);
  case ISD::STORE: return LowerSTORE(Op, DAG);
  case ISD::GlobalAddress: {
    MachineFunction &MF = DAG.getMachineFunction();
    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
    return LowerGlobalAddress(MFI, Op, DAG);
  }
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG);
  case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);
  case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG);
  case ISD::INSERT_SUBVECTOR:
    return lowerINSERT_SUBVECTOR(Op, DAG);
  case ISD::INSERT_VECTOR_ELT:
    return lowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT:
    return lowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::VECTOR_SHUFFLE:
    return lowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::BUILD_VECTOR:
    return lowerBUILD_VECTOR(Op, DAG);
  case ISD::FP_ROUND:
    return lowerFP_ROUND(Op, DAG);
  case ISD::TRAP:
    return lowerTRAP(Op, DAG);
  case ISD::DEBUGTRAP:
    return lowerDEBUGTRAP(Op, DAG);
  case ISD::FABS:
  case ISD::FNEG:
  case ISD::FCANONICALIZE:
  case ISD::BSWAP:
    return splitUnaryVectorOp(Op, DAG);
  case ISD::FMINNUM:
  case ISD::FMAXNUM:
    return lowerFMINNUM_FMAXNUM(Op, DAG);
  case ISD::FMA:
    return splitTernaryVectorOp(Op, DAG);
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
    return LowerFP_TO_INT(Op, DAG);
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
  case ISD::ADD:
  case ISD::SUB:
  case ISD::MUL:
  case ISD::SMIN:
  case ISD::SMAX:
  case ISD::UMIN:
  case ISD::UMAX:
  case ISD::FADD:
  case ISD::FMUL:
  case ISD::FMINNUM_IEEE:
  case ISD::FMAXNUM_IEEE:
  case ISD::UADDSAT:
  case ISD::USUBSAT:
  case ISD::SADDSAT:
  case ISD::SSUBSAT:
    return splitBinaryVectorOp(Op, DAG);
  case ISD::SMULO:
  case ISD::UMULO:
    return lowerXMULO(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC:
    return LowerDYNAMIC_STACKALLOC(Op, DAG);
  }
  return SDValue();
}

// Used for D16: Casts the result of an instruction into the right vector,
// packs values if loads return unpacked values.
static SDValue adjustLoadValueTypeImpl(SDValue Result, EVT LoadVT,
                                       const SDLoc &DL,
                                       SelectionDAG &DAG, bool Unpacked) {
  if (!LoadVT.isVector())
    return Result;

  // Cast back to the original packed type or to a larger type that is a
  // multiple of 32 bit for D16. Widening the return type is required for
  // legalization.
  EVT FittingLoadVT = LoadVT;
  if ((LoadVT.getVectorNumElements() % 2) == 1) {
    FittingLoadVT =
        EVT::getVectorVT(*DAG.getContext(), LoadVT.getVectorElementType(),
                         LoadVT.getVectorNumElements() + 1);
  }

  if (Unpacked) { // From v2i32/v4i32 back to v2f16/v4f16.
    // Truncate to v2i16/v4i16.
    EVT IntLoadVT = FittingLoadVT.changeTypeToInteger();

    // Work around the legalizer not scalarizing truncate after vector op
    // legalization and not creating an intermediate vector trunc.
    SmallVector<SDValue, 4> Elts;
    DAG.ExtractVectorElements(Result, Elts);
    for (SDValue &Elt : Elts)
      Elt = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Elt);

    // Pad illegal v1i16/v3f16 to v4i16.
    if ((LoadVT.getVectorNumElements() % 2) == 1)
      Elts.push_back(DAG.getUNDEF(MVT::i16));

    Result = DAG.getBuildVector(IntLoadVT, DL, Elts);

    // Bitcast to original type (v2f16/v4f16).
    return DAG.getNode(ISD::BITCAST, DL, FittingLoadVT, Result);
  }

  // Cast back to the original packed type.
  return DAG.getNode(ISD::BITCAST, DL, FittingLoadVT, Result);
}
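
// For example, on a subtarget with unpacked D16 memory instructions a
// v3f16 load comes back as v3i32 (one 16-bit value per 32-bit register);
// the helper above truncates each element to i16, pads with one undef
// element to the legal v4i16, and bitcasts to the widened v4f16 result.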

SDValue SITargetLowering::adjustLoadValueType(unsigned Opcode,
                                              MemSDNode *M,
                                              SelectionDAG &DAG,
                                              ArrayRef<SDValue> Ops,
                                              bool IsIntrinsic) const {
  SDLoc DL(M);

  bool Unpacked = Subtarget->hasUnpackedD16VMem();
  EVT LoadVT = M->getValueType(0);

  EVT EquivLoadVT = LoadVT;
  if (LoadVT.isVector()) {
    if (Unpacked) {
      EquivLoadVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
                                     LoadVT.getVectorNumElements());
    } else if ((LoadVT.getVectorNumElements() % 2) == 1) {
      // Widen v3f16 to legal type
      EquivLoadVT =
          EVT::getVectorVT(*DAG.getContext(), LoadVT.getVectorElementType(),
                           LoadVT.getVectorNumElements() + 1);
    }
  }

  // Change from v4f16/v2f16 to EquivLoadVT.
  SDVTList VTList = DAG.getVTList(EquivLoadVT, MVT::Other);

  SDValue Load
    = DAG.getMemIntrinsicNode(
        IsIntrinsic ? (unsigned)ISD::INTRINSIC_W_CHAIN : Opcode, DL,
        VTList, Ops, M->getMemoryVT(),
        M->getMemOperand());

  SDValue Adjusted = adjustLoadValueTypeImpl(Load, LoadVT, DL, DAG, Unpacked);

  return DAG.getMergeValues({ Adjusted, Load.getValue(1) }, DL);
}

SDValue SITargetLowering::lowerIntrinsicLoad(MemSDNode *M, bool IsFormat,
                                             SelectionDAG &DAG,
                                             ArrayRef<SDValue> Ops) const {
  SDLoc DL(M);
  EVT LoadVT = M->getValueType(0);
  EVT EltType = LoadVT.getScalarType();
  EVT IntVT = LoadVT.changeTypeToInteger();

  bool IsD16 = IsFormat && (EltType.getSizeInBits() == 16);

  unsigned Opc =
      IsFormat ? AMDGPUISD::BUFFER_LOAD_FORMAT : AMDGPUISD::BUFFER_LOAD;

  if (IsD16) {
    return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16, M, DAG, Ops);
  }

  // Handle BUFFER_LOAD_BYTE/UBYTE/SHORT/USHORT overloaded intrinsics
  if (!IsD16 && !LoadVT.isVector() && EltType.getSizeInBits() < 32)
    return handleByteShortBufferLoads(DAG, LoadVT, DL, Ops, M);

  if (isTypeLegal(LoadVT)) {
    return getMemIntrinsicNode(Opc, DL, M->getVTList(), Ops, IntVT,
                               M->getMemOperand(), DAG);
  }

  EVT CastVT = getEquivalentMemType(*DAG.getContext(), LoadVT);
  SDVTList VTList = DAG.getVTList(CastVT, MVT::Other);
  SDValue MemNode = getMemIntrinsicNode(Opc, DL, VTList, Ops, CastVT,
                                        M->getMemOperand(), DAG);
  return DAG.getMergeValues(
      {DAG.getNode(ISD::BITCAST, DL, LoadVT, MemNode), MemNode.getValue(1)},
      DL);
}
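
// An illustrative dispatch through the paths above (the types are examples,
// not an exhaustive list): a format load of v4f16 takes the D16 path, a
// scalar i8/i16 buffer load goes through handleByteShortBufferLoads, and a
// load of an already-legal type such as v4f32 is emitted directly as a
// memory intrinsic node.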

static SDValue lowerICMPIntrinsic(const SITargetLowering &TLI,
                                  SDNode *N, SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);
  const auto *CD = cast<ConstantSDNode>(N->getOperand(3));
  unsigned CondCode = CD->getZExtValue();
  if (!ICmpInst::isIntPredicate(static_cast<ICmpInst::Predicate>(CondCode)))
    return DAG.getUNDEF(VT);

  ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode);

  SDValue LHS = N->getOperand(1);
  SDValue RHS = N->getOperand(2);

  SDLoc DL(N);

  EVT CmpVT = LHS.getValueType();
  if (CmpVT == MVT::i16 && !TLI.isTypeLegal(MVT::i16)) {
    unsigned PromoteOp = ICmpInst::isSigned(IcInput) ?
      ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
    LHS = DAG.getNode(PromoteOp, DL, MVT::i32, LHS);
    RHS = DAG.getNode(PromoteOp, DL, MVT::i32, RHS);
  }

  ISD::CondCode CCOpcode = getICmpCondCode(IcInput);

  unsigned WavefrontSize = TLI.getSubtarget()->getWavefrontSize();
  EVT CCVT = EVT::getIntegerVT(*DAG.getContext(), WavefrontSize);

  SDValue SetCC = DAG.getNode(AMDGPUISD::SETCC, DL, CCVT, LHS, RHS,
                              DAG.getCondCode(CCOpcode));
  if (VT.bitsEq(CCVT))
    return SetCC;
  return DAG.getZExtOrTrunc(SetCC, DL, VT);
}
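
// For example, on a wave64 target llvm.amdgcn.icmp(i32 %a, i32 %b, 32)
// (32 being ICmpInst::ICMP_EQ) becomes (i64 (AMDGPUISD::SETCC %a, %b,
// seteq)), i.e. a lane mask with one result bit per lane.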

static SDValue lowerFCMPIntrinsic(const SITargetLowering &TLI,
                                  SDNode *N, SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);
  const auto *CD = cast<ConstantSDNode>(N->getOperand(3));

  unsigned CondCode = CD->getZExtValue();
  if (!FCmpInst::isFPPredicate(static_cast<FCmpInst::Predicate>(CondCode)))
    return DAG.getUNDEF(VT);

  SDValue Src0 = N->getOperand(1);
  SDValue Src1 = N->getOperand(2);
  EVT CmpVT = Src0.getValueType();
  SDLoc SL(N);

  if (CmpVT == MVT::f16 && !TLI.isTypeLegal(CmpVT)) {
    Src0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0);
    Src1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1);
  }

  FCmpInst::Predicate IcInput = static_cast<FCmpInst::Predicate>(CondCode);
  ISD::CondCode CCOpcode = getFCmpCondCode(IcInput);
  unsigned WavefrontSize = TLI.getSubtarget()->getWavefrontSize();
  EVT CCVT = EVT::getIntegerVT(*DAG.getContext(), WavefrontSize);
  SDValue SetCC = DAG.getNode(AMDGPUISD::SETCC, SL, CCVT, Src0,
                              Src1, DAG.getCondCode(CCOpcode));
  if (VT.bitsEq(CCVT))
    return SetCC;
  return DAG.getZExtOrTrunc(SetCC, SL, VT);
}

static SDValue lowerBALLOTIntrinsic(const SITargetLowering &TLI, SDNode *N,
                                    SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);
  SDValue Src = N->getOperand(1);
  SDLoc SL(N);

  if (Src.getOpcode() == ISD::SETCC) {
    // (ballot (ISD::SETCC ...)) -> (AMDGPUISD::SETCC ...)
    return DAG.getNode(AMDGPUISD::SETCC, SL, VT, Src.getOperand(0),
                       Src.getOperand(1), Src.getOperand(2));
  }
  if (const ConstantSDNode *Arg = dyn_cast<ConstantSDNode>(Src)) {
    // (ballot 0) -> 0
    if (Arg->isNullValue())
      return DAG.getConstant(0, SL, VT);

    // (ballot 1) -> EXEC/EXEC_LO
    if (Arg->isOne()) {
      Register Exec;
      if (VT.getScalarSizeInBits() == 32)
        Exec = AMDGPU::EXEC_LO;
      else if (VT.getScalarSizeInBits() == 64)
        Exec = AMDGPU::EXEC;
      else
        return SDValue();

      return DAG.getCopyFromReg(DAG.getEntryNode(), SL, Exec, VT);
    }
  }

  // (ballot (i1 $src)) -> (AMDGPUISD::SETCC (i32 (zext $src)) (i32 0)
  // ISD::SETNE)
  return DAG.getNode(
      AMDGPUISD::SETCC, SL, VT, DAG.getZExtOrTrunc(Src, SL, MVT::i32),
      DAG.getConstant(0, SL, MVT::i32), DAG.getCondCode(ISD::SETNE));
}

void SITargetLowering::ReplaceNodeResults(SDNode *N,
                                          SmallVectorImpl<SDValue> &Results,
                                          SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  case ISD::INSERT_VECTOR_ELT: {
    if (SDValue Res = lowerINSERT_VECTOR_ELT(SDValue(N, 0), DAG))
      Results.push_back(Res);
    return;
  }
  case ISD::EXTRACT_VECTOR_ELT: {
    if (SDValue Res = lowerEXTRACT_VECTOR_ELT(SDValue(N, 0), DAG))
      Results.push_back(Res);
    return;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    switch (IID) {
    case Intrinsic::amdgcn_cvt_pkrtz: {
      SDValue Src0 = N->getOperand(1);
      SDValue Src1 = N->getOperand(2);
      SDLoc SL(N);
      SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, SL, MVT::i32,
                                Src0, Src1);
      Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Cvt));
      return;
    }
    case Intrinsic::amdgcn_cvt_pknorm_i16:
    case Intrinsic::amdgcn_cvt_pknorm_u16:
    case Intrinsic::amdgcn_cvt_pk_i16:
    case Intrinsic::amdgcn_cvt_pk_u16: {
      SDValue Src0 = N->getOperand(1);
      SDValue Src1 = N->getOperand(2);
      SDLoc SL(N);
      unsigned Opcode;

      if (IID == Intrinsic::amdgcn_cvt_pknorm_i16)
        Opcode = AMDGPUISD::CVT_PKNORM_I16_F32;
      else if (IID == Intrinsic::amdgcn_cvt_pknorm_u16)
        Opcode = AMDGPUISD::CVT_PKNORM_U16_F32;
      else if (IID == Intrinsic::amdgcn_cvt_pk_i16)
        Opcode = AMDGPUISD::CVT_PK_I16_I32;
      else
        Opcode = AMDGPUISD::CVT_PK_U16_U32;

      EVT VT = N->getValueType(0);
      if (isTypeLegal(VT))
        Results.push_back(DAG.getNode(Opcode, SL, VT, Src0, Src1));
      else {
        SDValue Cvt = DAG.getNode(Opcode, SL, MVT::i32, Src0, Src1);
        Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, Cvt));
      }
      return;
    }
    }
    break;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    if (SDValue Res = LowerINTRINSIC_W_CHAIN(SDValue(N, 0), DAG)) {
      if (Res.getOpcode() == ISD::MERGE_VALUES) {
        // FIXME: Hacky
        for (unsigned I = 0; I < Res.getNumOperands(); I++) {
          Results.push_back(Res.getOperand(I));
        }
      } else {
        Results.push_back(Res);
        Results.push_back(Res.getValue(1));
      }
      return;
    }

    break;
  }
  case ISD::SELECT: {
    SDLoc SL(N);
    EVT VT = N->getValueType(0);
    EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
    SDValue LHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(1));
    SDValue RHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(2));

    EVT SelectVT = NewVT;
    if (NewVT.bitsLT(MVT::i32)) {
      LHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, LHS);
      RHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, RHS);
      SelectVT = MVT::i32;
    }

    SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, SelectVT,
                                    N->getOperand(0), LHS, RHS);

    if (NewVT != SelectVT)
      NewSelect = DAG.getNode(ISD::TRUNCATE, SL, NewVT, NewSelect);
    Results.push_back(DAG.getNode(ISD::BITCAST, SL, VT, NewSelect));
    return;
  }
  case ISD::FNEG: {
    if (N->getValueType(0) != MVT::v2f16)
      break;

    SDLoc SL(N);
    SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0));

    SDValue Op = DAG.getNode(ISD::XOR, SL, MVT::i32,
                             BC,
                             DAG.getConstant(0x80008000, SL, MVT::i32));
    Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op));
    return;
  }
  case ISD::FABS: {
    if (N->getValueType(0) != MVT::v2f16)
      break;

    SDLoc SL(N);
    SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0));

    SDValue Op = DAG.getNode(ISD::AND, SL, MVT::i32,
                             BC,
                             DAG.getConstant(0x7fff7fff, SL, MVT::i32));
    Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op));
    return;
  }
  default:
    break;
  }
}

/// Helper function for LowerBRCOND
static SDNode *findUser(SDValue Value, unsigned Opcode) {

  SDNode *Parent = Value.getNode();
  for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end();
       I != E; ++I) {

    if (I.getUse().get() != Value)
      continue;

    if (I->getOpcode() == Opcode)
      return *I;
  }
  return nullptr;
}

unsigned SITargetLowering::isCFIntrinsic(const SDNode *Intr) const {
  if (Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
    switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) {
    case Intrinsic::amdgcn_if:
      return AMDGPUISD::IF;
    case Intrinsic::amdgcn_else:
      return AMDGPUISD::ELSE;
    case Intrinsic::amdgcn_loop:
      return AMDGPUISD::LOOP;
    case Intrinsic::amdgcn_end_cf:
      llvm_unreachable("should not occur");
    default:
      return 0;
    }
  }

  // break, if_break, else_break are all only used as inputs to loop, not
  // directly as branch conditions.
  return 0;
}

bool SITargetLowering::shouldEmitFixup(const GlobalValue *GV) const {
  const Triple &TT = getTargetMachine().getTargetTriple();
  return (GV->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
          GV->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
         AMDGPU::shouldEmitConstantsToTextSection(TT);
}

bool SITargetLowering::shouldEmitGOTReloc(const GlobalValue *GV) const {
  // FIXME: Either avoid relying on address space here or change the default
  // address space for functions to avoid the explicit check.
  return (GV->getValueType()->isFunctionTy() ||
          !isNonGlobalAddrSpace(GV->getAddressSpace())) &&
         !shouldEmitFixup(GV) &&
         !getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
}

bool SITargetLowering::shouldEmitPCReloc(const GlobalValue *GV) const {
  return !shouldEmitFixup(GV) && !shouldEmitGOTReloc(GV);
}

bool SITargetLowering::shouldUseLDSConstAddress(const GlobalValue *GV) const {
  if (!GV->hasExternalLinkage())
    return true;

  const auto OS = getTargetMachine().getTargetTriple().getOS();
  return OS == Triple::AMDHSA || OS == Triple::AMDPAL;
}

/// This transforms the control flow intrinsics to get the branch destination
/// as last parameter; it also switches the branch target with BR if the
/// need arises.
SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
                                      SelectionDAG &DAG) const {
  SDLoc DL(BRCOND);

  SDNode *Intr = BRCOND.getOperand(1).getNode();
  SDValue Target = BRCOND.getOperand(2);
  SDNode *BR = nullptr;
  SDNode *SetCC = nullptr;

  if (Intr->getOpcode() == ISD::SETCC) {
    // As long as we negate the condition everything is fine
    SetCC = Intr;
    Intr = SetCC->getOperand(0).getNode();

  } else {
    // Get the target from BR if we don't negate the condition
    BR = findUser(BRCOND, ISD::BR);
    assert(BR && "brcond missing unconditional branch user");
    Target = BR->getOperand(1);
  }

  unsigned CFNode = isCFIntrinsic(Intr);
  if (CFNode == 0) {
    // This is a uniform branch so we don't need to legalize.
    return BRCOND;
  }

  bool HaveChain = Intr->getOpcode() == ISD::INTRINSIC_VOID ||
                   Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN;

  assert(!SetCC ||
         (SetCC->getConstantOperandVal(1) == 1 &&
          cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() ==
              ISD::SETNE));

  // operands of the new intrinsic call
  SmallVector<SDValue, 4> Ops;
  if (HaveChain)
    Ops.push_back(BRCOND.getOperand(0));

  Ops.append(Intr->op_begin() + (HaveChain ? 2 : 1), Intr->op_end());
  Ops.push_back(Target);

  ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end());

  // build the new intrinsic call
  SDNode *Result = DAG.getNode(CFNode, DL, DAG.getVTList(Res), Ops).getNode();

  if (!HaveChain) {
    SDValue Ops[] = {
      SDValue(Result, 0),
      BRCOND.getOperand(0)
    };

    Result = DAG.getMergeValues(Ops, DL).getNode();
  }

  if (BR) {
    // Give the branch instruction our target
    SDValue Ops[] = {
      BR->getOperand(0),
      BRCOND.getOperand(2)
    };
    SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops);
    DAG.ReplaceAllUsesWith(BR, NewBR.getNode());
  }

  SDValue Chain = SDValue(Result, Result->getNumValues() - 1);

  // Copy the intrinsic results to registers
  for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) {
    SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg);
    if (!CopyToReg)
      continue;

    Chain = DAG.getCopyToReg(
        Chain, DL,
        CopyToReg->getOperand(1),
        SDValue(Result, i - 1),
        SDValue());

    DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0));
  }

  // Remove the old intrinsic from the chain
  DAG.ReplaceAllUsesOfValueWith(
      SDValue(Intr, Intr->getNumValues() - 1),
      Intr->getOperand(0));

  return Chain;
}

SDValue SITargetLowering::LowerRETURNADDR(SDValue Op,
                                          SelectionDAG &DAG) const {
  MVT VT = Op.getSimpleValueType();
  SDLoc DL(Op);
  // Checking the depth
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() != 0)
    return DAG.getConstant(0, DL, VT);

  MachineFunction &MF = DAG.getMachineFunction();
  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  // Check for kernel and shader functions
  if (Info->isEntryFunction())
    return DAG.getConstant(0, DL, VT);

  MachineFrameInfo &MFI = MF.getFrameInfo();
  // There is a call to @llvm.returnaddress in this function
  MFI.setReturnAddressIsTaken(true);

  const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
  // Get the return address reg and mark it as an implicit live-in
  Register Reg = MF.addLiveIn(TRI->getReturnAddressReg(MF),
                              getRegClassFor(VT, Op.getNode()->isDivergent()));

  return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, VT);
}

SDValue SITargetLowering::getFPExtOrFPRound(SelectionDAG &DAG,
                                            SDValue Op,
                                            const SDLoc &DL,
                                            EVT VT) const {
  return Op.getValueType().bitsLE(VT) ?
      DAG.getNode(ISD::FP_EXTEND, DL, VT, Op) :
      DAG.getNode(ISD::FP_ROUND, DL, VT, Op,
                  DAG.getTargetConstant(0, DL, MVT::i32));
}

SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
  assert(Op.getValueType() == MVT::f16 &&
         "Do not know how to custom lower FP_ROUND for non-f16 type");

  SDValue Src = Op.getOperand(0);
  EVT SrcVT = Src.getValueType();
  if (SrcVT != MVT::f64)
    return Op;

  SDLoc DL(Op);

  SDValue FpToFp16 = DAG.getNode(ISD::FP_TO_FP16, DL, MVT::i32, Src);
  SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToFp16);
  return DAG.getNode(ISD::BITCAST, DL, MVT::f16, Trunc);
}
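
// For example, (fp_round f64:%x to f16) becomes
//   (bitcast f16 (truncate i16 (fp_to_fp16 %x)))
// relying on FP_TO_FP16 producing the half bits in the low 16 bits of an
// i32.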

SDValue SITargetLowering::lowerFMINNUM_FMAXNUM(SDValue Op,
                                               SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  const MachineFunction &MF = DAG.getMachineFunction();
  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  bool IsIEEEMode = Info->getMode().IEEE;

  // FIXME: Assert during selection that this is only selected for
  // ieee_mode. Currently a combine can produce the ieee version for non-ieee
  // mode functions, but this happens to be OK since it's only done in cases
  // where there is known no sNaN.
  if (IsIEEEMode)
    return expandFMINNUM_FMAXNUM(Op.getNode(), DAG);

  if (VT == MVT::v4f16)
    return splitBinaryVectorOp(Op, DAG);
  return Op;
}

SDValue SITargetLowering::lowerXMULO(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc SL(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  bool isSigned = Op.getOpcode() == ISD::SMULO;

  if (ConstantSDNode *RHSC = isConstOrConstSplat(RHS)) {
    const APInt &C = RHSC->getAPIntValue();
    // mulo(X, 1 << S) -> { X << S, (X << S) >> S != X }
    if (C.isPowerOf2()) {
      // smulo(x, signed_min) is same as umulo(x, signed_min).
      bool UseArithShift = isSigned && !C.isMinSignedValue();
      SDValue ShiftAmt = DAG.getConstant(C.logBase2(), SL, MVT::i32);
      SDValue Result = DAG.getNode(ISD::SHL, SL, VT, LHS, ShiftAmt);
      SDValue Overflow = DAG.getSetCC(SL, MVT::i1,
          DAG.getNode(UseArithShift ? ISD::SRA : ISD::SRL,
                      SL, VT, Result, ShiftAmt),
          LHS, ISD::SETNE);
      return DAG.getMergeValues({ Result, Overflow }, SL);
    }
  }

  SDValue Result = DAG.getNode(ISD::MUL, SL, VT, LHS, RHS);
  SDValue Top = DAG.getNode(isSigned ? ISD::MULHS : ISD::MULHU,
                            SL, VT, LHS, RHS);

  SDValue Sign = isSigned
      ? DAG.getNode(ISD::SRA, SL, VT, Result,
                    DAG.getConstant(VT.getScalarSizeInBits() - 1, SL, MVT::i32))
      : DAG.getConstant(0, SL, VT);
  SDValue Overflow = DAG.getSetCC(SL, MVT::i1, Top, Sign, ISD::SETNE);

  return DAG.getMergeValues({ Result, Overflow }, SL);
}
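
// For example, (umulo i32 %x, 8) lowers without a multiply:
//   %r  = shl i32 %x, 3
//   %ov = setcc ne (srl %r, 3), %x
// while the general case checks the high half (mulhu, or mulhs against the
// result's sign) to detect overflow.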

SDValue SITargetLowering::lowerTRAP(SDValue Op, SelectionDAG &DAG) const {
  if (!Subtarget->isTrapHandlerEnabled() ||
      Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbi::AMDHSA)
    return lowerTrapEndpgm(Op, DAG);

  if (Optional<uint8_t> HsaAbiVer = AMDGPU::getHsaAbiVersion(Subtarget)) {
    switch (*HsaAbiVer) {
    case ELF::ELFABIVERSION_AMDGPU_HSA_V2:
    case ELF::ELFABIVERSION_AMDGPU_HSA_V3:
      return lowerTrapHsaQueuePtr(Op, DAG);
    case ELF::ELFABIVERSION_AMDGPU_HSA_V4:
      return Subtarget->supportsGetDoorbellID() ?
          lowerTrapHsa(Op, DAG) : lowerTrapHsaQueuePtr(Op, DAG);
    }
  }

  llvm_unreachable("Unknown trap handler");
}

SDValue SITargetLowering::lowerTrapEndpgm(
    SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue Chain = Op.getOperand(0);
  return DAG.getNode(AMDGPUISD::ENDPGM, SL, MVT::Other, Chain);
}

SDValue SITargetLowering::lowerTrapHsaQueuePtr(
    SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue Chain = Op.getOperand(0);

  MachineFunction &MF = DAG.getMachineFunction();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  Register UserSGPR = Info->getQueuePtrUserSGPR();
  assert(UserSGPR != AMDGPU::NoRegister);
  SDValue QueuePtr = CreateLiveInRegister(
      DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);
  SDValue SGPR01 = DAG.getRegister(AMDGPU::SGPR0_SGPR1, MVT::i64);
  SDValue ToReg = DAG.getCopyToReg(Chain, SL, SGPR01,
                                   QueuePtr, SDValue());

  uint64_t TrapID =
      static_cast<uint64_t>(GCNSubtarget::TrapID::LLVMAMDHSATrap);
  SDValue Ops[] = {
    ToReg,
    DAG.getTargetConstant(TrapID, SL, MVT::i16),
    SGPR01,
    ToReg.getValue(1)
  };
  return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
}

SDValue SITargetLowering::lowerTrapHsa(
    SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue Chain = Op.getOperand(0);

  uint64_t TrapID =
      static_cast<uint64_t>(GCNSubtarget::TrapID::LLVMAMDHSATrap);
  SDValue Ops[] = {
    Chain,
    DAG.getTargetConstant(TrapID, SL, MVT::i16)
  };
  return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
}

SDValue SITargetLowering::lowerDEBUGTRAP(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue Chain = Op.getOperand(0);
  MachineFunction &MF = DAG.getMachineFunction();

  if (!Subtarget->isTrapHandlerEnabled() ||
      Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbi::AMDHSA) {
    DiagnosticInfoUnsupported NoTrap(MF.getFunction(),
                                     "debugtrap handler not supported",
                                     Op.getDebugLoc(),
                                     DS_Warning);
    LLVMContext &Ctx = MF.getFunction().getContext();
    Ctx.diagnose(NoTrap);
    return Chain;
  }

  uint64_t TrapID =
      static_cast<uint64_t>(GCNSubtarget::TrapID::LLVMAMDHSADebugTrap);
  SDValue Ops[] = {
    Chain,
    DAG.getTargetConstant(TrapID, SL, MVT::i16)
  };
  return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
}

SDValue SITargetLowering::getSegmentAperture(unsigned AS, const SDLoc &DL,
                                             SelectionDAG &DAG) const {
  // FIXME: Use inline constants (src_{shared, private}_base) instead.
  if (Subtarget->hasApertureRegs()) {
    unsigned Offset = AS == AMDGPUAS::LOCAL_ADDRESS ?
        AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE :
        AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE;
    unsigned WidthM1 = AS == AMDGPUAS::LOCAL_ADDRESS ?
        AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE :
        AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE;
    unsigned Encoding =
        AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ |
        Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ |
        WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_;
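
    // A worked view of the immediate being built, assuming the usual hwreg
    // field layout (id in bits [5:0], offset in [10:6], width-1 in
    // [15:11]): for LOCAL_ADDRESS this asks s_getreg_b32 for the
    // SRC_SHARED_BASE field of HW_REG_MEM_BASES.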
5363 | |||||||
5364 | SDValue EncodingImm = DAG.getTargetConstant(Encoding, DL, MVT::i16); | ||||||
5365 | SDValue ApertureReg = SDValue( | ||||||
5366 | DAG.getMachineNode(AMDGPU::S_GETREG_B32, DL, MVT::i32, EncodingImm), 0); | ||||||
5367 | SDValue ShiftAmount = DAG.getTargetConstant(WidthM1 + 1, DL, MVT::i32); | ||||||
5368 | return DAG.getNode(ISD::SHL, DL, MVT::i32, ApertureReg, ShiftAmount); | ||||||
5369 | } | ||||||
5370 | |||||||
5371 | MachineFunction &MF = DAG.getMachineFunction(); | ||||||
5372 | SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); | ||||||
5373 | Register UserSGPR = Info->getQueuePtrUserSGPR(); | ||||||
5374 | assert(UserSGPR != AMDGPU::NoRegister)(static_cast <bool> (UserSGPR != AMDGPU::NoRegister) ? void (0) : __assert_fail ("UserSGPR != AMDGPU::NoRegister", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AMDGPU/SIISelLowering.cpp" , 5374, __extension__ __PRETTY_FUNCTION__)); | ||||||
5375 | |||||||
5376 | SDValue QueuePtr = CreateLiveInRegister( | ||||||
5377 | DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64); | ||||||
5378 | |||||||
5379 | // Offset into amd_queue_t for group_segment_aperture_base_hi / | ||||||
5380 | // private_segment_aperture_base_hi. | ||||||
5381 | uint32_t StructOffset = (AS == AMDGPUAS::LOCAL_ADDRESS) ? 0x40 : 0x44; | ||||||
5382 | |||||||
5383 | SDValue Ptr = | ||||||
5384 | DAG.getObjectPtrOffset(DL, QueuePtr, TypeSize::Fixed(StructOffset)); | ||||||
5385 | |||||||
5386 | // TODO: Use custom target PseudoSourceValue. | ||||||
5387 | // TODO: We should use the value from the IR intrinsic call, but it might not | ||||||
5388 | // be available and how do we get it? | ||||||
5389 | MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS); | ||||||
5390 | return DAG.getLoad(MVT::i32, DL, QueuePtr.getValue(1), Ptr, PtrInfo, | ||||||
5391 | commonAlignment(Align(64), StructOffset), | ||||||
5392 | MachineMemOperand::MODereferenceable | | ||||||
5393 | MachineMemOperand::MOInvariant); | ||||||
5394 | } | ||||||

SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
                                             SelectionDAG &DAG) const {
  SDLoc SL(Op);
  const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op);

  SDValue Src = ASC->getOperand(0);
  SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64);

  const AMDGPUTargetMachine &TM =
      static_cast<const AMDGPUTargetMachine &>(getTargetMachine());

  // flat -> local/private
  if (ASC->getSrcAddressSpace() == AMDGPUAS::FLAT_ADDRESS) {
    unsigned DestAS = ASC->getDestAddressSpace();

    if (DestAS == AMDGPUAS::LOCAL_ADDRESS ||
        DestAS == AMDGPUAS::PRIVATE_ADDRESS) {
      unsigned NullVal = TM.getNullPointerValue(DestAS);
      SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
      SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE);
      SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src);

      return DAG.getNode(ISD::SELECT, SL, MVT::i32,
                         NonNull, Ptr, SegmentNullPtr);
    }
  }

  // local/private -> flat
  if (ASC->getDestAddressSpace() == AMDGPUAS::FLAT_ADDRESS) {
    unsigned SrcAS = ASC->getSrcAddressSpace();

    if (SrcAS == AMDGPUAS::LOCAL_ADDRESS ||
        SrcAS == AMDGPUAS::PRIVATE_ADDRESS) {
      unsigned NullVal = TM.getNullPointerValue(SrcAS);
      SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);

      SDValue NonNull
        = DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE);

      SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), SL, DAG);
      SDValue CvtPtr
        = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture);

      return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull,
                         DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr),
                         FlatNullPtr);
    }
  }

  if (ASC->getDestAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT &&
      Src.getValueType() == MVT::i64)
    return DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src);

  // global <-> flat are no-ops and never emitted.

  const MachineFunction &MF = DAG.getMachineFunction();
  DiagnosticInfoUnsupported InvalidAddrSpaceCast(
      MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc());
  DAG.getContext()->diagnose(InvalidAddrSpaceCast);

  return DAG.getUNDEF(ASC->getValueType(0));
}

// This lowers an INSERT_SUBVECTOR by extracting the individual elements from
// the small vector and inserting them into the big vector. That is better than
// the default expansion of doing it via a stack slot. Even though the use of
// the stack slot would be optimized away afterwards, the stack slot itself
// remains.
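//
// For example (illustrative only, not from the original source), lowering
//   insert_subvector <4 x i16> %vec, <2 x i16> %ins, 2
// produces roughly:
//   %e0  = extract_vector_elt %ins, 0
//   %v0  = insert_vector_elt  %vec, %e0, 2
//   %e1  = extract_vector_elt %ins, 1
//   %res = insert_vector_elt  %v0,  %e1, 3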
SDValue SITargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDValue Vec = Op.getOperand(0);
  SDValue Ins = Op.getOperand(1);
  SDValue Idx = Op.getOperand(2);
  EVT VecVT = Vec.getValueType();
  EVT InsVT = Ins.getValueType();
  EVT EltVT = VecVT.getVectorElementType();
  unsigned InsNumElts = InsVT.getVectorNumElements();
  unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
  SDLoc SL(Op);

  for (unsigned I = 0; I != InsNumElts; ++I) {
    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Ins,
                              DAG.getConstant(I, SL, MVT::i32));
    Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, SL, VecVT, Vec, Elt,
                      DAG.getConstant(IdxVal + I, SL, MVT::i32));
  }
  return Vec;
}

SDValue SITargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDValue Vec = Op.getOperand(0);
  SDValue InsVal = Op.getOperand(1);
  SDValue Idx = Op.getOperand(2);
  EVT VecVT = Vec.getValueType();
  EVT EltVT = VecVT.getVectorElementType();
  unsigned VecSize = VecVT.getSizeInBits();
  unsigned EltSize = EltVT.getSizeInBits();

  assert(VecSize <= 64);

  unsigned NumElts = VecVT.getVectorNumElements();
  SDLoc SL(Op);
  auto KIdx = dyn_cast<ConstantSDNode>(Idx);

  if (NumElts == 4 && EltSize == 16 && KIdx) {
    SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Vec);

    SDValue LoHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec,
                                 DAG.getConstant(0, SL, MVT::i32));
    SDValue HiHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec,
                                 DAG.getConstant(1, SL, MVT::i32));

    SDValue LoVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, LoHalf);
    SDValue HiVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, HiHalf);

    unsigned Idx = KIdx->getZExtValue();
    bool InsertLo = Idx < 2;
    SDValue InsHalf = DAG.getNode(ISD::INSERT_VECTOR_ELT, SL, MVT::v2i16,
                                  InsertLo ? LoVec : HiVec,
                                  DAG.getNode(ISD::BITCAST, SL, MVT::i16, InsVal),
                                  DAG.getConstant(InsertLo ? Idx : (Idx - 2), SL, MVT::i32));

    InsHalf = DAG.getNode(ISD::BITCAST, SL, MVT::i32, InsHalf);

    SDValue Concat = InsertLo ?
        DAG.getBuildVector(MVT::v2i32, SL, { InsHalf, HiHalf }) :
        DAG.getBuildVector(MVT::v2i32, SL, { LoHalf, InsHalf });

    return DAG.getNode(ISD::BITCAST, SL, VecVT, Concat);
  }

  if (isa<ConstantSDNode>(Idx))
    return SDValue();

  MVT IntVT = MVT::getIntegerVT(VecSize);

  // Avoid stack access for dynamic indexing.
  // v_bfi_b32 (v_bfm_b32 16, (shl idx, 16)), val, vec

  // Create a congruent vector with the target value in each element so that
  // the required element can be masked and ORed into the target vector.
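  // Worked example (illustrative): for v4i16, VecSize = 64 and EltSize = 16,
  // so ScaledIdx = Idx << 4 and BFM = 0xffff << ScaledIdx selects the target
  // lane. The result is then
  //   (BFM & splat(InsVal)) | (~BFM & Vec)
  // i.e. the new value in the selected lane, the old vector everywhere else.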
  SDValue ExtVal = DAG.getNode(ISD::BITCAST, SL, IntVT,
                               DAG.getSplatBuildVector(VecVT, SL, InsVal));

  assert(isPowerOf2_32(EltSize));
  SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32);

  // Convert vector index to bit-index.
  SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor);

  SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec);
  SDValue BFM = DAG.getNode(ISD::SHL, SL, IntVT,
                            DAG.getConstant(0xffff, SL, IntVT),
                            ScaledIdx);

  SDValue LHS = DAG.getNode(ISD::AND, SL, IntVT, BFM, ExtVal);
  SDValue RHS = DAG.getNode(ISD::AND, SL, IntVT,
                            DAG.getNOT(SL, BFM, IntVT), BCVec);

  SDValue BFI = DAG.getNode(ISD::OR, SL, IntVT, LHS, RHS);
  return DAG.getNode(ISD::BITCAST, SL, VecVT, BFI);
}

SDValue SITargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc SL(Op);

  EVT ResultVT = Op.getValueType();
  SDValue Vec = Op.getOperand(0);
  SDValue Idx = Op.getOperand(1);
  EVT VecVT = Vec.getValueType();
  unsigned VecSize = VecVT.getSizeInBits();
  EVT EltVT = VecVT.getVectorElementType();
  assert(VecSize <= 64);

  DAGCombinerInfo DCI(DAG, AfterLegalizeVectorOps, true, nullptr);

  // Make sure we do any optimizations that will make it easier to fold
  // source modifiers before obscuring it with bit operations.

  // XXX - Why doesn't this get called when vector_shuffle is expanded?
  if (SDValue Combined = performExtractVectorEltCombine(Op.getNode(), DCI))
    return Combined;

  unsigned EltSize = EltVT.getSizeInBits();
  assert(isPowerOf2_32(EltSize));

  MVT IntVT = MVT::getIntegerVT(VecSize);
  SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32);

  // Convert vector index to bit-index (* EltSize)
  SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor);

  SDValue BC = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec);
  SDValue Elt = DAG.getNode(ISD::SRL, SL, IntVT, BC, ScaledIdx);

  if (ResultVT == MVT::f16) {
    SDValue Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Elt);
    return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result);
  }

  return DAG.getAnyExtOrTrunc(Elt, SL, ResultVT);
}
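// Returns true if the mask pair at [Elt, Elt+1] reads an aligned pair of
// consecutive source elements. For example (illustrative), the mask <0,1,6,7>
// is contiguous at both positions 0 and 2, while a pair reading <1,2> is not,
// because element 1 is odd.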
static bool elementPairIsContiguous(ArrayRef<int> Mask, int Elt) {
  assert(Elt % 2 == 0);
  return Mask[Elt + 1] == Mask[Elt] + 1 && (Mask[Elt] % 2 == 0);
}

SDValue SITargetLowering::lowerVECTOR_SHUFFLE(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDLoc SL(Op);
  EVT ResultVT = Op.getValueType();
  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);

  EVT PackVT = ResultVT.isInteger() ? MVT::v2i16 : MVT::v2f16;
  EVT EltVT = PackVT.getVectorElementType();
  int SrcNumElts = Op.getOperand(0).getValueType().getVectorNumElements();

  // vector_shuffle <0,1,6,7> lhs, rhs
  // -> concat_vectors (extract_subvector lhs, 0), (extract_subvector rhs, 2)
  //
  // vector_shuffle <6,7,2,3> lhs, rhs
  // -> concat_vectors (extract_subvector rhs, 2), (extract_subvector lhs, 2)
  //
  // vector_shuffle <6,7,0,1> lhs, rhs
  // -> concat_vectors (extract_subvector rhs, 2), (extract_subvector lhs, 0)

  // Avoid scalarizing when both halves are reading from consecutive elements.
  SmallVector<SDValue, 4> Pieces;
  for (int I = 0, N = ResultVT.getVectorNumElements(); I != N; I += 2) {
    if (elementPairIsContiguous(SVN->getMask(), I)) {
      const int Idx = SVN->getMaskElt(I);
      int VecIdx = Idx < SrcNumElts ? 0 : 1;
      int EltIdx = Idx < SrcNumElts ? Idx : Idx - SrcNumElts;
      SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL,
                                   PackVT, SVN->getOperand(VecIdx),
                                   DAG.getConstant(EltIdx, SL, MVT::i32));
      Pieces.push_back(SubVec);
    } else {
      const int Idx0 = SVN->getMaskElt(I);
      const int Idx1 = SVN->getMaskElt(I + 1);
      int VecIdx0 = Idx0 < SrcNumElts ? 0 : 1;
      int VecIdx1 = Idx1 < SrcNumElts ? 0 : 1;
      int EltIdx0 = Idx0 < SrcNumElts ? Idx0 : Idx0 - SrcNumElts;
      int EltIdx1 = Idx1 < SrcNumElts ? Idx1 : Idx1 - SrcNumElts;

      SDValue Vec0 = SVN->getOperand(VecIdx0);
      SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
                                 Vec0, DAG.getConstant(EltIdx0, SL, MVT::i32));

      SDValue Vec1 = SVN->getOperand(VecIdx1);
      SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
                                 Vec1, DAG.getConstant(EltIdx1, SL, MVT::i32));
      Pieces.push_back(DAG.getBuildVector(PackVT, SL, { Elt0, Elt1 }));
    }
  }

  return DAG.getNode(ISD::CONCAT_VECTORS, SL, ResultVT, Pieces);
}

SDValue SITargetLowering::lowerBUILD_VECTOR(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDLoc SL(Op);
  EVT VT = Op.getValueType();

  if (VT == MVT::v4i16 || VT == MVT::v4f16) {
    EVT HalfVT = MVT::getVectorVT(VT.getVectorElementType().getSimpleVT(), 2);

    // Turn into pair of packed build_vectors.
    // TODO: Special case for constants that can be materialized with s_mov_b64.
    SDValue Lo = DAG.getBuildVector(HalfVT, SL,
                                    { Op.getOperand(0), Op.getOperand(1) });
    SDValue Hi = DAG.getBuildVector(HalfVT, SL,
                                    { Op.getOperand(2), Op.getOperand(3) });

    SDValue CastLo = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Lo);
    SDValue CastHi = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Hi);

    SDValue Blend = DAG.getBuildVector(MVT::v2i32, SL, { CastLo, CastHi });
    return DAG.getNode(ISD::BITCAST, SL, VT, Blend);
  }

  assert(VT == MVT::v2f16 || VT == MVT::v2i16);
  assert(!Subtarget->hasVOP3PInsts() && "this should be legal");

  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);

  // Avoid adding defined bits with the zero_extend.
  if (Hi.isUndef()) {
    Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Lo);
    SDValue ExtLo = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Lo);
    return DAG.getNode(ISD::BITCAST, SL, VT, ExtLo);
  }

  Hi = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Hi);
  Hi = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Hi);

  SDValue ShlHi = DAG.getNode(ISD::SHL, SL, MVT::i32, Hi,
                              DAG.getConstant(16, SL, MVT::i32));
  if (Lo.isUndef())
    return DAG.getNode(ISD::BITCAST, SL, VT, ShlHi);

  Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Lo);
  Lo = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Lo);

  SDValue Or = DAG.getNode(ISD::OR, SL, MVT::i32, Lo, ShlHi);
  return DAG.getNode(ISD::BITCAST, SL, VT, Or);
}

bool
SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // We can fold offsets for anything that doesn't require a GOT relocation.
  return (GA->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS ||
          GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
          GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
         !shouldEmitGOTReloc(GA->getGlobal());
}

static SDValue
buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV,
                        const SDLoc &DL, int64_t Offset, EVT PtrVT,
                        unsigned GAFlags = SIInstrInfo::MO_NONE) {
  assert(isInt<32>(Offset + 4) && "32-bit offset is expected!");
  // In order to support pc-relative addressing, the PC_ADD_REL_OFFSET SDNode
  // is lowered to the following code sequence:
  //
  // For constant address space:
  //   s_getpc_b64 s[0:1]
  //   s_add_u32 s0, s0, $symbol
  //   s_addc_u32 s1, s1, 0
  //
  //   s_getpc_b64 returns the address of the s_add_u32 instruction and then
  //   a fixup or relocation is emitted to replace $symbol with a literal
  //   constant, which is a pc-relative offset from the encoding of the $symbol
  //   operand to the global variable.
  //
  // For global address space:
  //   s_getpc_b64 s[0:1]
  //   s_add_u32 s0, s0, $symbol@{gotpc}rel32@lo
  //   s_addc_u32 s1, s1, $symbol@{gotpc}rel32@hi
  //
  //   s_getpc_b64 returns the address of the s_add_u32 instruction and then
  //   fixups or relocations are emitted to replace $symbol@*@lo and
  //   $symbol@*@hi with the lower 32 bits and higher 32 bits of a literal
  //   constant, which is a 64-bit pc-relative offset from the encoding of the
  //   $symbol operand to the global variable.
  //
  // What we want here is an offset from the value returned by s_getpc
  // (which is the address of the s_add_u32 instruction) to the global
  // variable, but since the encoding of $symbol starts 4 bytes after the start
  // of the s_add_u32 instruction, we end up with an offset that is 4 bytes too
  // small. This requires us to add 4 to the global variable offset in order to
  // compute the correct address. Similarly for the s_addc_u32 instruction, the
  // encoding of $symbol starts 12 bytes after the start of the s_add_u32
  // instruction.
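  //
  // A small worked example (illustrative, not from the original comment): if
  // s_getpc_b64 returns address P, the $symbol literal of s_add_u32 is encoded
  // at P + 4, so a rel32 relocation there resolves to GV - (P + 4). Folding
  // the extra +4 into the target-global-address offset below yields
  //   P + (GV - (P + 4)) + 4 == GV.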
  SDValue PtrLo =
      DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4, GAFlags);
  SDValue PtrHi;
  if (GAFlags == SIInstrInfo::MO_NONE) {
    PtrHi = DAG.getTargetConstant(0, DL, MVT::i32);
  } else {
    PtrHi =
        DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 12, GAFlags + 1);
  }
  return DAG.getNode(AMDGPUISD::PC_ADD_REL_OFFSET, DL, PtrVT, PtrLo, PtrHi);
}

SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
                                             SDValue Op,
                                             SelectionDAG &DAG) const {
  GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op);
  SDLoc DL(GSD);
  EVT PtrVT = Op.getValueType();

  const GlobalValue *GV = GSD->getGlobal();
  if ((GSD->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS &&
       shouldUseLDSConstAddress(GV)) ||
      GSD->getAddressSpace() == AMDGPUAS::REGION_ADDRESS ||
      GSD->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) {
    if (GSD->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS &&
        GV->hasExternalLinkage()) {
      Type *Ty = GV->getValueType();
      // HIP uses an unsized array `extern __shared__ T s[]`, or a similar
      // zero-sized type in other languages, to declare dynamic shared memory
      // whose size is not known at compile time. Such arrays are allocated by
      // the runtime and placed directly after the statically allocated ones.
      // They all share the same offset.
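      // A typical HIP source pattern that reaches this path might look like
      // this (illustrative sketch; names and sizes are hypothetical):
      //   extern __shared__ float dynShared[];   // zero-sized extern array
      //   __global__ void kern() { dynShared[threadIdx.x] = 0.f; }
      //   // actual size supplied at launch time:
      //   hipLaunchKernelGGL(kern, grid, block, dynSmemBytes, stream);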
      if (DAG.getDataLayout().getTypeAllocSize(Ty).isZero()) {
        assert(PtrVT == MVT::i32 && "32-bit pointer is expected.");
        // Adjust alignment for that dynamic shared memory array.
        MFI->setDynLDSAlign(DAG.getDataLayout(), *cast<GlobalVariable>(GV));
        return SDValue(
            DAG.getMachineNode(AMDGPU::GET_GROUPSTATICSIZE, DL, PtrVT), 0);
      }
    }
    return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG);
  }

  if (GSD->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
    SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, GSD->getOffset(),
                                            SIInstrInfo::MO_ABS32_LO);
    return DAG.getNode(AMDGPUISD::LDS, DL, MVT::i32, GA);
  }

  if (shouldEmitFixup(GV))
    return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT);
  else if (shouldEmitPCReloc(GV))
    return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT,
                                   SIInstrInfo::MO_REL32);

  SDValue GOTAddr = buildPCRelGlobalAddress(DAG, GV, DL, 0, PtrVT,
                                            SIInstrInfo::MO_GOTPCREL32);

  Type *Ty = PtrVT.getTypeForEVT(*DAG.getContext());
  PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS);
  const DataLayout &DataLayout = DAG.getDataLayout();
  Align Alignment = DataLayout.getABITypeAlign(PtrTy);
  MachinePointerInfo PtrInfo
    = MachinePointerInfo::getGOT(DAG.getMachineFunction());

  return DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), GOTAddr, PtrInfo, Alignment,
                     MachineMemOperand::MODereferenceable |
                         MachineMemOperand::MOInvariant);
}

SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain,
                                   const SDLoc &DL, SDValue V) const {
  // We can't use S_MOV_B32 directly, because there is no way to specify m0 as
  // the destination register.
  //
  // We can't use CopyToReg, because MachineCSE won't combine COPY instructions,
  // so we will end up with redundant moves to m0.
  //
  // We use a pseudo to ensure we emit s_mov_b32 with m0 as the direct result.

  // A Null SDValue creates a glue result.
  SDNode *M0 = DAG.getMachineNode(AMDGPU::SI_INIT_M0, DL, MVT::Other, MVT::Glue,
                                  V, Chain);
  return SDValue(M0, 0);
}

SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG,
                                                 SDValue Op,
                                                 MVT VT,
                                                 unsigned Offset) const {
  SDLoc SL(Op);
  SDValue Param = lowerKernargMemParameter(
      DAG, MVT::i32, MVT::i32, SL, DAG.getEntryNode(), Offset, Align(4), false);
  // The local size values will have the hi 16-bits as zero.
  return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Param,
                     DAG.getValueType(VT));
}

static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
                                        EVT VT) {
  DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
                                      "non-hsa intrinsic with hsa target",
                                      DL.getDebugLoc());
  DAG.getContext()->diagnose(BadIntrin);
  return DAG.getUNDEF(VT);
}

static SDValue emitRemovedIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
                                         EVT VT) {
  DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
                                      "intrinsic not supported on subtarget",
                                      DL.getDebugLoc());
  DAG.getContext()->diagnose(BadIntrin);
  return DAG.getUNDEF(VT);
}

static SDValue getBuildDwordsVector(SelectionDAG &DAG, SDLoc DL,
                                    ArrayRef<SDValue> Elts) {
  assert(!Elts.empty());
  MVT Type;
  unsigned NumElts = Elts.size();

  if (NumElts <= 8) {
    Type = MVT::getVectorVT(MVT::f32, NumElts);
  } else {
    assert(Elts.size() <= 16);
    Type = MVT::v16f32;
    NumElts = 16;
  }

  SmallVector<SDValue, 16> VecElts(NumElts);
  for (unsigned i = 0; i < Elts.size(); ++i) {
    SDValue Elt = Elts[i];
    if (Elt.getValueType() != MVT::f32)
      Elt = DAG.getBitcast(MVT::f32, Elt);
    VecElts[i] = Elt;
  }
  for (unsigned i = Elts.size(); i < NumElts; ++i)
    VecElts[i] = DAG.getUNDEF(MVT::f32);

  if (NumElts == 1)
    return VecElts[0];
  return DAG.getBuildVector(Type, DL, VecElts);
}

static SDValue padEltsToUndef(SelectionDAG &DAG, const SDLoc &DL, EVT CastVT,
                              SDValue Src, int ExtraElts) {
  EVT SrcVT = Src.getValueType();

  SmallVector<SDValue, 8> Elts;

  if (SrcVT.isVector())
    DAG.ExtractVectorElements(Src, Elts);
  else
    Elts.push_back(Src);

  SDValue Undef = DAG.getUNDEF(SrcVT.getScalarType());
  while (ExtraElts--)
    Elts.push_back(Undef);

  return DAG.getBuildVector(CastVT, DL, Elts);
}

// Re-construct the required return value for an image load intrinsic.
// This is more complicated due to the optional use of TexFailCtrl, which
// means the required return type is an aggregate.
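//
// For example (illustrative): a dmask=0xf image load with TFE enabled has the
// IR-level result { <4 x float>, i32, ch }, while the selected MIMG node
// itself produces one extra dword (5 data dwords total); the code below
// extracts the data portion, re-types it, and appends the TFE status dword.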
static SDValue constructRetValue(SelectionDAG &DAG,
                                 MachineSDNode *Result,
                                 ArrayRef<EVT> ResultTypes,
                                 bool IsTexFail, bool Unpacked, bool IsD16,
                                 int DMaskPop, int NumVDataDwords,
                                 const SDLoc &DL) {
  // Determine the required return type. This is the same regardless of the
  // IsTexFail flag.
  EVT ReqRetVT = ResultTypes[0];
  int ReqRetNumElts = ReqRetVT.isVector() ? ReqRetVT.getVectorNumElements() : 1;
  int NumDataDwords = (!IsD16 || (IsD16 && Unpacked)) ?
      ReqRetNumElts : (ReqRetNumElts + 1) / 2;

  int MaskPopDwords = (!IsD16 || (IsD16 && Unpacked)) ?
      DMaskPop : (DMaskPop + 1) / 2;

  MVT DataDwordVT = NumDataDwords == 1 ?
      MVT::i32 : MVT::getVectorVT(MVT::i32, NumDataDwords);

  MVT MaskPopVT = MaskPopDwords == 1 ?
      MVT::i32 : MVT::getVectorVT(MVT::i32, MaskPopDwords);

  SDValue Data(Result, 0);
  SDValue TexFail;

  if (DMaskPop > 0 && Data.getValueType() != MaskPopVT) {
    SDValue ZeroIdx = DAG.getConstant(0, DL, MVT::i32);
    if (MaskPopVT.isVector()) {
      Data = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MaskPopVT,
                         SDValue(Result, 0), ZeroIdx);
    } else {
      Data = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MaskPopVT,
                         SDValue(Result, 0), ZeroIdx);
    }
  }

  if (DataDwordVT.isVector())
    Data = padEltsToUndef(DAG, DL, DataDwordVT, Data,
                          NumDataDwords - MaskPopDwords);

  if (IsD16)
    Data = adjustLoadValueTypeImpl(Data, ReqRetVT, DL, DAG, Unpacked);

  EVT LegalReqRetVT = ReqRetVT;
  if (!ReqRetVT.isVector()) {
    if (!Data.getValueType().isInteger())
      Data = DAG.getNode(ISD::BITCAST, DL,
                         Data.getValueType().changeTypeToInteger(), Data);
    Data = DAG.getNode(ISD::TRUNCATE, DL, ReqRetVT.changeTypeToInteger(), Data);
  } else {
    // We need to widen the return vector to a legal type.
    if ((ReqRetVT.getVectorNumElements() % 2) == 1 &&
        ReqRetVT.getVectorElementType().getSizeInBits() == 16) {
      LegalReqRetVT =
          EVT::getVectorVT(*DAG.getContext(), ReqRetVT.getVectorElementType(),
                           ReqRetVT.getVectorNumElements() + 1);
    }
  }
  Data = DAG.getNode(ISD::BITCAST, DL, LegalReqRetVT, Data);

  if (IsTexFail) {
    TexFail =
        DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, SDValue(Result, 0),
                    DAG.getConstant(MaskPopDwords, DL, MVT::i32));

    return DAG.getMergeValues({Data, TexFail, SDValue(Result, 1)}, DL);
  }

  if (Result->getNumValues() == 1)
    return Data;

  return DAG.getMergeValues({Data, SDValue(Result, 1)}, DL);
}

static bool parseTexFail(SDValue TexFailCtrl, SelectionDAG &DAG, SDValue *TFE,
                         SDValue *LWE, bool &IsTexFail) {
  auto TexFailCtrlConst = cast<ConstantSDNode>(TexFailCtrl.getNode());

  uint64_t Value = TexFailCtrlConst->getZExtValue();
  if (Value) {
    IsTexFail = true;
  }

  // Bit 0 of the control value enables TFE and bit 1 enables LWE. Any other
  // set bit is unsupported, so report success only if nothing else remains.
  SDLoc DL(TexFailCtrlConst);
  *TFE = DAG.getTargetConstant((Value & 0x1) ? 1 : 0, DL, MVT::i32);
  Value &= ~(uint64_t)0x1;
  *LWE = DAG.getTargetConstant((Value & 0x2) ? 1 : 0, DL, MVT::i32);
  Value &= ~(uint64_t)0x2;

  return Value == 0;
}

static void packImage16bitOpsToDwords(SelectionDAG &DAG, SDValue Op,
                                      MVT PackVectorVT,
                                      SmallVectorImpl<SDValue> &PackedAddrs,
                                      unsigned DimIdx, unsigned EndIdx,
                                      unsigned NumGradients) {
  SDLoc DL(Op);
  for (unsigned I = DimIdx; I < EndIdx; I++) {
    SDValue Addr = Op.getOperand(I);

    // Gradients are packed with undef for each coordinate.
    // In <hi 16 bit>,<lo 16 bit> notation, the registers look like this:
    //   1D: undef,dx/dh; undef,dx/dv
    //   2D: dy/dh,dx/dh; dy/dv,dx/dv
    //   3D: dy/dh,dx/dh; undef,dz/dh; dy/dv,dx/dv; undef,dz/dv
    if (((I + 1) >= EndIdx) ||
        ((NumGradients / 2) % 2 == 1 && (I == DimIdx + (NumGradients / 2) - 1 ||
                                         I == DimIdx + NumGradients - 1))) {
      // An unpaired (last or odd) element: any_extend the 16-bit value into a
      // full dword, leaving the high half undefined.
      if (Addr.getValueType() != MVT::i16)
        Addr = DAG.getBitcast(MVT::i16, Addr);
      Addr = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Addr);
    } else {
      Addr = DAG.getBuildVector(PackVectorVT, DL, {Addr, Op.getOperand(I + 1)});
      I++;
    }
    Addr = DAG.getBitcast(MVT::f32, Addr);
    PackedAddrs.push_back(Addr);
  }
}

SDValue SITargetLowering::lowerImage(SDValue Op,
                                     const AMDGPU::ImageDimIntrinsicInfo *Intr,
                                     SelectionDAG &DAG, bool WithChain) const {
  SDLoc DL(Op);
  MachineFunction &MF = DAG.getMachineFunction();
  const GCNSubtarget* ST = &MF.getSubtarget<GCNSubtarget>();
  const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
      AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
  const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
  const AMDGPU::MIMGLZMappingInfo *LZMappingInfo =
      AMDGPU::getMIMGLZMappingInfo(Intr->BaseOpcode);
  const AMDGPU::MIMGMIPMappingInfo *MIPMappingInfo =
      AMDGPU::getMIMGMIPMappingInfo(Intr->BaseOpcode);
  unsigned IntrOpcode = Intr->BaseOpcode;
  bool IsGFX10Plus = AMDGPU::isGFX10Plus(*Subtarget);

  SmallVector<EVT, 3> ResultTypes(Op->values());
  SmallVector<EVT, 3> OrigResultTypes(Op->values());
  bool IsD16 = false;
  bool IsG16 = false;
  bool IsA16 = false;
  SDValue VData;
  int NumVDataDwords;
  bool AdjustRetType = false;

  // Offset of intrinsic arguments: with a chain, operand 0 is the chain and
  // operand 1 the intrinsic ID, so arguments start at operand 2.
  const unsigned ArgOffset = WithChain ? 2 : 1;

  unsigned DMask;
  unsigned DMaskLanes = 0;

  if (BaseOpcode->Atomic) {
    VData = Op.getOperand(2);

    bool Is64Bit = VData.getValueType() == MVT::i64;
    if (BaseOpcode->AtomicX2) {
      SDValue VData2 = Op.getOperand(3);
      VData = DAG.getBuildVector(Is64Bit ? MVT::v2i64 : MVT::v2i32, DL,
                                 {VData, VData2});
      if (Is64Bit)
        VData = DAG.getBitcast(MVT::v4i32, VData);

      ResultTypes[0] = Is64Bit ? MVT::v2i64 : MVT::v2i32;
      DMask = Is64Bit ? 0xf : 0x3;
      NumVDataDwords = Is64Bit ? 4 : 2;
    } else {
      DMask = Is64Bit ? 0x3 : 0x1;
      NumVDataDwords = Is64Bit ? 2 : 1;
    }
  } else {
    auto *DMaskConst =
        cast<ConstantSDNode>(Op.getOperand(ArgOffset + Intr->DMaskIndex));
    DMask = DMaskConst->getZExtValue();
    DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask);

    if (BaseOpcode->Store) {
      VData = Op.getOperand(2);

      MVT StoreVT = VData.getSimpleValueType();
      if (StoreVT.getScalarType() == MVT::f16) {
        if (!Subtarget->hasD16Images() || !BaseOpcode->HasD16)
          return Op; // D16 is unsupported for this instruction

        IsD16 = true;
        VData = handleD16VData(VData, DAG, true);
      }

      NumVDataDwords = (VData.getValueType().getSizeInBits() + 31) / 32;
    } else {
      // Work out the number of dwords based on the dmask popcount, the
      // underlying type, and whether packing is supported.
      MVT LoadVT = ResultTypes[0].getSimpleVT();
      if (LoadVT.getScalarType() == MVT::f16) {
        if (!Subtarget->hasD16Images() || !BaseOpcode->HasD16)
          return Op; // D16 is unsupported for this instruction

        IsD16 = true;
      }

      // Confirm that the return type is large enough for the dmask specified.
      if ((LoadVT.isVector() && LoadVT.getVectorNumElements() < DMaskLanes) ||
          (!LoadVT.isVector() && DMaskLanes > 1))
        return Op;

      // The sq block of gfx8 and gfx9 does not estimate register use
      // correctly for d16 image_gather4, image_gather4_l, and
      // image_gather4_lz instructions.
      if (IsD16 && !Subtarget->hasUnpackedD16VMem() &&
          !(BaseOpcode->Gather4 && Subtarget->hasImageGather4D16Bug()))
        NumVDataDwords = (DMaskLanes + 1) / 2;
      else
        NumVDataDwords = DMaskLanes;

      AdjustRetType = true;
    }
  }

  unsigned VAddrEnd = ArgOffset + Intr->VAddrEnd;
  SmallVector<SDValue, 4> VAddrs;

  // Optimize _L to _LZ when _L is zero
  if (LZMappingInfo) {
    if (auto *ConstantLod = dyn_cast<ConstantFPSDNode>(
            Op.getOperand(ArgOffset + Intr->LodIndex))) {
      if (ConstantLod->isZero() || ConstantLod->isNegative()) {
        IntrOpcode = LZMappingInfo->LZ; // set new opcode to _lz variant of _l
        VAddrEnd--;                     // remove 'lod'
      }
    }
  }

  // Optimize _mip away, when 'lod' is zero
  if (MIPMappingInfo) {
    if (auto *ConstantLod = dyn_cast<ConstantSDNode>(
            Op.getOperand(ArgOffset + Intr->MipIndex))) {
      if (ConstantLod->isNullValue()) {
        IntrOpcode = MIPMappingInfo->NONMIP; // set new opcode to variant without _mip
        VAddrEnd--;                          // remove 'mip'
      }
    }
  }

  // Push back extra arguments.
  for (unsigned I = Intr->VAddrStart; I < Intr->GradientStart; I++)
    VAddrs.push_back(Op.getOperand(ArgOffset + I));

  // Check for 16 bit addresses or derivatives and pack if true.
  MVT VAddrVT =
      Op.getOperand(ArgOffset + Intr->GradientStart).getSimpleValueType();
  MVT VAddrScalarVT = VAddrVT.getScalarType();
  MVT GradPackVectorVT = VAddrScalarVT == MVT::f16 ? MVT::v2f16 : MVT::v2i16;
  IsG16 = VAddrScalarVT == MVT::f16 || VAddrScalarVT == MVT::i16;

  VAddrVT = Op.getOperand(ArgOffset + Intr->CoordStart).getSimpleValueType();
  VAddrScalarVT = VAddrVT.getScalarType();
  MVT AddrPackVectorVT = VAddrScalarVT == MVT::f16 ? MVT::v2f16 : MVT::v2i16;
  IsA16 = VAddrScalarVT == MVT::f16 || VAddrScalarVT == MVT::i16;

  if (BaseOpcode->Gradients && !ST->hasG16() && (IsA16 != IsG16)) {
    // 16 bit gradients are supported, but are tied to the A16 control
    // so both gradients and addresses must be 16 bit
    LLVM_DEBUG(
        dbgs() << "Failed to lower image intrinsic: 16 bit addresses "
                  "require 16 bit args for both gradients and addresses");
    return Op;
  }

  if (IsA16) {
    if (!ST->hasA16()) {
      LLVM_DEBUG(dbgs() << "Failed to lower image intrinsic: Target does not "
                           "support 16 bit addresses\n");
      return Op;
    }
  }

  // We've dealt with incorrect input, so we know that if IsA16 or IsG16 is
  // set then we have to compress/pack operands (either addresses, gradients,
  // or both). In the case where a16 and gradients are tied (no G16 support),
  // we have already verified that both IsA16 and IsG16 are true.
  if (BaseOpcode->Gradients && IsG16 && ST->hasG16()) {
    // Activate g16
    const AMDGPU::MIMGG16MappingInfo *G16MappingInfo =
        AMDGPU::getMIMGG16MappingInfo(Intr->BaseOpcode);
    IntrOpcode = G16MappingInfo->G16; // set new opcode to variant with _g16
  }

  // Add gradients (packed or unpacked)
  if (IsG16) {
    // Pack the gradients
    // const int PackEndIdx = IsA16 ? VAddrEnd : (ArgOffset + Intr->CoordStart);
    packImage16bitOpsToDwords(DAG, Op, GradPackVectorVT, VAddrs,
                              ArgOffset + Intr->GradientStart,
                              ArgOffset + Intr->CoordStart, Intr->NumGradients);
  } else {
    for (unsigned I = ArgOffset + Intr->GradientStart;
         I < ArgOffset + Intr->CoordStart; I++)
      VAddrs.push_back(Op.getOperand(I));
  }

  // Add addresses (packed or unpacked)
  if (IsA16) {
    packImage16bitOpsToDwords(DAG, Op, AddrPackVectorVT, VAddrs,
                              ArgOffset + Intr->CoordStart, VAddrEnd,
                              0 /* No gradients */);
  } else {
    // Add uncompressed address
    for (unsigned I = ArgOffset + Intr->CoordStart; I < VAddrEnd; I++)
      VAddrs.push_back(Op.getOperand(I));
  }

  // If the register allocator cannot place the address registers contiguously
  // without introducing moves, then using the non-sequential address encoding
  // is always preferable, since it saves VALU instructions and is usually a
  // wash in terms of code size or even better.
  //
  // However, we currently have no way of hinting to the register allocator
  // that MIMG addresses should be placed contiguously when it is possible to
  // do so, so force non-NSA for the common 2-address case as a heuristic.
  //
  // SIShrinkInstructions will convert NSA encodings to non-NSA after register
  // allocation when possible.
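  //
  // Illustrative example (not from the original comment): with NSA, an
  // image_sample needing three address dwords can read them from arbitrary
  // VGPRs, e.g. v4, v9 and v2; without NSA the same operands must first be
  // copied into a contiguous tuple such as v[2:4].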
  bool UseNSA = ST->hasFeature(AMDGPU::FeatureNSAEncoding) &&
                VAddrs.size() >= 3 &&
                VAddrs.size() <= (unsigned)ST->getNSAMaxSize();
  SDValue VAddr;
  if (!UseNSA)
    VAddr = getBuildDwordsVector(DAG, DL, VAddrs);

  SDValue True = DAG.getTargetConstant(1, DL, MVT::i1);
  SDValue False = DAG.getTargetConstant(0, DL, MVT::i1);
  SDValue Unorm;
  if (!BaseOpcode->Sampler) {
    Unorm = True;
  } else {
    auto UnormConst =
        cast<ConstantSDNode>(Op.getOperand(ArgOffset + Intr->UnormIndex));

    Unorm = UnormConst->getZExtValue() ? True : False;
  }

  SDValue TFE;
  SDValue LWE;
  SDValue TexFail = Op.getOperand(ArgOffset + Intr->TexFailCtrlIndex);
  bool IsTexFail = false;
  if (!parseTexFail(TexFail, DAG, &TFE, &LWE, IsTexFail))
    return Op;

  if (IsTexFail) {
    if (!DMaskLanes) {
      // Expecting to get an error flag since TFC is on - and dmask is 0.
      // Force dmask to be at least 1, otherwise the instruction will fail.
      DMask = 0x1;
      DMaskLanes = 1;
      NumVDataDwords = 1;
    }
    NumVDataDwords += 1;
    AdjustRetType = true;
  }

  // Something earlier has tagged the return type as needing adjustment.
  // This happens if the instruction is a load or has set TexFailCtrl flags.
  if (AdjustRetType) {
    // NumVDataDwords reflects the true number of dwords required in the
    // return type.
    if (DMaskLanes == 0 && !BaseOpcode->Store) {
      // This is a no-op load. This can be eliminated.
      SDValue Undef = DAG.getUNDEF(Op.getValueType());
      if (isa<MemSDNode>(Op))
        return DAG.getMergeValues({Undef, Op.getOperand(0)}, DL);
      return Undef;
    }

    EVT NewVT = NumVDataDwords > 1 ?
        EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumVDataDwords)
        : MVT::i32;

    ResultTypes[0] = NewVT;
    if (ResultTypes.size() == 3) {
      // The original result was an aggregate type used for TexFailCtrl
      // results. The actual instruction returns as a vector type which has
      // now been created. Remove the aggregate result.
      ResultTypes.erase(&ResultTypes[1]);
    }
  }

  unsigned CPol = cast<ConstantSDNode>(
      Op.getOperand(ArgOffset + Intr->CachePolicyIndex))->getZExtValue();
  if (BaseOpcode->Atomic)
    CPol |= AMDGPU::CPol::GLC; // TODO no-return optimization
  if (CPol & ~AMDGPU::CPol::ALL)
    return Op;

  SmallVector<SDValue, 26> Ops;
  if (BaseOpcode->Store || BaseOpcode->Atomic)
    Ops.push_back(VData); // vdata
  if (UseNSA)
    append_range(Ops, VAddrs);
  else
    Ops.push_back(VAddr);
  Ops.push_back(Op.getOperand(ArgOffset + Intr->RsrcIndex));
  if (BaseOpcode->Sampler)
    Ops.push_back(Op.getOperand(ArgOffset + Intr->SampIndex));
  Ops.push_back(DAG.getTargetConstant(DMask, DL, MVT::i32));
  if (IsGFX10Plus)
    Ops.push_back(DAG.getTargetConstant(DimInfo->Encoding, DL, MVT::i32));
  Ops.push_back(Unorm);
  Ops.push_back(DAG.getTargetConstant(CPol, DL, MVT::i32));
  Ops.push_back(IsA16 && // r128, a16 for gfx9
                ST->hasFeature(AMDGPU::FeatureR128A16) ? True : False);
  if (IsGFX10Plus)
    Ops.push_back(IsA16 ? True : False);
  if (!Subtarget->hasGFX90AInsts()) {
    Ops.push_back(TFE); // tfe
  } else if (cast<ConstantSDNode>(TFE)->getZExtValue()) {
    report_fatal_error("TFE is not supported on this GPU");
  }
  Ops.push_back(LWE); // lwe
  if (!IsGFX10Plus)
    Ops.push_back(DimInfo->DA ? True : False);
  if (BaseOpcode->HasD16)
    Ops.push_back(IsD16 ? True : False);
  if (isa<MemSDNode>(Op))
    Ops.push_back(Op.getOperand(0)); // chain

  int NumVAddrDwords =
      UseNSA ? VAddrs.size() : VAddr.getValueType().getSizeInBits() / 32;
  int Opcode = -1;

  if (IsGFX10Plus) {
    Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
                                   UseNSA ? AMDGPU::MIMGEncGfx10NSA
                                          : AMDGPU::MIMGEncGfx10Default,
                                   NumVDataDwords, NumVAddrDwords);
  } else {
    if (Subtarget->hasGFX90AInsts()) {
      Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx90a,
                                     NumVDataDwords, NumVAddrDwords);
      if (Opcode == -1)
        report_fatal_error(
            "requested image instruction is not supported on this GPU");
    }
    if (Opcode == -1 &&
        Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
      Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
                                     NumVDataDwords, NumVAddrDwords);
    if (Opcode == -1)
      Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
                                     NumVDataDwords, NumVAddrDwords);
  }
  assert(Opcode != -1);

  MachineSDNode *NewNode = DAG.getMachineNode(Opcode, DL, ResultTypes, Ops);
  if (auto MemOp = dyn_cast<MemSDNode>(Op)) {
    MachineMemOperand *MemRef = MemOp->getMemOperand();
    DAG.setNodeMemRefs(NewNode, {MemRef});
  }

  if (BaseOpcode->AtomicX2) {
    SmallVector<SDValue, 1> Elt;
    DAG.ExtractVectorElements(SDValue(NewNode, 0), Elt, 0, 1);
    return DAG.getMergeValues({Elt[0], SDValue(NewNode, 1)}, DL);
  }
  if (BaseOpcode->Store)
    return SDValue(NewNode, 0);
  return constructRetValue(DAG, NewNode,
                           OrigResultTypes, IsTexFail,
                           Subtarget->hasUnpackedD16VMem(), IsD16,
                           DMaskLanes, NumVDataDwords, DL);
}

SDValue SITargetLowering::lowerSBuffer(EVT VT, SDLoc DL, SDValue Rsrc,
                                       SDValue Offset, SDValue CachePolicy,
                                       SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  const DataLayout &DataLayout = DAG.getDataLayout();
  Align Alignment =
      DataLayout.getABITypeAlign(VT.getTypeForEVT(*DAG.getContext()));

  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo(),
      MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
          MachineMemOperand::MOInvariant,
      VT.getStoreSize(), Alignment);

  if (!Offset->isDivergent()) {
    SDValue Ops[] = {
        Rsrc,
        Offset, // Offset
        CachePolicy
    };

    // Widen vec3 load to vec4.
    if (VT.isVector() && VT.getVectorNumElements() == 3) {
      EVT WidenedVT =
          EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), 4);
      auto WidenedOp = DAG.getMemIntrinsicNode(
          AMDGPUISD::SBUFFER_LOAD, DL, DAG.getVTList(WidenedVT), Ops, WidenedVT,
          MF.getMachineMemOperand(MMO, 0, WidenedVT.getStoreSize()));
      auto Subvector = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, WidenedOp,
                                   DAG.getVectorIdxConstant(0, DL));
      return Subvector;
    }

    return DAG.getMemIntrinsicNode(AMDGPUISD::SBUFFER_LOAD, DL,
                                   DAG.getVTList(VT), Ops, VT, MMO);
  }

  // We have a divergent offset. Emit a MUBUF buffer load instead. We can
  // assume that the buffer is unswizzled.
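  // Illustrative example: a v8f32 load with a divergent offset is emitted
  // below as two dwordx4-sized BUFFER_LOAD operations at InstOffset and
  // InstOffset + 16, and the pieces are concatenated back into the original
  // vector type.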
6431 | SmallVector<SDValue, 4> Loads; | ||||||
6432 | unsigned NumLoads = 1; | ||||||
6433 | MVT LoadVT = VT.getSimpleVT(); | ||||||
6434 | unsigned NumElts = LoadVT.isVector() ? LoadVT.getVectorNumElements() : 1; | ||||||
6435 | assert((LoadVT.getScalarType() == MVT::i32 ||(static_cast <bool> ((LoadVT.getScalarType() == MVT::i32 || LoadVT.getScalarType() == MVT::f32)) ? void (0) : __assert_fail ("(LoadVT.getScalarType() == MVT::i32 || LoadVT.getScalarType() == MVT::f32)" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AMDGPU/SIISelLowering.cpp" , 6436, __extension__ __PRETTY_FUNCTION__)) | ||||||
6436 | LoadVT.getScalarType() == MVT::f32))(static_cast <bool> ((LoadVT.getScalarType() == MVT::i32 || LoadVT.getScalarType() == MVT::f32)) ? void (0) : __assert_fail ("(LoadVT.getScalarType() == MVT::i32 || LoadVT.getScalarType() == MVT::f32)" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AMDGPU/SIISelLowering.cpp" , 6436, __extension__ __PRETTY_FUNCTION__)); | ||||||
6437 | |||||||
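  // A MUBUF load returns at most four dwords, so split wider vectors into a
  // series of dwordx4 loads and concatenate the pieces afterwards.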
  if (NumElts == 8 || NumElts == 16) {
    NumLoads = NumElts / 4;
    LoadVT = MVT::getVectorVT(LoadVT.getScalarType(), 4);
  }

  SDVTList VTList = DAG.getVTList({LoadVT, MVT::Glue});
  SDValue Ops[] = {
      DAG.getEntryNode(),                    // Chain
      Rsrc,                                  // rsrc
      DAG.getConstant(0, DL, MVT::i32),      // vindex
      {},                                    // voffset
      {},                                    // soffset
      {},                                    // offset
      CachePolicy,                           // cachepolicy
      DAG.getTargetConstant(0, DL, MVT::i1), // idxen
  };

  // Use the alignment to ensure that the required offsets will fit into the
  // immediate offsets.
  setBufferOffsets(Offset, DAG, &Ops[3],
                   NumLoads > 1 ? Align(16 * NumLoads) : Align(4));

  uint64_t InstOffset = cast<ConstantSDNode>(Ops[5])->getZExtValue();
  for (unsigned i = 0; i < NumLoads; ++i) {
    Ops[5] = DAG.getTargetConstant(InstOffset + 16 * i, DL, MVT::i32);
    Loads.push_back(getMemIntrinsicNode(AMDGPUISD::BUFFER_LOAD, DL, VTList,
                                        Ops, LoadVT, MMO, DAG));
  }

  if (NumElts == 8 || NumElts == 16)
    return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Loads);

  return Loads[0];
}

SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                  SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  auto MFI = MF.getInfo<SIMachineFunctionInfo>();

  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  // TODO: Should this propagate fast-math-flags?

  switch (IntrinsicID) {
  case Intrinsic::amdgcn_implicit_buffer_ptr: {
    if (getSubtarget()->isAmdHsaOrMesa(MF.getFunction()))
      return emitNonHSAIntrinsicError(DAG, DL, VT);
    return getPreloadedValue(DAG, *MFI, VT,
                             AMDGPUFunctionArgInfo::IMPLICIT_BUFFER_PTR);
  }
  case Intrinsic::amdgcn_dispatch_ptr:
  case Intrinsic::amdgcn_queue_ptr: {
    if (!Subtarget->isAmdHsaOrMesa(MF.getFunction())) {
      DiagnosticInfoUnsupported BadIntrin(
          MF.getFunction(), "unsupported hsa intrinsic without hsa target",
          DL.getDebugLoc());
      DAG.getContext()->diagnose(BadIntrin);
      return DAG.getUNDEF(VT);
    }

    auto RegID = IntrinsicID == Intrinsic::amdgcn_dispatch_ptr ?
      AMDGPUFunctionArgInfo::DISPATCH_PTR : AMDGPUFunctionArgInfo::QUEUE_PTR;
    return getPreloadedValue(DAG, *MFI, VT, RegID);
  }
  case Intrinsic::amdgcn_implicitarg_ptr: {
    if (MFI->isEntryFunction())
      return getImplicitArgPtr(DAG, DL);
    return getPreloadedValue(DAG, *MFI, VT,
                             AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
  }
  case Intrinsic::amdgcn_kernarg_segment_ptr: {
    if (!AMDGPU::isKernel(MF.getFunction().getCallingConv())) {
      // This only makes sense to call in a kernel, so just lower to null.
      return DAG.getConstant(0, DL, VT);
    }

    return getPreloadedValue(DAG, *MFI, VT,
                             AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
  }
  case Intrinsic::amdgcn_dispatch_id: {
    return getPreloadedValue(DAG, *MFI, VT, AMDGPUFunctionArgInfo::DISPATCH_ID);
  }
  case Intrinsic::amdgcn_rcp:
    return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1));
  case Intrinsic::amdgcn_rsq:
    return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
  case Intrinsic::amdgcn_rsq_legacy:
    if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
      return emitRemovedIntrinsicError(DAG, DL, VT);
    return SDValue();
  case Intrinsic::amdgcn_rcp_legacy:
    if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
      return emitRemovedIntrinsicError(DAG, DL, VT);
    return DAG.getNode(AMDGPUISD::RCP_LEGACY, DL, VT, Op.getOperand(1));
  case Intrinsic::amdgcn_rsq_clamp: {
    if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS)
      return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1));

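    // No rsq_clamp instruction on VI and newer; emulate it by clamping the
    // plain rsq result into [-FLT_MAX, +FLT_MAX] for the value type.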
    Type *Type = VT.getTypeForEVT(*DAG.getContext());
    APFloat Max = APFloat::getLargest(Type->getFltSemantics());
    APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true);

    SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
    SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq,
                              DAG.getConstantFP(Max, DL, VT));
    return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp,
                       DAG.getConstantFP(Min, DL, VT));
  }
  case Intrinsic::r600_read_ngroups_x:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                                    SI::KernelInputOffsets::NGROUPS_X, Align(4),
                                    false);
  case Intrinsic::r600_read_ngroups_y:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                                    SI::KernelInputOffsets::NGROUPS_Y, Align(4),
                                    false);
  case Intrinsic::r600_read_ngroups_z:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                                    SI::KernelInputOffsets::NGROUPS_Z, Align(4),
                                    false);
  case Intrinsic::r600_read_global_size_x:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                                    SI::KernelInputOffsets::GLOBAL_SIZE_X,
                                    Align(4), false);
  case Intrinsic::r600_read_global_size_y:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                                    SI::KernelInputOffsets::GLOBAL_SIZE_Y,
                                    Align(4), false);
  case Intrinsic::r600_read_global_size_z:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                                    SI::KernelInputOffsets::GLOBAL_SIZE_Z,
                                    Align(4), false);
  case Intrinsic::r600_read_local_size_x:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerImplicitZextParam(DAG, Op, MVT::i16,
                                  SI::KernelInputOffsets::LOCAL_SIZE_X);
  case Intrinsic::r600_read_local_size_y:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerImplicitZextParam(DAG, Op, MVT::i16,
                                  SI::KernelInputOffsets::LOCAL_SIZE_Y);
  case Intrinsic::r600_read_local_size_z:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerImplicitZextParam(DAG, Op, MVT::i16,
                                  SI::KernelInputOffsets::LOCAL_SIZE_Z);
  case Intrinsic::amdgcn_workgroup_id_x:
    return getPreloadedValue(DAG, *MFI, VT,
                             AMDGPUFunctionArgInfo::WORKGROUP_ID_X);
  case Intrinsic::amdgcn_workgroup_id_y:
    return getPreloadedValue(DAG, *MFI, VT,
                             AMDGPUFunctionArgInfo::WORKGROUP_ID_Y);
  case Intrinsic::amdgcn_workgroup_id_z:
    return getPreloadedValue(DAG, *MFI, VT,
                             AMDGPUFunctionArgInfo::WORKGROUP_ID_Z);
  case Intrinsic::amdgcn_workitem_id_x:
    return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
                          SDLoc(DAG.getEntryNode()),
                          MFI->getArgInfo().WorkItemIDX);
  case Intrinsic::amdgcn_workitem_id_y:
    return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
                          SDLoc(DAG.getEntryNode()),
                          MFI->getArgInfo().WorkItemIDY);
  case Intrinsic::amdgcn_workitem_id_z:
    return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
                          SDLoc(DAG.getEntryNode()),
                          MFI->getArgInfo().WorkItemIDZ);
  case Intrinsic::amdgcn_wavefrontsize:
    return DAG.getConstant(MF.getSubtarget<GCNSubtarget>().getWavefrontSize(),
                           SDLoc(Op), MVT::i32);
  case Intrinsic::amdgcn_s_buffer_load: {
    unsigned CPol = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
    if (CPol & ~AMDGPU::CPol::ALL)
      return Op;
    return lowerSBuffer(VT, DL, Op.getOperand(1), Op.getOperand(2),
                        Op.getOperand(3), DAG);
  }
  case Intrinsic::amdgcn_fdiv_fast:
    return lowerFDIV_FAST(Op, DAG);
  case Intrinsic::amdgcn_sin:
    return DAG.getNode(AMDGPUISD::SIN_HW, DL, VT, Op.getOperand(1));

  case Intrinsic::amdgcn_cos:
    return DAG.getNode(AMDGPUISD::COS_HW, DL, VT, Op.getOperand(1));

  case Intrinsic::amdgcn_mul_u24:
    return DAG.getNode(AMDGPUISD::MUL_U24, DL, VT, Op.getOperand(1),
                       Op.getOperand(2));
  case Intrinsic::amdgcn_mul_i24:
    return DAG.getNode(AMDGPUISD::MUL_I24, DL, VT, Op.getOperand(1),
                       Op.getOperand(2));

  case Intrinsic::amdgcn_log_clamp: {
    if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS)
      return SDValue();

    return emitRemovedIntrinsicError(DAG, DL, VT);
  }
  case Intrinsic::amdgcn_ldexp:
    return DAG.getNode(AMDGPUISD::LDEXP, DL, VT,
                       Op.getOperand(1), Op.getOperand(2));

  case Intrinsic::amdgcn_fract:
    return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));

  case Intrinsic::amdgcn_class:
    return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT,
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::amdgcn_div_fmas:
    return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
                       Op.getOperand(4));

  case Intrinsic::amdgcn_div_fixup:
    return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

  case Intrinsic::amdgcn_div_scale: {
    const ConstantSDNode *Param = cast<ConstantSDNode>(Op.getOperand(3));

    // Translate to the operands expected by the machine instruction. The
    // first parameter must be the same as the first instruction.
    SDValue Numerator = Op.getOperand(1);
    SDValue Denominator = Op.getOperand(2);

    // Note this order is opposite of the machine instruction's operations,
    // which is s0.f = Quotient, s1.f = Denominator, s2.f = Numerator. The
    // intrinsic has the numerator as the first operand to match a normal
    // division operation.

    SDValue Src0 = Param->isAllOnesValue() ? Numerator : Denominator;

    return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0,
                       Denominator, Numerator);
  }
  case Intrinsic::amdgcn_icmp: {
    // There is a Pat that handles this variant, so return it as-is.
    if (Op.getOperand(1).getValueType() == MVT::i1 &&
        Op.getConstantOperandVal(2) == 0 &&
        Op.getConstantOperandVal(3) == ICmpInst::Predicate::ICMP_NE)
      return Op;
    return lowerICMPIntrinsic(*this, Op.getNode(), DAG);
  }
  case Intrinsic::amdgcn_fcmp: {
    return lowerFCMPIntrinsic(*this, Op.getNode(), DAG);
  }
  case Intrinsic::amdgcn_ballot:
    return lowerBALLOTIntrinsic(*this, Op.getNode(), DAG);
  case Intrinsic::amdgcn_fmed3:
    return DAG.getNode(AMDGPUISD::FMED3, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::amdgcn_fdot2:
    return DAG.getNode(AMDGPUISD::FDOT2, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
                       Op.getOperand(4));
  case Intrinsic::amdgcn_fmul_legacy:
    return DAG.getNode(AMDGPUISD::FMUL_LEGACY, DL, VT,
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::amdgcn_sffbh:
    return DAG.getNode(AMDGPUISD::FFBH_I32, DL, VT, Op.getOperand(1));
  case Intrinsic::amdgcn_sbfe:
    return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::amdgcn_ubfe:
    return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::amdgcn_cvt_pkrtz:
  case Intrinsic::amdgcn_cvt_pknorm_i16:
  case Intrinsic::amdgcn_cvt_pknorm_u16:
  case Intrinsic::amdgcn_cvt_pk_i16:
  case Intrinsic::amdgcn_cvt_pk_u16: {
    // FIXME: Stop adding cast if v2f16/v2i16 are legal.
    EVT VT = Op.getValueType();
    unsigned Opcode;

    if (IntrinsicID == Intrinsic::amdgcn_cvt_pkrtz)
      Opcode = AMDGPUISD::CVT_PKRTZ_F16_F32;
    else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_i16)
      Opcode = AMDGPUISD::CVT_PKNORM_I16_F32;
    else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_u16)
      Opcode = AMDGPUISD::CVT_PKNORM_U16_F32;
    else if (IntrinsicID == Intrinsic::amdgcn_cvt_pk_i16)
      Opcode = AMDGPUISD::CVT_PK_I16_I32;
    else
      Opcode = AMDGPUISD::CVT_PK_U16_U32;

    if (isTypeLegal(VT))
      return DAG.getNode(Opcode, DL, VT, Op.getOperand(1), Op.getOperand(2));

    SDValue Node = DAG.getNode(Opcode, DL, MVT::i32,
                               Op.getOperand(1), Op.getOperand(2));
    return DAG.getNode(ISD::BITCAST, DL, VT, Node);
  }
  case Intrinsic::amdgcn_fmad_ftz:
    return DAG.getNode(AMDGPUISD::FMAD_FTZ, DL, VT, Op.getOperand(1),
                       Op.getOperand(2), Op.getOperand(3));

  case Intrinsic::amdgcn_if_break:
    return SDValue(DAG.getMachineNode(AMDGPU::SI_IF_BREAK, DL, VT,
                                      Op->getOperand(1), Op->getOperand(2)), 0);

  case Intrinsic::amdgcn_groupstaticsize: {
    Triple::OSType OS = getTargetMachine().getTargetTriple().getOS();
    if (OS == Triple::AMDHSA || OS == Triple::AMDPAL)
      return Op;

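    // On other OSes (e.g. Mesa3D) the LDS size is not known here; emit an
    // absolute 32-bit relocation against the intrinsic's named symbol to be
    // resolved later.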
    const Module *M = MF.getFunction().getParent();
    const GlobalValue *GV =
        M->getNamedValue(Intrinsic::getName(Intrinsic::amdgcn_groupstaticsize));
    SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, 0,
                                            SIInstrInfo::MO_ABS32_LO);
    return {DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, GA), 0};
  }
  case Intrinsic::amdgcn_is_shared:
  case Intrinsic::amdgcn_is_private: {
    SDLoc SL(Op);
    unsigned AS = (IntrinsicID == Intrinsic::amdgcn_is_shared) ?
      AMDGPUAS::LOCAL_ADDRESS : AMDGPUAS::PRIVATE_ADDRESS;
    SDValue Aperture = getSegmentAperture(AS, SL, DAG);
    SDValue SrcVec = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32,
                                 Op.getOperand(1));

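    // A flat pointer lies in the segment iff the high dword of the address
    // equals the segment's aperture base.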
    SDValue SrcHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, SrcVec,
                                DAG.getConstant(1, SL, MVT::i32));
    return DAG.getSetCC(SL, MVT::i1, SrcHi, Aperture, ISD::SETEQ);
  }
  case Intrinsic::amdgcn_alignbit:
    return DAG.getNode(ISD::FSHR, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::amdgcn_perm:
    return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, Op.getOperand(1),
                       Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::amdgcn_reloc_constant: {
    Module *M = const_cast<Module *>(MF.getFunction().getParent());
    const MDNode *Metadata = cast<MDNodeSDNode>(Op.getOperand(1))->getMD();
    auto SymbolName = cast<MDString>(Metadata->getOperand(0))->getString();
    auto RelocSymbol = cast<GlobalVariable>(
        M->getOrInsertGlobal(SymbolName, Type::getInt32Ty(M->getContext())));
    SDValue GA = DAG.getTargetGlobalAddress(RelocSymbol, DL, MVT::i32, 0,
                                            SIInstrInfo::MO_ABS32_LO);
    return {DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, GA), 0};
  }
  default:
    if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
            AMDGPU::getImageDimIntrinsicInfo(IntrinsicID))
      return lowerImage(Op, ImageDimIntr, DAG, false);

    return Op;
  }
}

/// Update \p MMO based on the offset inputs to an intrinsic.
static void updateBufferMMO(MachineMemOperand *MMO, SDValue VOffset,
                            SDValue SOffset, SDValue Offset,
                            SDValue VIndex = SDValue()) {
  if (!isa<ConstantSDNode>(VOffset) || !isa<ConstantSDNode>(SOffset) ||
      !isa<ConstantSDNode>(Offset)) {
    // The combined offset is not known to be constant, so we cannot represent
    // it in the MMO. Give up.
    MMO->setValue((Value *)nullptr);
    return;
  }

  if (VIndex && (!isa<ConstantSDNode>(VIndex) ||
                 !cast<ConstantSDNode>(VIndex)->isNullValue())) {
    // The strided index component of the address is not known to be zero, so
    // we cannot represent it in the MMO. Give up.
    MMO->setValue((Value *)nullptr);
    return;
  }

  MMO->setOffset(cast<ConstantSDNode>(VOffset)->getSExtValue() +
                 cast<ConstantSDNode>(SOffset)->getSExtValue() +
                 cast<ConstantSDNode>(Offset)->getSExtValue());
}

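// Lower a raw buffer atomic intrinsic to the given AMDGPUISD buffer atomic
// node. Raw variants address the buffer without a vindex, so idxen is 0.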
SDValue SITargetLowering::lowerRawBufferAtomicIntrin(SDValue Op,
                                                     SelectionDAG &DAG,
                                                     unsigned NewOpcode) const {
  SDLoc DL(Op);

  SDValue VData = Op.getOperand(2);
  auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
  SDValue Ops[] = {
      Op.getOperand(0),                      // Chain
      VData,                                 // vdata
      Op.getOperand(3),                      // rsrc
      DAG.getConstant(0, DL, MVT::i32),      // vindex
      Offsets.first,                         // voffset
      Op.getOperand(5),                      // soffset
      Offsets.second,                        // offset
      Op.getOperand(6),                      // cachepolicy
      DAG.getTargetConstant(0, DL, MVT::i1), // idxen
  };

  auto *M = cast<MemSDNode>(Op);
  updateBufferMMO(M->getMemOperand(), Ops[4], Ops[5], Ops[6]);

  EVT MemVT = VData.getValueType();
  return DAG.getMemIntrinsicNode(NewOpcode, DL, Op->getVTList(), Ops, MemVT,
                                 M->getMemOperand());
}

// Return a value to use for the idxen operand by examining the vindex operand.
static unsigned getIdxEn(SDValue VIndex) {
  if (auto VIndexC = dyn_cast<ConstantSDNode>(VIndex))
    // No need to set idxen if vindex is known to be zero.
    return VIndexC->getZExtValue() != 0;
  return 1;
}

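// Lower a struct buffer atomic intrinsic. Struct variants always carry a
// vindex operand, so idxen is 1.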
SDValue
SITargetLowering::lowerStructBufferAtomicIntrin(SDValue Op, SelectionDAG &DAG,
                                                unsigned NewOpcode) const {
  SDLoc DL(Op);

  SDValue VData = Op.getOperand(2);
  auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
  SDValue Ops[] = {
      Op.getOperand(0),                      // Chain
      VData,                                 // vdata
      Op.getOperand(3),                      // rsrc
      Op.getOperand(4),                      // vindex
      Offsets.first,                         // voffset
      Op.getOperand(6),                      // soffset
      Offsets.second,                        // offset
      Op.getOperand(7),                      // cachepolicy
      DAG.getTargetConstant(1, DL, MVT::i1), // idxen
  };

  auto *M = cast<MemSDNode>(Op);
  updateBufferMMO(M->getMemOperand(), Ops[4], Ops[5], Ops[6], Ops[3]);

  EVT MemVT = VData.getValueType();
  return DAG.getMemIntrinsicNode(NewOpcode, DL, Op->getVTList(), Ops, MemVT,
                                 M->getMemOperand());
}

SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
                                                 SelectionDAG &DAG) const {
  unsigned IntrID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  SDLoc DL(Op);

  switch (IntrID) {
  case Intrinsic::amdgcn_ds_ordered_add:
  case Intrinsic::amdgcn_ds_ordered_swap: {
    MemSDNode *M = cast<MemSDNode>(Op);
    SDValue Chain = M->getOperand(0);
    SDValue M0 = M->getOperand(2);
    SDValue Value = M->getOperand(3);
    unsigned IndexOperand = M->getConstantOperandVal(7);
    unsigned WaveRelease = M->getConstantOperandVal(8);
    unsigned WaveDone = M->getConstantOperandVal(9);

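    // Bits 5:0 of the index operand select the ordered-count counter; on
    // GFX10+ bits 27:24 additionally encode the dword count.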
    unsigned OrderedCountIndex = IndexOperand & 0x3f;
    IndexOperand &= ~0x3f;
    unsigned CountDw = 0;

    if (Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10) {
      CountDw = (IndexOperand >> 24) & 0xf;
      IndexOperand &= ~(0xf << 24);

      if (CountDw < 1 || CountDw > 4) {
        report_fatal_error(
            "ds_ordered_count: dword count must be between 1 and 4");
      }
    }

    if (IndexOperand)
      report_fatal_error("ds_ordered_count: bad index operand");

    if (WaveDone && !WaveRelease)
      report_fatal_error("ds_ordered_count: wave_done requires wave_release");

    unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1;
    unsigned ShaderType =
        SIInstrInfo::getDSShaderTypeValue(DAG.getMachineFunction());
    unsigned Offset0 = OrderedCountIndex << 2;
    unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) |
                       (Instruction << 4);

    if (Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10)
      Offset1 |= (CountDw - 1) << 6;

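    // Pack the ds_ordered_count immediate: the low byte holds the counter
    // index scaled to a byte offset; the high byte packs wave_release,
    // wave_done, the shader type, the add/swap selector and, on GFX10+,
    // the dword count minus one.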
    unsigned Offset = Offset0 | (Offset1 << 8);

    SDValue Ops[] = {
      Chain,
      Value,
      DAG.getTargetConstant(Offset, DL, MVT::i16),
      copyToM0(DAG, Chain, DL, M0).getValue(1), // Glue
    };
    return DAG.getMemIntrinsicNode(AMDGPUISD::DS_ORDERED_COUNT, DL,
                                   M->getVTList(), Ops, M->getMemoryVT(),
                                   M->getMemOperand());
  }
  case Intrinsic::amdgcn_ds_fadd: {
    MemSDNode *M = cast<MemSDNode>(Op);
    unsigned Opc;
    switch (IntrID) {
    case Intrinsic::amdgcn_ds_fadd:
      Opc = ISD::ATOMIC_LOAD_FADD;
      break;
    }

    return DAG.getAtomic(Opc, SDLoc(Op), M->getMemoryVT(),
                         M->getOperand(0), M->getOperand(2), M->getOperand(3),
                         M->getMemOperand());
  }
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax: {
    MemSDNode *M = cast<MemSDNode>(Op);
    unsigned Opc;
    switch (IntrID) {
    case Intrinsic::amdgcn_atomic_inc:
      Opc = AMDGPUISD::ATOMIC_INC;
      break;
    case Intrinsic::amdgcn_atomic_dec:
      Opc = AMDGPUISD::ATOMIC_DEC;
      break;
    case Intrinsic::amdgcn_ds_fmin:
      Opc = AMDGPUISD::ATOMIC_LOAD_FMIN;
      break;
    case Intrinsic::amdgcn_ds_fmax:
      Opc = AMDGPUISD::ATOMIC_LOAD_FMAX;
      break;
    default:
      llvm_unreachable("Unknown intrinsic!");
    }
    SDValue Ops[] = {
      M->getOperand(0), // Chain
      M->getOperand(2), // Ptr
      M->getOperand(3)  // Value
    };

    return DAG.getMemIntrinsicNode(Opc, SDLoc(Op), M->getVTList(), Ops,
                                   M->getMemoryVT(), M->getMemOperand());
  }
  case Intrinsic::amdgcn_buffer_load:
  case Intrinsic::amdgcn_buffer_load_format: {
    unsigned Glc = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue();
    unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
    unsigned IdxEn = getIdxEn(Op.getOperand(3));
    SDValue Ops[] = {
      Op.getOperand(0), // Chain
      Op.getOperand(2), // rsrc
      Op.getOperand(3), // vindex
      SDValue(),        // voffset -- will be set by setBufferOffsets
      SDValue(),        // soffset -- will be set by setBufferOffsets
      SDValue(),        // offset -- will be set by setBufferOffsets
      DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
      DAG.getTargetConstant(IdxEn, DL, MVT::i1),             // idxen
    };
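    // Legacy buffer intrinsics take one combined offset operand; split it
    // into the voffset/soffset/immediate-offset address fields.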
    setBufferOffsets(Op.getOperand(4), DAG, &Ops[3]);

    unsigned Opc = (IntrID == Intrinsic::amdgcn_buffer_load) ?
        AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT;

    EVT VT = Op.getValueType();
    EVT IntVT = VT.changeTypeToInteger();
    auto *M = cast<MemSDNode>(Op);
    updateBufferMMO(M->getMemOperand(), Ops[3], Ops[4], Ops[5], Ops[2]);
    EVT LoadVT = Op.getValueType();

    if (LoadVT.getScalarType() == MVT::f16)
      return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16,
                                 M, DAG, Ops);

    // Handle BUFFER_LOAD_BYTE/UBYTE/SHORT/USHORT overloaded intrinsics
    if (LoadVT.getScalarType() == MVT::i8 ||
        LoadVT.getScalarType() == MVT::i16)
      return handleByteShortBufferLoads(DAG, LoadVT, DL, Ops, M);

    return getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT,
                               M->getMemOperand(), DAG);
  }
  case Intrinsic::amdgcn_raw_buffer_load:
  case Intrinsic::amdgcn_raw_buffer_load_format: {
    const bool IsFormat = IntrID == Intrinsic::amdgcn_raw_buffer_load_format;

    auto Offsets = splitBufferOffsets(Op.getOperand(3), DAG);
    SDValue Ops[] = {
        Op.getOperand(0),                      // Chain
        Op.getOperand(2),                      // rsrc
        DAG.getConstant(0, DL, MVT::i32),      // vindex
        Offsets.first,                         // voffset
        Op.getOperand(4),                      // soffset
        Offsets.second,                        // offset
        Op.getOperand(5),                      // cachepolicy, swizzled buffer
        DAG.getTargetConstant(0, DL, MVT::i1), // idxen
    };

    auto *M = cast<MemSDNode>(Op);
    updateBufferMMO(M->getMemOperand(), Ops[3], Ops[4], Ops[5]);
    return lowerIntrinsicLoad(M, IsFormat, DAG, Ops);
  }
  case Intrinsic::amdgcn_struct_buffer_load:
  case Intrinsic::amdgcn_struct_buffer_load_format: {
    const bool IsFormat = IntrID == Intrinsic::amdgcn_struct_buffer_load_format;

    auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
    SDValue Ops[] = {
        Op.getOperand(0),                      // Chain
        Op.getOperand(2),                      // rsrc
        Op.getOperand(3),                      // vindex
        Offsets.first,                         // voffset
        Op.getOperand(5),                      // soffset
        Offsets.second,                        // offset
        Op.getOperand(6),                      // cachepolicy, swizzled buffer
        DAG.getTargetConstant(1, DL, MVT::i1), // idxen
    };

    auto *M = cast<MemSDNode>(Op);
    updateBufferMMO(M->getMemOperand(), Ops[3], Ops[4], Ops[5], Ops[2]);
    return lowerIntrinsicLoad(cast<MemSDNode>(Op), IsFormat, DAG, Ops);
  }
  case Intrinsic::amdgcn_tbuffer_load: {
    MemSDNode *M = cast<MemSDNode>(Op);
    EVT LoadVT = Op.getValueType();

    unsigned Dfmt = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue();
    unsigned Nfmt = cast<ConstantSDNode>(Op.getOperand(8))->getZExtValue();
    unsigned Glc = cast<ConstantSDNode>(Op.getOperand(9))->getZExtValue();
    unsigned Slc = cast<ConstantSDNode>(Op.getOperand(10))->getZExtValue();
    unsigned IdxEn = getIdxEn(Op.getOperand(3));
    SDValue Ops[] = {
      Op.getOperand(0), // Chain
      Op.getOperand(2), // rsrc
      Op.getOperand(3), // vindex
      Op.getOperand(4), // voffset
      Op.getOperand(5), // soffset
      Op.getOperand(6), // offset
      DAG.getTargetConstant(Dfmt | (Nfmt << 4), DL, MVT::i32), // format
      DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32),   // cachepolicy
      DAG.getTargetConstant(IdxEn, DL, MVT::i1)                // idxen
    };

    if (LoadVT.getScalarType() == MVT::f16)
      return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16,
                                 M, DAG, Ops);
    return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
                               Op->getVTList(), Ops, LoadVT, M->getMemOperand(),
                               DAG);
  }
  case Intrinsic::amdgcn_raw_tbuffer_load: {
    MemSDNode *M = cast<MemSDNode>(Op);
    EVT LoadVT = Op.getValueType();
    auto Offsets = splitBufferOffsets(Op.getOperand(3), DAG);

    SDValue Ops[] = {
        Op.getOperand(0),                      // Chain
        Op.getOperand(2),                      // rsrc
        DAG.getConstant(0, DL, MVT::i32),      // vindex
        Offsets.first,                         // voffset
        Op.getOperand(4),                      // soffset
        Offsets.second,                        // offset
        Op.getOperand(5),                      // format
        Op.getOperand(6),                      // cachepolicy, swizzled buffer
        DAG.getTargetConstant(0, DL, MVT::i1), // idxen
    };

    if (LoadVT.getScalarType() == MVT::f16)
      return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16,
                                 M, DAG, Ops);
    return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
                               Op->getVTList(), Ops, LoadVT, M->getMemOperand(),
                               DAG);
  }
  case Intrinsic::amdgcn_struct_tbuffer_load: {
    MemSDNode *M = cast<MemSDNode>(Op);
    EVT LoadVT = Op.getValueType();
    auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);

    SDValue Ops[] = {
        Op.getOperand(0),                      // Chain
        Op.getOperand(2),                      // rsrc
        Op.getOperand(3),                      // vindex
        Offsets.first,                         // voffset
        Op.getOperand(5),                      // soffset
        Offsets.second,                        // offset
        Op.getOperand(6),                      // format
        Op.getOperand(7),                      // cachepolicy, swizzled buffer
        DAG.getTargetConstant(1, DL, MVT::i1), // idxen
    };

    if (LoadVT.getScalarType() == MVT::f16)
      return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16,
                                 M, DAG, Ops);
    return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
                               Op->getVTList(), Ops, LoadVT, M->getMemOperand(),
                               DAG);
  }
  case Intrinsic::amdgcn_buffer_atomic_swap:
  case Intrinsic::amdgcn_buffer_atomic_add:
  case Intrinsic::amdgcn_buffer_atomic_sub:
  case Intrinsic::amdgcn_buffer_atomic_csub:
  case Intrinsic::amdgcn_buffer_atomic_smin:
  case Intrinsic::amdgcn_buffer_atomic_umin:
  case Intrinsic::amdgcn_buffer_atomic_smax:
  case Intrinsic::amdgcn_buffer_atomic_umax:
  case Intrinsic::amdgcn_buffer_atomic_and:
  case Intrinsic::amdgcn_buffer_atomic_or:
  case Intrinsic::amdgcn_buffer_atomic_xor:
  case Intrinsic::amdgcn_buffer_atomic_fadd: {
    unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
    unsigned IdxEn = getIdxEn(Op.getOperand(4));
    SDValue Ops[] = {
      Op.getOperand(0), // Chain
      Op.getOperand(2), // vdata
      Op.getOperand(3), // rsrc
      Op.getOperand(4), // vindex
      SDValue(),        // voffset -- will be set by setBufferOffsets
      SDValue(),        // soffset -- will be set by setBufferOffsets
      SDValue(),        // offset -- will be set by setBufferOffsets
      DAG.getTargetConstant(Slc << 1, DL, MVT::i32), // cachepolicy
      DAG.getTargetConstant(IdxEn, DL, MVT::i1),     // idxen
    };
    setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]);

    EVT VT = Op.getValueType();

    auto *M = cast<MemSDNode>(Op);
    updateBufferMMO(M->getMemOperand(), Ops[4], Ops[5], Ops[6], Ops[3]);
    unsigned Opcode = 0;

    switch (IntrID) {
    case Intrinsic::amdgcn_buffer_atomic_swap:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP;
      break;
    case Intrinsic::amdgcn_buffer_atomic_add:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD;
      break;
    case Intrinsic::amdgcn_buffer_atomic_sub:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB;
      break;
    case Intrinsic::amdgcn_buffer_atomic_csub:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_CSUB;
      break;
    case Intrinsic::amdgcn_buffer_atomic_smin:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN;
      break;
    case Intrinsic::amdgcn_buffer_atomic_umin:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN;
      break;
    case Intrinsic::amdgcn_buffer_atomic_smax:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX;
      break;
    case Intrinsic::amdgcn_buffer_atomic_umax:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX;
      break;
    case Intrinsic::amdgcn_buffer_atomic_and:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_AND;
      break;
    case Intrinsic::amdgcn_buffer_atomic_or:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_OR;
      break;
    case Intrinsic::amdgcn_buffer_atomic_xor:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR;
      break;
    case Intrinsic::amdgcn_buffer_atomic_fadd:
      if (!Op.getValue(0).use_empty() && !Subtarget->hasGFX90AInsts()) {
        DiagnosticInfoUnsupported
          NoFpRet(DAG.getMachineFunction().getFunction(),
                  "return versions of fp atomics not supported",
                  DL.getDebugLoc(), DS_Error);
        DAG.getContext()->diagnose(NoFpRet);
        return SDValue();
      }
      Opcode = AMDGPUISD::BUFFER_ATOMIC_FADD;
      break;
    default:
      llvm_unreachable("unhandled atomic opcode");
    }

    return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
                                   M->getMemOperand());
  }
  case Intrinsic::amdgcn_raw_buffer_atomic_fadd:
    return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_FADD);
  case Intrinsic::amdgcn_struct_buffer_atomic_fadd:
    return lowerStructBufferAtomicIntrin(Op, DAG,
                                         AMDGPUISD::BUFFER_ATOMIC_FADD);
  case Intrinsic::amdgcn_raw_buffer_atomic_fmin:
    return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_FMIN);
  case Intrinsic::amdgcn_struct_buffer_atomic_fmin:
    return lowerStructBufferAtomicIntrin(Op, DAG,
                                         AMDGPUISD::BUFFER_ATOMIC_FMIN);
  case Intrinsic::amdgcn_raw_buffer_atomic_fmax:
    return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_FMAX);
  case Intrinsic::amdgcn_struct_buffer_atomic_fmax:
    return lowerStructBufferAtomicIntrin(Op, DAG,
                                         AMDGPUISD::BUFFER_ATOMIC_FMAX);
  case Intrinsic::amdgcn_raw_buffer_atomic_swap:
    return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_SWAP);
  case Intrinsic::amdgcn_raw_buffer_atomic_add:
    return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_ADD);
  case Intrinsic::amdgcn_raw_buffer_atomic_sub:
    return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_SUB);
  case Intrinsic::amdgcn_raw_buffer_atomic_smin:
    return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_SMIN);
  case Intrinsic::amdgcn_raw_buffer_atomic_umin:
    return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_UMIN);
  case Intrinsic::amdgcn_raw_buffer_atomic_smax:
    return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_SMAX);
  case Intrinsic::amdgcn_raw_buffer_atomic_umax:
    return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_UMAX);
  case Intrinsic::amdgcn_raw_buffer_atomic_and:
    return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_AND);
  case Intrinsic::amdgcn_raw_buffer_atomic_or:
    return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_OR);
  case Intrinsic::amdgcn_raw_buffer_atomic_xor:
    return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_XOR);
  case Intrinsic::amdgcn_raw_buffer_atomic_inc:
    return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_INC);
  case Intrinsic::amdgcn_raw_buffer_atomic_dec:
    return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_DEC);
  case Intrinsic::amdgcn_struct_buffer_atomic_swap:
    return lowerStructBufferAtomicIntrin(Op, DAG,
                                         AMDGPUISD::BUFFER_ATOMIC_SWAP);
  case Intrinsic::amdgcn_struct_buffer_atomic_add:
    return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_ADD);
  case Intrinsic::amdgcn_struct_buffer_atomic_sub:
    return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_SUB);
  case Intrinsic::amdgcn_struct_buffer_atomic_smin:
    return lowerStructBufferAtomicIntrin(Op, DAG,
                                         AMDGPUISD::BUFFER_ATOMIC_SMIN);
  case Intrinsic::amdgcn_struct_buffer_atomic_umin:
    return lowerStructBufferAtomicIntrin(Op, DAG,
                                         AMDGPUISD::BUFFER_ATOMIC_UMIN);
  case Intrinsic::amdgcn_struct_buffer_atomic_smax:
    return lowerStructBufferAtomicIntrin(Op, DAG,
                                         AMDGPUISD::BUFFER_ATOMIC_SMAX);
  case Intrinsic::amdgcn_struct_buffer_atomic_umax:
    return lowerStructBufferAtomicIntrin(Op, DAG,
                                         AMDGPUISD::BUFFER_ATOMIC_UMAX);
  case Intrinsic::amdgcn_struct_buffer_atomic_and:
    return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_AND);
  case Intrinsic::amdgcn_struct_buffer_atomic_or:
    return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_OR);
  case Intrinsic::amdgcn_struct_buffer_atomic_xor:
    return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_XOR);
  case Intrinsic::amdgcn_struct_buffer_atomic_inc:
    return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_INC);
  case Intrinsic::amdgcn_struct_buffer_atomic_dec:
    return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_DEC);

  case Intrinsic::amdgcn_buffer_atomic_cmpswap: {
    unsigned Slc = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue();
    unsigned IdxEn = getIdxEn(Op.getOperand(5));
    SDValue Ops[] = {
      Op.getOperand(0), // Chain
      Op.getOperand(2), // src
      Op.getOperand(3), // cmp
      Op.getOperand(4), // rsrc
      Op.getOperand(5), // vindex
      SDValue(),        // voffset -- will be set by setBufferOffsets
      SDValue(),        // soffset -- will be set by setBufferOffsets
      SDValue(),        // offset -- will be set by setBufferOffsets
      DAG.getTargetConstant(Slc << 1, DL, MVT::i32), // cachepolicy
      DAG.getTargetConstant(IdxEn, DL, MVT::i1),     // idxen
    };
    setBufferOffsets(Op.getOperand(6), DAG, &Ops[5]);

    EVT VT = Op.getValueType();
    auto *M = cast<MemSDNode>(Op);
    updateBufferMMO(M->getMemOperand(), Ops[5], Ops[6], Ops[7], Ops[4]);

    return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
                                   Op->getVTList(), Ops, VT,
                                   M->getMemOperand());
  }
  case Intrinsic::amdgcn_raw_buffer_atomic_cmpswap: {
    auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
    SDValue Ops[] = {
        Op.getOperand(0),                      // Chain
        Op.getOperand(2),                      // src
        Op.getOperand(3),                      // cmp
        Op.getOperand(4),                      // rsrc
        DAG.getConstant(0, DL, MVT::i32),      // vindex
        Offsets.first,                         // voffset
        Op.getOperand(6),                      // soffset
        Offsets.second,                        // offset
        Op.getOperand(7),                      // cachepolicy
        DAG.getTargetConstant(0, DL, MVT::i1), // idxen
    };
    EVT VT = Op.getValueType();
    auto *M = cast<MemSDNode>(Op);
    updateBufferMMO(M->getMemOperand(), Ops[5], Ops[6], Ops[7]);

    return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
                                   Op->getVTList(), Ops, VT,
                                   M->getMemOperand());
  }
  case Intrinsic::amdgcn_struct_buffer_atomic_cmpswap: {
    auto Offsets = splitBufferOffsets(Op.getOperand(6), DAG);
    SDValue Ops[] = {
        Op.getOperand(0),                      // Chain
        Op.getOperand(2),                      // src
        Op.getOperand(3),                      // cmp
        Op.getOperand(4),                      // rsrc
        Op.getOperand(5),                      // vindex
        Offsets.first,                         // voffset
        Op.getOperand(7),                      // soffset
        Offsets.second,                        // offset
        Op.getOperand(8),                      // cachepolicy
        DAG.getTargetConstant(1, DL, MVT::i1), // idxen
    };
    EVT VT = Op.getValueType();
    auto *M = cast<MemSDNode>(Op);
    updateBufferMMO(M->getMemOperand(), Ops[5], Ops[6], Ops[7], Ops[4]);

    return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
                                   Op->getVTList(), Ops, VT,
                                   M->getMemOperand());
  }
  case Intrinsic::amdgcn_image_bvh_intersect_ray: {
    MemSDNode *M = cast<MemSDNode>(Op);
    SDValue NodePtr = M->getOperand(2);
    SDValue RayExtent = M->getOperand(3);
    SDValue RayOrigin = M->getOperand(4);
    SDValue RayDir = M->getOperand(5);
    SDValue RayInvDir = M->getOperand(6);
    SDValue TDescr = M->getOperand(7);

    assert(NodePtr.getValueType() == MVT::i32 ||
           NodePtr.getValueType() == MVT::i64);
    assert(RayDir.getValueType() == MVT::v4f16 ||
           RayDir.getValueType() == MVT::v4f32);

    if (!Subtarget->hasGFX10_AEncoding()) {
      emitRemovedIntrinsicError(DAG, DL, Op.getValueType());
      return SDValue();
    }

    const bool IsA16 = RayDir.getValueType().getVectorElementType() == MVT::f16;
    const bool Is64 = NodePtr.getValueType() == MVT::i64;
    const unsigned NumVDataDwords = 4;
    const unsigned NumVAddrDwords = IsA16 ? (Is64 ? 9 : 8) : (Is64 ? 12 : 11);
    const bool UseNSA = Subtarget->hasNSAEncoding() &&
                        NumVAddrDwords <= Subtarget->getNSAMaxSize();
    const unsigned BaseOpcodes[2][2] = {
        {AMDGPU::IMAGE_BVH_INTERSECT_RAY, AMDGPU::IMAGE_BVH_INTERSECT_RAY_a16},
        {AMDGPU::IMAGE_BVH64_INTERSECT_RAY,
         AMDGPU::IMAGE_BVH64_INTERSECT_RAY_a16}};
    int Opcode;
    if (UseNSA) {
      Opcode = AMDGPU::getMIMGOpcode(BaseOpcodes[Is64][IsA16],
                                     AMDGPU::MIMGEncGfx10NSA, NumVDataDwords,
                                     NumVAddrDwords);
    } else {
      Opcode = AMDGPU::getMIMGOpcode(
          BaseOpcodes[Is64][IsA16], AMDGPU::MIMGEncGfx10Default, NumVDataDwords,
          PowerOf2Ceil(NumVAddrDwords));
    }
    assert(Opcode != -1);

    SmallVector<SDValue, 16> Ops;

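    // Pack the three lanes of a ray vector into consecutive address dwords.
    // f32 lanes pass through as i32 bitcasts; f16 lanes are packed in pairs,
    // and when the running operand list is misaligned a pair straddles the
    // boundary with the previous vector's last lane.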
7415 | auto packLanes = [&DAG, &Ops, &DL] (SDValue Op, bool IsAligned) { | ||||||
7416 | SmallVector<SDValue, 3> Lanes; | ||||||
7417 | DAG.ExtractVectorElements(Op, Lanes, 0, 3); | ||||||
7418 | if (Lanes[0].getValueSizeInBits() == 32) { | ||||||
7419 | for (unsigned I = 0; I < 3; ++I) | ||||||
7420 | Ops.push_back(DAG.getBitcast(MVT::i32, Lanes[I])); | ||||||
7421 | } else { | ||||||
7422 | if (IsAligned) { | ||||||
7423 | Ops.push_back( | ||||||
7424 | DAG.getBitcast(MVT::i32, | ||||||
7425 | DAG.getBuildVector(MVT::v2f16, DL, | ||||||
7426 | { Lanes[0], Lanes[1] }))); | ||||||
7427 | Ops.push_back(Lanes[2]); | ||||||
7428 | } else { | ||||||
7429 | SDValue Elt0 = Ops.pop_back_val(); | ||||||
7430 | Ops.push_back( | ||||||
7431 | DAG.getBitcast(MVT::i32, | ||||||
7432 | DAG.getBuildVector(MVT::v2f16, DL, | ||||||
7433 | { Elt0, Lanes[0] }))); | ||||||
7434 | Ops.push_back( | ||||||
7435 | DAG.getBitcast(MVT::i32, | ||||||
7436 | DAG.getBuildVector(MVT::v2f16, DL, | ||||||
7437 | { Lanes[1], Lanes[2] }))); | ||||||
7438 | } | ||||||
7439 | } | ||||||
7440 | }; | ||||||
7441 | |||||||
7442 | if (Is64) | ||||||
7443 | DAG.ExtractVectorElements(DAG.getBitcast(MVT::v2i32, NodePtr), Ops, 0, 2); | ||||||
7444 | else | ||||||
7445 | Ops.push_back(NodePtr); | ||||||
7446 | |||||||
7447 | Ops.push_back(DAG.getBitcast(MVT::i32, RayExtent)); | ||||||
7448 | packLanes(RayOrigin, true); | ||||||
7449 | packLanes(RayDir, true); | ||||||
7450 | packLanes(RayInvDir, false); | ||||||
7451 | |||||||
7452 | if (!UseNSA) { | ||||||
7453 | // Build a single vector containing all the operands so far prepared. | ||||||
7454 | if (NumVAddrDwords > 8) { | ||||||
7455 | SDValue Undef = DAG.getUNDEF(MVT::i32); | ||||||
7456 | Ops.append(16 - Ops.size(), Undef); | ||||||
7457 | } | ||||||
7458 | assert(Ops.size() == 8 || Ops.size() == 16)(static_cast <bool> (Ops.size() == 8 || Ops.size() == 16 ) ? void (0) : __assert_fail ("Ops.size() == 8 || Ops.size() == 16" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AMDGPU/SIISelLowering.cpp" , 7458, __extension__ __PRETTY_FUNCTION__)); | ||||||
7459 | SDValue MergedOps = DAG.getBuildVector( | ||||||
7460 | Ops.size() == 16 ? MVT::v16i32 : MVT::v8i32, DL, Ops); | ||||||
7461 | Ops.clear(); | ||||||
7462 | Ops.push_back(MergedOps); | ||||||
7463 | } | ||||||
7464 | |||||||
7465 | Ops.push_back(TDescr); | ||||||
7466 | if (IsA16) | ||||||
7467 | Ops.push_back(DAG.getTargetConstant(1, DL, MVT::i1)); | ||||||
7468 | Ops.push_back(M->getChain()); | ||||||
7469 | |||||||
7470 | auto *NewNode = DAG.getMachineNode(Opcode, DL, M->getVTList(), Ops); | ||||||
7471 | MachineMemOperand *MemRef = M->getMemOperand(); | ||||||
7472 | DAG.setNodeMemRefs(NewNode, {MemRef}); | ||||||
7473 | return SDValue(NewNode, 0); | ||||||
7474 | } | ||||||
7475 | case Intrinsic::amdgcn_global_atomic_fadd: | ||||||
7476 | if (!Op.getValue(0).use_empty() && !Subtarget->hasGFX90AInsts()) { | ||||||
7477 | DiagnosticInfoUnsupported | ||||||
7478 | NoFpRet(DAG.getMachineFunction().getFunction(), | ||||||
7479 | "return versions of fp atomics not supported", | ||||||
7480 | DL.getDebugLoc(), DS_Error); | ||||||
7481 | DAG.getContext()->diagnose(NoFpRet); | ||||||
7482 | return SDValue(); | ||||||
7483 | } | ||||||
7484 | LLVM_FALLTHROUGH[[gnu::fallthrough]]; | ||||||
7485 | case Intrinsic::amdgcn_global_atomic_fmin: | ||||||
7486 | case Intrinsic::amdgcn_global_atomic_fmax: | ||||||
7487 | case Intrinsic::amdgcn_flat_atomic_fadd: | ||||||
7488 | case Intrinsic::amdgcn_flat_atomic_fmin: | ||||||
7489 | case Intrinsic::amdgcn_flat_atomic_fmax: { | ||||||
7490 | MemSDNode *M = cast<MemSDNode>(Op); | ||||||
7491 | SDValue Ops[] = { | ||||||
7492 | M->getOperand(0), // Chain | ||||||
7493 | M->getOperand(2), // Ptr | ||||||
7494 | M->getOperand(3) // Value | ||||||
7495 | }; | ||||||
7496 | unsigned Opcode = 0; | ||||||
7497 | switch (IntrID) { | ||||||
7498 | case Intrinsic::amdgcn_global_atomic_fadd: | ||||||
7499 | case Intrinsic::amdgcn_flat_atomic_fadd: { | ||||||
7500 | EVT VT = Op.getOperand(3).getValueType(); | ||||||
7501 | return DAG.getAtomic(ISD::ATOMIC_LOAD_FADD, DL, VT, | ||||||
7502 | DAG.getVTList(VT, MVT::Other), Ops, | ||||||
7503 | M->getMemOperand()); | ||||||
7504 | } | ||||||
7505 | case Intrinsic::amdgcn_global_atomic_fmin: | ||||||
7506 | case Intrinsic::amdgcn_flat_atomic_fmin: { | ||||||
7507 | Opcode = AMDGPUISD::ATOMIC_LOAD_FMIN; | ||||||
7508 | break; | ||||||
7509 | } | ||||||
7510 | case Intrinsic::amdgcn_global_atomic_fmax: | ||||||
7511 | case Intrinsic::amdgcn_flat_atomic_fmax: { | ||||||
7512 | Opcode = AMDGPUISD::ATOMIC_LOAD_FMAX; | ||||||
7513 | break; | ||||||
7514 | } | ||||||
7515 | default: | ||||||
7516 | llvm_unreachable("unhandled atomic opcode")::llvm::llvm_unreachable_internal("unhandled atomic opcode", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AMDGPU/SIISelLowering.cpp" , 7516); | ||||||
7517 | } | ||||||
7518 | return DAG.getMemIntrinsicNode(Opcode, SDLoc(Op), | ||||||
7519 | M->getVTList(), Ops, M->getMemoryVT(), | ||||||
7520 | M->getMemOperand()); | ||||||
7521 | } | ||||||
7522 | default: | ||||||
7523 | |||||||
7524 | if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr = | ||||||
7525 | AMDGPU::getImageDimIntrinsicInfo(IntrID)) | ||||||
7526 | return lowerImage(Op, ImageDimIntr, DAG, true); | ||||||
7527 | |||||||
7528 | return SDValue(); | ||||||
7529 | } | ||||||
7530 | } | ||||||
7531 | |||||||
7532 | // Call DAG.getMemIntrinsicNode for a load, but first widen a dwordx3 type to | ||||||
7533 | // dwordx4 if on SI. | ||||||
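// Illustrative walk-through (not from the source comments): on a subtarget
// without dwordx3 loads, a v3i32 load is emitted as a v4i32 load with a
// 16-byte memory operand, and the original value is recovered afterwards:
//   v4i32 wide = load ...                    ; WidenedVT / WidenedMemVT
//   v3i32 res  = extract_subvector wide, 0
// so callers still see the (VT, Other) result pair they asked for.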
7534 | SDValue SITargetLowering::getMemIntrinsicNode(unsigned Opcode, const SDLoc &DL, | ||||||
7535 | SDVTList VTList, | ||||||
7536 | ArrayRef<SDValue> Ops, EVT MemVT, | ||||||
7537 | MachineMemOperand *MMO, | ||||||
7538 | SelectionDAG &DAG) const { | ||||||
7539 | EVT VT = VTList.VTs[0]; | ||||||
7540 | EVT WidenedVT = VT; | ||||||
7541 | EVT WidenedMemVT = MemVT; | ||||||
7542 | if (!Subtarget->hasDwordx3LoadStores() && | ||||||
7543 | (WidenedVT == MVT::v3i32 || WidenedVT == MVT::v3f32)) { | ||||||
7544 | WidenedVT = EVT::getVectorVT(*DAG.getContext(), | ||||||
7545 | WidenedVT.getVectorElementType(), 4); | ||||||
7546 | WidenedMemVT = EVT::getVectorVT(*DAG.getContext(), | ||||||
7547 | WidenedMemVT.getVectorElementType(), 4); | ||||||
7548 | MMO = DAG.getMachineFunction().getMachineMemOperand(MMO, 0, 16); | ||||||
7549 | } | ||||||
7550 | |||||||
7551 | assert(VTList.NumVTs == 2); | ||||||
7552 | SDVTList WidenedVTList = DAG.getVTList(WidenedVT, VTList.VTs[1]); | ||||||
7553 | |||||||
7554 | auto NewOp = DAG.getMemIntrinsicNode(Opcode, DL, WidenedVTList, Ops, | ||||||
7555 | WidenedMemVT, MMO); | ||||||
7556 | if (WidenedVT != VT) { | ||||||
7557 | auto Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, NewOp, | ||||||
7558 | DAG.getVectorIdxConstant(0, DL)); | ||||||
7559 | NewOp = DAG.getMergeValues({ Extract, SDValue(NewOp.getNode(), 1) }, DL); | ||||||
7560 | } | ||||||
7561 | return NewOp; | ||||||
7562 | } | ||||||
7563 | |||||||
7564 | SDValue SITargetLowering::handleD16VData(SDValue VData, SelectionDAG &DAG, | ||||||
7565 | bool ImageStore) const { | ||||||
7566 | EVT StoreVT = VData.getValueType(); | ||||||
7567 | |||||||
7568 | // No change for f16 and legal vector D16 types. | ||||||
7569 | if (!StoreVT.isVector()) | ||||||
7570 | return VData; | ||||||
7571 | |||||||
7572 | SDLoc DL(VData); | ||||||
7573 | unsigned NumElements = StoreVT.getVectorNumElements(); | ||||||
7574 | |||||||
7575 | if (Subtarget->hasUnpackedD16VMem()) { | ||||||
7576 | // We need to unpack the packed data to store. | ||||||
7577 | EVT IntStoreVT = StoreVT.changeTypeToInteger(); | ||||||
7578 | SDValue IntVData = DAG.getNode(ISD::BITCAST, DL, IntStoreVT, VData); | ||||||
7579 | |||||||
7580 | EVT EquivStoreVT = | ||||||
7581 | EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElements); | ||||||
7582 | SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, EquivStoreVT, IntVData); | ||||||
7583 | return DAG.UnrollVectorOp(ZExt.getNode()); | ||||||
7584 | } | ||||||
7585 | |||||||
7586 | // The sq block of gfx8.1 does not estimate register use correctly for d16 | ||||||
7587 | // image store instructions. The data operand is computed as if it were not a | ||||||
7588 | // d16 image instruction. | ||||||
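// Worked example (illustrative): storing v3f16 with this bug present, the
// three f16 elements e0..e2 are repacked as
//   dword0 = bitcast v2i16 (e0, e1)
//   dword1 = bitcast v2i16 (e2, undef)
// and then padded with undef up to one dword per element (v3i32 here), so
// the register count matches what the hardware expects for a non-d16 store.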
7589 | if (ImageStore && Subtarget->hasImageStoreD16Bug()) { | ||||||
7590 | // Bitcast to i16 | ||||||
7591 | EVT IntStoreVT = StoreVT.changeTypeToInteger(); | ||||||
7592 | SDValue IntVData = DAG.getNode(ISD::BITCAST, DL, IntStoreVT, VData); | ||||||
7593 | |||||||
7594 | // Decompose into scalars | ||||||
7595 | SmallVector<SDValue, 4> Elts; | ||||||
7596 | DAG.ExtractVectorElements(IntVData, Elts); | ||||||
7597 | |||||||
7598 | // Group pairs of i16 into v2i16 and bitcast to i32 | ||||||
7599 | SmallVector<SDValue, 4> PackedElts; | ||||||
7600 | for (unsigned I = 0; I < Elts.size() / 2; I += 1) { | ||||||
7601 | SDValue Pair = | ||||||
7602 | DAG.getBuildVector(MVT::v2i16, DL, {Elts[I * 2], Elts[I * 2 + 1]}); | ||||||
7603 | SDValue IntPair = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Pair); | ||||||
7604 | PackedElts.push_back(IntPair); | ||||||
7605 | } | ||||||
7606 | if ((NumElements % 2) == 1) { | ||||||
7607 | // Handle v3i16 | ||||||
7608 | unsigned I = Elts.size() / 2; | ||||||
7609 | SDValue Pair = DAG.getBuildVector(MVT::v2i16, DL, | ||||||
7610 | {Elts[I * 2], DAG.getUNDEF(MVT::i16)}); | ||||||
7611 | SDValue IntPair = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Pair); | ||||||
7612 | PackedElts.push_back(IntPair); | ||||||
7613 | } | ||||||
7614 | |||||||
7615 | // Pad using UNDEF | ||||||
7616 | PackedElts.resize(Elts.size(), DAG.getUNDEF(MVT::i32)); | ||||||
7617 | |||||||
7618 | // Build final vector | ||||||
7619 | EVT VecVT = | ||||||
7620 | EVT::getVectorVT(*DAG.getContext(), MVT::i32, PackedElts.size()); | ||||||
7621 | return DAG.getBuildVector(VecVT, DL, PackedElts); | ||||||
7622 | } | ||||||
7623 | |||||||
7624 | if (NumElements == 3) { | ||||||
7625 | EVT IntStoreVT = | ||||||
7626 | EVT::getIntegerVT(*DAG.getContext(), StoreVT.getStoreSizeInBits()); | ||||||
7627 | SDValue IntVData = DAG.getNode(ISD::BITCAST, DL, IntStoreVT, VData); | ||||||
7628 | |||||||
7629 | EVT WidenedStoreVT = EVT::getVectorVT( | ||||||
7630 | *DAG.getContext(), StoreVT.getVectorElementType(), NumElements + 1); | ||||||
7631 | EVT WidenedIntVT = EVT::getIntegerVT(*DAG.getContext(), | ||||||
7632 | WidenedStoreVT.getStoreSizeInBits()); | ||||||
7633 | SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, WidenedIntVT, IntVData); | ||||||
7634 | return DAG.getNode(ISD::BITCAST, DL, WidenedStoreVT, ZExt); | ||||||
7635 | } | ||||||
7636 | |||||||
7637 | assert(isTypeLegal(StoreVT)); | ||||||
7638 | return VData; | ||||||
7639 | } | ||||||
7640 | |||||||
7641 | SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op, | ||||||
7642 | SelectionDAG &DAG) const { | ||||||
7643 | SDLoc DL(Op); | ||||||
7644 | SDValue Chain = Op.getOperand(0); | ||||||
7645 | unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); | ||||||
7646 | MachineFunction &MF = DAG.getMachineFunction(); | ||||||
7647 | |||||||
7648 | switch (IntrinsicID) { | ||||||
7649 | case Intrinsic::amdgcn_exp_compr: { | ||||||
7650 | SDValue Src0 = Op.getOperand(4); | ||||||
7651 | SDValue Src1 = Op.getOperand(5); | ||||||
7652 | // Hack around illegal type on SI by directly selecting it. | ||||||
7653 | if (isTypeLegal(Src0.getValueType())) | ||||||
7654 | return SDValue(); | ||||||
7655 | |||||||
7656 | const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(6)); | ||||||
7657 | SDValue Undef = DAG.getUNDEF(MVT::f32); | ||||||
7658 | const SDValue Ops[] = { | ||||||
7659 | Op.getOperand(2), // tgt | ||||||
7660 | DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src0), // src0 | ||||||
7661 | DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src1), // src1 | ||||||
7662 | Undef, // src2 | ||||||
7663 | Undef, // src3 | ||||||
7664 | Op.getOperand(7), // vm | ||||||
7665 | DAG.getTargetConstant(1, DL, MVT::i1), // compr | ||||||
7666 | Op.getOperand(3), // en | ||||||
7667 | Op.getOperand(0) // Chain | ||||||
7668 | }; | ||||||
7669 | |||||||
7670 | unsigned Opc = Done->isNullValue() ? AMDGPU::EXP : AMDGPU::EXP_DONE; | ||||||
7671 | return SDValue(DAG.getMachineNode(Opc, DL, Op->getVTList(), Ops), 0); | ||||||
7672 | } | ||||||
7673 | case Intrinsic::amdgcn_s_barrier: { | ||||||
7674 | if (getTargetMachine().getOptLevel() > CodeGenOpt::None) { | ||||||
7675 | const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); | ||||||
7676 | unsigned WGSize = ST.getFlatWorkGroupSizes(MF.getFunction()).second; | ||||||
7677 | if (WGSize <= ST.getWavefrontSize()) | ||||||
7678 | return SDValue(DAG.getMachineNode(AMDGPU::WAVE_BARRIER, DL, MVT::Other, | ||||||
7679 | Op.getOperand(0)), 0); | ||||||
7680 | } | ||||||
7681 | return SDValue(); | ||||||
7682 | } | ||||||
7683 | case Intrinsic::amdgcn_tbuffer_store: { | ||||||
7684 | SDValue VData = Op.getOperand(2); | ||||||
7685 | bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16); | ||||||
7686 | if (IsD16) | ||||||
7687 | VData = handleD16VData(VData, DAG); | ||||||
7688 | unsigned Dfmt = cast<ConstantSDNode>(Op.getOperand(8))->getZExtValue(); | ||||||
7689 | unsigned Nfmt = cast<ConstantSDNode>(Op.getOperand(9))->getZExtValue(); | ||||||
7690 | unsigned Glc = cast<ConstantSDNode>(Op.getOperand(10))->getZExtValue(); | ||||||
7691 | unsigned Slc = cast<ConstantSDNode>(Op.getOperand(11))->getZExtValue(); | ||||||
7692 | unsigned IdxEn = getIdxEn(Op.getOperand(4)); | ||||||
7693 | SDValue Ops[] = { | ||||||
7694 | Chain, | ||||||
7695 | VData, // vdata | ||||||
7696 | Op.getOperand(3), // rsrc | ||||||
7697 | Op.getOperand(4), // vindex | ||||||
7698 | Op.getOperand(5), // voffset | ||||||
7699 | Op.getOperand(6), // soffset | ||||||
7700 | Op.getOperand(7), // offset | ||||||
7701 | DAG.getTargetConstant(Dfmt | (Nfmt << 4), DL, MVT::i32), // format | ||||||
7702 | DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy | ||||||
7703 | DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen | ||||||
7704 | }; | ||||||
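// Note the operand packing above (taken straight from the expressions that
// build the two target constants): dfmt occupies the low 4 bits of the
// combined format operand with nfmt shifted up by 4, while glc lands in
// bit 0 and slc in bit 1 of the cachepolicy operand.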
7705 | unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 : | ||||||
7706 | AMDGPUISD::TBUFFER_STORE_FORMAT; | ||||||
7707 | MemSDNode *M = cast<MemSDNode>(Op); | ||||||
7708 | return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, | ||||||
7709 | M->getMemoryVT(), M->getMemOperand()); | ||||||
7710 | } | ||||||
7711 | |||||||
7712 | case Intrinsic::amdgcn_struct_tbuffer_store: { | ||||||
7713 | SDValue VData = Op.getOperand(2); | ||||||
7714 | bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16); | ||||||
7715 | if (IsD16) | ||||||
7716 | VData = handleD16VData(VData, DAG); | ||||||
7717 | auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG); | ||||||
7718 | SDValue Ops[] = { | ||||||
7719 | Chain, | ||||||
7720 | VData, // vdata | ||||||
7721 | Op.getOperand(3), // rsrc | ||||||
7722 | Op.getOperand(4), // vindex | ||||||
7723 | Offsets.first, // voffset | ||||||
7724 | Op.getOperand(6), // soffset | ||||||
7725 | Offsets.second, // offset | ||||||
7726 | Op.getOperand(7), // format | ||||||
7727 | Op.getOperand(8), // cachepolicy, swizzled buffer | ||||||
7728 | DAG.getTargetConstant(1, DL, MVT::i1), // idxen | ||||||
7729 | }; | ||||||
7730 | unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 : | ||||||
7731 | AMDGPUISD::TBUFFER_STORE_FORMAT; | ||||||
7732 | MemSDNode *M = cast<MemSDNode>(Op); | ||||||
7733 | return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, | ||||||
7734 | M->getMemoryVT(), M->getMemOperand()); | ||||||
7735 | } | ||||||
7736 | |||||||
7737 | case Intrinsic::amdgcn_raw_tbuffer_store: { | ||||||
7738 | SDValue VData = Op.getOperand(2); | ||||||
7739 | bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16); | ||||||
7740 | if (IsD16) | ||||||
7741 | VData = handleD16VData(VData, DAG); | ||||||
7742 | auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG); | ||||||
7743 | SDValue Ops[] = { | ||||||
7744 | Chain, | ||||||
7745 | VData, // vdata | ||||||
7746 | Op.getOperand(3), // rsrc | ||||||
7747 | DAG.getConstant(0, DL, MVT::i32), // vindex | ||||||
7748 | Offsets.first, // voffset | ||||||
7749 | Op.getOperand(5), // soffset | ||||||
7750 | Offsets.second, // offset | ||||||
7751 | Op.getOperand(6), // format | ||||||
7752 | Op.getOperand(7), // cachepolicy, swizzled buffer | ||||||
7753 | DAG.getTargetConstant(0, DL, MVT::i1), // idxen | ||||||
7754 | }; | ||||||
7755 | unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 : | ||||||
7756 | AMDGPUISD::TBUFFER_STORE_FORMAT; | ||||||
7757 | MemSDNode *M = cast<MemSDNode>(Op); | ||||||
7758 | return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, | ||||||
7759 | M->getMemoryVT(), M->getMemOperand()); | ||||||
7760 | } | ||||||
7761 | |||||||
7762 | case Intrinsic::amdgcn_buffer_store: | ||||||
7763 | case Intrinsic::amdgcn_buffer_store_format: { | ||||||
7764 | SDValue VData = Op.getOperand(2); | ||||||
7765 | bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16); | ||||||
7766 | if (IsD16) | ||||||
7767 | VData = handleD16VData(VData, DAG); | ||||||
7768 | unsigned Glc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue(); | ||||||
7769 | unsigned Slc = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue(); | ||||||
7770 | unsigned IdxEn = getIdxEn(Op.getOperand(4)); | ||||||
7771 | SDValue Ops[] = { | ||||||
7772 | Chain, | ||||||
7773 | VData, | ||||||
7774 | Op.getOperand(3), // rsrc | ||||||
7775 | Op.getOperand(4), // vindex | ||||||
7776 | SDValue(), // voffset -- will be set by setBufferOffsets | ||||||
7777 | SDValue(), // soffset -- will be set by setBufferOffsets | ||||||
7778 | SDValue(), // offset -- will be set by setBufferOffsets | ||||||
7779 | DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy | ||||||
7780 | DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen | ||||||
7781 | }; | ||||||
7782 | setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]); | ||||||
7783 | |||||||
7784 | unsigned Opc = IntrinsicID == Intrinsic::amdgcn_buffer_store ? | ||||||
7785 | AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT; | ||||||
7786 | Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc; | ||||||
7787 | MemSDNode *M = cast<MemSDNode>(Op); | ||||||
7788 | updateBufferMMO(M->getMemOperand(), Ops[4], Ops[5], Ops[6], Ops[3]); | ||||||
7789 | |||||||
7790 | // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics | ||||||
7791 | EVT VDataType = VData.getValueType().getScalarType(); | ||||||
7792 | if (VDataType == MVT::i8 || VDataType == MVT::i16) | ||||||
7793 | return handleByteShortBufferStores(DAG, VDataType, DL, Ops, M); | ||||||
7794 | |||||||
7795 | return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, | ||||||
7796 | M->getMemoryVT(), M->getMemOperand()); | ||||||
7797 | } | ||||||
7798 | |||||||
7799 | case Intrinsic::amdgcn_raw_buffer_store: | ||||||
7800 | case Intrinsic::amdgcn_raw_buffer_store_format: { | ||||||
7801 | const bool IsFormat = | ||||||
7802 | IntrinsicID == Intrinsic::amdgcn_raw_buffer_store_format; | ||||||
7803 | |||||||
7804 | SDValue VData = Op.getOperand(2); | ||||||
7805 | EVT VDataVT = VData.getValueType(); | ||||||
7806 | EVT EltType = VDataVT.getScalarType(); | ||||||
7807 | bool IsD16 = IsFormat && (EltType.getSizeInBits() == 16); | ||||||
7808 | if (IsD16) { | ||||||
7809 | VData = handleD16VData(VData, DAG); | ||||||
7810 | VDataVT = VData.getValueType(); | ||||||
7811 | } | ||||||
7812 | |||||||
7813 | if (!isTypeLegal(VDataVT)) { | ||||||
7814 | VData = | ||||||
7815 | DAG.getNode(ISD::BITCAST, DL, | ||||||
7816 | getEquivalentMemType(*DAG.getContext(), VDataVT), VData); | ||||||
7817 | } | ||||||
7818 | |||||||
7819 | auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG); | ||||||
7820 | SDValue Ops[] = { | ||||||
7821 | Chain, | ||||||
7822 | VData, | ||||||
7823 | Op.getOperand(3), // rsrc | ||||||
7824 | DAG.getConstant(0, DL, MVT::i32), // vindex | ||||||
7825 | Offsets.first, // voffset | ||||||
7826 | Op.getOperand(5), // soffset | ||||||
7827 | Offsets.second, // offset | ||||||
7828 | Op.getOperand(6), // cachepolicy, swizzled buffer | ||||||
7829 | DAG.getTargetConstant(0, DL, MVT::i1), // idxen | ||||||
7830 | }; | ||||||
7831 | unsigned Opc = | ||||||
7832 | IsFormat ? AMDGPUISD::BUFFER_STORE_FORMAT : AMDGPUISD::BUFFER_STORE; | ||||||
7833 | Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc; | ||||||
7834 | MemSDNode *M = cast<MemSDNode>(Op); | ||||||
7835 | updateBufferMMO(M->getMemOperand(), Ops[4], Ops[5], Ops[6]); | ||||||
7836 | |||||||
7837 | // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics | ||||||
7838 | if (!IsD16 && !VDataVT.isVector() && EltType.getSizeInBits() < 32) | ||||||
7839 | return handleByteShortBufferStores(DAG, VDataVT, DL, Ops, M); | ||||||
7840 | |||||||
7841 | return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, | ||||||
7842 | M->getMemoryVT(), M->getMemOperand()); | ||||||
7843 | } | ||||||
7844 | |||||||
7845 | case Intrinsic::amdgcn_struct_buffer_store: | ||||||
7846 | case Intrinsic::amdgcn_struct_buffer_store_format: { | ||||||
7847 | const bool IsFormat = | ||||||
7848 | IntrinsicID == Intrinsic::amdgcn_struct_buffer_store_format; | ||||||
7849 | |||||||
7850 | SDValue VData = Op.getOperand(2); | ||||||
7851 | EVT VDataVT = VData.getValueType(); | ||||||
7852 | EVT EltType = VDataVT.getScalarType(); | ||||||
7853 | bool IsD16 = IsFormat && (EltType.getSizeInBits() == 16); | ||||||
7854 | |||||||
7855 | if (IsD16) { | ||||||
7856 | VData = handleD16VData(VData, DAG); | ||||||
7857 | VDataVT = VData.getValueType(); | ||||||
7858 | } | ||||||
7859 | |||||||
7860 | if (!isTypeLegal(VDataVT)) { | ||||||
7861 | VData = | ||||||
7862 | DAG.getNode(ISD::BITCAST, DL, | ||||||
7863 | getEquivalentMemType(*DAG.getContext(), VDataVT), VData); | ||||||
7864 | } | ||||||
7865 | |||||||
7866 | auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG); | ||||||
7867 | SDValue Ops[] = { | ||||||
7868 | Chain, | ||||||
7869 | VData, | ||||||
7870 | Op.getOperand(3), // rsrc | ||||||
7871 | Op.getOperand(4), // vindex | ||||||
7872 | Offsets.first, // voffset | ||||||
7873 | Op.getOperand(6), // soffset | ||||||
7874 | Offsets.second, // offset | ||||||
7875 | Op.getOperand(7), // cachepolicy, swizzled buffer | ||||||
7876 | DAG.getTargetConstant(1, DL, MVT::i1), // idxen | ||||||
7877 | }; | ||||||
7878 | unsigned Opc = IntrinsicID == Intrinsic::amdgcn_struct_buffer_store ? | ||||||
7879 | AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT; | ||||||
7880 | Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc; | ||||||
7881 | MemSDNode *M = cast<MemSDNode>(Op); | ||||||
7882 | updateBufferMMO(M->getMemOperand(), Ops[4], Ops[5], Ops[6], Ops[3]); | ||||||
7883 | |||||||
7884 | // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics | ||||||
7885 | EVT VDataType = VData.getValueType().getScalarType(); | ||||||
7886 | if (!IsD16 && !VDataVT.isVector() && EltType.getSizeInBits() < 32) | ||||||
7887 | return handleByteShortBufferStores(DAG, VDataType, DL, Ops, M); | ||||||
7888 | |||||||
7889 | return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, | ||||||
7890 | M->getMemoryVT(), M->getMemOperand()); | ||||||
7891 | } | ||||||
7892 | case Intrinsic::amdgcn_end_cf: | ||||||
7893 | return SDValue(DAG.getMachineNode(AMDGPU::SI_END_CF, DL, MVT::Other, | ||||||
7894 | Op->getOperand(2), Chain), 0); | ||||||
7895 | |||||||
7896 | default: { | ||||||
7897 | if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr = | ||||||
7898 | AMDGPU::getImageDimIntrinsicInfo(IntrinsicID)) | ||||||
7899 | return lowerImage(Op, ImageDimIntr, DAG, true); | ||||||
7900 | |||||||
7901 | return Op; | ||||||
7902 | } | ||||||
7903 | } | ||||||
7904 | } | ||||||
7905 | |||||||
7906 | // The raw.(t)buffer and struct.(t)buffer intrinsics have two offset args: | ||||||
7907 | // offset (the offset that is included in bounds checking and swizzling, to be | ||||||
7908 | // split between the instruction's voffset and immoffset fields) and soffset | ||||||
7909 | // (the offset that is excluded from bounds checking and swizzling, to go in | ||||||
7910 | // the instruction's soffset field). This function takes the first kind of | ||||||
7911 | // offset and figures out how to split it between voffset and immoffset. | ||||||
7912 | std::pair<SDValue, SDValue> SITargetLowering::splitBufferOffsets( | ||||||
7913 | SDValue Offset, SelectionDAG &DAG) const { | ||||||
7914 | SDLoc DL(Offset); | ||||||
7915 | const unsigned MaxImm = 4095; | ||||||
7916 | SDValue N0 = Offset; | ||||||
7917 | ConstantSDNode *C1 = nullptr; | ||||||
7918 | |||||||
7919 | if ((C1 = dyn_cast<ConstantSDNode>(N0))) | ||||||
7920 | N0 = SDValue(); | ||||||
7921 | else if (DAG.isBaseWithConstantOffset(N0)) { | ||||||
7922 | C1 = cast<ConstantSDNode>(N0.getOperand(1)); | ||||||
7923 | N0 = N0.getOperand(0); | ||||||
7924 | } | ||||||
7925 | |||||||
7926 | if (C1) { | ||||||
7927 | unsigned ImmOffset = C1->getZExtValue(); | ||||||
7928 | // If the immediate value is too big for the immoffset field, keep only its | ||||||
7929 | // low 12 bits (value & 4095) in the immoffset field, so that the value that | ||||||
7930 | // is copied/added for the voffset field is a multiple of 4096, and it stands | ||||||
7931 | // more chance of being CSEd with the copy/add for another similar load/store. | ||||||
7932 | // However, do not do that rounding down to a multiple of 4096 if that is a | ||||||
7933 | // negative number, as it appears to be illegal to have a negative offset | ||||||
7934 | // in the vgpr, even if adding the immediate offset makes it positive. | ||||||
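// Worked example (illustrative): for a combined offset of 5000,
//   Overflow  = 5000 & ~4095 = 4096
//   ImmOffset = 5000 - 4096  = 904
// so the instruction's immoffset is 904 and the 4096 is added to the
// voffset; another access at offset 5004 can reuse the same 4096 add.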
7935 | unsigned Overflow = ImmOffset & ~MaxImm; | ||||||
7936 | ImmOffset -= Overflow; | ||||||
7937 | if ((int32_t)Overflow < 0) { | ||||||
7938 | Overflow += ImmOffset; | ||||||
7939 | ImmOffset = 0; | ||||||
7940 | } | ||||||
7941 | C1 = cast<ConstantSDNode>(DAG.getTargetConstant(ImmOffset, DL, MVT::i32)); | ||||||
7942 | if (Overflow) { | ||||||
7943 | auto OverflowVal = DAG.getConstant(Overflow, DL, MVT::i32); | ||||||
7944 | if (!N0) | ||||||
7945 | N0 = OverflowVal; | ||||||
7946 | else { | ||||||
7947 | SDValue Ops[] = { N0, OverflowVal }; | ||||||
7948 | N0 = DAG.getNode(ISD::ADD, DL, MVT::i32, Ops); | ||||||
7949 | } | ||||||
7950 | } | ||||||
7951 | } | ||||||
7952 | if (!N0) | ||||||
7953 | N0 = DAG.getConstant(0, DL, MVT::i32); | ||||||
7954 | if (!C1) | ||||||
7955 | C1 = cast<ConstantSDNode>(DAG.getTargetConstant(0, DL, MVT::i32)); | ||||||
7956 | return {N0, SDValue(C1, 0)}; | ||||||
7957 | } | ||||||
7958 | |||||||
7959 | // Analyze a combined offset from an amdgcn_buffer_ intrinsic and store the | ||||||
7960 | // three offsets (voffset, soffset and instoffset) into the SDValue[3] array | ||||||
7961 | // pointed to by Offsets. | ||||||
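// Sketch of the result layout (spelled out here for clarity):
//   Offsets[0] = voffset    (VGPR part; a plain constant 0 if unused)
//   Offsets[1] = soffset    (SGPR part)
//   Offsets[2] = instoffset (immediate field, a target constant)
// with AMDGPU::splitMUBUFOffset deciding how a known constant is divided
// between the soffset and immediate fields for this subtarget.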
7962 | void SITargetLowering::setBufferOffsets(SDValue CombinedOffset, | ||||||
7963 | SelectionDAG &DAG, SDValue *Offsets, | ||||||
7964 | Align Alignment) const { | ||||||
7965 | SDLoc DL(CombinedOffset); | ||||||
7966 | if (auto C = dyn_cast<ConstantSDNode>(CombinedOffset)) { | ||||||
7967 | uint32_t Imm = C->getZExtValue(); | ||||||
7968 | uint32_t SOffset, ImmOffset; | ||||||
7969 | if (AMDGPU::splitMUBUFOffset(Imm, SOffset, ImmOffset, Subtarget, | ||||||
7970 | Alignment)) { | ||||||
7971 | Offsets[0] = DAG.getConstant(0, DL, MVT::i32); | ||||||
7972 | Offsets[1] = DAG.getConstant(SOffset, DL, MVT::i32); | ||||||
7973 | Offsets[2] = DAG.getTargetConstant(ImmOffset, DL, MVT::i32); | ||||||
7974 | return; | ||||||
7975 | } | ||||||
7976 | } | ||||||
7977 | if (DAG.isBaseWithConstantOffset(CombinedOffset)) { | ||||||
7978 | SDValue N0 = CombinedOffset.getOperand(0); | ||||||
7979 | SDValue N1 = CombinedOffset.getOperand(1); | ||||||
7980 | uint32_t SOffset, ImmOffset; | ||||||
7981 | int Offset = cast<ConstantSDNode>(N1)->getSExtValue(); | ||||||
7982 | if (Offset >= 0 && AMDGPU::splitMUBUFOffset(Offset, SOffset, ImmOffset, | ||||||
7983 | Subtarget, Alignment)) { | ||||||
7984 | Offsets[0] = N0; | ||||||
7985 | Offsets[1] = DAG.getConstant(SOffset, DL, MVT::i32); | ||||||
7986 | Offsets[2] = DAG.getTargetConstant(ImmOffset, DL, MVT::i32); | ||||||
7987 | return; | ||||||
7988 | } | ||||||
7989 | } | ||||||
7990 | Offsets[0] = CombinedOffset; | ||||||
7991 | Offsets[1] = DAG.getConstant(0, DL, MVT::i32); | ||||||
7992 | Offsets[2] = DAG.getTargetConstant(0, DL, MVT::i32); | ||||||
7993 | } | ||||||
7994 | |||||||
7995 | // Handle 8 bit and 16 bit buffer loads | ||||||
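// For example (illustrative): an f16 buffer load becomes
//   i32 val = BUFFER_LOAD_USHORT ...   ; zero-extended 16-bit load
//   i16 lo  = truncate val
//   f16 res = bitcast lo
// keeping the chain result from the 32-bit load.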
7996 | SDValue SITargetLowering::handleByteShortBufferLoads(SelectionDAG &DAG, | ||||||
7997 | EVT LoadVT, SDLoc DL, | ||||||
7998 | ArrayRef<SDValue> Ops, | ||||||
7999 | MemSDNode *M) const { | ||||||
8000 | EVT IntVT = LoadVT.changeTypeToInteger(); | ||||||
8001 | unsigned Opc = (LoadVT.getScalarType() == MVT::i8) ? | ||||||
8002 | AMDGPUISD::BUFFER_LOAD_UBYTE : AMDGPUISD::BUFFER_LOAD_USHORT; | ||||||
8003 | |||||||
8004 | SDVTList ResList = DAG.getVTList(MVT::i32, MVT::Other); | ||||||
8005 | SDValue BufferLoad = DAG.getMemIntrinsicNode(Opc, DL, ResList, | ||||||
8006 | Ops, IntVT, | ||||||
8007 | M->getMemOperand()); | ||||||
8008 | SDValue LoadVal = DAG.getNode(ISD::TRUNCATE, DL, IntVT, BufferLoad); | ||||||
8009 | LoadVal = DAG.getNode(ISD::BITCAST, DL, LoadVT, LoadVal); | ||||||
8010 | |||||||
8011 | return DAG.getMergeValues({LoadVal, BufferLoad.getValue(1)}, DL); | ||||||
8012 | } | ||||||
8013 | |||||||
8014 | // Handle 8 bit and 16 bit buffer stores | ||||||
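// Mirror of the load case above (illustrative): an f16 store value is
// bitcast to i16, any-extended to i32, and emitted as BUFFER_STORE_SHORT;
// i8 data takes the BUFFER_STORE_BYTE path instead.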
8015 | SDValue SITargetLowering::handleByteShortBufferStores(SelectionDAG &DAG, | ||||||
8016 | EVT VDataType, SDLoc DL, | ||||||
8017 | SDValue Ops[], | ||||||
8018 | MemSDNode *M) const { | ||||||
8019 | if (VDataType == MVT::f16) | ||||||
8020 | Ops[1] = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Ops[1]); | ||||||
8021 | |||||||
8022 | SDValue BufferStoreExt = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Ops[1]); | ||||||
8023 | Ops[1] = BufferStoreExt; | ||||||
8024 | unsigned Opc = (VDataType == MVT::i8) ? AMDGPUISD::BUFFER_STORE_BYTE : | ||||||
8025 | AMDGPUISD::BUFFER_STORE_SHORT; | ||||||
8026 | ArrayRef<SDValue> OpsRef = makeArrayRef(&Ops[0], 9); | ||||||
8027 | return DAG.getMemIntrinsicNode(Opc, DL, M->getVTList(), OpsRef, VDataType, | ||||||
8028 | M->getMemOperand()); | ||||||
8029 | } | ||||||
8030 | |||||||
8031 | static SDValue getLoadExtOrTrunc(SelectionDAG &DAG, | ||||||
8032 | ISD::LoadExtType ExtType, SDValue Op, | ||||||
8033 | const SDLoc &SL, EVT VT) { | ||||||
8034 | if (VT.bitsLT(Op.getValueType())) | ||||||
8035 | return DAG.getNode(ISD::TRUNCATE, SL, VT, Op); | ||||||
8036 | |||||||
8037 | switch (ExtType) { | ||||||
8038 | case ISD::SEXTLOAD: | ||||||
8039 | return DAG.getNode(ISD::SIGN_EXTEND, SL, VT, Op); | ||||||
8040 | case ISD::ZEXTLOAD: | ||||||
8041 | return DAG.getNode(ISD::ZERO_EXTEND, SL, VT, Op); | ||||||
8042 | case ISD::EXTLOAD: | ||||||
8043 | return DAG.getNode(ISD::ANY_EXTEND, SL, VT, Op); | ||||||
8044 | case ISD::NON_EXTLOAD: | ||||||
8045 | return Op; | ||||||
8046 | } | ||||||
8047 | |||||||
8048 | llvm_unreachable("invalid ext type"); | ||||||
8049 | } | ||||||
8050 | |||||||
8051 | SDValue SITargetLowering::widenLoad(LoadSDNode *Ld, DAGCombinerInfo &DCI) const { | ||||||
8052 | SelectionDAG &DAG = DCI.DAG; | ||||||
8053 | if (Ld->getAlignment() < 4 || Ld->isDivergent()) | ||||||
8054 | return SDValue(); | ||||||
8055 | |||||||
8056 | // FIXME: Constant loads should all be marked invariant. | ||||||
8057 | unsigned AS = Ld->getAddressSpace(); | ||||||
8058 | if (AS != AMDGPUAS::CONSTANT_ADDRESS && | ||||||
8059 | AS != AMDGPUAS::CONSTANT_ADDRESS_32BIT && | ||||||
8060 | (AS != AMDGPUAS::GLOBAL_ADDRESS || !Ld->isInvariant())) | ||||||
8061 | return SDValue(); | ||||||
8062 | |||||||
8063 | // Don't do this early, since it may interfere with adjacent load merging for | ||||||
8064 | // illegal types. We can avoid losing alignment information for exotic types | ||||||
8065 | // pre-legalize. | ||||||
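// Overall shape of the transform (illustrative): a uniform i16 zextload
// from constant address space with align >= 4 is reloaded as a full i32,
//   i32 wide = load (align 4) ...
//   i32 cvt  = zero_extend_inreg wide, i16
// and then truncated/extended to the original result type, which lets the
// access be selected as a 32-bit scalar load.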
8066 | EVT MemVT = Ld->getMemoryVT(); | ||||||
8067 | if ((MemVT.isSimple() && !DCI.isAfterLegalizeDAG()) || | ||||||
8068 | MemVT.getSizeInBits() >= 32) | ||||||
8069 | return SDValue(); | ||||||
8070 | |||||||
8071 | SDLoc SL(Ld); | ||||||
8072 | |||||||
8073 | assert((!MemVT.isVector() || Ld->getExtensionType() == ISD::NON_EXTLOAD) && | ||||||
8074 | "unexpected vector extload"); | ||||||
8075 | |||||||
8076 | // TODO: Drop only high part of range. | ||||||
8077 | SDValue Ptr = Ld->getBasePtr(); | ||||||
8078 | SDValue NewLoad = DAG.getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, | ||||||
8079 | MVT::i32, SL, Ld->getChain(), Ptr, | ||||||
8080 | Ld->getOffset(), | ||||||
8081 | Ld->getPointerInfo(), MVT::i32, | ||||||
8082 | Ld->getAlignment(), | ||||||
8083 | Ld->getMemOperand()->getFlags(), | ||||||
8084 | Ld->getAAInfo(), | ||||||
8085 | nullptr); // Drop ranges | ||||||
8086 | |||||||
8087 | EVT TruncVT = EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits()); | ||||||
8088 | if (MemVT.isFloatingPoint()) { | ||||||
8089 | assert(Ld->getExtensionType() == ISD::NON_EXTLOAD && | ||||||
8090 | "unexpected fp extload"); | ||||||
8091 | TruncVT = MemVT.changeTypeToInteger(); | ||||||
8092 | } | ||||||
8093 | |||||||
8094 | SDValue Cvt = NewLoad; | ||||||
8095 | if (Ld->getExtensionType() == ISD::SEXTLOAD) { | ||||||
8096 | Cvt = DAG.getNode(ISD::SIGN_EXTEND_INREG, SL, MVT::i32, NewLoad, | ||||||
8097 | DAG.getValueType(TruncVT)); | ||||||
8098 | } else if (Ld->getExtensionType() == ISD::ZEXTLOAD || | ||||||
8099 | Ld->getExtensionType() == ISD::NON_EXTLOAD) { | ||||||
8100 | Cvt = DAG.getZeroExtendInReg(NewLoad, SL, TruncVT); | ||||||
8101 | } else { | ||||||
8102 | assert(Ld->getExtensionType() == ISD::EXTLOAD); | ||||||
8103 | } | ||||||
8104 | |||||||
8105 | EVT VT = Ld->getValueType(0); | ||||||
8106 | EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits()); | ||||||
8107 | |||||||
8108 | DCI.AddToWorklist(Cvt.getNode()); | ||||||
8109 | |||||||
8110 | // We may need to handle exotic cases, such as i16->i64 extloads, so insert | ||||||
8111 | // the appropriate extension from the 32-bit load. | ||||||
8112 | Cvt = getLoadExtOrTrunc(DAG, Ld->getExtensionType(), Cvt, SL, IntVT); | ||||||
8113 | DCI.AddToWorklist(Cvt.getNode()); | ||||||
8114 | |||||||
8115 | // Handle conversion back to floating point if necessary. | ||||||
8116 | Cvt = DAG.getNode(ISD::BITCAST, SL, VT, Cvt); | ||||||
8117 | |||||||
8118 | return DAG.getMergeValues({ Cvt, NewLoad.getValue(1) }, SL); | ||||||
8119 | } | ||||||
8120 | |||||||
8121 | SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const { | ||||||
8122 | SDLoc DL(Op); | ||||||
8123 | LoadSDNode *Load = cast<LoadSDNode>(Op); | ||||||
8124 | ISD::LoadExtType ExtType = Load->getExtensionType(); | ||||||
8125 | EVT MemVT = Load->getMemoryVT(); | ||||||
8126 | |||||||
8127 | if (ExtType == ISD::NON_EXTLOAD && MemVT.getSizeInBits() < 32) { | ||||||
8128 | if (MemVT == MVT::i16 && isTypeLegal(MVT::i16)) | ||||||
8129 | return SDValue(); | ||||||
8130 | |||||||
8131 | // FIXME: Copied from PPC | ||||||
8132 | // First, load into 32 bits, then truncate to 1 bit. | ||||||
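// For a vector like v4i1 (illustrative): the extending load below brings
// the packed bits into an i32, and each lane is recovered as
//   elt[I] = truncate i1 (srl NewLD, I)
// before being rebuilt into the vector result.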
8133 | |||||||
8134 | SDValue Chain = Load->getChain(); | ||||||
8135 | SDValue BasePtr = Load->getBasePtr(); | ||||||
8136 | MachineMemOperand *MMO = Load->getMemOperand(); | ||||||
8137 | |||||||
8138 | EVT RealMemVT = (MemVT == MVT::i1) ? MVT::i8 : MVT::i16; | ||||||
8139 | |||||||
8140 | SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain, | ||||||
8141 | BasePtr, RealMemVT, MMO); | ||||||
8142 | |||||||
8143 | if (!MemVT.isVector()) { | ||||||
8144 | SDValue Ops[] = { | ||||||
8145 | DAG.getNode(ISD::TRUNCATE, DL, MemVT, NewLD), | ||||||
8146 | NewLD.getValue(1) | ||||||
8147 | }; | ||||||
8148 | |||||||
8149 | return DAG.getMergeValues(Ops, DL); | ||||||
8150 | } | ||||||
8151 | |||||||
8152 | SmallVector<SDValue, 3> Elts; | ||||||
8153 | for (unsigned I = 0, N = MemVT.getVectorNumElements(); I != N; ++I) { | ||||||
8154 | SDValue Elt = DAG.getNode(ISD::SRL, DL, MVT::i32, NewLD, | ||||||
8155 | DAG.getConstant(I, DL, MVT::i32)); | ||||||
8156 | |||||||
8157 | Elts.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Elt)); | ||||||
8158 | } | ||||||
8159 | |||||||
8160 | SDValue Ops[] = { | ||||||
8161 | DAG.getBuildVector(MemVT, DL, Elts), | ||||||
8162 | NewLD.getValue(1) | ||||||
8163 | }; | ||||||
8164 | |||||||
8165 | return DAG.getMergeValues(Ops, DL); | ||||||
8166 | } | ||||||
8167 | |||||||
8168 | if (!MemVT.isVector()) | ||||||
8169 | return SDValue(); | ||||||
8170 | |||||||
8171 | assert(Op.getValueType().getVectorElementType() == MVT::i32 && | ||||||
8172 | "Custom lowering for non-i32 vectors hasn't been implemented."); | ||||||
8173 | |||||||
8174 | unsigned Alignment = Load->getAlignment(); | ||||||
8175 | unsigned AS = Load->getAddressSpace(); | ||||||
8176 | if (Subtarget->hasLDSMisalignedBug() && | ||||||
8177 | AS == AMDGPUAS::FLAT_ADDRESS && | ||||||
8178 | Alignment < MemVT.getStoreSize() && MemVT.getSizeInBits() > 32) { | ||||||
8179 | return SplitVectorLoad(Op, DAG); | ||||||
8180 | } | ||||||
8181 | |||||||
8182 | MachineFunction &MF = DAG.getMachineFunction(); | ||||||
8183 | SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); | ||||||
8184 | // If there is a possibility that flat instructions access scratch memory, | ||||||
8185 | // then we need to use the same legalization rules we use for private. | ||||||
8186 | if (AS == AMDGPUAS::FLAT_ADDRESS && | ||||||
8187 | !Subtarget->hasMultiDwordFlatScratchAddressing()) | ||||||
8188 | AS = MFI->hasFlatScratchInit() ? | ||||||
8189 | AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS; | ||||||
8190 | |||||||
8191 | unsigned NumElements = MemVT.getVectorNumElements(); | ||||||
8192 | |||||||
8193 | if (AS == AMDGPUAS::CONSTANT_ADDRESS || | ||||||
8194 | AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT) { | ||||||
8195 | if (!Op->isDivergent() && Alignment >= 4 && NumElements < 32) { | ||||||
8196 | if (MemVT.isPow2VectorType()) | ||||||
8197 | return SDValue(); | ||||||
8198 | return WidenOrSplitVectorLoad(Op, DAG); | ||||||
8199 | } | ||||||
8200 | // Non-uniform loads will be selected to MUBUF instructions, so they | ||||||
8201 | // have the same legalization requirements as global and private | ||||||
8202 | // loads. | ||||||
8203 | // | ||||||
8204 | } | ||||||
8205 | |||||||
8206 | if (AS == AMDGPUAS::CONSTANT_ADDRESS || | ||||||
8207 | AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT || | ||||||
8208 | AS == AMDGPUAS::GLOBAL_ADDRESS) { | ||||||
8209 | if (Subtarget->getScalarizeGlobalBehavior() && !Op->isDivergent() && | ||||||
8210 | Load->isSimple() && isMemOpHasNoClobberedMemOperand(Load) && | ||||||
8211 | Alignment >= 4 && NumElements < 32) { | ||||||
8212 | if (MemVT.isPow2VectorType()) | ||||||
8213 | return SDValue(); | ||||||
8214 | return WidenOrSplitVectorLoad(Op, DAG); | ||||||
8215 | } | ||||||
8216 | // Non-uniform loads will be selected to MUBUF instructions, so they | ||||||
8217 | // have the same legalization requirements as global and private | ||||||
8218 | // loads. | ||||||
8219 | // | ||||||
8220 | } | ||||||
8221 | if (AS == AMDGPUAS::CONSTANT_ADDRESS || | ||||||
8222 | AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT || | ||||||
8223 | AS == AMDGPUAS::GLOBAL_ADDRESS || | ||||||
8224 | AS == AMDGPUAS::FLAT_ADDRESS) { | ||||||
8225 | if (NumElements > 4) | ||||||
8226 | return SplitVectorLoad(Op, DAG); | ||||||
8227 | // v3 loads not supported on SI. | ||||||
8228 | if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores()) | ||||||
8229 | return WidenOrSplitVectorLoad(Op, DAG); | ||||||
8230 | |||||||
8231 | // v3 and v4 loads are supported for private and global memory. | ||||||
8232 | return SDValue(); | ||||||
8233 | } | ||||||
8234 | if (AS == AMDGPUAS::PRIVATE_ADDRESS) { | ||||||
8235 | // Depending on the setting of the private_element_size field in the | ||||||
8236 | // resource descriptor, we can only make private accesses up to a certain | ||||||
8237 | // size. | ||||||
8238 | switch (Subtarget->getMaxPrivateElementSize()) { | ||||||
8239 | case 4: { | ||||||
8240 | SDValue Ops[2]; | ||||||
8241 | std::tie(Ops[0], Ops[1]) = scalarizeVectorLoad(Load, DAG); | ||||||
8242 | return DAG.getMergeValues(Ops, DL); | ||||||
8243 | } | ||||||
8244 | case 8: | ||||||
8245 | if (NumElements > 2) | ||||||
8246 | return SplitVectorLoad(Op, DAG); | ||||||
8247 | return SDValue(); | ||||||
8248 | case 16: | ||||||
8249 | // Same as global/flat | ||||||
8250 | if (NumElements > 4) | ||||||
8251 | return SplitVectorLoad(Op, DAG); | ||||||
8252 | // v3 loads not supported on SI. | ||||||
8253 | if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores()) | ||||||
8254 | return WidenOrSplitVectorLoad(Op, DAG); | ||||||
8255 | |||||||
8256 | return SDValue(); | ||||||
8257 | default: | ||||||
8258 | llvm_unreachable("unsupported private_element_size"); | ||||||
8259 | } | ||||||
8260 | } else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) { | ||||||
8261 | // Use ds_read_b128 or ds_read_b96 when possible. | ||||||
8262 | if (Subtarget->hasDS96AndDS128() && | ||||||
8263 | ((Subtarget->useDS128() && MemVT.getStoreSize() == 16) || | ||||||
8264 | MemVT.getStoreSize() == 12) && | ||||||
8265 | allowsMisalignedMemoryAccessesImpl(MemVT.getSizeInBits(), AS, | ||||||
8266 | Load->getAlign())) | ||||||
8267 | return SDValue(); | ||||||
8268 | |||||||
8269 | if (NumElements > 2) | ||||||
8270 | return SplitVectorLoad(Op, DAG); | ||||||
8271 | |||||||
8272 | // SI has a hardware bug in the LDS / GDS bounds checking: if the base | ||||||
8273 | // address is negative, then the instruction is incorrectly treated as | ||||||
8274 | // out-of-bounds even if base + offsets is in bounds. Split vectorized | ||||||
8275 | // loads here to avoid emitting ds_read2_b32. We may re-combine the | ||||||
8276 | // load later in the SILoadStoreOptimizer. | ||||||
8277 | if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS && | ||||||
8278 | NumElements == 2 && MemVT.getStoreSize() == 8 && | ||||||
8279 | Load->getAlignment() < 8) { | ||||||
8280 | return SplitVectorLoad(Op, DAG); | ||||||
8281 | } | ||||||
8282 | } | ||||||
8283 | |||||||
8284 | if (!allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(), | ||||||
8285 | MemVT, *Load->getMemOperand())) { | ||||||
8286 | SDValue Ops[2]; | ||||||
8287 | std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG); | ||||||
8288 | return DAG.getMergeValues(Ops, DL); | ||||||
8289 | } | ||||||
8290 | |||||||
8291 | return SDValue(); | ||||||
8292 | } | ||||||
8293 | |||||||
8294 | SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { | ||||||
8295 | EVT VT = Op.getValueType(); | ||||||
8296 | assert(VT.getSizeInBits() == 64); | ||||||
8297 | |||||||
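// Strategy (as the code below shows): bitcast both 64-bit operands to
// v2i32, select the low and high halves separately with the same
// condition, and bitcast the rebuilt v2i32 back to the original type.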
8298 | SDLoc DL(Op); | ||||||
8299 | SDValue Cond = Op.getOperand(0); | ||||||
8300 | |||||||
8301 | SDValue Zero = DAG.getConstant(0, DL, MVT::i32); | ||||||
8302 | SDValue One = DAG.getConstant(1, DL, MVT::i32); | ||||||
8303 | |||||||
8304 | SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1)); | ||||||
8305 | SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2)); | ||||||
8306 | |||||||
8307 | SDValue Lo0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, Zero); | ||||||
8308 | SDValue Lo1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, Zero); | ||||||
8309 | |||||||
8310 | SDValue Lo = DAG.getSelect(DL, MVT::i32, Cond, Lo0, Lo1); | ||||||
8311 | |||||||
8312 | SDValue Hi0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, One); | ||||||
8313 | SDValue Hi1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, One); | ||||||
8314 | |||||||
8315 | SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1); | ||||||
8316 | |||||||
8317 | SDValue Res = DAG.getBuildVector(MVT::v2i32, DL, {Lo, Hi}); | ||||||
8318 | return DAG.getNode(ISD::BITCAST, DL, VT, Res); | ||||||
8319 | } | ||||||
8320 | |||||||
8321 | // Catch division cases where we can use shortcuts with rcp and rsq | ||||||
8322 | // instructions. | ||||||
8323 | SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op, | ||||||
8324 | SelectionDAG &DAG) const { | ||||||
8325 | SDLoc SL(Op); | ||||||
8326 | SDValue LHS = Op.getOperand(0); | ||||||
8327 | SDValue RHS = Op.getOperand(1); | ||||||
8328 | EVT VT = Op.getValueType(); | ||||||
8329 | const SDNodeFlags Flags = Op->getFlags(); | ||||||
8330 | |||||||
8331 | bool AllowInaccurateRcp = Flags.hasApproximateFuncs(); | ||||||
8332 | |||||||
8333 | // Without !fpmath accuracy information, we can't do more because we don't | ||||||
8334 | // know exactly whether rcp is accurate enough to meet !fpmath requirement. | ||||||
8335 | if (!AllowInaccurateRcp) | ||||||
8336 | return SDValue(); | ||||||
8337 | |||||||
8338 | if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) { | ||||||
8339 | if (CLHS->isExactlyValue(1.0)) { | ||||||
8340 | // v_rcp_f32 and v_rsq_f32 do not support denormals and, according to | ||||||
8341 | // the CI documentation, have a worst case error of 1 ulp. | ||||||
8342 | // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to | ||||||
8343 | // use it as long as we aren't trying to use denormals. | ||||||
8344 | // | ||||||
8345 | // v_rcp_f16 and v_rsq_f16 DO support denormals. | ||||||
8346 | |||||||
8347 | // 1.0 / sqrt(x) -> rsq(x) | ||||||
8348 | |||||||
8349 | // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP | ||||||
8350 | // error seems really high at 2^29 ULP. | ||||||
8351 | if (RHS.getOpcode() == ISD::FSQRT) | ||||||
8352 | return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0)); | ||||||
8353 | |||||||
8354 | // 1.0 / x -> rcp(x) | ||||||
8355 | return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS); | ||||||
8356 | } | ||||||
8357 | |||||||
8358 | // Same as for 1.0, but expand the sign out of the constant. | ||||||
8359 | if (CLHS->isExactlyValue(-1.0)) { | ||||||
8360 | // -1.0 / x -> rcp (fneg x) | ||||||
8361 | SDValue FNegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); | ||||||
8362 | return DAG.getNode(AMDGPUISD::RCP, SL, VT, FNegRHS); | ||||||
8363 | } | ||||||
8364 | } | ||||||
8365 | |||||||
8366 | // Turn into multiply by the reciprocal. | ||||||
8367 | // x / y -> x * (1.0 / y) | ||||||
8368 | SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS); | ||||||
8369 | return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip, Flags); | ||||||
8370 | } | ||||||
8371 | |||||||
8372 | SDValue SITargetLowering::lowerFastUnsafeFDIV64(SDValue Op, | ||||||
8373 | SelectionDAG &DAG) const { | ||||||
8374 | SDLoc SL(Op); | ||||||
8375 | SDValue X = Op.getOperand(0); | ||||||
8376 | SDValue Y = Op.getOperand(1); | ||||||
8377 | EVT VT = Op.getValueType(); | ||||||
8378 | const SDNodeFlags Flags = Op->getFlags(); | ||||||
8379 | |||||||
8380 | bool AllowInaccurateDiv = Flags.hasApproximateFuncs() || | ||||||
8381 | DAG.getTarget().Options.UnsafeFPMath; | ||||||
8382 | if (!AllowInaccurateDiv) | ||||||
8383 | return SDValue(); | ||||||
8384 | |||||||
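// Newton-Raphson refinement of r ~ 1/y (a sketch of the math, not from
// the source comments): with e = 1 - y*r, the update r' = r + r*e
// squares the error, since 1 - y*r' = e^2. Two fma-based iterations
// follow, then q = x*r and a final residual correction
//   q' = q + (x - y*q) * r
// which is what the last two nodes below compute.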
8385 | SDValue NegY = DAG.getNode(ISD::FNEG, SL, VT, Y); | ||||||
8386 | SDValue One = DAG.getConstantFP(1.0, SL, VT); | ||||||
8387 | |||||||
8388 | SDValue R = DAG.getNode(AMDGPUISD::RCP, SL, VT, Y); | ||||||
8389 | SDValue Tmp0 = DAG.getNode(ISD::FMA, SL, VT, NegY, R, One); | ||||||
8390 | |||||||
8391 | R = DAG.getNode(ISD::FMA, SL, VT, Tmp0, R, R); | ||||||
8392 | SDValue Tmp1 = DAG.getNode(ISD::FMA, SL, VT, NegY, R, One); | ||||||
8393 | R = DAG.getNode(ISD::FMA, SL, VT, Tmp1, R, R); | ||||||
8394 | SDValue Ret = DAG.getNode(ISD::FMUL, SL, VT, X, R); | ||||||
8395 | SDValue Tmp2 = DAG.getNode(ISD::FMA, SL, VT, NegY, Ret, X); | ||||||
8396 | return DAG.getNode(ISD::FMA, SL, VT, Tmp2, R, Ret); | ||||||
8397 | } | ||||||
8398 | |||||||
8399 | static SDValue getFPBinOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL, | ||||||
8400 | EVT VT, SDValue A, SDValue B, SDValue GlueChain, | ||||||
8401 | SDNodeFlags Flags) { | ||||||
8402 | if (GlueChain->getNumValues() <= 1) { | ||||||
8403 | return DAG.getNode(Opcode, SL, VT, A, B, Flags); | ||||||
8404 | } | ||||||
8405 | |||||||
8406 | assert(GlueChain->getNumValues() == 3); | ||||||
8407 | |||||||
8408 | SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue); | ||||||
8409 | switch (Opcode) { | ||||||
8410 | default: llvm_unreachable("no chain equivalent for opcode"); | ||||||
8411 | case ISD::FMUL: | ||||||
8412 | Opcode = AMDGPUISD::FMUL_W_CHAIN; | ||||||
8413 | break; | ||||||
8414 | } | ||||||
8415 | |||||||
8416 | return DAG.getNode(Opcode, SL, VTList, | ||||||
8417 | {GlueChain.getValue(1), A, B, GlueChain.getValue(2)}, | ||||||
8418 | Flags); | ||||||
8419 | } | ||||||
8420 | |||||||
8421 | static SDValue getFPTernOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL, | ||||||
8422 | EVT VT, SDValue A, SDValue B, SDValue C, | ||||||
8423 | SDValue GlueChain, SDNodeFlags Flags) { | ||||||
8424 | if (GlueChain->getNumValues() <= 1) { | ||||||
8425 | return DAG.getNode(Opcode, SL, VT, {A, B, C}, Flags); | ||||||
8426 | } | ||||||
8427 | |||||||
8428 | assert(GlueChain->getNumValues() == 3); | ||||||
8429 | |||||||
8430 | SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue); | ||||||
8431 | switch (Opcode) { | ||||||
8432 | default: llvm_unreachable("no chain equivalent for opcode"); | ||||||
8433 | case ISD::FMA: | ||||||
8434 | Opcode = AMDGPUISD::FMA_W_CHAIN; | ||||||
8435 | break; | ||||||
8436 | } | ||||||
8437 | |||||||
8438 | return DAG.getNode(Opcode, SL, VTList, | ||||||
8439 | {GlueChain.getValue(1), A, B, C, GlueChain.getValue(2)}, | ||||||
8440 | Flags); | ||||||
8441 | } | ||||||
8442 | |||||||
8443 | SDValue SITargetLowering::LowerFDIV16(SDValue Op, SelectionDAG &DAG) const { | ||||||
8444 | if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG)) | ||||||
8445 | return FastLowered; | ||||||
8446 | |||||||
8447 | SDLoc SL(Op); | ||||||
8448 | SDValue Src0 = Op.getOperand(0); | ||||||
8449 | SDValue Src1 = Op.getOperand(1); | ||||||
8450 | |||||||
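// Sketch of the approach (rationale is an assumption, not stated in the
// source): f16 division is performed in f32, where the hardware rcp is
// accurate enough that the quotient, rounded back to f16's 11-bit
// significand, meets the precision requirement; DIV_FIXUP then restores
// the correct special-case behavior (signs, infinities, NaNs) using the
// original f16 operands.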
8451 | SDValue CvtSrc0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0); | ||||||
8452 | SDValue CvtSrc1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1); | ||||||
8453 | |||||||
8454 | SDValue RcpSrc1 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, CvtSrc1); | ||||||
8455 | SDValue Quot = DAG.getNode(ISD::FMUL, SL, MVT::f32, CvtSrc0, RcpSrc1); | ||||||
8456 | |||||||
8457 | SDValue FPRoundFlag = DAG.getTargetConstant(0, SL, MVT::i32); | ||||||
8458 | SDValue BestQuot = DAG.getNode(ISD::FP_ROUND, SL, MVT::f16, Quot, FPRoundFlag); | ||||||
8459 | |||||||
8460 | return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f16, BestQuot, Src1, Src0); | ||||||
8461 | } | ||||||
8462 | |||||||
8463 | // Faster 2.5 ULP division that does not support denormals. | ||||||
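// The constants below implement a scaling trick (worked out here for
// clarity): K0 = 0x6f800000 is 2^96 and K1 = 0x2f800000 is 2^-32. If
// |rhs| > 2^96, the denominator is pre-scaled by 2^-32 before the rcp so
// the reciprocal stays out of the flushed denormal range, and the same
// factor r3 multiplies the quotient at the end:
//   result = r3 * (lhs * rcp(rhs * r3)) = lhs / rhs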
8464 | SDValue SITargetLowering::lowerFDIV_FAST(SDValue Op, SelectionDAG &DAG) const { | ||||||
8465 | SDLoc SL(Op); | ||||||
8466 | SDValue LHS = Op.getOperand(1); | ||||||
8467 | SDValue RHS = Op.getOperand(2); | ||||||
8468 | |||||||
8469 | SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS); | ||||||
8470 | |||||||
8471 | const APFloat K0Val(BitsToFloat(0x6f800000)); | ||||||
8472 | const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32); | ||||||
8473 | |||||||
8474 | const APFloat K1Val(BitsToFloat(0x2f800000)); | ||||||
8475 | const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32); | ||||||
8476 | |||||||
8477 | const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32); | ||||||
8478 | |||||||
8479 | EVT SetCCVT = | ||||||
8480 | getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32); | ||||||
8481 | |||||||
8482 | SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT); | ||||||
8483 | |||||||
8484 | SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One); | ||||||
8485 | |||||||
8486 | // TODO: Should this propagate fast-math-flags? | ||||||
8487 | r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3); | ||||||
8488 | |||||||
8489 | // rcp does not support denormals. | ||||||
8490 | SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1); | ||||||
8491 | |||||||
8492 | SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0); | ||||||
8493 | |||||||
8494 | return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul); | ||||||
8495 | } | ||||||
8496 | |||||||
8497 | // Returns immediate value for setting the F32 denorm mode when using the | ||||||
8498 | // S_DENORM_MODE instruction. | ||||||
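// Layout sketch (values matching the macros used below): the 4-bit
// immediate packs the f32 denorm mode in bits [1:0] and the f64/f16 mode
// in bits [3:2]; e.g. flush-none for both (FP_DENORM_FLUSH_NONE == 3)
// yields Mode == 0xF.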
8499 | static SDValue getSPDenormModeValue(int SPDenormMode, SelectionDAG &DAG, | ||||||
8500 | const SDLoc &SL, const GCNSubtarget *ST) { | ||||||
8501 | assert(ST->hasDenormModeInst() && "Requires S_DENORM_MODE"); | ||||||
8502 | int DPDenormModeDefault = hasFP64FP16Denormals(DAG.getMachineFunction()) | ||||||
8503 | ? FP_DENORM_FLUSH_NONE | ||||||
8504 | : FP_DENORM_FLUSH_IN_FLUSH_OUT; | ||||||
8505 | |||||||
8506 | int Mode = SPDenormMode | (DPDenormModeDefault << 2); | ||||||
8507 | return DAG.getTargetConstant(Mode, SL, MVT::i32); | ||||||
8508 | } | ||||||
8509 | |||||||
8510 | SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const { | ||||||
8511 | if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG)) | ||||||
8512 | return FastLowered; | ||||||
8513 | |||||||
8514 | // The selection matcher assumes that anything with a chain selects to a | ||||||
8515 | // mayRaiseFPException machine instruction. Since we're introducing a chain | ||||||
8516 | // here, we need to explicitly report nofpexcept for the regular fdiv | ||||||
8517 | // lowering. | ||||||
8518 | SDNodeFlags Flags = Op->getFlags(); | ||||||
8519 | Flags.setNoFPExcept(true); | ||||||
8520 | |||||||
8521 | SDLoc SL(Op); | ||||||
8522 | SDValue LHS = Op.getOperand(0); | ||||||
8523 | SDValue RHS = Op.getOperand(1); | ||||||
8524 | |||||||
8525 | const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32); | ||||||
8526 | |||||||
8527 | SDVTList ScaleVT = DAG.getVTList(MVT::f32, MVT::i1); | ||||||
8528 | |||||||
8529 | SDValue DenominatorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, | ||||||
8530 | {RHS, RHS, LHS}, Flags); | ||||||
8531 | SDValue NumeratorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, | ||||||
8532 | {LHS, RHS, LHS}, Flags); | ||||||
8533 | |||||||
8534 | // Denominator is scaled to not be denormal, so using rcp is ok. | ||||||
8535 | SDValue ApproxRcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, | ||||||
8536 | DenominatorScaled, Flags); | ||||||
8537 | SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f32, | ||||||
8538 | DenominatorScaled, Flags); | ||||||
8539 | |||||||
8540 | const unsigned Denorm32Reg = AMDGPU::Hwreg::ID_MODE | | ||||||
8541 | (4 << AMDGPU::Hwreg::OFFSET_SHIFT_) | | ||||||
8542 | (1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_); | ||||||
8543 | const SDValue BitField = DAG.getTargetConstant(Denorm32Reg, SL, MVT::i32); | ||||||
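// Assumption for clarity: this hwreg descriptor selects the MODE
// register's FP32 denorm control, a 2-bit field (WIDTH_M1 == width - 1
// == 1) starting at bit offset 4, which is what the S_SETREG fallback
// below writes when S_DENORM_MODE is unavailable.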
8544 | |||||||
8545 | const bool HasFP32Denormals = hasFP32Denormals(DAG.getMachineFunction()); | ||||||
8546 | |||||||
8547 | if (!HasFP32Denormals) { | ||||||
8548 | // Note we can't use the STRICT_FMA/STRICT_FMUL for the non-strict FDIV | ||||||
8549 | // lowering. The chain dependence is insufficient, and we need glue. We do | ||||||
8550 | // not need the glue variants in a strictfp function. | ||||||
8551 | |||||||
8552 | SDVTList BindParamVTs = DAG.getVTList(MVT::Other, MVT::Glue); | ||||||
8553 | |||||||
8554 | SDNode *EnableDenorm; | ||||||
8555 | if (Subtarget->hasDenormModeInst()) { | ||||||
8556 | const SDValue EnableDenormValue = | ||||||
8557 | getSPDenormModeValue(FP_DENORM_FLUSH_NONE, DAG, SL, Subtarget); | ||||||
8558 | |||||||
8559 | EnableDenorm = DAG.getNode(AMDGPUISD::DENORM_MODE, SL, BindParamVTs, | ||||||
8560 | DAG.getEntryNode(), EnableDenormValue).getNode(); | ||||||
8561 | } else { | ||||||
8562 | const SDValue EnableDenormValue = DAG.getConstant(FP_DENORM_FLUSH_NONE, | ||||||
8563 | SL, MVT::i32); | ||||||
8564 | EnableDenorm = | ||||||
8565 | DAG.getMachineNode(AMDGPU::S_SETREG_B32, SL, BindParamVTs, | ||||||
8566 | {EnableDenormValue, BitField, DAG.getEntryNode()}); | ||||||
8567 | } | ||||||
8568 | |||||||
8569 | SDValue Ops[3] = { | ||||||
8570 | NegDivScale0, | ||||||
8571 | SDValue(EnableDenorm, 0), | ||||||
8572 | SDValue(EnableDenorm, 1) | ||||||
8573 | }; | ||||||
8574 | |||||||
8575 | NegDivScale0 = DAG.getMergeValues(Ops, SL); | ||||||
8576 | } | ||||||
8577 | |||||||
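// The Fma0..Fma4 chain below is the same Newton-Raphson scheme as the
// f64 path (sketch): Fma0 = 1 - d*rcp(d) is the error term, Fma1 refines
// the reciprocal, Mul forms the scaled quotient, and Fma2..Fma4 apply a
// quotient refinement plus a final residual that DIV_FMAS combines using
// the scale predicate.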
8578 | SDValue Fma0 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, | ||||||
8579 | ApproxRcp, One, NegDivScale0, Flags); | ||||||
8580 | |||||||
8581 | SDValue Fma1 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma0, ApproxRcp, | ||||||
8582 | ApproxRcp, Fma0, Flags); | ||||||
8583 | |||||||
8584 | SDValue Mul = getFPBinOp(DAG, ISD::FMUL, SL, MVT::f32, NumeratorScaled, | ||||||
8585 | Fma1, Fma1, Flags); | ||||||
8586 | |||||||
8587 | SDValue Fma2 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Mul, | ||||||
8588 | NumeratorScaled, Mul, Flags); | ||||||
8589 | |||||||
8590 | SDValue Fma3 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, | ||||||
8591 | Fma2, Fma1, Mul, Fma2, Flags); | ||||||
8592 | |||||||
8593 | SDValue Fma4 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Fma3, | ||||||
8594 | NumeratorScaled, Fma3, Flags); | ||||||
8595 | |||||||
8596 | if (!HasFP32Denormals) { | ||||||
8597 | SDNode *DisableDenorm; | ||||||
8598 | if (Subtarget->hasDenormModeInst()) { | ||||||
8599 | const SDValue DisableDenormValue = | ||||||
8600 | getSPDenormModeValue(FP_DENORM_FLUSH_IN_FLUSH_OUT, DAG, SL, Subtarget); | ||||||
8601 | |||||||
8602 | DisableDenorm = DAG.getNode(AMDGPUISD::DENORM_MODE, SL, MVT::Other, | ||||||
8603 | Fma4.getValue(1), DisableDenormValue, | ||||||
8604 | Fma4.getValue(2)).getNode(); | ||||||
8605 | } else { | ||||||
8606 | const SDValue DisableDenormValue = | ||||||
8607 | DAG.getConstant(FP_DENORM_FLUSH_IN_FLUSH_OUT, SL, MVT::i32); | ||||||
8608 | |||||||
8609 | DisableDenorm = DAG.getMachineNode( | ||||||
8610 | AMDGPU::S_SETREG_B32, SL, MVT::Other, | ||||||
8611 | {DisableDenormValue, BitField, Fma4.getValue(1), Fma4.getValue(2)}); | ||||||
8612 | } | ||||||
8613 | |||||||
8614 | SDValue OutputChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other, | ||||||
8615 | SDValue(DisableDenorm, 0), DAG.getRoot()); | ||||||
8616 | DAG.setRoot(OutputChain); | ||||||
8617 | } | ||||||
8618 | |||||||
8619 | SDValue Scale = NumeratorScaled.getValue(1); | ||||||
8620 | SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f32, | ||||||
8621 | {Fma4, Fma1, Fma3, Scale}, Flags); | ||||||
8622 | |||||||
8623 | return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f32, Fmas, RHS, LHS, Flags); | ||||||
8624 | } | ||||||
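// A minimal scalar sketch of the refinement sequence above, assuming a fused
// __builtin_fmaf (Clang/GCC builtin) and ignoring the DIV_SCALE / DIV_FMAS /
// DIV_FIXUP scaling and special-case handling; this is illustrative only,
// not the actual lowering, and the helper name is hypothetical:
static float approxFDiv32Sketch(float LHS, float RHS) {
  float Rcp  = 1.0f / RHS;                       // models AMDGPUISD::RCP
  float Fma0 = __builtin_fmaf(-RHS, Rcp, 1.0f);  // error of the estimate
  float Fma1 = __builtin_fmaf(Fma0, Rcp, Rcp);   // refined reciprocal
  float Mul  = LHS * Fma1;                       // initial quotient
  float Fma2 = __builtin_fmaf(-RHS, Mul, LHS);   // quotient residual
  float Fma3 = __builtin_fmaf(Fma2, Fma1, Mul);  // refined quotient
  float Fma4 = __builtin_fmaf(-RHS, Fma3, LHS);  // final residual
  return __builtin_fmaf(Fma4, Fma1, Fma3);       // what DIV_FMAS computes
}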
8625 | |||||||
8626 | SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const { | ||||||
8627 | if (SDValue FastLowered = lowerFastUnsafeFDIV64(Op, DAG)) | ||||||
8628 | return FastLowered; | ||||||
8629 | |||||||
8630 | SDLoc SL(Op); | ||||||
8631 | SDValue X = Op.getOperand(0); | ||||||
8632 | SDValue Y = Op.getOperand(1); | ||||||
8633 | |||||||
8634 | const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64); | ||||||
8635 | |||||||
8636 | SDVTList ScaleVT = DAG.getVTList(MVT::f64, MVT::i1); | ||||||
8637 | |||||||
8638 | SDValue DivScale0 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, Y, Y, X); | ||||||
8639 | |||||||
8640 | SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f64, DivScale0); | ||||||
8641 | |||||||
8642 | SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f64, DivScale0); | ||||||
8643 | |||||||
8644 | SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Rcp, One); | ||||||
8645 | |||||||
8646 | SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f64, Rcp, Fma0, Rcp); | ||||||
8647 | |||||||
8648 | SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Fma1, One); | ||||||
8649 | |||||||
8650 | SDValue DivScale1 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, X, Y, X); | ||||||
8651 | |||||||
8652 | SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f64, Fma1, Fma2, Fma1); | ||||||
8653 | SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, DivScale1, Fma3); | ||||||
8654 | |||||||
8655 | SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f64, | ||||||
8656 | NegDivScale0, Mul, DivScale1); | ||||||
8657 | |||||||
8658 | SDValue Scale; | ||||||
8659 | |||||||
8660 | if (!Subtarget->hasUsableDivScaleConditionOutput()) { | ||||||
8661 | // Work around a hardware bug on SI where the condition output from div_scale | ||||||
8662 | // is not usable. | ||||||
8663 | |||||||
8664 | const SDValue Hi = DAG.getConstant(1, SL, MVT::i32); | ||||||
8665 | |||||||
8666 | // Figure out which scale to use for div_fmas. | ||||||
8667 | SDValue NumBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X); | ||||||
8668 | SDValue DenBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Y); | ||||||
8669 | SDValue Scale0BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale0); | ||||||
8670 | SDValue Scale1BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale1); | ||||||
8671 | |||||||
8672 | SDValue NumHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, NumBC, Hi); | ||||||
8673 | SDValue DenHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, DenBC, Hi); | ||||||
8674 | |||||||
8675 | SDValue Scale0Hi | ||||||
8676 | = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale0BC, Hi); | ||||||
8677 | SDValue Scale1Hi | ||||||
8678 | = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale1BC, Hi); | ||||||
8679 | |||||||
8680 | SDValue CmpDen = DAG.getSetCC(SL, MVT::i1, DenHi, Scale0Hi, ISD::SETEQ); | ||||||
8681 | SDValue CmpNum = DAG.getSetCC(SL, MVT::i1, NumHi, Scale1Hi, ISD::SETEQ); | ||||||
8682 | Scale = DAG.getNode(ISD::XOR, SL, MVT::i1, CmpNum, CmpDen); | ||||||
8683 | } else { | ||||||
8684 | Scale = DivScale1.getValue(1); | ||||||
8685 | } | ||||||
8686 | |||||||
8687 | SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f64, | ||||||
8688 | Fma4, Fma3, Mul, Scale); | ||||||
8689 | |||||||
8690 | return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f64, Fmas, Y, X); | ||||||
8691 | } | ||||||
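// A scalar model of the SI scale-bit workaround above, assuming the
// convention that DIV_SCALE only changes the high word of the operand it
// actually scaled; the helper and its names are illustrative, not part of
// the lowering:
static bool chooseDivFmasScaleSketch(unsigned long long NumBits,
                                     unsigned long long DenBits,
                                     unsigned long long Scale0Bits,
                                     unsigned long long Scale1Bits) {
  bool CmpDen = (unsigned)(DenBits >> 32) == (unsigned)(Scale0Bits >> 32);
  bool CmpNum = (unsigned)(NumBits >> 32) == (unsigned)(Scale1Bits >> 32);
  return CmpNum != CmpDen; // the XOR computed in the DAG above
}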
8692 | |||||||
8693 | SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const { | ||||||
8694 | EVT VT = Op.getValueType(); | ||||||
8695 | |||||||
8696 | if (VT == MVT::f32) | ||||||
8697 | return LowerFDIV32(Op, DAG); | ||||||
8698 | |||||||
8699 | if (VT == MVT::f64) | ||||||
8700 | return LowerFDIV64(Op, DAG); | ||||||
8701 | |||||||
8702 | if (VT == MVT::f16) | ||||||
8703 | return LowerFDIV16(Op, DAG); | ||||||
8704 | |||||||
8705 | llvm_unreachable("Unexpected type for fdiv")::llvm::llvm_unreachable_internal("Unexpected type for fdiv", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AMDGPU/SIISelLowering.cpp" , 8705); | ||||||
8706 | } | ||||||
8707 | |||||||
8708 | SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { | ||||||
8709 | SDLoc DL(Op); | ||||||
8710 | StoreSDNode *Store = cast<StoreSDNode>(Op); | ||||||
8711 | EVT VT = Store->getMemoryVT(); | ||||||
8712 | |||||||
8713 | if (VT == MVT::i1) { | ||||||
8714 | return DAG.getTruncStore(Store->getChain(), DL, | ||||||
8715 | DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32), | ||||||
8716 | Store->getBasePtr(), MVT::i1, Store->getMemOperand()); | ||||||
8717 | } | ||||||
8718 | |||||||
8719 | assert(VT.isVector() && | ||||||
8720 | Store->getValue().getValueType().getScalarType() == MVT::i32); | ||||||
8721 | |||||||
8722 | unsigned AS = Store->getAddressSpace(); | ||||||
8723 | if (Subtarget->hasLDSMisalignedBug() && | ||||||
8724 | AS == AMDGPUAS::FLAT_ADDRESS && | ||||||
8725 | Store->getAlignment() < VT.getStoreSize() && VT.getSizeInBits() > 32) { | ||||||
8726 | return SplitVectorStore(Op, DAG); | ||||||
8727 | } | ||||||
8728 | |||||||
8729 | MachineFunction &MF = DAG.getMachineFunction(); | ||||||
8730 | SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); | ||||||
8731 | // If there is a possibility that flat instructions may access scratch | ||||||
8732 | // memory, then we need to use the same legalization rules we use for private. | ||||||
8733 | if (AS == AMDGPUAS::FLAT_ADDRESS && | ||||||
8734 | !Subtarget->hasMultiDwordFlatScratchAddressing()) | ||||||
8735 | AS = MFI->hasFlatScratchInit() ? | ||||||
8736 | AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS; | ||||||
8737 | |||||||
8738 | unsigned NumElements = VT.getVectorNumElements(); | ||||||
8739 | if (AS == AMDGPUAS::GLOBAL_ADDRESS || | ||||||
8740 | AS == AMDGPUAS::FLAT_ADDRESS) { | ||||||
8741 | if (NumElements > 4) | ||||||
8742 | return SplitVectorStore(Op, DAG); | ||||||
8743 | // v3 stores not supported on SI. | ||||||
8744 | if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores()) | ||||||
8745 | return SplitVectorStore(Op, DAG); | ||||||
8746 | |||||||
8747 | if (!allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(), | ||||||
8748 | VT, *Store->getMemOperand())) | ||||||
8749 | return expandUnalignedStore(Store, DAG); | ||||||
8750 | |||||||
8751 | return SDValue(); | ||||||
8752 | } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) { | ||||||
8753 | switch (Subtarget->getMaxPrivateElementSize()) { | ||||||
8754 | case 4: | ||||||
8755 | return scalarizeVectorStore(Store, DAG); | ||||||
8756 | case 8: | ||||||
8757 | if (NumElements > 2) | ||||||
8758 | return SplitVectorStore(Op, DAG); | ||||||
8759 | return SDValue(); | ||||||
8760 | case 16: | ||||||
8761 | if (NumElements > 4 || | ||||||
8762 | (NumElements == 3 && !Subtarget->enableFlatScratch())) | ||||||
8763 | return SplitVectorStore(Op, DAG); | ||||||
8764 | return SDValue(); | ||||||
8765 | default: | ||||||
8766 | llvm_unreachable("unsupported private_element_size")::llvm::llvm_unreachable_internal("unsupported private_element_size" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AMDGPU/SIISelLowering.cpp" , 8766); | ||||||
8767 | } | ||||||
8768 | } else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) { | ||||||
8769 | // Use ds_write_b128 or ds_write_b96 when possible. | ||||||
8770 | if (Subtarget->hasDS96AndDS128() && | ||||||
8771 | ((Subtarget->useDS128() && VT.getStoreSize() == 16) || | ||||||
8772 | (VT.getStoreSize() == 12)) && | ||||||
8773 | allowsMisalignedMemoryAccessesImpl(VT.getSizeInBits(), AS, | ||||||
8774 | Store->getAlign())) | ||||||
8775 | return SDValue(); | ||||||
8776 | |||||||
8777 | if (NumElements > 2) | ||||||
8778 | return SplitVectorStore(Op, DAG); | ||||||
8779 | |||||||
8780 | // SI has a hardware bug in the LDS / GDS bounds checking: if the base | ||||||
8781 | // address is negative, then the instruction is incorrectly treated as | ||||||
8782 | // out-of-bounds even if base + offset is in bounds. Split vectorized | ||||||
8783 | // stores here to avoid emitting ds_write2_b32. We may re-combine the | ||||||
8784 | // store later in the SILoadStoreOptimizer. | ||||||
8785 | if (!Subtarget->hasUsableDSOffset() && | ||||||
8786 | NumElements == 2 && VT.getStoreSize() == 8 && | ||||||
8787 | Store->getAlignment() < 8) { | ||||||
8788 | return SplitVectorStore(Op, DAG); | ||||||
8789 | } | ||||||
8790 | |||||||
8791 | if (!allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(), | ||||||
8792 | VT, *Store->getMemOperand())) { | ||||||
8793 | if (VT.isVector()) | ||||||
8794 | return SplitVectorStore(Op, DAG); | ||||||
8795 | return expandUnalignedStore(Store, DAG); | ||||||
8796 | } | ||||||
8797 | |||||||
8798 | return SDValue(); | ||||||
8799 | } else { | ||||||
8800 | llvm_unreachable("unhandled address space")::llvm::llvm_unreachable_internal("unhandled address space", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AMDGPU/SIISelLowering.cpp" , 8800); | ||||||
8801 | } | ||||||
8802 | } | ||||||
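// For example, a <4 x i32> store to private memory on a subtarget with
// max-private-element-size=8 takes the case-8 path above and is split into
// two <2 x i32> stores, while the same store to LDS on a target with usable
// ds_write_b128 and sufficient alignment is left intact.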
8803 | |||||||
8804 | SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const { | ||||||
8805 | SDLoc DL(Op); | ||||||
8806 | EVT VT = Op.getValueType(); | ||||||
8807 | SDValue Arg = Op.getOperand(0); | ||||||
8808 | SDValue TrigVal; | ||||||
8809 | |||||||
8810 | // Propagate fast-math flags so that the multiply we introduce can be folded | ||||||
8811 | // if Arg is already the result of a multiply by constant. | ||||||
8812 | auto Flags = Op->getFlags(); | ||||||
8813 | |||||||
8814 | SDValue OneOver2Pi = DAG.getConstantFP(0.5 * numbers::inv_pi, DL, VT); | ||||||
8815 | |||||||
8816 | if (Subtarget->hasTrigReducedRange()) { | ||||||
8817 | SDValue MulVal = DAG.getNode(ISD::FMUL, DL, VT, Arg, OneOver2Pi, Flags); | ||||||
8818 | TrigVal = DAG.getNode(AMDGPUISD::FRACT, DL, VT, MulVal, Flags); | ||||||
8819 | } else { | ||||||
8820 | TrigVal = DAG.getNode(ISD::FMUL, DL, VT, Arg, OneOver2Pi, Flags); | ||||||
8821 | } | ||||||
8822 | |||||||
8823 | switch (Op.getOpcode()) { | ||||||
8824 | case ISD::FCOS: | ||||||
8825 | return DAG.getNode(AMDGPUISD::COS_HW, SDLoc(Op), VT, TrigVal, Flags); | ||||||
8826 | case ISD::FSIN: | ||||||
8827 | return DAG.getNode(AMDGPUISD::SIN_HW, SDLoc(Op), VT, TrigVal, Flags); | ||||||
8828 | default: | ||||||
8829 | llvm_unreachable("Wrong trig opcode")::llvm::llvm_unreachable_internal("Wrong trig opcode", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AMDGPU/SIISelLowering.cpp" , 8829); | ||||||
8830 | } | ||||||
8831 | } | ||||||
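// Scalar sketch of the range reduction above: the hardware trig units take
// the angle premultiplied by 1/(2*pi), and subtargets with a reduced valid
// range additionally keep only the fractional part. Illustrative only;
// SIN_HW is modeled here with __builtin_sinf on the rescaled value:
static float sinViaHWSketch(float Arg, bool HasTrigReducedRange) {
  const float OneOver2Pi = 0.15915494309189535f; // 0.5 * inv_pi
  float TrigVal = Arg * OneOver2Pi;
  if (HasTrigReducedRange)
    TrigVal -= __builtin_floorf(TrigVal);        // AMDGPUISD::FRACT
  return __builtin_sinf(TrigVal * 6.2831853071795865f); // ~ SIN_HW
}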
8832 | |||||||
8833 | SDValue SITargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const { | ||||||
8834 | AtomicSDNode *AtomicNode = cast<AtomicSDNode>(Op); | ||||||
8835 | assert(AtomicNode->isCompareAndSwap()); | ||||||
8836 | unsigned AS = AtomicNode->getAddressSpace(); | ||||||
8837 | |||||||
8838 | // No custom lowering required for local address space | ||||||
8839 | if (!AMDGPU::isFlatGlobalAddrSpace(AS)) | ||||||
8840 | return Op; | ||||||
8841 | |||||||
8842 | // Non-local address spaces require custom lowering for atomic compare | ||||||
8843 | // and swap; the cmp and swap values are packed into a v2i32 (or v2i64 for _X2) | ||||||
8844 | SDLoc DL(Op); | ||||||
8845 | SDValue ChainIn = Op.getOperand(0); | ||||||
8846 | SDValue Addr = Op.getOperand(1); | ||||||
8847 | SDValue Old = Op.getOperand(2); | ||||||
8848 | SDValue New = Op.getOperand(3); | ||||||
8849 | EVT VT = Op.getValueType(); | ||||||
8850 | MVT SimpleVT = VT.getSimpleVT(); | ||||||
8851 | MVT VecType = MVT::getVectorVT(SimpleVT, 2); | ||||||
8852 | |||||||
8853 | SDValue NewOld = DAG.getBuildVector(VecType, DL, {New, Old}); | ||||||
8854 | SDValue Ops[] = { ChainIn, Addr, NewOld }; | ||||||
8855 | |||||||
8856 | return DAG.getMemIntrinsicNode(AMDGPUISD::ATOMIC_CMP_SWAP, DL, Op->getVTList(), | ||||||
8857 | Ops, VT, AtomicNode->getMemOperand()); | ||||||
8858 | } | ||||||
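// e.g. a global i32 cmpxchg becomes an ATOMIC_CMP_SWAP memory intrinsic
// whose data operand is the v2i32 {New, Old} vector built above, i.e. the
// swap value first and the compare value second.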
8859 | |||||||
8860 | //===----------------------------------------------------------------------===// | ||||||
8861 | // Custom DAG optimizations | ||||||
8862 | //===----------------------------------------------------------------------===// | ||||||
8863 | |||||||
8864 | SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N, | ||||||
8865 | DAGCombinerInfo &DCI) const { | ||||||
8866 | EVT VT = N->getValueType(0); | ||||||
8867 | EVT ScalarVT = VT.getScalarType(); | ||||||
8868 | if (ScalarVT != MVT::f32 && ScalarVT != MVT::f16) | ||||||
8869 | return SDValue(); | ||||||
8870 | |||||||
8871 | SelectionDAG &DAG = DCI.DAG; | ||||||
8872 | SDLoc DL(N); | ||||||
8873 | |||||||
8874 | SDValue Src = N->getOperand(0); | ||||||
8875 | EVT SrcVT = Src.getValueType(); | ||||||
8876 | |||||||
8877 | // TODO: We could try to match extracting the higher bytes, which would be | ||||||
8878 | // easier if i8 vectors weren't promoted to i32 vectors, particularly after | ||||||
8879 | // types are legalized. v4i8 -> v4f32 is probably the only case to worry | ||||||
8880 | // about in practice. | ||||||
8881 | if (DCI.isAfterLegalizeDAG() && SrcVT == MVT::i32) { | ||||||
8882 | if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) { | ||||||
8883 | SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, MVT::f32, Src); | ||||||
8884 | DCI.AddToWorklist(Cvt.getNode()); | ||||||
8885 | |||||||
8886 | // For the f16 case, fold to a cast to f32 and then cast back to f16. | ||||||
8887 | if (ScalarVT != MVT::f32) { | ||||||
8888 | Cvt = DAG.getNode(ISD::FP_ROUND, DL, VT, Cvt, | ||||||
8889 | DAG.getTargetConstant(0, DL, MVT::i32)); | ||||||
8890 | } | ||||||
8891 | return Cvt; | ||||||
8892 | } | ||||||
8893 | } | ||||||
8894 | |||||||
8895 | return SDValue(); | ||||||
8896 | } | ||||||
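// e.g. (f32 (uint_to_fp (and i32:x, 255))) is combined here to
// (CVT_F32_UBYTE0 x), since the high 24 bits of the source are known to be
// zero.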
8897 | |||||||
8898 | // (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2) | ||||||
8899 | |||||||
8900 | // This is a variant of | ||||||
8901 | // (mul (add x, c1), c2) -> add (mul x, c2), (mul c1, c2), | ||||||
8902 | // | ||||||
8903 | // The normal DAG combiner will do this, but only if the add has one use since | ||||||
8904 | // that would increase the number of instructions. | ||||||
8905 | // | ||||||
8906 | // This prevents us from seeing a constant offset that can be folded into a | ||||||
8907 | // memory instruction's addressing mode. If we know the resulting add offset of | ||||||
8908 | // a pointer can be folded into an addressing offset, we can replace the pointer | ||||||
8909 | // operand with the add of the new constant offset. This eliminates one of the uses, | ||||||
8910 | // and may allow the remaining use to also be simplified. | ||||||
8911 | // | ||||||
8912 | SDValue SITargetLowering::performSHLPtrCombine(SDNode *N, | ||||||
8913 | unsigned AddrSpace, | ||||||
8914 | EVT MemVT, | ||||||
8915 | DAGCombinerInfo &DCI) const { | ||||||
8916 | SDValue N0 = N->getOperand(0); | ||||||
8917 | SDValue N1 = N->getOperand(1); | ||||||
8918 | |||||||
8919 | // We only do this to handle cases where it's profitable when there are | ||||||
8920 | // multiple uses of the add, so defer to the standard combine. | ||||||
8921 | if ((N0.getOpcode() != ISD::ADD && N0.getOpcode() != ISD::OR) || | ||||||
8922 | N0->hasOneUse()) | ||||||
8923 | return SDValue(); | ||||||
8924 | |||||||
8925 | const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1); | ||||||
8926 | if (!CN1) | ||||||
8927 | return SDValue(); | ||||||
8928 | |||||||
8929 | const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1)); | ||||||
8930 | if (!CAdd) | ||||||
8931 | return SDValue(); | ||||||
8932 | |||||||
8933 | // If the resulting offset is too large, we can't fold it into the addressing | ||||||
8934 | // mode offset. | ||||||
8935 | APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue(); | ||||||
8936 | Type *Ty = MemVT.getTypeForEVT(*DCI.DAG.getContext()); | ||||||
8937 | |||||||
8938 | AddrMode AM; | ||||||
8939 | AM.HasBaseReg = true; | ||||||
8940 | AM.BaseOffs = Offset.getSExtValue(); | ||||||
8941 | if (!isLegalAddressingMode(DCI.DAG.getDataLayout(), AM, Ty, AddrSpace)) | ||||||
8942 | return SDValue(); | ||||||
8943 | |||||||
8944 | SelectionDAG &DAG = DCI.DAG; | ||||||
8945 | SDLoc SL(N); | ||||||
8946 | EVT VT = N->getValueType(0); | ||||||
8947 | |||||||
8948 | SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1); | ||||||
8949 | SDValue COffset = DAG.getConstant(Offset, SL, VT); | ||||||
8950 | |||||||
8951 | SDNodeFlags Flags; | ||||||
8952 | Flags.setNoUnsignedWrap(N->getFlags().hasNoUnsignedWrap() && | ||||||
8953 | (N0.getOpcode() == ISD::OR || | ||||||
8954 | N0->getFlags().hasNoUnsignedWrap())); | ||||||
8955 | |||||||
8956 | return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset, Flags); | ||||||
8957 | } | ||||||
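// Worked example of the rewrite above, for a pointer computed as
// (x + 16) << 2: distributing the shift gives (x << 2) + 64, and the
// constant 64 can then be folded into the addressing-mode offset. This
// hypothetical helper just demonstrates the arithmetic identity:
static unsigned shlAddFoldSketch(unsigned X) {
  unsigned Before = (X + 16u) << 2;    // (shl (add x, 16), 2)
  unsigned After  = (X << 2) + 64u;    // add (shl x, 2), (shl 16, 2)
  return Before == After ? After : 0u; // equal for all X modulo 2^32
}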
8958 | |||||||
8959 | /// MemSDNode::getBasePtr() does not work for intrinsics, whose pointer operand | ||||||
8960 | /// is offset by the chain and intrinsic ID. Theoretically we would also need to | ||||||
8961 | /// check the specific intrinsic, but they all place the pointer operand first. | ||||||
8962 | static unsigned getBasePtrIndex(const MemSDNode *N) { | ||||||
8963 | switch (N->getOpcode()) { | ||||||
8964 | case ISD::STORE: | ||||||
8965 | case ISD::INTRINSIC_W_CHAIN: | ||||||
8966 | case ISD::INTRINSIC_VOID: | ||||||
8967 | return 2; | ||||||
8968 | default: | ||||||
8969 | return 1; | ||||||
8970 | } | ||||||
8971 | } | ||||||
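// e.g. a STORE's operands are {chain, value, pointer, offset}, so the
// pointer is operand 2; a LOAD's operands are {chain, pointer, offset}, so
// the default of 1 applies.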
8972 | |||||||
8973 | SDValue SITargetLowering::performMemSDNodeCombine(MemSDNode *N, | ||||||
8974 | DAGCombinerInfo &DCI) const { | ||||||
8975 | SelectionDAG &DAG = DCI.DAG; | ||||||
8976 | SDLoc SL(N); | ||||||
8977 | |||||||
8978 | unsigned PtrIdx = getBasePtrIndex(N); | ||||||
8979 | SDValue Ptr = N->getOperand(PtrIdx); | ||||||
8980 | |||||||
8981 | // TODO: We could also do this for multiplies. | ||||||
8982 | if (Ptr.getOpcode() == ISD::SHL) { | ||||||
8983 | SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(), N->getAddressSpace(), | ||||||
8984 | N->getMemoryVT(), DCI); | ||||||
8985 | if (NewPtr) { | ||||||
8986 | SmallVector<SDValue, 8> NewOps(N->op_begin(), N->op_end()); | ||||||
8987 | |||||||
8988 | NewOps[PtrIdx] = NewPtr; | ||||||
8989 | return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0); | ||||||
8990 | } | ||||||
8991 | } | ||||||
8992 | |||||||
8993 | return SDValue(); | ||||||
8994 | } | ||||||
8995 | |||||||
8996 | static bool bitOpWithConstantIsReducible(unsigned Opc, uint32_t Val) { | ||||||
8997 | return (Opc == ISD::AND && (Val == 0 || Val == 0xffffffff)) || | ||||||
8998 | (Opc == ISD::OR && (Val == 0xffffffff || Val == 0)) || | ||||||
8999 | (Opc == ISD::XOR && Val == 0); | ||||||
9000 | } | ||||||
9001 | |||||||
9002 | // Break up a 64-bit bitwise operation on a constant into two 32-bit and/or/xor | ||||||
9003 | // operations. This will typically happen anyway for a VALU 64-bit and. This | ||||||
9004 | // exposes other 32-bit integer combine opportunities since most 64-bit | ||||||
9005 | // operations are decomposed this way. TODO: We won't want this for SALU, | ||||||
9006 | // especially if the constant is an inline immediate. | ||||||
9007 | SDValue SITargetLowering::splitBinaryBitConstantOp( | ||||||
9008 | DAGCombinerInfo &DCI, | ||||||
9009 | const SDLoc &SL, | ||||||
9010 | unsigned Opc, SDValue LHS, | ||||||
9011 | const ConstantSDNode *CRHS) const { | ||||||
9012 | uint64_t Val = CRHS->getZExtValue(); | ||||||
9013 | uint32_t ValLo = Lo_32(Val); | ||||||
9014 | uint32_t ValHi = Hi_32(Val); | ||||||
9015 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); | ||||||
9016 | |||||||
9017 | if ((bitOpWithConstantIsReducible(Opc, ValLo) || | ||||||
9018 | bitOpWithConstantIsReducible(Opc, ValHi)) || | ||||||
9019 | (CRHS->hasOneUse() && !TII->isInlineConstant(CRHS->getAPIntValue()))) { | ||||||
9020 | // If we need to materialize a 64-bit immediate, it will be split up later | ||||||
9021 | // anyway. Avoid creating the harder to understand 64-bit immediate | ||||||
9022 | // materialization. | ||||||
9023 | return splitBinaryBitConstantOpImpl(DCI, SL, Opc, LHS, ValLo, ValHi); | ||||||
9024 | } | ||||||
9025 | |||||||
9026 | return SDValue(); | ||||||
9027 | } | ||||||
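// e.g. and i64:x, 0x00000000ffffffff splits into the pair
// {and lo_32(x), 0xffffffff; and hi_32(x), 0x0}, where both halves are
// reducible: the low half is an identity and the high half folds to zero.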
9028 | |||||||
9029 | // Returns true if the argument is a boolean value which is not serialized into | ||||||
9030 | // memory or an argument, and does not require v_cndmask_b32 to be deserialized. | ||||||
9031 | static bool isBoolSGPR(SDValue V) { | ||||||
9032 | if (V.getValueType() != MVT::i1) | ||||||
9033 | return false; | ||||||
9034 | switch (V.getOpcode()) { | ||||||
9035 | default: | ||||||
9036 | break; | ||||||
9037 | case ISD::SETCC: | ||||||
9038 | case AMDGPUISD::FP_CLASS: | ||||||
9039 | return true; | ||||||
9040 | case ISD::AND: | ||||||
9041 | case ISD::OR: | ||||||
9042 | case ISD::XOR: | ||||||
9043 | return isBoolSGPR(V.getOperand(0)) && isBoolSGPR(V.getOperand(1)); | ||||||
9044 | } | ||||||
9045 | return false; | ||||||
9046 | } | ||||||
9047 | |||||||
9048 | // If a constant has all zeroes or all ones within each byte return it. | ||||||
9049 | // Otherwise return 0. | ||||||
9050 | static uint32_t getConstantPermuteMask(uint32_t C) { | ||||||
9051 | // 0xff for any zero byte in the mask | ||||||
9052 | uint32_t ZeroByteMask = 0; | ||||||
9053 | if (!(C & 0x000000ff)) ZeroByteMask |= 0x000000ff; | ||||||
9054 | if (!(C & 0x0000ff00)) ZeroByteMask |= 0x0000ff00; | ||||||
9055 | if (!(C & 0x00ff0000)) ZeroByteMask |= 0x00ff0000; | ||||||
9056 | if (!(C & 0xff000000)) ZeroByteMask |= 0xff000000; | ||||||
9057 | uint32_t NonZeroByteMask = ~ZeroByteMask; // 0xff for any non-zero byte | ||||||
9058 | if ((NonZeroByteMask & C) != NonZeroByteMask) | ||||||
9059 | return 0; // Partial bytes selected. | ||||||
9060 | return C; | ||||||
9061 | } | ||||||
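// e.g. getConstantPermuteMask(0x00ff00ff) == 0x00ff00ff, since every byte
// is all-zeros or all-ones, while getConstantPermuteMask(0x00f000ff) == 0
// because byte 2 (0xf0) is only partially selected.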
9062 | |||||||
9063 | // Check if a node selects whole bytes from its operand 0 starting at a byte | ||||||
9064 | // boundary while masking the rest. Returns the select mask as in v_perm_b32, | ||||||
9065 | // or ~0 if it did not succeed. | ||||||
9066 | // Note byte select encoding: | ||||||
9067 | // value 0-3 selects corresponding source byte; | ||||||
9068 | // value 0xc selects zero; | ||||||
9069 | // value 0xff selects 0xff. | ||||||
9070 | static uint32_t getPermuteMask(SelectionDAG &DAG, SDValue V) { | ||||||
9071 | assert(V.getValueSizeInBits() == 32); | ||||||
9072 | |||||||
9073 | if (V.getNumOperands() != 2) | ||||||
9074 | return ~0; | ||||||
9075 | |||||||
9076 | ConstantSDNode *N1 = dyn_cast<ConstantSDNode>(V.getOperand(1)); | ||||||
9077 | if (!N1) | ||||||
9078 | return ~0; | ||||||
9079 | |||||||
9080 | uint32_t C = N1->getZExtValue(); | ||||||
9081 | |||||||
9082 | switch (V.getOpcode()) { | ||||||
9083 | default: | ||||||
9084 | break; | ||||||
9085 | case ISD::AND: | ||||||
9086 | if (uint32_t ConstMask = getConstantPermuteMask(C)) { | ||||||
9087 | return (0x03020100 & ConstMask) | (0x0c0c0c0c & ~ConstMask); | ||||||
9088 | } | ||||||
9089 | break; | ||||||
9090 | |||||||
9091 | case ISD::OR: | ||||||
9092 | if (uint32_t ConstMask = getConstantPermuteMask(C)) { | ||||||
9093 | return (0x03020100 & ~ConstMask) | ConstMask; | ||||||
9094 | } | ||||||
9095 | break; | ||||||
9096 | |||||||
9097 | case ISD::SHL: | ||||||
9098 | if (C % 8) | ||||||
9099 | return ~0; | ||||||
9100 | |||||||
9101 | return uint32_t((0x030201000c0c0c0cull << C) >> 32); | ||||||
9102 | |||||||
9103 | case ISD::SRL: | ||||||
9104 | if (C % 8) | ||||||
9105 | return ~0; | ||||||
9106 | |||||||
9107 | return uint32_t(0x0c0c0c0c03020100ull >> C); | ||||||
9108 | } | ||||||
9109 | |||||||
9110 | return ~0; | ||||||
9111 | } | ||||||
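// Examples (byte selectors, 0x0c = pick zero):
//   getPermuteMask(and x, 0x0000ffff) == 0x0c0c0100 (keep bytes 1:0)
//   getPermuteMask(srl x, 16)         == 0x0c0c0302 (bytes 3:2 move down)
//   getPermuteMask(shl x, 16)         == 0x01000c0c (bytes 1:0 move up)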
9112 | |||||||
9113 | SDValue SITargetLowering::performAndCombine(SDNode *N, | ||||||
9114 | DAGCombinerInfo &DCI) const { | ||||||
9115 | if (DCI.isBeforeLegalize()) | ||||||
9116 | return SDValue(); | ||||||
9117 | |||||||
9118 | SelectionDAG &DAG = DCI.DAG; | ||||||
9119 | EVT VT = N->getValueType(0); | ||||||
9120 | SDValue LHS = N->getOperand(0); | ||||||
9121 | SDValue RHS = N->getOperand(1); | ||||||
9122 | |||||||
9123 | |||||||
9124 | const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS); | ||||||
9125 | if (VT == MVT::i64 && CRHS) { | ||||||
9126 | if (SDValue Split | ||||||
9127 | = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::AND, LHS, CRHS)) | ||||||
9128 | return Split; | ||||||
9129 | } | ||||||
9130 | |||||||
9131 | if (CRHS && VT == MVT::i32) { | ||||||
9132 | // and (srl x, c), mask => shl (bfe x, nb + c, mask >> nb), nb | ||||||
9133 | // where nb = number of trailing zeroes in mask. | ||||||
9134 | // It can be optimized using SDWA for GFX8+ in the SDWA peephole pass, given | ||||||
9135 | // that we are selecting 8 or 16 bit fields starting at a byte boundary. | ||||||
9136 | uint64_t Mask = CRHS->getZExtValue(); | ||||||
9137 | unsigned Bits = countPopulation(Mask); | ||||||
9138 | if (getSubtarget()->hasSDWA() && LHS->getOpcode() == ISD::SRL && | ||||||
9139 | (Bits == 8 || Bits == 16) && isShiftedMask_64(Mask) && !(Mask & 1)) { | ||||||
9140 | if (auto *CShift = dyn_cast<ConstantSDNode>(LHS->getOperand(1))) { | ||||||
9141 | unsigned Shift = CShift->getZExtValue(); | ||||||
9142 | unsigned NB = CRHS->getAPIntValue().countTrailingZeros(); | ||||||
9143 | unsigned Offset = NB + Shift; | ||||||
9144 | if ((Offset & (Bits - 1)) == 0) { // Starts at a byte or word boundary. | ||||||
9145 | SDLoc SL(N); | ||||||
9146 | SDValue BFE = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32, | ||||||
9147 | LHS->getOperand(0), | ||||||
9148 | DAG.getConstant(Offset, SL, MVT::i32), | ||||||
9149 | DAG.getConstant(Bits, SL, MVT::i32)); | ||||||
9150 | EVT NarrowVT = EVT::getIntegerVT(*DAG.getContext(), Bits); | ||||||
9151 | SDValue Ext = DAG.getNode(ISD::AssertZext, SL, VT, BFE, | ||||||
9152 | DAG.getValueType(NarrowVT)); | ||||||
9153 | SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(LHS), VT, Ext, | ||||||
9154 | DAG.getConstant(NB, SDLoc(CRHS), MVT::i32)); | ||||||
9155 | return Shl; | ||||||
9156 | } | ||||||
9157 | } | ||||||
9158 | } | ||||||
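// e.g. and (srl x, 8), 0xff00 becomes (bfe x, 16, 8) << 8: here nb = 8,
// so the extracted field starts at bit 16, is 8 bits wide, and lands back
// at a byte boundary.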
9159 | |||||||
9160 | // and (perm x, y, c1), c2 -> perm x, y, permute_mask(c1, c2) | ||||||
9161 | if (LHS.hasOneUse() && LHS.getOpcode() == AMDGPUISD::PERM && | ||||||
9162 | isa<ConstantSDNode>(LHS.getOperand(2))) { | ||||||
9163 | uint32_t Sel = getConstantPermuteMask(Mask); | ||||||
9164 | if (!Sel) | ||||||
9165 | return SDValue(); | ||||||
9166 | |||||||
9167 | // Select 0xc for all zero bytes | ||||||
9168 | Sel = (LHS.getConstantOperandVal(2) & Sel) | (~Sel & 0x0c0c0c0c); | ||||||
9169 | SDLoc DL(N); | ||||||
9170 | return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0), | ||||||
9171 | LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32)); | ||||||
9172 | } | ||||||
9173 | } | ||||||
9174 | |||||||
9175 | // (and (fcmp ord x, x), (fcmp une (fabs x), inf)) -> | ||||||
9176 | // fp_class x, ~(s_nan | q_nan | n_infinity | p_infinity) | ||||||
9177 | if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == ISD::SETCC) { | ||||||
9178 | ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get(); | ||||||
9179 | ISD::CondCode RCC = cast<CondCodeSDNode>(RHS.getOperand(2))->get(); | ||||||
9180 | |||||||
9181 | SDValue X = LHS.getOperand(0); | ||||||
9182 | SDValue Y = RHS.getOperand(0); | ||||||
9183 | if (Y.getOpcode() != ISD::FABS || Y.getOperand(0) != X) | ||||||
9184 | return SDValue(); | ||||||
9185 | |||||||
9186 | if (LCC == ISD::SETO) { | ||||||
9187 | if (X != LHS.getOperand(1)) | ||||||
9188 | return SDValue(); | ||||||
9189 | |||||||
9190 | if (RCC == ISD::SETUNE) { | ||||||
9191 | const ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(RHS.getOperand(1)); | ||||||
9192 | if (!C1 || !C1->isInfinity() || C1->isNegative()) | ||||||
9193 | return SDValue(); | ||||||
9194 | |||||||
9195 | const uint32_t Mask = SIInstrFlags::N_NORMAL | | ||||||
9196 | SIInstrFlags::N_SUBNORMAL | | ||||||
9197 | SIInstrFlags::N_ZERO | | ||||||
9198 | SIInstrFlags::P_ZERO | | ||||||
9199 | SIInstrFlags::P_SUBNORMAL | | ||||||
9200 | SIInstrFlags::P_NORMAL; | ||||||
9201 | |||||||
9202 | static_assert(((~(SIInstrFlags::S_NAN | | ||||||
9203 | SIInstrFlags::Q_NAN | | ||||||
9204 | SIInstrFlags::N_INFINITY | | ||||||
9205 | SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask, | ||||||
9206 | "mask not equal"); | ||||||
9207 | |||||||
9208 | SDLoc DL(N); | ||||||
9209 | return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, | ||||||
9210 | X, DAG.getConstant(Mask, DL, MVT::i32)); | ||||||
9211 | } | ||||||
9212 | } | ||||||
9213 | } | ||||||
9214 | |||||||
9215 | if (RHS.getOpcode() == ISD::SETCC && LHS.getOpcode() == AMDGPUISD::FP_CLASS) | ||||||
9216 | std::swap(LHS, RHS); | ||||||
9217 | |||||||
9218 | if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == AMDGPUISD::FP_CLASS && | ||||||
9219 | RHS.hasOneUse()) { | ||||||
9220 | ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get(); | ||||||
9221 | // and (fcmp seto), (fp_class x, mask) -> fp_class x, mask & ~(p_nan | n_nan) | ||||||
9222 | // and (fcmp setuo), (fp_class x, mask) -> fp_class x, mask & (p_nan | n_nan) | ||||||
9223 | const ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(RHS.getOperand(1)); | ||||||
9224 | if ((LCC == ISD::SETO || LCC == ISD::SETUO) && Mask && | ||||||
9225 | (RHS.getOperand(0) == LHS.getOperand(0) && | ||||||
9226 | LHS.getOperand(0) == LHS.getOperand(1))) { | ||||||
9227 | const unsigned OrdMask = SIInstrFlags::S_NAN | SIInstrFlags::Q_NAN; | ||||||
9228 | unsigned NewMask = LCC == ISD::SETO ? | ||||||
9229 | Mask->getZExtValue() & ~OrdMask : | ||||||
9230 | Mask->getZExtValue() & OrdMask; | ||||||
9231 | |||||||
9232 | SDLoc DL(N); | ||||||
9233 | return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, RHS.getOperand(0), | ||||||
9234 | DAG.getConstant(NewMask, DL, MVT::i32)); | ||||||
9235 | } | ||||||
9236 | } | ||||||
9237 | |||||||
9238 | if (VT == MVT::i32 && | ||||||
9239 | (RHS.getOpcode() == ISD::SIGN_EXTEND || LHS.getOpcode() == ISD::SIGN_EXTEND)) { | ||||||
9240 | // and x, (sext cc from i1) => select cc, x, 0 | ||||||
9241 | if (RHS.getOpcode() != ISD::SIGN_EXTEND) | ||||||
9242 | std::swap(LHS, RHS); | ||||||
9243 | if (isBoolSGPR(RHS.getOperand(0))) | ||||||
9244 | return DAG.getSelect(SDLoc(N), MVT::i32, RHS.getOperand(0), | ||||||
9245 | LHS, DAG.getConstant(0, SDLoc(N), MVT::i32)); | ||||||
9246 | } | ||||||
9247 | |||||||
9248 | // and (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2) | ||||||
9249 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); | ||||||
9250 | if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() && | ||||||
9251 | N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32_e64) != -1) { | ||||||
9252 | uint32_t LHSMask = getPermuteMask(DAG, LHS); | ||||||
9253 | uint32_t RHSMask = getPermuteMask(DAG, RHS); | ||||||
9254 | if (LHSMask != ~0u && RHSMask != ~0u) { | ||||||
9255 | // Canonicalize the expression in an attempt to have fewer unique masks | ||||||
9256 | // and therefore fewer registers used to hold the masks. | ||||||
9257 | if (LHSMask > RHSMask) { | ||||||
9258 | std::swap(LHSMask, RHSMask); | ||||||
9259 | std::swap(LHS, RHS); | ||||||
9260 | } | ||||||
9261 | |||||||
9262 | // Select 0xc for each lane used from a source operand. Zero has the 0xc mask | ||||||
9263 | // set, 0xff has 0xff in the mask; actual lanes are in the 0-3 range. | ||||||
9264 | uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c; | ||||||
9265 | uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c; | ||||||
9266 | |||||||
9267 | // Check if we need to combine values from two sources within a byte. | ||||||
9268 | if (!(LHSUsedLanes & RHSUsedLanes) && | ||||||
9269 | // If we select high and lower word keep it for SDWA. | ||||||
9270 | // TODO: teach SDWA to work with v_perm_b32 and remove the check. | ||||||
9271 | !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) { | ||||||
9272 | // Each byte in each mask is either a selector in the 0-3 range, or has | ||||||
9273 | // higher bits set in one of the masks: 0xff for 0xff, or 0x0c for zero. | ||||||
9274 | // If 0x0c is in either mask it must stay 0x0c; otherwise the mask which is | ||||||
9275 | // not 0xff wins. By anding both masks we get a correct result, except that | ||||||
9276 | // 0x0c must be corrected to give exactly 0x0c. | ||||||
9277 | uint32_t Mask = LHSMask & RHSMask; | ||||||
9278 | for (unsigned I = 0; I < 32; I += 8) { | ||||||
9279 | uint32_t ByteSel = 0xff << I; | ||||||
9280 | if ((LHSMask & ByteSel) == 0x0c || (RHSMask & ByteSel) == 0x0c) | ||||||
9281 | Mask &= (0x0c << I) & 0xffffffff; | ||||||
9282 | } | ||||||
9283 | |||||||
9284 | // Add 4 to each active LHS lane. It will not affect any existing 0xff | ||||||
9285 | // or 0x0c. | ||||||
9286 | uint32_t Sel = Mask | (LHSUsedLanes & 0x04040404); | ||||||
9287 | SDLoc DL(N); | ||||||
9288 | |||||||
9289 | return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, | ||||||
9290 | LHS.getOperand(0), RHS.getOperand(0), | ||||||
9291 | DAG.getConstant(Sel, DL, MVT::i32)); | ||||||
9292 | } | ||||||
9293 | } | ||||||
9294 | } | ||||||
9295 | |||||||
9296 | return SDValue(); | ||||||
9297 | } | ||||||
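// Worked example of the perm combine above:
//   and (or x, 0x000000ff), (or y, 0xffffff00)
// gives LHSMask = 0x030201ff and RHSMask = 0xffffff00. The used lanes are
// disjoint, so this folds to perm x, y, 0x07060500: selectors 4-7 read x
// and 0-3 read y, i.e. bytes 3..1 come from x and byte 0 from y.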
9298 | |||||||
9299 | SDValue SITargetLowering::performOrCombine(SDNode *N, | ||||||
9300 | DAGCombinerInfo &DCI) const { | ||||||
9301 | SelectionDAG &DAG = DCI.DAG; | ||||||
9302 | SDValue LHS = N->getOperand(0); | ||||||
9303 | SDValue RHS = N->getOperand(1); | ||||||
9304 | |||||||
9305 | EVT VT = N->getValueType(0); | ||||||
9306 | if (VT == MVT::i1) { | ||||||
9307 | // or (fp_class x, c1), (fp_class x, c2) -> fp_class x, (c1 | c2) | ||||||
9308 | if (LHS.getOpcode() == AMDGPUISD::FP_CLASS && | ||||||
9309 | RHS.getOpcode() == AMDGPUISD::FP_CLASS) { | ||||||
9310 | SDValue Src = LHS.getOperand(0); | ||||||
9311 | if (Src != RHS.getOperand(0)) | ||||||
9312 | return SDValue(); | ||||||
9313 | |||||||
9314 | const ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(LHS.getOperand(1)); | ||||||
9315 | const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS.getOperand(1)); | ||||||
9316 | if (!CLHS || !CRHS) | ||||||
9317 | return SDValue(); | ||||||
9318 | |||||||
9319 | // Only 10 bits are used. | ||||||
9320 | static const uint32_t MaxMask = 0x3ff; | ||||||
9321 | |||||||
9322 | uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask; | ||||||
9323 | SDLoc DL(N); | ||||||
9324 | return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, | ||||||
9325 | Src, DAG.getConstant(NewMask, DL, MVT::i32)); | ||||||
9326 | } | ||||||
9327 | |||||||
9328 | return SDValue(); | ||||||
9329 | } | ||||||
9330 | |||||||
9331 | // or (perm x, y, c1), c2 -> perm x, y, permute_mask(c1, c2) | ||||||
9332 | if (isa<ConstantSDNode>(RHS) && LHS.hasOneUse() && | ||||||
9333 | LHS.getOpcode() == AMDGPUISD::PERM && | ||||||
9334 | isa<ConstantSDNode>(LHS.getOperand(2))) { | ||||||
9335 | uint32_t Sel = getConstantPermuteMask(N->getConstantOperandVal(1)); | ||||||
9336 | if (!Sel) | ||||||
9337 | return SDValue(); | ||||||
9338 | |||||||
9339 | Sel |= LHS.getConstantOperandVal(2); | ||||||
9340 | SDLoc DL(N); | ||||||
9341 | return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0), | ||||||
9342 | LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32)); | ||||||
9343 | } | ||||||
9344 | |||||||
9345 | // or (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2) | ||||||
9346 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); | ||||||
9347 | if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() && | ||||||
9348 | N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32_e64) != -1) { | ||||||
9349 | uint32_t LHSMask = getPermuteMask(DAG, LHS); | ||||||
9350 | uint32_t RHSMask = getPermuteMask(DAG, RHS); | ||||||
9351 | if (LHSMask != ~0u && RHSMask != ~0u) { | ||||||
9352 | // Canonicalize the expression in an attempt to have fewer unique masks | ||||||
9353 | // and therefore fewer registers used to hold the masks. | ||||||
9354 | if (LHSMask > RHSMask) { | ||||||
9355 | std::swap(LHSMask, RHSMask); | ||||||
9356 | std::swap(LHS, RHS); | ||||||
9357 | } | ||||||
9358 | |||||||
9359 | // Select 0xc for each lane used from a source operand. Zero has the 0xc mask | ||||||
9360 | // set, 0xff has 0xff in the mask; actual lanes are in the 0-3 range. | ||||||
9361 | uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c; | ||||||
9362 | uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c; | ||||||
9363 | |||||||
9364 | // Check if we need to combine values from two sources within a byte. | ||||||
9365 | if (!(LHSUsedLanes & RHSUsedLanes) && | ||||||
9366 | // If we select high and lower word keep it for SDWA. | ||||||
9367 | // TODO: teach SDWA to work with v_perm_b32 and remove the check. | ||||||
9368 | !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) { | ||||||
9369 | // Kill zero bytes selected by other mask. Zero value is 0xc. | ||||||
9370 | LHSMask &= ~RHSUsedLanes; | ||||||
9371 | RHSMask &= ~LHSUsedLanes; | ||||||
9372 | // Add 4 to each active LHS lane | ||||||
9373 | LHSMask |= LHSUsedLanes & 0x04040404; | ||||||
9374 | // Combine masks | ||||||
9375 | uint32_t Sel = LHSMask | RHSMask; | ||||||
9376 | SDLoc DL(N); | ||||||
9377 | |||||||
9378 | return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, | ||||||
9379 | LHS.getOperand(0), RHS.getOperand(0), | ||||||
9380 | DAG.getConstant(Sel, DL, MVT::i32)); | ||||||
9381 | } | ||||||
9382 | } | ||||||
9383 | } | ||||||
9384 | |||||||
9385 | if (VT != MVT::i64 || DCI.isBeforeLegalizeOps()) | ||||||
9386 | return SDValue(); | ||||||
9387 | |||||||
9388 | // TODO: This could be a generic combine with a predicate for extracting the | ||||||
9389 | // high half of an integer being free. | ||||||
9390 | |||||||
9391 | // (or i64:x, (zero_extend i32:y)) -> | ||||||
9392 | // i64 (bitcast (v2i32 build_vector (or i32:y, lo_32(x)), hi_32(x))) | ||||||
9393 | if (LHS.getOpcode() == ISD::ZERO_EXTEND && | ||||||
9394 | RHS.getOpcode() != ISD::ZERO_EXTEND) | ||||||
9395 | std::swap(LHS, RHS); | ||||||
9396 | |||||||
9397 | if (RHS.getOpcode() == ISD::ZERO_EXTEND) { | ||||||
9398 | SDValue ExtSrc = RHS.getOperand(0); | ||||||
9399 | EVT SrcVT = ExtSrc.getValueType(); | ||||||
9400 | if (SrcVT == MVT::i32) { | ||||||
9401 | SDLoc SL(N); | ||||||
9402 | SDValue LowLHS, HiBits; | ||||||
9403 | std::tie(LowLHS, HiBits) = split64BitValue(LHS, DAG); | ||||||
9404 | SDValue LowOr = DAG.getNode(ISD::OR, SL, MVT::i32, LowLHS, ExtSrc); | ||||||
9405 | |||||||
9406 | DCI.AddToWorklist(LowOr.getNode()); | ||||||
9407 | DCI.AddToWorklist(HiBits.getNode()); | ||||||
9408 | |||||||
9409 | SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, | ||||||
9410 | LowOr, HiBits); | ||||||
9411 | return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec); | ||||||
9412 | } | ||||||
9413 | } | ||||||
9414 | |||||||
9415 | const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(N->getOperand(1)); | ||||||
9416 | if (CRHS) { | ||||||
9417 | if (SDValue Split | ||||||
9418 | = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::OR, LHS, CRHS)) | ||||||
9419 | return Split; | ||||||
9420 | } | ||||||
9421 | |||||||
9422 | return SDValue(); | ||||||
9423 | } | ||||||
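// Worked example of the perm combine above:
//   or (and x, 0xff000000), (srl y, 8)
// gives LHSMask = 0x030c0c0c and RHSMask = 0x0c030201. The used lanes are
// disjoint, so this folds to perm x, y, 0x07030201: byte 3 comes from x
// and bytes 2..0 come from y bytes 3..1.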
9424 | |||||||
9425 | SDValue SITargetLowering::performXorCombine(SDNode *N, | ||||||
9426 | DAGCombinerInfo &DCI) const { | ||||||
9427 | EVT VT = N->getValueType(0); | ||||||
9428 | if (VT != MVT::i64) | ||||||
9429 | return SDValue(); | ||||||
9430 | |||||||
9431 | SDValue LHS = N->getOperand(0); | ||||||
9432 | SDValue RHS = N->getOperand(1); | ||||||
9433 | |||||||
9434 | const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS); | ||||||
9435 | if (CRHS) { | ||||||
9436 | if (SDValue Split | ||||||
9437 | = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::XOR, LHS, CRHS)) | ||||||
9438 | return Split; | ||||||
9439 | } | ||||||
9440 | |||||||
9441 | return SDValue(); | ||||||
9442 | } | ||||||
9443 | |||||||
9444 | SDValue SITargetLowering::performZeroExtendCombine(SDNode *N, | ||||||
9445 | DAGCombinerInfo &DCI) const { | ||||||
9446 | if (!Subtarget->has16BitInsts() || | ||||||
9447 | DCI.getDAGCombineLevel() < AfterLegalizeDAG) | ||||||
9448 | return SDValue(); | ||||||
9449 | |||||||
9450 | EVT VT = N->getValueType(0); | ||||||
9451 | if (VT != MVT::i32) | ||||||
9452 | return SDValue(); | ||||||
9453 | |||||||
9454 | SDValue Src = N->getOperand(0); | ||||||
9455 | if (Src.getValueType() != MVT::i16) | ||||||
9456 | return SDValue(); | ||||||
9457 | |||||||
9458 | return SDValue(); | ||||||
9459 | } | ||||||
9460 | |||||||
9461 | SDValue SITargetLowering::performSignExtendInRegCombine(SDNode *N, | ||||||
9462 | DAGCombinerInfo &DCI) | ||||||
9463 | const { | ||||||
9464 | SDValue Src = N->getOperand(0); | ||||||
9465 | auto *VTSign = cast<VTSDNode>(N->getOperand(1)); | ||||||
9466 | |||||||
9467 | if (((Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_UBYTE && | ||||||
9468 | VTSign->getVT() == MVT::i8) || | ||||||
9469 | (Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_USHORT && | ||||||
9470 | VTSign->getVT() == MVT::i16)) && | ||||||
9471 | Src.hasOneUse()) { | ||||||
9472 | auto *M = cast<MemSDNode>(Src); | ||||||
9473 | SDValue Ops[] = { | ||||||
9474 | Src.getOperand(0), // Chain | ||||||
9475 | Src.getOperand(1), // rsrc | ||||||
9476 | Src.getOperand(2), // vindex | ||||||
9477 | Src.getOperand(3), // voffset | ||||||
9478 | Src.getOperand(4), // soffset | ||||||
9479 | Src.getOperand(5), // offset | ||||||
9480 | Src.getOperand(6), | ||||||
9481 | Src.getOperand(7) | ||||||
9482 | }; | ||||||
9483 | // replace with BUFFER_LOAD_BYTE/SHORT | ||||||
9484 | SDVTList ResList = DCI.DAG.getVTList(MVT::i32, | ||||||
9485 | Src.getOperand(0).getValueType()); | ||||||
9486 | unsigned Opc = (Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_UBYTE) ? | ||||||
9487 | AMDGPUISD::BUFFER_LOAD_BYTE : AMDGPUISD::BUFFER_LOAD_SHORT; | ||||||
9488 | SDValue BufferLoadSignExt = DCI.DAG.getMemIntrinsicNode(Opc, SDLoc(N), | ||||||
9489 | ResList, | ||||||
9490 | Ops, M->getMemoryVT(), | ||||||
9491 | M->getMemOperand()); | ||||||
9492 | return DCI.DAG.getMergeValues({BufferLoadSignExt, | ||||||
9493 | BufferLoadSignExt.getValue(1)}, SDLoc(N)); | ||||||
9494 | } | ||||||
9495 | return SDValue(); | ||||||
9496 | } | ||||||
9497 | |||||||
9498 | SDValue SITargetLowering::performClassCombine(SDNode *N, | ||||||
9499 | DAGCombinerInfo &DCI) const { | ||||||
9500 | SelectionDAG &DAG = DCI.DAG; | ||||||
9501 | SDValue Mask = N->getOperand(1); | ||||||
9502 | |||||||
9503 | // fp_class x, 0 -> false | ||||||
9504 | if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) { | ||||||
9505 | if (CMask->isNullValue()) | ||||||
9506 | return DAG.getConstant(0, SDLoc(N), MVT::i1); | ||||||
9507 | } | ||||||
9508 | |||||||
9509 | if (N->getOperand(0).isUndef()) | ||||||
9510 | return DAG.getUNDEF(MVT::i1); | ||||||
9511 | |||||||
9512 | return SDValue(); | ||||||
9513 | } | ||||||
9514 | |||||||
9515 | SDValue SITargetLowering::performRcpCombine(SDNode *N, | ||||||
9516 | DAGCombinerInfo &DCI) const { | ||||||
9517 | EVT VT = N->getValueType(0); | ||||||
9518 | SDValue N0 = N->getOperand(0); | ||||||
9519 | |||||||
9520 | if (N0.isUndef()) | ||||||
9521 | return N0; | ||||||
9522 | |||||||
9523 | if (VT == MVT::f32 && (N0.getOpcode() == ISD::UINT_TO_FP || | ||||||
9524 | N0.getOpcode() == ISD::SINT_TO_FP)) { | ||||||
9525 | return DCI.DAG.getNode(AMDGPUISD::RCP_IFLAG, SDLoc(N), VT, N0, | ||||||
9526 | N->getFlags()); | ||||||
9527 | } | ||||||
9528 | |||||||
9529 | if ((VT == MVT::f32 || VT == MVT::f16) && N0.getOpcode() == ISD::FSQRT) { | ||||||
9530 | return DCI.DAG.getNode(AMDGPUISD::RSQ, SDLoc(N), VT, | ||||||
9531 | N0.getOperand(0), N->getFlags()); | ||||||
9532 | } | ||||||
9533 | |||||||
9534 | return AMDGPUTargetLowering::performRcpCombine(N, DCI); | ||||||
9535 | } | ||||||
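// e.g. a fast-math expansion of 1.0 / sqrt(x) reaches here as
// (rcp (fsqrt x)) and is folded into a single (rsq x) by the combine above.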
9536 | |||||||
9537 | bool SITargetLowering::isCanonicalized(SelectionDAG &DAG, SDValue Op, | ||||||
9538 | unsigned MaxDepth) const { | ||||||
9539 | unsigned Opcode = Op.getOpcode(); | ||||||
9540 | if (Opcode == ISD::FCANONICALIZE) | ||||||
9541 | return true; | ||||||
9542 | |||||||
9543 | if (auto *CFP = dyn_cast<ConstantFPSDNode>(Op)) { | ||||||
9544 | auto F = CFP->getValueAPF(); | ||||||
9545 | if (F.isNaN() && F.isSignaling()) | ||||||
9546 | return false; | ||||||
9547 | return !F.isDenormal() || denormalsEnabledForType(DAG, Op.getValueType()); | ||||||
9548 | } | ||||||
9549 | |||||||
9550 | // If source is a result of another standard FP operation it is already in | ||||||
9551 | // canonical form. | ||||||
9552 | if (MaxDepth == 0) | ||||||
9553 | return false; | ||||||
9554 | |||||||
9555 | switch (Opcode) { | ||||||
9556 | // These will flush denorms if required. | ||||||
9557 | case ISD::FADD: | ||||||
9558 | case ISD::FSUB: | ||||||
9559 | case ISD::FMUL: | ||||||
9560 | case ISD::FCEIL: | ||||||
9561 | case ISD::FFLOOR: | ||||||
9562 | case ISD::FMA: | ||||||
9563 | case ISD::FMAD: | ||||||
9564 | case ISD::FSQRT: | ||||||
9565 | case ISD::FDIV: | ||||||
9566 | case ISD::FREM: | ||||||
9567 | case ISD::FP_ROUND: | ||||||
9568 | case ISD::FP_EXTEND: | ||||||
9569 | case AMDGPUISD::FMUL_LEGACY: | ||||||
9570 | case AMDGPUISD::FMAD_FTZ: | ||||||
9571 | case AMDGPUISD::RCP: | ||||||
9572 | case AMDGPUISD::RSQ: | ||||||
9573 | case AMDGPUISD::RSQ_CLAMP: | ||||||
9574 | case AMDGPUISD::RCP_LEGACY: | ||||||
9575 | case AMDGPUISD::RCP_IFLAG: | ||||||
9576 | case AMDGPUISD::DIV_SCALE: | ||||||
9577 | case AMDGPUISD::DIV_FMAS: | ||||||
9578 | case AMDGPUISD::DIV_FIXUP: | ||||||
9579 | case AMDGPUISD::FRACT: | ||||||
9580 | case AMDGPUISD::LDEXP: | ||||||
9581 | case AMDGPUISD::CVT_PKRTZ_F16_F32: | ||||||
9582 | case AMDGPUISD::CVT_F32_UBYTE0: | ||||||
9583 | case AMDGPUISD::CVT_F32_UBYTE1: | ||||||
9584 | case AMDGPUISD::CVT_F32_UBYTE2: | ||||||
9585 | case AMDGPUISD::CVT_F32_UBYTE3: | ||||||
9586 | return true; | ||||||
9587 | |||||||
9588 | // It can/will be lowered or combined as a bit operation. | ||||||
9589 | // Need to check their input recursively to handle. | ||||||
9590 | case ISD::FNEG: | ||||||
9591 | case ISD::FABS: | ||||||
9592 | case ISD::FCOPYSIGN: | ||||||
9593 | return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1); | ||||||
9594 | |||||||
9595 | case ISD::FSIN: | ||||||
9596 | case ISD::FCOS: | ||||||
9597 | case ISD::FSINCOS: | ||||||
9598 | return Op.getValueType().getScalarType() != MVT::f16; | ||||||
9599 | |||||||
9600 | case ISD::FMINNUM: | ||||||
9601 | case ISD::FMAXNUM: | ||||||
9602 | case ISD::FMINNUM_IEEE: | ||||||
9603 | case ISD::FMAXNUM_IEEE: | ||||||
9604 | case AMDGPUISD::CLAMP: | ||||||
9605 | case AMDGPUISD::FMED3: | ||||||
9606 | case AMDGPUISD::FMAX3: | ||||||
9607 | case AMDGPUISD::FMIN3: { | ||||||
9608 | // FIXME: Shouldn't treat the generic operations differently based on these. | ||||||
9609 | // However, we aren't really required to flush the result from | ||||||
9610 | // minnum/maxnum. | ||||||
9611 | |||||||
9612 | // sNaNs will be quieted, so we only need to worry about denormals. | ||||||
9613 | if (Subtarget->supportsMinMaxDenormModes() || | ||||||
9614 | denormalsEnabledForType(DAG, Op.getValueType())) | ||||||
9615 | return true; | ||||||
9616 | |||||||
9617 | // Flushing may be required. | ||||||
9618 | // On pre-GFX9 targets, V_MIN_F32 and others do not flush denorms; for such | ||||||
9619 | // targets we need to check their inputs recursively. | ||||||
9620 | |||||||
9621 | // FIXME: Does this apply with clamp? It's implemented with max. | ||||||
9622 | for (unsigned I = 0, E = Op.getNumOperands(); I != E; ++I) { | ||||||
9623 | if (!isCanonicalized(DAG, Op.getOperand(I), MaxDepth - 1)) | ||||||
9624 | return false; | ||||||
9625 | } | ||||||
9626 | |||||||
9627 | return true; | ||||||
9628 | } | ||||||
9629 | case ISD::SELECT: { | ||||||
9630 | return isCanonicalized(DAG, Op.getOperand(1), MaxDepth - 1) && | ||||||
9631 | isCanonicalized(DAG, Op.getOperand(2), MaxDepth - 1); | ||||||
9632 | } | ||||||
9633 | case ISD::BUILD_VECTOR: { | ||||||
9634 | for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) { | ||||||
9635 | SDValue SrcOp = Op.getOperand(i); | ||||||
9636 | if (!isCanonicalized(DAG, SrcOp, MaxDepth - 1)) | ||||||
9637 | return false; | ||||||
9638 | } | ||||||
9639 | |||||||
9640 | return true; | ||||||
9641 | } | ||||||
9642 | case ISD::EXTRACT_VECTOR_ELT: | ||||||
9643 | case ISD::EXTRACT_SUBVECTOR: { | ||||||
9644 | return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1); | ||||||
9645 | } | ||||||
9646 | case ISD::INSERT_VECTOR_ELT: { | ||||||
9647 | return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1) && | ||||||
9648 | isCanonicalized(DAG, Op.getOperand(1), MaxDepth - 1); | ||||||
9649 | } | ||||||
9650 | case ISD::UNDEF: | ||||||
9651 | // Could be anything. | ||||||
9652 | return false; | ||||||
9653 | |||||||
9654 | case ISD::BITCAST: | ||||||
9655 | return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1); | ||||||
9656 | case ISD::TRUNCATE: { | ||||||
9657 | // Hack around the mess we make when legalizing extract_vector_elt. | ||||||
9658 | if (Op.getValueType() == MVT::i16) { | ||||||
9659 | SDValue TruncSrc = Op.getOperand(0); | ||||||
9660 | if (TruncSrc.getValueType() == MVT::i32 && | ||||||
9661 | TruncSrc.getOpcode() == ISD::BITCAST && | ||||||
9662 | TruncSrc.getOperand(0).getValueType() == MVT::v2f16) { | ||||||
9663 | return isCanonicalized(DAG, TruncSrc.getOperand(0), MaxDepth - 1); | ||||||
9664 | } | ||||||
9665 | } | ||||||
9666 | return false; | ||||||
9667 | } | ||||||
9668 | case ISD::INTRINSIC_WO_CHAIN: { | ||||||
9669 | unsigned IntrinsicID | ||||||
9670 | = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); | ||||||
9671 | // TODO: Handle more intrinsics | ||||||
9672 | switch (IntrinsicID) { | ||||||
9673 | case Intrinsic::amdgcn_cvt_pkrtz: | ||||||
9674 | case Intrinsic::amdgcn_cubeid: | ||||||
9675 | case Intrinsic::amdgcn_frexp_mant: | ||||||
9676 | case Intrinsic::amdgcn_fdot2: | ||||||
9677 | case Intrinsic::amdgcn_rcp: | ||||||
9678 | case Intrinsic::amdgcn_rsq: | ||||||
9679 | case Intrinsic::amdgcn_rsq_clamp: | ||||||
9680 | case Intrinsic::amdgcn_rcp_legacy: | ||||||
9681 | case Intrinsic::amdgcn_rsq_legacy: | ||||||
9682 | case Intrinsic::amdgcn_trig_preop: | ||||||
9683 | return true; | ||||||
9684 | default: | ||||||
9685 | break; | ||||||
9686 | } | ||||||
9687 | |||||||
9688 | LLVM_FALLTHROUGH; | ||||||
9689 | } | ||||||
9690 | default: | ||||||
9691 | return denormalsEnabledForType(DAG, Op.getValueType()) && | ||||||
9692 | DAG.isKnownNeverSNaN(Op); | ||||||
9693 | } | ||||||
9694 | |||||||
9695 | llvm_unreachable("invalid operation")::llvm::llvm_unreachable_internal("invalid operation", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AMDGPU/SIISelLowering.cpp" , 9695); | ||||||
9696 | } | ||||||
9697 | |||||||
9698 | bool SITargetLowering::isCanonicalized(Register Reg, MachineFunction &MF, | ||||||
9699 | unsigned MaxDepth) const { | ||||||
9700 | MachineRegisterInfo &MRI = MF.getRegInfo(); | ||||||
9701 | MachineInstr *MI = MRI.getVRegDef(Reg); | ||||||
9702 | unsigned Opcode = MI->getOpcode(); | ||||||
9703 | |||||||
9704 | if (Opcode == AMDGPU::G_FCANONICALIZE) | ||||||
9705 | return true; | ||||||
9706 | |||||||
9707 | if (Opcode == AMDGPU::G_FCONSTANT) { | ||||||
9708 | auto F = MI->getOperand(1).getFPImm()->getValueAPF(); | ||||||
9709 | if (F.isNaN() && F.isSignaling()) | ||||||
9710 | return false; | ||||||
9711 | return !F.isDenormal() || denormalsEnabledForType(MRI.getType(Reg), MF); | ||||||
9712 | } | ||||||
9713 | |||||||
9714 | if (MaxDepth == 0) | ||||||
9715 | return false; | ||||||
9716 | |||||||
9717 | switch (Opcode) { | ||||||
9718 | case AMDGPU::G_FMINNUM_IEEE: | ||||||
9719 | case AMDGPU::G_FMAXNUM_IEEE: { | ||||||
9720 | if (Subtarget->supportsMinMaxDenormModes() || | ||||||
9721 | denormalsEnabledForType(MRI.getType(Reg), MF)) | ||||||
9722 | return true; | ||||||
9723 | for (unsigned I = 1, E = MI->getNumOperands(); I != E; ++I) { | ||||||
9724 | if (!isCanonicalized(MI->getOperand(I).getReg(), MF, MaxDepth - 1)) | ||||||
9725 | return false; | ||||||
9726 | } | ||||||
9727 | return true; | ||||||
9728 | } | ||||||
9729 | default: | ||||||
9730 | return denormalsEnabledForType(MRI.getType(Reg), MF) && | ||||||
9731 | isKnownNeverSNaN(Reg, MRI); | ||||||
9732 | } | ||||||
9733 | |||||||
9734 | llvm_unreachable("invalid operation")::llvm::llvm_unreachable_internal("invalid operation", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AMDGPU/SIISelLowering.cpp" , 9734); | ||||||
9735 | } | ||||||
9736 | |||||||
9737 | // Constant fold canonicalize. | ||||||
9738 | SDValue SITargetLowering::getCanonicalConstantFP( | ||||||
9739 | SelectionDAG &DAG, const SDLoc &SL, EVT VT, const APFloat &C) const { | ||||||
9740 | // Flush denormals to 0 if not enabled. | ||||||
9741 | if (C.isDenormal() && !denormalsEnabledForType(DAG, VT)) | ||||||
9742 | return DAG.getConstantFP(0.0, SL, VT); | ||||||
9743 | |||||||
9744 | if (C.isNaN()) { | ||||||
9745 | APFloat CanonicalQNaN = APFloat::getQNaN(C.getSemantics()); | ||||||
9746 | if (C.isSignaling()) { | ||||||
9747 | // Quiet a signaling NaN. | ||||||
9748 | // FIXME: Is this supposed to preserve payload bits? | ||||||
9749 | return DAG.getConstantFP(CanonicalQNaN, SL, VT); | ||||||
9750 | } | ||||||
9751 | |||||||
9752 | // Make sure it is the canonical NaN bitpattern. | ||||||
9753 | // | ||||||
9754 | // TODO: Can we use -1 as the canonical NaN value since it's an inline | ||||||
9755 | // immediate? | ||||||
9756 | if (C.bitcastToAPInt() != CanonicalQNaN.bitcastToAPInt()) | ||||||
9757 | return DAG.getConstantFP(CanonicalQNaN, SL, VT); | ||||||
9758 | } | ||||||
9759 | |||||||
9760 | // Already canonical. | ||||||
9761 | return DAG.getConstantFP(C, SL, VT); | ||||||
9762 | } | ||||||
9763 | |||||||
9764 | static bool vectorEltWillFoldAway(SDValue Op) { | ||||||
9765 | return Op.isUndef() || isa<ConstantFPSDNode>(Op); | ||||||
9766 | } | ||||||
9767 | |||||||
9768 | SDValue SITargetLowering::performFCanonicalizeCombine( | ||||||
9769 | SDNode *N, | ||||||
9770 | DAGCombinerInfo &DCI) const { | ||||||
9771 | SelectionDAG &DAG = DCI.DAG; | ||||||
9772 | SDValue N0 = N->getOperand(0); | ||||||
9773 | EVT VT = N->getValueType(0); | ||||||
9774 | |||||||
9775 | // fcanonicalize undef -> qnan | ||||||
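  // (undef may legally fold to any canonical value; the default quiet NaN is
  // chosen here)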
9776 | if (N0.isUndef()) { | ||||||
9777 | APFloat QNaN = APFloat::getQNaN(SelectionDAG::EVTToAPFloatSemantics(VT)); | ||||||
9778 | return DAG.getConstantFP(QNaN, SDLoc(N), VT); | ||||||
9779 | } | ||||||
9780 | |||||||
9781 | if (ConstantFPSDNode *CFP = isConstOrConstSplatFP(N0)) { | ||||||
9782 | EVT VT = N->getValueType(0); | ||||||
9783 | return getCanonicalConstantFP(DAG, SDLoc(N), VT, CFP->getValueAPF()); | ||||||
9784 | } | ||||||
9785 | |||||||
9786 | // fcanonicalize (build_vector x, k) -> build_vector (fcanonicalize x), | ||||||
9787 | // (fcanonicalize k) | ||||||
9788 | // | ||||||
9789 | // fcanonicalize (build_vector x, undef) -> build_vector (fcanonicalize x), 0 | ||||||
9790 | |||||||
9791 | // TODO: This could be better with wider vectors that will be split to v2f16, | ||||||
9792 | // and to consider uses since there aren't that many packed operations. | ||||||
9793 | if (N0.getOpcode() == ISD::BUILD_VECTOR && VT == MVT::v2f16 && | ||||||
9794 | isTypeLegal(MVT::v2f16)) { | ||||||
9795 | SDLoc SL(N); | ||||||
9796 | SDValue NewElts[2]; | ||||||
9797 | SDValue Lo = N0.getOperand(0); | ||||||
9798 | SDValue Hi = N0.getOperand(1); | ||||||
9799 | EVT EltVT = Lo.getValueType(); | ||||||
9800 | |||||||
9801 | if (vectorEltWillFoldAway(Lo) || vectorEltWillFoldAway(Hi)) { | ||||||
9802 | for (unsigned I = 0; I != 2; ++I) { | ||||||
9803 | SDValue Op = N0.getOperand(I); | ||||||
        if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
9805 | NewElts[I] = getCanonicalConstantFP(DAG, SL, EltVT, | ||||||
9806 | CFP->getValueAPF()); | ||||||
9807 | } else if (Op.isUndef()) { | ||||||
9808 | // Handled below based on what the other operand is. | ||||||
9809 | NewElts[I] = Op; | ||||||
9810 | } else { | ||||||
9811 | NewElts[I] = DAG.getNode(ISD::FCANONICALIZE, SL, EltVT, Op); | ||||||
9812 | } | ||||||
9813 | } | ||||||
9814 | |||||||
      // If one half is undef, and one is constant, prefer a splat vector rather
9816 | // than the normal qNaN. If it's a register, prefer 0.0 since that's | ||||||
9817 | // cheaper to use and may be free with a packed operation. | ||||||
9818 | if (NewElts[0].isUndef()) { | ||||||
9819 | if (isa<ConstantFPSDNode>(NewElts[1])) | ||||||
9820 | NewElts[0] = isa<ConstantFPSDNode>(NewElts[1]) ? | ||||||
9821 | NewElts[1]: DAG.getConstantFP(0.0f, SL, EltVT); | ||||||
9822 | } | ||||||
9823 | |||||||
9824 | if (NewElts[1].isUndef()) { | ||||||
9825 | NewElts[1] = isa<ConstantFPSDNode>(NewElts[0]) ? | ||||||
9826 | NewElts[0] : DAG.getConstantFP(0.0f, SL, EltVT); | ||||||
9827 | } | ||||||
9828 | |||||||
9829 | return DAG.getBuildVector(VT, SL, NewElts); | ||||||
9830 | } | ||||||
9831 | } | ||||||
9832 | |||||||
9833 | unsigned SrcOpc = N0.getOpcode(); | ||||||
9834 | |||||||
9835 | // If it's free to do so, push canonicalizes further up the source, which may | ||||||
9836 | // find a canonical source. | ||||||
9837 | // | ||||||
  // TODO: More opcodes. Note this is unsafe for the _ieee minnum/maxnum for
9839 | // sNaNs. | ||||||
9840 | if (SrcOpc == ISD::FMINNUM || SrcOpc == ISD::FMAXNUM) { | ||||||
9841 | auto *CRHS = dyn_cast<ConstantFPSDNode>(N0.getOperand(1)); | ||||||
9842 | if (CRHS && N0.hasOneUse()) { | ||||||
9843 | SDLoc SL(N); | ||||||
9844 | SDValue Canon0 = DAG.getNode(ISD::FCANONICALIZE, SL, VT, | ||||||
9845 | N0.getOperand(0)); | ||||||
9846 | SDValue Canon1 = getCanonicalConstantFP(DAG, SL, VT, CRHS->getValueAPF()); | ||||||
9847 | DCI.AddToWorklist(Canon0.getNode()); | ||||||
9848 | |||||||
9849 | return DAG.getNode(N0.getOpcode(), SL, VT, Canon0, Canon1); | ||||||
9850 | } | ||||||
9851 | } | ||||||
9852 | |||||||
9853 | return isCanonicalized(DAG, N0) ? N0 : SDValue(); | ||||||
9854 | } | ||||||
9855 | |||||||
9856 | static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) { | ||||||
9857 | switch (Opc) { | ||||||
9858 | case ISD::FMAXNUM: | ||||||
9859 | case ISD::FMAXNUM_IEEE: | ||||||
9860 | return AMDGPUISD::FMAX3; | ||||||
9861 | case ISD::SMAX: | ||||||
9862 | return AMDGPUISD::SMAX3; | ||||||
9863 | case ISD::UMAX: | ||||||
9864 | return AMDGPUISD::UMAX3; | ||||||
9865 | case ISD::FMINNUM: | ||||||
9866 | case ISD::FMINNUM_IEEE: | ||||||
9867 | return AMDGPUISD::FMIN3; | ||||||
9868 | case ISD::SMIN: | ||||||
9869 | return AMDGPUISD::SMIN3; | ||||||
9870 | case ISD::UMIN: | ||||||
9871 | return AMDGPUISD::UMIN3; | ||||||
9872 | default: | ||||||
9873 | llvm_unreachable("Not a min/max opcode")::llvm::llvm_unreachable_internal("Not a min/max opcode", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AMDGPU/SIISelLowering.cpp" , 9873); | ||||||
9874 | } | ||||||
9875 | } | ||||||
9876 | |||||||
9877 | SDValue SITargetLowering::performIntMed3ImmCombine( | ||||||
9878 | SelectionDAG &DAG, const SDLoc &SL, | ||||||
9879 | SDValue Op0, SDValue Op1, bool Signed) const { | ||||||
9880 | ConstantSDNode *K1 = dyn_cast<ConstantSDNode>(Op1); | ||||||
9881 | if (!K1) | ||||||
9882 | return SDValue(); | ||||||
9883 | |||||||
9884 | ConstantSDNode *K0 = dyn_cast<ConstantSDNode>(Op0.getOperand(1)); | ||||||
9885 | if (!K0) | ||||||
9886 | return SDValue(); | ||||||
9887 | |||||||
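  // min(max(x, K0), K1) is only equivalent to med3(x, K0, K1) when K0 < K1;
  // otherwise the whole expression folds to the constant K1, which is left to
  // the generic combines.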
9888 | if (Signed) { | ||||||
9889 | if (K0->getAPIntValue().sge(K1->getAPIntValue())) | ||||||
9890 | return SDValue(); | ||||||
9891 | } else { | ||||||
9892 | if (K0->getAPIntValue().uge(K1->getAPIntValue())) | ||||||
9893 | return SDValue(); | ||||||
9894 | } | ||||||
9895 | |||||||
9896 | EVT VT = K0->getValueType(0); | ||||||
9897 | unsigned Med3Opc = Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3; | ||||||
9898 | if (VT == MVT::i32 || (VT == MVT::i16 && Subtarget->hasMed3_16())) { | ||||||
9899 | return DAG.getNode(Med3Opc, SL, VT, | ||||||
9900 | Op0.getOperand(0), SDValue(K0, 0), SDValue(K1, 0)); | ||||||
9901 | } | ||||||
9902 | |||||||
9903 | // If there isn't a 16-bit med3 operation, convert to 32-bit. | ||||||
9904 | if (VT == MVT::i16) { | ||||||
9905 | MVT NVT = MVT::i32; | ||||||
9906 | unsigned ExtOp = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; | ||||||
9907 | |||||||
9908 | SDValue Tmp1 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(0)); | ||||||
9909 | SDValue Tmp2 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(1)); | ||||||
9910 | SDValue Tmp3 = DAG.getNode(ExtOp, SL, NVT, Op1); | ||||||
9911 | |||||||
9912 | SDValue Med3 = DAG.getNode(Med3Opc, SL, NVT, Tmp1, Tmp2, Tmp3); | ||||||
9913 | return DAG.getNode(ISD::TRUNCATE, SL, VT, Med3); | ||||||
9914 | } | ||||||
9915 | |||||||
9916 | return SDValue(); | ||||||
9917 | } | ||||||
9918 | |||||||
9919 | static ConstantFPSDNode *getSplatConstantFP(SDValue Op) { | ||||||
9920 | if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) | ||||||
9921 | return C; | ||||||
9922 | |||||||
9923 | if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op)) { | ||||||
9924 | if (ConstantFPSDNode *C = BV->getConstantFPSplatNode()) | ||||||
9925 | return C; | ||||||
9926 | } | ||||||
9927 | |||||||
9928 | return nullptr; | ||||||
9929 | } | ||||||
9930 | |||||||
9931 | SDValue SITargetLowering::performFPMed3ImmCombine(SelectionDAG &DAG, | ||||||
9932 | const SDLoc &SL, | ||||||
9933 | SDValue Op0, | ||||||
9934 | SDValue Op1) const { | ||||||
9935 | ConstantFPSDNode *K1 = getSplatConstantFP(Op1); | ||||||
9936 | if (!K1) | ||||||
9937 | return SDValue(); | ||||||
9938 | |||||||
9939 | ConstantFPSDNode *K0 = getSplatConstantFP(Op0.getOperand(1)); | ||||||
9940 | if (!K0) | ||||||
9941 | return SDValue(); | ||||||
9942 | |||||||
9943 | // Ordered >= (although NaN inputs should have folded away by now). | ||||||
9944 | if (K0->getValueAPF() > K1->getValueAPF()) | ||||||
9945 | return SDValue(); | ||||||
9946 | |||||||
9947 | const MachineFunction &MF = DAG.getMachineFunction(); | ||||||
9948 | const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); | ||||||
9949 | |||||||
9950 | // TODO: Check IEEE bit enabled? | ||||||
9951 | EVT VT = Op0.getValueType(); | ||||||
9952 | if (Info->getMode().DX10Clamp) { | ||||||
9953 | // If dx10_clamp is enabled, NaNs clamp to 0.0. This is the same as the | ||||||
9954 | // hardware fmed3 behavior converting to a min. | ||||||
9955 | // FIXME: Should this be allowing -0.0? | ||||||
9956 | if (K1->isExactlyValue(1.0) && K0->isExactlyValue(0.0)) | ||||||
9957 | return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Op0.getOperand(0)); | ||||||
9958 | } | ||||||
9959 | |||||||
9960 | // med3 for f16 is only available on gfx9+, and not available for v2f16. | ||||||
9961 | if (VT == MVT::f32 || (VT == MVT::f16 && Subtarget->hasMed3_16())) { | ||||||
9962 | // This isn't safe with signaling NaNs because in IEEE mode, min/max on a | ||||||
9963 | // signaling NaN gives a quiet NaN. The quiet NaN input to the min would | ||||||
9964 | // then give the other result, which is different from med3 with a NaN | ||||||
9965 | // input. | ||||||
9966 | SDValue Var = Op0.getOperand(0); | ||||||
9967 | if (!DAG.isKnownNeverSNaN(Var)) | ||||||
9968 | return SDValue(); | ||||||
9969 | |||||||
9970 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); | ||||||
9971 | |||||||
9972 | if ((!K0->hasOneUse() || | ||||||
9973 | TII->isInlineConstant(K0->getValueAPF().bitcastToAPInt())) && | ||||||
9974 | (!K1->hasOneUse() || | ||||||
9975 | TII->isInlineConstant(K1->getValueAPF().bitcastToAPInt()))) { | ||||||
9976 | return DAG.getNode(AMDGPUISD::FMED3, SL, K0->getValueType(0), | ||||||
9977 | Var, SDValue(K0, 0), SDValue(K1, 0)); | ||||||
9978 | } | ||||||
9979 | } | ||||||
9980 | |||||||
9981 | return SDValue(); | ||||||
9982 | } | ||||||
9983 | |||||||
9984 | SDValue SITargetLowering::performMinMaxCombine(SDNode *N, | ||||||
9985 | DAGCombinerInfo &DCI) const { | ||||||
9986 | SelectionDAG &DAG = DCI.DAG; | ||||||
9987 | |||||||
9988 | EVT VT = N->getValueType(0); | ||||||
9989 | unsigned Opc = N->getOpcode(); | ||||||
9990 | SDValue Op0 = N->getOperand(0); | ||||||
9991 | SDValue Op1 = N->getOperand(1); | ||||||
9992 | |||||||
  // Only do this if the inner op has one use since this will just increase
9994 | // register pressure for no benefit. | ||||||
9995 | |||||||
9996 | if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY && | ||||||
9997 | !VT.isVector() && | ||||||
9998 | (VT == MVT::i32 || VT == MVT::f32 || | ||||||
9999 | ((VT == MVT::f16 || VT == MVT::i16) && Subtarget->hasMin3Max3_16()))) { | ||||||
10000 | // max(max(a, b), c) -> max3(a, b, c) | ||||||
10001 | // min(min(a, b), c) -> min3(a, b, c) | ||||||
10002 | if (Op0.getOpcode() == Opc && Op0.hasOneUse()) { | ||||||
10003 | SDLoc DL(N); | ||||||
10004 | return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc), | ||||||
10005 | DL, | ||||||
10006 | N->getValueType(0), | ||||||
10007 | Op0.getOperand(0), | ||||||
10008 | Op0.getOperand(1), | ||||||
10009 | Op1); | ||||||
10010 | } | ||||||
10011 | |||||||
10012 | // Try commuted. | ||||||
10013 | // max(a, max(b, c)) -> max3(a, b, c) | ||||||
10014 | // min(a, min(b, c)) -> min3(a, b, c) | ||||||
10015 | if (Op1.getOpcode() == Opc && Op1.hasOneUse()) { | ||||||
10016 | SDLoc DL(N); | ||||||
10017 | return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc), | ||||||
10018 | DL, | ||||||
10019 | N->getValueType(0), | ||||||
10020 | Op0, | ||||||
10021 | Op1.getOperand(0), | ||||||
10022 | Op1.getOperand(1)); | ||||||
10023 | } | ||||||
10024 | } | ||||||
10025 | |||||||
10026 | // min(max(x, K0), K1), K0 < K1 -> med3(x, K0, K1) | ||||||
10027 | if (Opc == ISD::SMIN && Op0.getOpcode() == ISD::SMAX && Op0.hasOneUse()) { | ||||||
10028 | if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, true)) | ||||||
10029 | return Med3; | ||||||
10030 | } | ||||||
10031 | |||||||
10032 | if (Opc == ISD::UMIN && Op0.getOpcode() == ISD::UMAX && Op0.hasOneUse()) { | ||||||
10033 | if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, false)) | ||||||
10034 | return Med3; | ||||||
10035 | } | ||||||
10036 | |||||||
10037 | // fminnum(fmaxnum(x, K0), K1), K0 < K1 && !is_snan(x) -> fmed3(x, K0, K1) | ||||||
10038 | if (((Opc == ISD::FMINNUM && Op0.getOpcode() == ISD::FMAXNUM) || | ||||||
10039 | (Opc == ISD::FMINNUM_IEEE && Op0.getOpcode() == ISD::FMAXNUM_IEEE) || | ||||||
10040 | (Opc == AMDGPUISD::FMIN_LEGACY && | ||||||
10041 | Op0.getOpcode() == AMDGPUISD::FMAX_LEGACY)) && | ||||||
10042 | (VT == MVT::f32 || VT == MVT::f64 || | ||||||
10043 | (VT == MVT::f16 && Subtarget->has16BitInsts()) || | ||||||
10044 | (VT == MVT::v2f16 && Subtarget->hasVOP3PInsts())) && | ||||||
10045 | Op0.hasOneUse()) { | ||||||
10046 | if (SDValue Res = performFPMed3ImmCombine(DAG, SDLoc(N), Op0, Op1)) | ||||||
10047 | return Res; | ||||||
10048 | } | ||||||
10049 | |||||||
10050 | return SDValue(); | ||||||
10051 | } | ||||||
10052 | |||||||
10053 | static bool isClampZeroToOne(SDValue A, SDValue B) { | ||||||
10054 | if (ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A)) { | ||||||
10055 | if (ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B)) { | ||||||
10056 | // FIXME: Should this be allowing -0.0? | ||||||
10057 | return (CA->isExactlyValue(0.0) && CB->isExactlyValue(1.0)) || | ||||||
10058 | (CA->isExactlyValue(1.0) && CB->isExactlyValue(0.0)); | ||||||
10059 | } | ||||||
10060 | } | ||||||
10061 | |||||||
10062 | return false; | ||||||
10063 | } | ||||||
10064 | |||||||
10065 | // FIXME: Should only worry about snans for version with chain. | ||||||
10066 | SDValue SITargetLowering::performFMed3Combine(SDNode *N, | ||||||
10067 | DAGCombinerInfo &DCI) const { | ||||||
10068 | EVT VT = N->getValueType(0); | ||||||
10069 | // v_med3_f32 and v_max_f32 behave identically wrt denorms, exceptions and | ||||||
10070 | // NaNs. With a NaN input, the order of the operands may change the result. | ||||||
10071 | |||||||
10072 | SelectionDAG &DAG = DCI.DAG; | ||||||
10073 | SDLoc SL(N); | ||||||
10074 | |||||||
10075 | SDValue Src0 = N->getOperand(0); | ||||||
10076 | SDValue Src1 = N->getOperand(1); | ||||||
10077 | SDValue Src2 = N->getOperand(2); | ||||||
10078 | |||||||
10079 | if (isClampZeroToOne(Src0, Src1)) { | ||||||
10080 | // const_a, const_b, x -> clamp is safe in all cases including signaling | ||||||
10081 | // nans. | ||||||
10082 | // FIXME: Should this be allowing -0.0? | ||||||
10083 | return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src2); | ||||||
10084 | } | ||||||
10085 | |||||||
10086 | const MachineFunction &MF = DAG.getMachineFunction(); | ||||||
10087 | const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); | ||||||
10088 | |||||||
10089 | // FIXME: dx10_clamp behavior assumed in instcombine. Should we really bother | ||||||
10090 | // handling no dx10-clamp? | ||||||
10091 | if (Info->getMode().DX10Clamp) { | ||||||
    // If NaN is clamped to 0, we are free to reorder the inputs.
10093 | |||||||
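    // Bubble any constant operands towards Src1/Src2 with three conditional
    // swaps so the clamp pattern below matches regardless of the original
    // operand order.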
10094 | if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1)) | ||||||
10095 | std::swap(Src0, Src1); | ||||||
10096 | |||||||
10097 | if (isa<ConstantFPSDNode>(Src1) && !isa<ConstantFPSDNode>(Src2)) | ||||||
10098 | std::swap(Src1, Src2); | ||||||
10099 | |||||||
10100 | if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1)) | ||||||
10101 | std::swap(Src0, Src1); | ||||||
10102 | |||||||
10103 | if (isClampZeroToOne(Src1, Src2)) | ||||||
10104 | return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src0); | ||||||
10105 | } | ||||||
10106 | |||||||
10107 | return SDValue(); | ||||||
10108 | } | ||||||
10109 | |||||||
10110 | SDValue SITargetLowering::performCvtPkRTZCombine(SDNode *N, | ||||||
10111 | DAGCombinerInfo &DCI) const { | ||||||
10112 | SDValue Src0 = N->getOperand(0); | ||||||
10113 | SDValue Src1 = N->getOperand(1); | ||||||
10114 | if (Src0.isUndef() && Src1.isUndef()) | ||||||
10115 | return DCI.DAG.getUNDEF(N->getValueType(0)); | ||||||
10116 | return SDValue(); | ||||||
10117 | } | ||||||
10118 | |||||||
10119 | // Check if EXTRACT_VECTOR_ELT/INSERT_VECTOR_ELT (<n x e>, var-idx) should be | ||||||
10120 | // expanded into a set of cmp/select instructions. | ||||||
10121 | bool SITargetLowering::shouldExpandVectorDynExt(unsigned EltSize, | ||||||
10122 | unsigned NumElem, | ||||||
10123 | bool IsDivergentIdx) { | ||||||
10124 | if (UseDivergentRegisterIndexing) | ||||||
10125 | return false; | ||||||
10126 | |||||||
10127 | unsigned VecSize = EltSize * NumElem; | ||||||
10128 | |||||||
  // Sub-dword vectors of two dwords or less have a better implementation.
10130 | if (VecSize <= 64 && EltSize < 32) | ||||||
10131 | return false; | ||||||
10132 | |||||||
  // Always expand the remaining sub-dword cases; otherwise the operation will
  // be lowered via memory.
10135 | if (EltSize < 32) | ||||||
10136 | return true; | ||||||
10137 | |||||||
10138 | // Always do this if var-idx is divergent, otherwise it will become a loop. | ||||||
10139 | if (IsDivergentIdx) | ||||||
10140 | return true; | ||||||
10141 | |||||||
10142 | // Large vectors would yield too many compares and v_cndmask_b32 instructions. | ||||||
10143 | unsigned NumInsts = NumElem /* Number of compares */ + | ||||||
10144 | ((EltSize + 31) / 32) * NumElem /* Number of cndmasks */; | ||||||
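  // For example, a v8i32 access expands to 8 compares plus 8 v_cndmask_b32s
  // (16 instructions, right at the limit), while larger cases are rejected.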
10145 | return NumInsts <= 16; | ||||||
10146 | } | ||||||
10147 | |||||||
10148 | static bool shouldExpandVectorDynExt(SDNode *N) { | ||||||
10149 | SDValue Idx = N->getOperand(N->getNumOperands() - 1); | ||||||
10150 | if (isa<ConstantSDNode>(Idx)) | ||||||
10151 | return false; | ||||||
10152 | |||||||
10153 | SDValue Vec = N->getOperand(0); | ||||||
10154 | EVT VecVT = Vec.getValueType(); | ||||||
10155 | EVT EltVT = VecVT.getVectorElementType(); | ||||||
10156 | unsigned EltSize = EltVT.getSizeInBits(); | ||||||
10157 | unsigned NumElem = VecVT.getVectorNumElements(); | ||||||
10158 | |||||||
10159 | return SITargetLowering::shouldExpandVectorDynExt(EltSize, NumElem, | ||||||
10160 | Idx->isDivergent()); | ||||||
10161 | } | ||||||
10162 | |||||||
10163 | SDValue SITargetLowering::performExtractVectorEltCombine( | ||||||
10164 | SDNode *N, DAGCombinerInfo &DCI) const { | ||||||
10165 | SDValue Vec = N->getOperand(0); | ||||||
10166 | SelectionDAG &DAG = DCI.DAG; | ||||||
10167 | |||||||
10168 | EVT VecVT = Vec.getValueType(); | ||||||
10169 | EVT EltVT = VecVT.getVectorElementType(); | ||||||
10170 | |||||||
10171 | if ((Vec.getOpcode() == ISD::FNEG || | ||||||
10172 | Vec.getOpcode() == ISD::FABS) && allUsesHaveSourceMods(N)) { | ||||||
10173 | SDLoc SL(N); | ||||||
10174 | EVT EltVT = N->getValueType(0); | ||||||
10175 | SDValue Idx = N->getOperand(1); | ||||||
10176 | SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, | ||||||
10177 | Vec.getOperand(0), Idx); | ||||||
10178 | return DAG.getNode(Vec.getOpcode(), SL, EltVT, Elt); | ||||||
10179 | } | ||||||
10180 | |||||||
10181 | // ScalarRes = EXTRACT_VECTOR_ELT ((vector-BINOP Vec1, Vec2), Idx) | ||||||
10182 | // => | ||||||
10183 | // Vec1Elt = EXTRACT_VECTOR_ELT(Vec1, Idx) | ||||||
10184 | // Vec2Elt = EXTRACT_VECTOR_ELT(Vec2, Idx) | ||||||
10185 | // ScalarRes = scalar-BINOP Vec1Elt, Vec2Elt | ||||||
10186 | if (Vec.hasOneUse() && DCI.isBeforeLegalize()) { | ||||||
10187 | SDLoc SL(N); | ||||||
10188 | EVT EltVT = N->getValueType(0); | ||||||
10189 | SDValue Idx = N->getOperand(1); | ||||||
10190 | unsigned Opc = Vec.getOpcode(); | ||||||
10191 | |||||||
10192 | switch(Opc) { | ||||||
10193 | default: | ||||||
10194 | break; | ||||||
10195 | // TODO: Support other binary operations. | ||||||
10196 | case ISD::FADD: | ||||||
10197 | case ISD::FSUB: | ||||||
10198 | case ISD::FMUL: | ||||||
10199 | case ISD::ADD: | ||||||
10200 | case ISD::UMIN: | ||||||
10201 | case ISD::UMAX: | ||||||
10202 | case ISD::SMIN: | ||||||
10203 | case ISD::SMAX: | ||||||
10204 | case ISD::FMAXNUM: | ||||||
10205 | case ISD::FMINNUM: | ||||||
10206 | case ISD::FMAXNUM_IEEE: | ||||||
10207 | case ISD::FMINNUM_IEEE: { | ||||||
10208 | SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, | ||||||
10209 | Vec.getOperand(0), Idx); | ||||||
10210 | SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, | ||||||
10211 | Vec.getOperand(1), Idx); | ||||||
10212 | |||||||
10213 | DCI.AddToWorklist(Elt0.getNode()); | ||||||
10214 | DCI.AddToWorklist(Elt1.getNode()); | ||||||
10215 | return DAG.getNode(Opc, SL, EltVT, Elt0, Elt1, Vec->getFlags()); | ||||||
10216 | } | ||||||
10217 | } | ||||||
10218 | } | ||||||
10219 | |||||||
10220 | unsigned VecSize = VecVT.getSizeInBits(); | ||||||
10221 | unsigned EltSize = EltVT.getSizeInBits(); | ||||||
10222 | |||||||
10223 | // EXTRACT_VECTOR_ELT (<n x e>, var-idx) => n x select (e, const-idx) | ||||||
10224 | if (::shouldExpandVectorDynExt(N)) { | ||||||
10225 | SDLoc SL(N); | ||||||
10226 | SDValue Idx = N->getOperand(1); | ||||||
10227 | SDValue V; | ||||||
10228 | for (unsigned I = 0, E = VecVT.getVectorNumElements(); I < E; ++I) { | ||||||
10229 | SDValue IC = DAG.getVectorIdxConstant(I, SL); | ||||||
10230 | SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Vec, IC); | ||||||
10231 | if (I == 0) | ||||||
10232 | V = Elt; | ||||||
10233 | else | ||||||
10234 | V = DAG.getSelectCC(SL, Idx, IC, Elt, V, ISD::SETEQ); | ||||||
10235 | } | ||||||
10236 | return V; | ||||||
10237 | } | ||||||
10238 | |||||||
10239 | if (!DCI.isBeforeLegalize()) | ||||||
10240 | return SDValue(); | ||||||
10241 | |||||||
10242 | // Try to turn sub-dword accesses of vectors into accesses of the same 32-bit | ||||||
10243 | // elements. This exposes more load reduction opportunities by replacing | ||||||
10244 | // multiple small extract_vector_elements with a single 32-bit extract. | ||||||
10245 | auto *Idx = dyn_cast<ConstantSDNode>(N->getOperand(1)); | ||||||
10246 | if (isa<MemSDNode>(Vec) && | ||||||
10247 | EltSize <= 16 && | ||||||
10248 | EltVT.isByteSized() && | ||||||
10249 | VecSize > 32 && | ||||||
10250 | VecSize % 32 == 0 && | ||||||
10251 | Idx) { | ||||||
10252 | EVT NewVT = getEquivalentMemType(*DAG.getContext(), VecVT); | ||||||
10253 | |||||||
10254 | unsigned BitIndex = Idx->getZExtValue() * EltSize; | ||||||
10255 | unsigned EltIdx = BitIndex / 32; | ||||||
10256 | unsigned LeftoverBitIdx = BitIndex % 32; | ||||||
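    // For example, extracting element 5 of a v8i8 reads bits [40, 48): dword
    // 1 of the bitcast vector, shifted right by 8 bits.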
10257 | SDLoc SL(N); | ||||||
10258 | |||||||
10259 | SDValue Cast = DAG.getNode(ISD::BITCAST, SL, NewVT, Vec); | ||||||
10260 | DCI.AddToWorklist(Cast.getNode()); | ||||||
10261 | |||||||
10262 | SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Cast, | ||||||
10263 | DAG.getConstant(EltIdx, SL, MVT::i32)); | ||||||
10264 | DCI.AddToWorklist(Elt.getNode()); | ||||||
10265 | SDValue Srl = DAG.getNode(ISD::SRL, SL, MVT::i32, Elt, | ||||||
10266 | DAG.getConstant(LeftoverBitIdx, SL, MVT::i32)); | ||||||
10267 | DCI.AddToWorklist(Srl.getNode()); | ||||||
10268 | |||||||
10269 | SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, EltVT.changeTypeToInteger(), Srl); | ||||||
10270 | DCI.AddToWorklist(Trunc.getNode()); | ||||||
10271 | return DAG.getNode(ISD::BITCAST, SL, EltVT, Trunc); | ||||||
10272 | } | ||||||
10273 | |||||||
10274 | return SDValue(); | ||||||
10275 | } | ||||||
10276 | |||||||
10277 | SDValue | ||||||
10278 | SITargetLowering::performInsertVectorEltCombine(SDNode *N, | ||||||
10279 | DAGCombinerInfo &DCI) const { | ||||||
10280 | SDValue Vec = N->getOperand(0); | ||||||
10281 | SDValue Idx = N->getOperand(2); | ||||||
10282 | EVT VecVT = Vec.getValueType(); | ||||||
10283 | EVT EltVT = VecVT.getVectorElementType(); | ||||||
10284 | |||||||
10285 | // INSERT_VECTOR_ELT (<n x e>, var-idx) | ||||||
10286 | // => BUILD_VECTOR n x select (e, const-idx) | ||||||
10287 | if (!::shouldExpandVectorDynExt(N)) | ||||||
10288 | return SDValue(); | ||||||
10289 | |||||||
10290 | SelectionDAG &DAG = DCI.DAG; | ||||||
10291 | SDLoc SL(N); | ||||||
10292 | SDValue Ins = N->getOperand(1); | ||||||
10293 | EVT IdxVT = Idx.getValueType(); | ||||||
10294 | |||||||
10295 | SmallVector<SDValue, 16> Ops; | ||||||
10296 | for (unsigned I = 0, E = VecVT.getVectorNumElements(); I < E; ++I) { | ||||||
10297 | SDValue IC = DAG.getConstant(I, SL, IdxVT); | ||||||
10298 | SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Vec, IC); | ||||||
10299 | SDValue V = DAG.getSelectCC(SL, Idx, IC, Ins, Elt, ISD::SETEQ); | ||||||
10300 | Ops.push_back(V); | ||||||
10301 | } | ||||||
10302 | |||||||
10303 | return DAG.getBuildVector(VecVT, SL, Ops); | ||||||
10304 | } | ||||||
10305 | |||||||
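// Select an opcode for fusing a multiply and an add: FMAD when denormals do
// not need to be honored (v_mad does not support them), FMA when contraction
// is allowed and FMA is fast, and 0 when no profitable fusion exists.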
10306 | unsigned SITargetLowering::getFusedOpcode(const SelectionDAG &DAG, | ||||||
10307 | const SDNode *N0, | ||||||
10308 | const SDNode *N1) const { | ||||||
10309 | EVT VT = N0->getValueType(0); | ||||||
10310 | |||||||
10311 | // Only do this if we are not trying to support denormals. v_mad_f32 does not | ||||||
10312 | // support denormals ever. | ||||||
10313 | if (((VT == MVT::f32 && !hasFP32Denormals(DAG.getMachineFunction())) || | ||||||
10314 | (VT == MVT::f16 && !hasFP64FP16Denormals(DAG.getMachineFunction()) && | ||||||
10315 | getSubtarget()->hasMadF16())) && | ||||||
10316 | isOperationLegal(ISD::FMAD, VT)) | ||||||
10317 | return ISD::FMAD; | ||||||
10318 | |||||||
10319 | const TargetOptions &Options = DAG.getTarget().Options; | ||||||
10320 | if ((Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath || | ||||||
10321 | (N0->getFlags().hasAllowContract() && | ||||||
10322 | N1->getFlags().hasAllowContract())) && | ||||||
10323 | isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) { | ||||||
10324 | return ISD::FMA; | ||||||
10325 | } | ||||||
10326 | |||||||
10327 | return 0; | ||||||
10328 | } | ||||||
10329 | |||||||
10330 | // For a reassociatable opcode perform: | ||||||
10331 | // op x, (op y, z) -> op (op x, z), y, if x and z are uniform | ||||||
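// Grouping the two uniform operands together allows the inner op to be
// selected to a scalar (SALU) instruction, leaving a single VALU op for the
// divergent operand.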
10332 | SDValue SITargetLowering::reassociateScalarOps(SDNode *N, | ||||||
10333 | SelectionDAG &DAG) const { | ||||||
10334 | EVT VT = N->getValueType(0); | ||||||
10335 | if (VT != MVT::i32 && VT != MVT::i64) | ||||||
10336 | return SDValue(); | ||||||
10337 | |||||||
10338 | unsigned Opc = N->getOpcode(); | ||||||
10339 | SDValue Op0 = N->getOperand(0); | ||||||
10340 | SDValue Op1 = N->getOperand(1); | ||||||
10341 | |||||||
10342 | if (!(Op0->isDivergent() ^ Op1->isDivergent())) | ||||||
10343 | return SDValue(); | ||||||
10344 | |||||||
10345 | if (Op0->isDivergent()) | ||||||
10346 | std::swap(Op0, Op1); | ||||||
10347 | |||||||
10348 | if (Op1.getOpcode() != Opc || !Op1.hasOneUse()) | ||||||
10349 | return SDValue(); | ||||||
10350 | |||||||
10351 | SDValue Op2 = Op1.getOperand(1); | ||||||
10352 | Op1 = Op1.getOperand(0); | ||||||
10353 | if (!(Op1->isDivergent() ^ Op2->isDivergent())) | ||||||
10354 | return SDValue(); | ||||||
10355 | |||||||
10356 | if (Op1->isDivergent()) | ||||||
10357 | std::swap(Op1, Op2); | ||||||
10358 | |||||||
10359 | // If either operand is constant this will conflict with | ||||||
10360 | // DAGCombiner::ReassociateOps(). | ||||||
10361 | if (DAG.isConstantIntBuildVectorOrConstantInt(Op0) || | ||||||
10362 | DAG.isConstantIntBuildVectorOrConstantInt(Op1)) | ||||||
10363 | return SDValue(); | ||||||
10364 | |||||||
10365 | SDLoc SL(N); | ||||||
10366 | SDValue Add1 = DAG.getNode(Opc, SL, VT, Op0, Op1); | ||||||
10367 | return DAG.getNode(Opc, SL, VT, Add1, Op2); | ||||||
10368 | } | ||||||
10369 | |||||||
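// Build a MAD_I64_I32 / MAD_U64_U32 node, which multiplies two 32-bit sources
// into a 64-bit product and adds a 64-bit operand. The i1 carry-out result of
// the node is unused here; the i64 result is truncated back to VT.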
10370 | static SDValue getMad64_32(SelectionDAG &DAG, const SDLoc &SL, | ||||||
10371 | EVT VT, | ||||||
10372 | SDValue N0, SDValue N1, SDValue N2, | ||||||
10373 | bool Signed) { | ||||||
10374 | unsigned MadOpc = Signed ? AMDGPUISD::MAD_I64_I32 : AMDGPUISD::MAD_U64_U32; | ||||||
10375 | SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i1); | ||||||
10376 | SDValue Mad = DAG.getNode(MadOpc, SL, VTs, N0, N1, N2); | ||||||
10377 | return DAG.getNode(ISD::TRUNCATE, SL, VT, Mad); | ||||||
10378 | } | ||||||
10379 | |||||||
10380 | SDValue SITargetLowering::performAddCombine(SDNode *N, | ||||||
10381 | DAGCombinerInfo &DCI) const { | ||||||
10382 | SelectionDAG &DAG = DCI.DAG; | ||||||
10383 | EVT VT = N->getValueType(0); | ||||||
10384 | SDLoc SL(N); | ||||||
10385 | SDValue LHS = N->getOperand(0); | ||||||
10386 | SDValue RHS = N->getOperand(1); | ||||||
10387 | |||||||
10388 | if ((LHS.getOpcode() == ISD::MUL || RHS.getOpcode() == ISD::MUL) | ||||||
10389 | && Subtarget->hasMad64_32() && | ||||||
10390 | !VT.isVector() && VT.getScalarSizeInBits() > 32 && | ||||||
10391 | VT.getScalarSizeInBits() <= 64) { | ||||||
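    // If both multiply operands are known to fit in 32 bits, the mul and the
    // 64-bit add can be selected to a single v_mad_u64_u32 / v_mad_i64_i32.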
10392 | if (LHS.getOpcode() != ISD::MUL) | ||||||
10393 | std::swap(LHS, RHS); | ||||||
10394 | |||||||
10395 | SDValue MulLHS = LHS.getOperand(0); | ||||||
10396 | SDValue MulRHS = LHS.getOperand(1); | ||||||
10397 | SDValue AddRHS = RHS; | ||||||
10398 | |||||||
10399 | // TODO: Maybe restrict if SGPR inputs. | ||||||
10400 | if (numBitsUnsigned(MulLHS, DAG) <= 32 && | ||||||
10401 | numBitsUnsigned(MulRHS, DAG) <= 32) { | ||||||
10402 | MulLHS = DAG.getZExtOrTrunc(MulLHS, SL, MVT::i32); | ||||||
10403 | MulRHS = DAG.getZExtOrTrunc(MulRHS, SL, MVT::i32); | ||||||
10404 | AddRHS = DAG.getZExtOrTrunc(AddRHS, SL, MVT::i64); | ||||||
10405 | return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, false); | ||||||
10406 | } | ||||||
10407 | |||||||
10408 | if (numBitsSigned(MulLHS, DAG) < 32 && numBitsSigned(MulRHS, DAG) < 32) { | ||||||
10409 | MulLHS = DAG.getSExtOrTrunc(MulLHS, SL, MVT::i32); | ||||||
10410 | MulRHS = DAG.getSExtOrTrunc(MulRHS, SL, MVT::i32); | ||||||
10411 | AddRHS = DAG.getSExtOrTrunc(AddRHS, SL, MVT::i64); | ||||||
10412 | return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, true); | ||||||
10413 | } | ||||||
10414 | |||||||
10415 | return SDValue(); | ||||||
10416 | } | ||||||
10417 | |||||||
10418 | if (SDValue V = reassociateScalarOps(N, DAG)) { | ||||||
10419 | return V; | ||||||
10420 | } | ||||||
10421 | |||||||
10422 | if (VT != MVT::i32 || !DCI.isAfterLegalizeDAG()) | ||||||
10423 | return SDValue(); | ||||||
10424 | |||||||
10425 | // add x, zext (setcc) => addcarry x, 0, setcc | ||||||
10426 | // add x, sext (setcc) => subcarry x, 0, setcc | ||||||
10427 | unsigned Opc = LHS.getOpcode(); | ||||||
10428 | if (Opc == ISD::ZERO_EXTEND || Opc == ISD::SIGN_EXTEND || | ||||||
10429 | Opc == ISD::ANY_EXTEND || Opc == ISD::ADDCARRY) | ||||||
10430 | std::swap(RHS, LHS); | ||||||
10431 | |||||||
10432 | Opc = RHS.getOpcode(); | ||||||
10433 | switch (Opc) { | ||||||
10434 | default: break; | ||||||
10435 | case ISD::ZERO_EXTEND: | ||||||
10436 | case ISD::SIGN_EXTEND: | ||||||
10437 | case ISD::ANY_EXTEND: { | ||||||
10438 | auto Cond = RHS.getOperand(0); | ||||||
10439 | // If this won't be a real VOPC output, we would still need to insert an | ||||||
10440 | // extra instruction anyway. | ||||||
10441 | if (!isBoolSGPR(Cond)) | ||||||
10442 | break; | ||||||
10443 | SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1); | ||||||
10444 | SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond }; | ||||||
10445 | Opc = (Opc == ISD::SIGN_EXTEND) ? ISD::SUBCARRY : ISD::ADDCARRY; | ||||||
10446 | return DAG.getNode(Opc, SL, VTList, Args); | ||||||
10447 | } | ||||||
10448 | case ISD::ADDCARRY: { | ||||||
10449 | // add x, (addcarry y, 0, cc) => addcarry x, y, cc | ||||||
10450 | auto C = dyn_cast<ConstantSDNode>(RHS.getOperand(1)); | ||||||
10451 | if (!C || C->getZExtValue() != 0) break; | ||||||
10452 | SDValue Args[] = { LHS, RHS.getOperand(0), RHS.getOperand(2) }; | ||||||
10453 | return DAG.getNode(ISD::ADDCARRY, SDLoc(N), RHS->getVTList(), Args); | ||||||
10454 | } | ||||||
10455 | } | ||||||
10456 | return SDValue(); | ||||||
10457 | } | ||||||
10458 | |||||||
10459 | SDValue SITargetLowering::performSubCombine(SDNode *N, | ||||||
10460 | DAGCombinerInfo &DCI) const { | ||||||
10461 | SelectionDAG &DAG = DCI.DAG; | ||||||
10462 | EVT VT = N->getValueType(0); | ||||||
10463 | |||||||
10464 | if (VT != MVT::i32) | ||||||
10465 | return SDValue(); | ||||||
10466 | |||||||
10467 | SDLoc SL(N); | ||||||
10468 | SDValue LHS = N->getOperand(0); | ||||||
10469 | SDValue RHS = N->getOperand(1); | ||||||
10470 | |||||||
10471 | // sub x, zext (setcc) => subcarry x, 0, setcc | ||||||
10472 | // sub x, sext (setcc) => addcarry x, 0, setcc | ||||||
10473 | unsigned Opc = RHS.getOpcode(); | ||||||
10474 | switch (Opc) { | ||||||
10475 | default: break; | ||||||
10476 | case ISD::ZERO_EXTEND: | ||||||
10477 | case ISD::SIGN_EXTEND: | ||||||
10478 | case ISD::ANY_EXTEND: { | ||||||
10479 | auto Cond = RHS.getOperand(0); | ||||||
10480 | // If this won't be a real VOPC output, we would still need to insert an | ||||||
10481 | // extra instruction anyway. | ||||||
10482 | if (!isBoolSGPR(Cond)) | ||||||
10483 | break; | ||||||
10484 | SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1); | ||||||
10485 | SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond }; | ||||||
10486 | Opc = (Opc == ISD::SIGN_EXTEND) ? ISD::ADDCARRY : ISD::SUBCARRY; | ||||||
10487 | return DAG.getNode(Opc, SL, VTList, Args); | ||||||
10488 | } | ||||||
10489 | } | ||||||
10490 | |||||||
10491 | if (LHS.getOpcode() == ISD::SUBCARRY) { | ||||||
10492 | // sub (subcarry x, 0, cc), y => subcarry x, y, cc | ||||||
10493 | auto C = dyn_cast<ConstantSDNode>(LHS.getOperand(1)); | ||||||
10494 | if (!C || !C->isNullValue()) | ||||||
10495 | return SDValue(); | ||||||
10496 | SDValue Args[] = { LHS.getOperand(0), RHS, LHS.getOperand(2) }; | ||||||
10497 | return DAG.getNode(ISD::SUBCARRY, SDLoc(N), LHS->getVTList(), Args); | ||||||
10498 | } | ||||||
10499 | return SDValue(); | ||||||
10500 | } | ||||||
10501 | |||||||
10502 | SDValue SITargetLowering::performAddCarrySubCarryCombine(SDNode *N, | ||||||
10503 | DAGCombinerInfo &DCI) const { | ||||||
10504 | |||||||
10505 | if (N->getValueType(0) != MVT::i32) | ||||||
10506 | return SDValue(); | ||||||
10507 | |||||||
10508 | auto C = dyn_cast<ConstantSDNode>(N->getOperand(1)); | ||||||
10509 | if (!C || C->getZExtValue() != 0) | ||||||
10510 | return SDValue(); | ||||||
10511 | |||||||
10512 | SelectionDAG &DAG = DCI.DAG; | ||||||
10513 | SDValue LHS = N->getOperand(0); | ||||||
10514 | |||||||
10515 | // addcarry (add x, y), 0, cc => addcarry x, y, cc | ||||||
10516 | // subcarry (sub x, y), 0, cc => subcarry x, y, cc | ||||||
10517 | unsigned LHSOpc = LHS.getOpcode(); | ||||||
10518 | unsigned Opc = N->getOpcode(); | ||||||
10519 | if ((LHSOpc == ISD::ADD && Opc == ISD::ADDCARRY) || | ||||||
10520 | (LHSOpc == ISD::SUB && Opc == ISD::SUBCARRY)) { | ||||||
10521 | SDValue Args[] = { LHS.getOperand(0), LHS.getOperand(1), N->getOperand(2) }; | ||||||
10522 | return DAG.getNode(Opc, SDLoc(N), N->getVTList(), Args); | ||||||
10523 | } | ||||||
10524 | return SDValue(); | ||||||
10525 | } | ||||||
10526 | |||||||
10527 | SDValue SITargetLowering::performFAddCombine(SDNode *N, | ||||||
10528 | DAGCombinerInfo &DCI) const { | ||||||
10529 | if (DCI.getDAGCombineLevel() < AfterLegalizeDAG) | ||||||
10530 | return SDValue(); | ||||||
10531 | |||||||
10532 | SelectionDAG &DAG = DCI.DAG; | ||||||
10533 | EVT VT = N->getValueType(0); | ||||||
10534 | |||||||
10535 | SDLoc SL(N); | ||||||
10536 | SDValue LHS = N->getOperand(0); | ||||||
10537 | SDValue RHS = N->getOperand(1); | ||||||
10538 | |||||||
10539 | // These should really be instruction patterns, but writing patterns with | ||||||
  // source modifiers is a pain.
10541 | |||||||
10542 | // fadd (fadd (a, a), b) -> mad 2.0, a, b | ||||||
10543 | if (LHS.getOpcode() == ISD::FADD) { | ||||||
10544 | SDValue A = LHS.getOperand(0); | ||||||
10545 | if (A == LHS.getOperand(1)) { | ||||||
10546 | unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode()); | ||||||
10547 | if (FusedOp != 0) { | ||||||
10548 | const SDValue Two = DAG.getConstantFP(2.0, SL, VT); | ||||||
10549 | return DAG.getNode(FusedOp, SL, VT, A, Two, RHS); | ||||||
10550 | } | ||||||
10551 | } | ||||||
10552 | } | ||||||
10553 | |||||||
10554 | // fadd (b, fadd (a, a)) -> mad 2.0, a, b | ||||||
10555 | if (RHS.getOpcode() == ISD::FADD) { | ||||||
10556 | SDValue A = RHS.getOperand(0); | ||||||
10557 | if (A == RHS.getOperand(1)) { | ||||||
10558 | unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode()); | ||||||
10559 | if (FusedOp != 0) { | ||||||
10560 | const SDValue Two = DAG.getConstantFP(2.0, SL, VT); | ||||||
10561 | return DAG.getNode(FusedOp, SL, VT, A, Two, LHS); | ||||||
10562 | } | ||||||
10563 | } | ||||||
10564 | } | ||||||
10565 | |||||||
10566 | return SDValue(); | ||||||
10567 | } | ||||||
10568 | |||||||
10569 | SDValue SITargetLowering::performFSubCombine(SDNode *N, | ||||||
10570 | DAGCombinerInfo &DCI) const { | ||||||
10571 | if (DCI.getDAGCombineLevel() < AfterLegalizeDAG) | ||||||
10572 | return SDValue(); | ||||||
10573 | |||||||
10574 | SelectionDAG &DAG = DCI.DAG; | ||||||
10575 | SDLoc SL(N); | ||||||
10576 | EVT VT = N->getValueType(0); | ||||||
  assert(!VT.isVector());
10578 | |||||||
10579 | // Try to get the fneg to fold into the source modifier. This undoes generic | ||||||
10580 | // DAG combines and folds them into the mad. | ||||||
10581 | // | ||||||
10582 | // Only do this if we are not trying to support denormals. v_mad_f32 does | ||||||
10583 | // not support denormals ever. | ||||||
10584 | SDValue LHS = N->getOperand(0); | ||||||
10585 | SDValue RHS = N->getOperand(1); | ||||||
10586 | if (LHS.getOpcode() == ISD::FADD) { | ||||||
10587 | // (fsub (fadd a, a), c) -> mad 2.0, a, (fneg c) | ||||||
10588 | SDValue A = LHS.getOperand(0); | ||||||
10589 | if (A == LHS.getOperand(1)) { | ||||||
10590 | unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode()); | ||||||
      if (FusedOp != 0) {
10592 | const SDValue Two = DAG.getConstantFP(2.0, SL, VT); | ||||||
10593 | SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); | ||||||
10594 | |||||||
10595 | return DAG.getNode(FusedOp, SL, VT, A, Two, NegRHS); | ||||||
10596 | } | ||||||
10597 | } | ||||||
10598 | } | ||||||
10599 | |||||||
10600 | if (RHS.getOpcode() == ISD::FADD) { | ||||||
10601 | // (fsub c, (fadd a, a)) -> mad -2.0, a, c | ||||||
10602 | |||||||
10603 | SDValue A = RHS.getOperand(0); | ||||||
10604 | if (A == RHS.getOperand(1)) { | ||||||
10605 | unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode()); | ||||||
      if (FusedOp != 0) {
10607 | const SDValue NegTwo = DAG.getConstantFP(-2.0, SL, VT); | ||||||
10608 | return DAG.getNode(FusedOp, SL, VT, A, NegTwo, LHS); | ||||||
10609 | } | ||||||
10610 | } | ||||||
10611 | } | ||||||
10612 | |||||||
10613 | return SDValue(); | ||||||
10614 | } | ||||||
10615 | |||||||
10616 | SDValue SITargetLowering::performFMACombine(SDNode *N, | ||||||
10617 | DAGCombinerInfo &DCI) const { | ||||||
10618 | SelectionDAG &DAG = DCI.DAG; | ||||||
10619 | EVT VT = N->getValueType(0); | ||||||
10620 | SDLoc SL(N); | ||||||
10621 | |||||||
10622 | if (!Subtarget->hasDot7Insts() || VT != MVT::f32) | ||||||
10623 | return SDValue(); | ||||||
10624 | |||||||
  // FMA((F32)S0.x, (F32)S1.x, FMA((F32)S0.y, (F32)S1.y, (F32)z)) ->
  // FDOT2((V2F16)S0, (V2F16)S1, (F32)z)
10627 | SDValue Op1 = N->getOperand(0); | ||||||
10628 | SDValue Op2 = N->getOperand(1); | ||||||
10629 | SDValue FMA = N->getOperand(2); | ||||||
10630 | |||||||
10631 | if (FMA.getOpcode() != ISD::FMA || | ||||||
10632 | Op1.getOpcode() != ISD::FP_EXTEND || | ||||||
10633 | Op2.getOpcode() != ISD::FP_EXTEND) | ||||||
10634 | return SDValue(); | ||||||
10635 | |||||||
10636 | // fdot2_f32_f16 always flushes fp32 denormal operand and output to zero, | ||||||
10637 | // regardless of the denorm mode setting. Therefore, unsafe-fp-math/fp-contract | ||||||
  // is sufficient to allow generating fdot2.
10639 | const TargetOptions &Options = DAG.getTarget().Options; | ||||||
10640 | if (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath || | ||||||
10641 | (N->getFlags().hasAllowContract() && | ||||||
10642 | FMA->getFlags().hasAllowContract())) { | ||||||
10643 | Op1 = Op1.getOperand(0); | ||||||
10644 | Op2 = Op2.getOperand(0); | ||||||
10645 | if (Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT || | ||||||
10646 | Op2.getOpcode() != ISD::EXTRACT_VECTOR_ELT) | ||||||
10647 | return SDValue(); | ||||||
10648 | |||||||
10649 | SDValue Vec1 = Op1.getOperand(0); | ||||||
10650 | SDValue Idx1 = Op1.getOperand(1); | ||||||
10651 | SDValue Vec2 = Op2.getOperand(0); | ||||||
10652 | |||||||
10653 | SDValue FMAOp1 = FMA.getOperand(0); | ||||||
10654 | SDValue FMAOp2 = FMA.getOperand(1); | ||||||
10655 | SDValue FMAAcc = FMA.getOperand(2); | ||||||
10656 | |||||||
10657 | if (FMAOp1.getOpcode() != ISD::FP_EXTEND || | ||||||
10658 | FMAOp2.getOpcode() != ISD::FP_EXTEND) | ||||||
10659 | return SDValue(); | ||||||
10660 | |||||||
10661 | FMAOp1 = FMAOp1.getOperand(0); | ||||||
10662 | FMAOp2 = FMAOp2.getOperand(0); | ||||||
10663 | if (FMAOp1.getOpcode() != ISD::EXTRACT_VECTOR_ELT || | ||||||
10664 | FMAOp2.getOpcode() != ISD::EXTRACT_VECTOR_ELT) | ||||||
10665 | return SDValue(); | ||||||
10666 | |||||||
10667 | SDValue Vec3 = FMAOp1.getOperand(0); | ||||||
10668 | SDValue Vec4 = FMAOp2.getOperand(0); | ||||||
10669 | SDValue Idx2 = FMAOp1.getOperand(1); | ||||||
10670 | |||||||
10671 | if (Idx1 != Op2.getOperand(1) || Idx2 != FMAOp2.getOperand(1) || | ||||||
10672 | // Idx1 and Idx2 cannot be the same. | ||||||
10673 | Idx1 == Idx2) | ||||||
10674 | return SDValue(); | ||||||
10675 | |||||||
10676 | if (Vec1 == Vec2 || Vec3 == Vec4) | ||||||
10677 | return SDValue(); | ||||||
10678 | |||||||
10679 | if (Vec1.getValueType() != MVT::v2f16 || Vec2.getValueType() != MVT::v2f16) | ||||||
10680 | return SDValue(); | ||||||
10681 | |||||||
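    // The two multiplies must together consume both halves of the same pair
    // of v2f16 vectors (in either order) to form a dot product.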
10682 | if ((Vec1 == Vec3 && Vec2 == Vec4) || | ||||||
10683 | (Vec1 == Vec4 && Vec2 == Vec3)) { | ||||||
10684 | return DAG.getNode(AMDGPUISD::FDOT2, SL, MVT::f32, Vec1, Vec2, FMAAcc, | ||||||
10685 | DAG.getTargetConstant(0, SL, MVT::i1)); | ||||||
10686 | } | ||||||
10687 | } | ||||||
10688 | return SDValue(); | ||||||
10689 | } | ||||||
10690 | |||||||
10691 | SDValue SITargetLowering::performSetCCCombine(SDNode *N, | ||||||
10692 | DAGCombinerInfo &DCI) const { | ||||||
10693 | SelectionDAG &DAG = DCI.DAG; | ||||||
10694 | SDLoc SL(N); | ||||||
10695 | |||||||
10696 | SDValue LHS = N->getOperand(0); | ||||||
10697 | SDValue RHS = N->getOperand(1); | ||||||
10698 | EVT VT = LHS.getValueType(); | ||||||
10699 | ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get(); | ||||||
10700 | |||||||
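  // Canonicalize a constant operand to the RHS, swapping the condition code
  // accordingly, so the combines below only need to look at one side.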
10701 | auto CRHS = dyn_cast<ConstantSDNode>(RHS); | ||||||
10702 | if (!CRHS) { | ||||||
10703 | CRHS = dyn_cast<ConstantSDNode>(LHS); | ||||||
10704 | if (CRHS) { | ||||||
10705 | std::swap(LHS, RHS); | ||||||
10706 | CC = getSetCCSwappedOperands(CC); | ||||||
10707 | } | ||||||
10708 | } | ||||||
10709 | |||||||
10710 | if (CRHS) { | ||||||
10711 | if (VT == MVT::i32 && LHS.getOpcode() == ISD::SIGN_EXTEND && | ||||||
10712 | isBoolSGPR(LHS.getOperand(0))) { | ||||||
10713 | // setcc (sext from i1 cc), -1, ne|sgt|ult) => not cc => xor cc, -1 | ||||||
10714 | // setcc (sext from i1 cc), -1, eq|sle|uge) => cc | ||||||
10715 | // setcc (sext from i1 cc), 0, eq|sge|ule) => not cc => xor cc, -1 | ||||||
10716 | // setcc (sext from i1 cc), 0, ne|ugt|slt) => cc | ||||||
10717 | if ((CRHS->isAllOnesValue() && | ||||||
10718 | (CC == ISD::SETNE || CC == ISD::SETGT || CC == ISD::SETULT)) || | ||||||
10719 | (CRHS->isNullValue() && | ||||||
10720 | (CC == ISD::SETEQ || CC == ISD::SETGE || CC == ISD::SETULE))) | ||||||
10721 | return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0), | ||||||
10722 | DAG.getConstant(-1, SL, MVT::i1)); | ||||||
10723 | if ((CRHS->isAllOnesValue() && | ||||||
10724 | (CC == ISD::SETEQ || CC == ISD::SETLE || CC == ISD::SETUGE)) || | ||||||
10725 | (CRHS->isNullValue() && | ||||||
10726 | (CC == ISD::SETNE || CC == ISD::SETUGT || CC == ISD::SETLT))) | ||||||
10727 | return LHS.getOperand(0); | ||||||
10728 | } | ||||||
10729 | |||||||
10730 | uint64_t CRHSVal = CRHS->getZExtValue(); | ||||||
10731 | if ((CC == ISD::SETEQ || CC == ISD::SETNE) && | ||||||
10732 | LHS.getOpcode() == ISD::SELECT && | ||||||
10733 | isa<ConstantSDNode>(LHS.getOperand(1)) && | ||||||
10734 | isa<ConstantSDNode>(LHS.getOperand(2)) && | ||||||
10735 | LHS.getConstantOperandVal(1) != LHS.getConstantOperandVal(2) && | ||||||
10736 | isBoolSGPR(LHS.getOperand(0))) { | ||||||
10737 | // Given CT != FT: | ||||||
10738 | // setcc (select cc, CT, CF), CF, eq => xor cc, -1 | ||||||
10739 | // setcc (select cc, CT, CF), CF, ne => cc | ||||||
10740 | // setcc (select cc, CT, CF), CT, ne => xor cc, -1 | ||||||
10741 | // setcc (select cc, CT, CF), CT, eq => cc | ||||||
10742 | uint64_t CT = LHS.getConstantOperandVal(1); | ||||||
10743 | uint64_t CF = LHS.getConstantOperandVal(2); | ||||||
10744 | |||||||
10745 | if ((CF == CRHSVal && CC == ISD::SETEQ) || | ||||||
10746 | (CT == CRHSVal && CC == ISD::SETNE)) | ||||||
10747 | return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0), | ||||||
10748 | DAG.getConstant(-1, SL, MVT::i1)); | ||||||
10749 | if ((CF == CRHSVal && CC == ISD::SETNE) || | ||||||
10750 | (CT == CRHSVal && CC == ISD::SETEQ)) | ||||||
10751 | return LHS.getOperand(0); | ||||||
10752 | } | ||||||
10753 | } | ||||||
10754 | |||||||
10755 | if (VT != MVT::f32 && VT != MVT::f64 && (Subtarget->has16BitInsts() && | ||||||
10756 | VT != MVT::f16)) | ||||||
10757 | return SDValue(); | ||||||
10758 | |||||||
10759 | // Match isinf/isfinite pattern | ||||||
10760 | // (fcmp oeq (fabs x), inf) -> (fp_class x, (p_infinity | n_infinity)) | ||||||
10761 | // (fcmp one (fabs x), inf) -> (fp_class x, | ||||||
10762 | // (p_normal | n_normal | p_subnormal | n_subnormal | p_zero | n_zero) | ||||||
10763 | if ((CC == ISD::SETOEQ || CC == ISD::SETONE) && LHS.getOpcode() == ISD::FABS) { | ||||||
10764 | const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS); | ||||||
10765 | if (!CRHS) | ||||||
10766 | return SDValue(); | ||||||
10767 | |||||||
10768 | const APFloat &APF = CRHS->getValueAPF(); | ||||||
10769 | if (APF.isInfinity() && !APF.isNegative()) { | ||||||
10770 | const unsigned IsInfMask = SIInstrFlags::P_INFINITY | | ||||||
10771 | SIInstrFlags::N_INFINITY; | ||||||
10772 | const unsigned IsFiniteMask = SIInstrFlags::N_ZERO | | ||||||
10773 | SIInstrFlags::P_ZERO | | ||||||
10774 | SIInstrFlags::N_NORMAL | | ||||||
10775 | SIInstrFlags::P_NORMAL | | ||||||
10776 | SIInstrFlags::N_SUBNORMAL | | ||||||
10777 | SIInstrFlags::P_SUBNORMAL; | ||||||
10778 | unsigned Mask = CC == ISD::SETOEQ ? IsInfMask : IsFiniteMask; | ||||||
10779 | return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1, LHS.getOperand(0), | ||||||
10780 | DAG.getConstant(Mask, SL, MVT::i32)); | ||||||
10781 | } | ||||||
10782 | } | ||||||
10783 | |||||||
10784 | return SDValue(); | ||||||
10785 | } | ||||||
10786 | |||||||
10787 | SDValue SITargetLowering::performCvtF32UByteNCombine(SDNode *N, | ||||||
10788 | DAGCombinerInfo &DCI) const { | ||||||
10789 | SelectionDAG &DAG = DCI.DAG; | ||||||
10790 | SDLoc SL(N); | ||||||
10791 | unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0; | ||||||
10792 | |||||||
10793 | SDValue Src = N->getOperand(0); | ||||||
10794 | SDValue Shift = N->getOperand(0); | ||||||
10795 | |||||||
10796 | // TODO: Extend type shouldn't matter (assuming legal types). | ||||||
10797 | if (Shift.getOpcode() == ISD::ZERO_EXTEND) | ||||||
10798 | Shift = Shift.getOperand(0); | ||||||
10799 | |||||||
10800 | if (Shift.getOpcode() == ISD::SRL || Shift.getOpcode() == ISD::SHL) { | ||||||
10801 | // cvt_f32_ubyte1 (shl x, 8) -> cvt_f32_ubyte0 x | ||||||
10802 | // cvt_f32_ubyte3 (shl x, 16) -> cvt_f32_ubyte1 x | ||||||
10803 | // cvt_f32_ubyte0 (srl x, 16) -> cvt_f32_ubyte2 x | ||||||
10804 | // cvt_f32_ubyte1 (srl x, 16) -> cvt_f32_ubyte3 x | ||||||
10805 | // cvt_f32_ubyte0 (srl x, 8) -> cvt_f32_ubyte1 x | ||||||
10806 | if (auto *C = dyn_cast<ConstantSDNode>(Shift.getOperand(1))) { | ||||||
10807 | Shift = DAG.getZExtOrTrunc(Shift.getOperand(0), | ||||||
10808 | SDLoc(Shift.getOperand(0)), MVT::i32); | ||||||
10809 | |||||||
10810 | unsigned ShiftOffset = 8 * Offset; | ||||||
10811 | if (Shift.getOpcode() == ISD::SHL) | ||||||
10812 | ShiftOffset -= C->getZExtValue(); | ||||||
10813 | else | ||||||
10814 | ShiftOffset += C->getZExtValue(); | ||||||
10815 | |||||||
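      // For example, cvt_f32_ubyte0 (srl x, 8) has Offset 0 and adds the
      // shift amount of 8, re-selecting to cvt_f32_ubyte1 x.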
10816 | if (ShiftOffset < 32 && (ShiftOffset % 8) == 0) { | ||||||
10817 | return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0 + ShiftOffset / 8, SL, | ||||||
10818 | MVT::f32, Shift); | ||||||
10819 | } | ||||||
10820 | } | ||||||
10821 | } | ||||||
10822 | |||||||
10823 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | ||||||
10824 | APInt DemandedBits = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8); | ||||||
10825 | if (TLI.SimplifyDemandedBits(Src, DemandedBits, DCI)) { | ||||||
10826 | // We simplified Src. If this node is not dead, visit it again so it is | ||||||
10827 | // folded properly. | ||||||
10828 | if (N->getOpcode() != ISD::DELETED_NODE) | ||||||
10829 | DCI.AddToWorklist(N); | ||||||
10830 | return SDValue(N, 0); | ||||||
10831 | } | ||||||
10832 | |||||||
10833 | // Handle (or x, (srl y, 8)) pattern when known bits are zero. | ||||||
10834 | if (SDValue DemandedSrc = | ||||||
10835 | TLI.SimplifyMultipleUseDemandedBits(Src, DemandedBits, DAG)) | ||||||
10836 | return DAG.getNode(N->getOpcode(), SL, MVT::f32, DemandedSrc); | ||||||
10837 | |||||||
10838 | return SDValue(); | ||||||
10839 | } | ||||||
10840 | |||||||
10841 | SDValue SITargetLowering::performClampCombine(SDNode *N, | ||||||
10842 | DAGCombinerInfo &DCI) const { | ||||||
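  // Constant fold the clamp: a source below 0.0 (or a NaN when DX10Clamp is
  // set) folds to 0.0, a source above 1.0 folds to 1.0, and an in-range
  // constant is returned unchanged.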
10843 | ConstantFPSDNode *CSrc = dyn_cast<ConstantFPSDNode>(N->getOperand(0)); | ||||||
10844 | if (!CSrc) | ||||||
10845 | return SDValue(); | ||||||
10846 | |||||||
10847 | const MachineFunction &MF = DCI.DAG.getMachineFunction(); | ||||||
10848 | const APFloat &F = CSrc->getValueAPF(); | ||||||
10849 | APFloat Zero = APFloat::getZero(F.getSemantics()); | ||||||
10850 | if (F < Zero || | ||||||
10851 | (F.isNaN() && MF.getInfo<SIMachineFunctionInfo>()->getMode().DX10Clamp)) { | ||||||
10852 | return DCI.DAG.getConstantFP(Zero, SDLoc(N), N->getValueType(0)); | ||||||
10853 | } | ||||||
10854 | |||||||
10855 | APFloat One(F.getSemantics(), "1.0"); | ||||||
10856 | if (F > One) | ||||||
10857 | return DCI.DAG.getConstantFP(One, SDLoc(N), N->getValueType(0)); | ||||||
10858 | |||||||
10859 | return SDValue(CSrc, 0); | ||||||
10860 | } | ||||||
10861 | |||||||
10862 | |||||||
10863 | SDValue SITargetLowering::PerformDAGCombine(SDNode *N, | ||||||
10864 | DAGCombinerInfo &DCI) const { | ||||||
  if (getTargetMachine().getOptLevel() == CodeGenOpt::None)
    return SDValue();

10867 | switch (N->getOpcode()) { | ||||||
10868 | case ISD::ADD: | ||||||
10869 | return performAddCombine(N, DCI); | ||||||
10870 | case ISD::SUB: | ||||||
10871 | return performSubCombine(N, DCI); | ||||||
10872 | case ISD::ADDCARRY: | ||||||
10873 | case ISD::SUBCARRY: | ||||||
10874 | return performAddCarrySubCarryCombine(N, DCI); | ||||||
10875 | case ISD::FADD: | ||||||
10876 | return performFAddCombine(N, DCI); | ||||||
10877 | case ISD::FSUB: | ||||||
10878 | return performFSubCombine(N, DCI); | ||||||
10879 | case ISD::SETCC: | ||||||
10880 | return performSetCCCombine(N, DCI); | ||||||
10881 | case ISD::FMAXNUM: | ||||||
10882 | case ISD::FMINNUM: | ||||||
10883 | case ISD::FMAXNUM_IEEE: | ||||||
10884 | case ISD::FMINNUM_IEEE: | ||||||
10885 | case ISD::SMAX: | ||||||
10886 | case ISD::SMIN: | ||||||
10887 | case ISD::UMAX: | ||||||
10888 | case ISD::UMIN: | ||||||
10889 | case AMDGPUISD::FMIN_LEGACY: | ||||||
10890 | case AMDGPUISD::FMAX_LEGACY: | ||||||
10891 | return performMinMaxCombine(N, DCI); | ||||||
10892 | case ISD::FMA: | ||||||
10893 | return performFMACombine(N, DCI); | ||||||
10894 | case ISD::AND: | ||||||
10895 | return performAndCombine(N, DCI); | ||||||
10896 | case ISD::OR: | ||||||
10897 | return performOrCombine(N, DCI); | ||||||
10898 | case ISD::XOR: | ||||||
10899 | return performXorCombine(N, DCI); | ||||||
10900 | case ISD::ZERO_EXTEND: | ||||||
10901 | return performZeroExtendCombine(N, DCI); | ||||||
10902 | case ISD::SIGN_EXTEND_INREG: | ||||||
    return performSignExtendInRegCombine(N, DCI);
10904 | case AMDGPUISD::FP_CLASS: | ||||||
10905 | return performClassCombine(N, DCI); | ||||||
10906 | case ISD::FCANONICALIZE: | ||||||
10907 | return performFCanonicalizeCombine(N, DCI); | ||||||
10908 | case AMDGPUISD::RCP: | ||||||
10909 | return performRcpCombine(N, DCI); | ||||||
10910 | case AMDGPUISD::FRACT: | ||||||
10911 | case AMDGPUISD::RSQ: | ||||||
10912 | case AMDGPUISD::RCP_LEGACY: | ||||||
10913 | case AMDGPUISD::RCP_IFLAG: | ||||||
10914 | case AMDGPUISD::RSQ_CLAMP: | ||||||
10915 | case AMDGPUISD::LDEXP: { | ||||||
10916 | // FIXME: This is probably wrong. If src is an sNaN, it won't be quieted | ||||||
10917 | SDValue Src = N->getOperand(0); | ||||||
10918 | if (Src.isUndef()) | ||||||
10919 | return Src; | ||||||
10920 | break; | ||||||
10921 | } | ||||||
10922 | case ISD::SINT_TO_FP: | ||||||
10923 | case ISD::UINT_TO_FP: | ||||||
10924 | return performUCharToFloatCombine(N, DCI); | ||||||
10925 | case AMDGPUISD::CVT_F32_UBYTE0: | ||||||
10926 | case AMDGPUISD::CVT_F32_UBYTE1: | ||||||
10927 | case AMDGPUISD::CVT_F32_UBYTE2: | ||||||
10928 | case AMDGPUISD::CVT_F32_UBYTE3: | ||||||
10929 | return performCvtF32UByteNCombine(N, DCI); | ||||||
10930 | case AMDGPUISD::FMED3: | ||||||
10931 | return performFMed3Combine(N, DCI); | ||||||
10932 | case AMDGPUISD::CVT_PKRTZ_F16_F32: | ||||||
10933 | return performCvtPkRTZCombine(N, DCI); | ||||||
10934 | case AMDGPUISD::CLAMP: | ||||||
10935 | return performClampCombine(N, DCI); | ||||||
10936 | case ISD::SCALAR_TO_VECTOR: { | ||||||
10937 | SelectionDAG &DAG = DCI.DAG; | ||||||
10938 | EVT VT = N->getValueType(0); | ||||||
10939 | |||||||
10940 | // v2i16 (scalar_to_vector i16:x) -> v2i16 (bitcast (any_extend i16:x)) | ||||||
10941 | if (VT == MVT::v2i16 || VT == MVT::v2f16) { | ||||||
10942 | SDLoc SL(N); | ||||||
10943 | SDValue Src = N->getOperand(0); | ||||||
10944 | EVT EltVT = Src.getValueType(); | ||||||
10945 | if (EltVT == MVT::f16) | ||||||
10946 | Src = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Src); | ||||||
10947 | |||||||
10948 | SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Src); | ||||||
10949 | return DAG.getNode(ISD::BITCAST, SL, VT, Ext); | ||||||
10950 | } | ||||||
10951 | |||||||
10952 | break; | ||||||
10953 | } | ||||||
10954 | case ISD::EXTRACT_VECTOR_ELT: | ||||||
10955 | return performExtractVectorEltCombine(N, DCI); | ||||||
10956 | case ISD::INSERT_VECTOR_ELT: | ||||||
10957 | return performInsertVectorEltCombine(N, DCI); | ||||||
10958 | case ISD::LOAD: { | ||||||
    if (SDValue Widened = widenLoad(cast<LoadSDNode>(N), DCI))
      return Widened;
    LLVM_FALLTHROUGH;
10962 | } | ||||||
10963 | default: { | ||||||
10964 | if (!DCI.isBeforeLegalize()) { | ||||||
10965 | if (MemSDNode *MemNode = dyn_cast<MemSDNode>(N)) | ||||||
10966 | return performMemSDNodeCombine(MemNode, DCI); | ||||||
10967 | } | ||||||
10968 | |||||||
10969 | break; | ||||||
10970 | } | ||||||
10971 | } | ||||||
10972 | |||||||
10973 | return AMDGPUTargetLowering::PerformDAGCombine(N, DCI); | ||||||
10974 | } | ||||||
10975 | |||||||
10976 | /// Helper function for adjustWritemask | ||||||
10977 | static unsigned SubIdx2Lane(unsigned Idx) { | ||||||
10978 | switch (Idx) { | ||||||
10979 | default: return ~0u; | ||||||
10980 | case AMDGPU::sub0: return 0; | ||||||
10981 | case AMDGPU::sub1: return 1; | ||||||
10982 | case AMDGPU::sub2: return 2; | ||||||
10983 | case AMDGPU::sub3: return 3; | ||||||
10984 | case AMDGPU::sub4: return 4; // Possible with TFE/LWE | ||||||
10985 | } | ||||||
10986 | } | ||||||
10987 | |||||||
10988 | /// Adjust the writemask of MIMG instructions | ||||||
10989 | SDNode *SITargetLowering::adjustWritemask(MachineSDNode *&Node, | ||||||
10990 | SelectionDAG &DAG) const { | ||||||
10991 | unsigned Opcode = Node->getMachineOpcode(); | ||||||
10992 | |||||||
10993 | // Subtract 1 because the vdata output is not a MachineSDNode operand. | ||||||
10994 | int D16Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::d16) - 1; | ||||||
10995 | if (D16Idx >= 0 && Node->getConstantOperandVal(D16Idx)) | ||||||
10996 | return Node; // not implemented for D16 | ||||||
10997 | |||||||
10998 | SDNode *Users[5] = { nullptr }; | ||||||
10999 | unsigned Lane = 0; | ||||||
11000 | unsigned DmaskIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::dmask) - 1; | ||||||
11001 | unsigned OldDmask = Node->getConstantOperandVal(DmaskIdx); | ||||||
11002 | unsigned NewDmask = 0; | ||||||
11003 | unsigned TFEIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::tfe) - 1; | ||||||
11004 | unsigned LWEIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::lwe) - 1; | ||||||
11005 | bool UsesTFC = ((int(TFEIdx) >= 0 && Node->getConstantOperandVal(TFEIdx)) || | ||||||
11006 | Node->getConstantOperandVal(LWEIdx)) ? 1 : 0; | ||||||
11007 | unsigned TFCLane = 0; | ||||||
11008 | bool HasChain = Node->getNumValues() > 1; | ||||||
11009 | |||||||
11010 | if (OldDmask == 0) { | ||||||
11011 | // These are folded out, but on the off chance it happens, don't assert. | ||||||
11012 | return Node; | ||||||
11013 | } | ||||||
11014 | |||||||
11015 | unsigned OldBitsSet = countPopulation(OldDmask); | ||||||
11016 | // Work out which is the TFE/LWE lane if that is enabled. | ||||||
11017 | if (UsesTFC) { | ||||||
11018 | TFCLane = OldBitsSet; | ||||||
11019 | } | ||||||
11020 | |||||||
11021 | // Try to figure out the used register components | ||||||
11022 | for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end(); | ||||||
11023 | I != E; ++I) { | ||||||
11024 | |||||||
11025 | // Don't look at users of the chain. | ||||||
11026 | if (I.getUse().getResNo() != 0) | ||||||
11027 | continue; | ||||||
11028 | |||||||
11029 | // Abort if we can't understand the usage | ||||||
11030 | if (!I->isMachineOpcode() || | ||||||
11031 | I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG) | ||||||
11032 | return Node; | ||||||
11033 | |||||||
11034 | // Lane means which subreg of %vgpra_vgprb_vgprc_vgprd is used. | ||||||
11035 | // Note that subregs are packed, i.e. Lane==0 is the first bit set | ||||||
11036 | // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit | ||||||
11037 | // set, etc. | ||||||
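// A worked example (illustrative only): with OldDmask = 0b1010 (Y and W
// enabled), Lane == 0 selects component 1 (Y) and Lane == 1 selects
// component 3 (W), matching the countTrailingZeros loop below:
//   Lane 0: Dmask = 0b1010 -> Comp = countTrailingZeros(0b1010) = 1
//   Lane 1: Dmask = 0b1000 -> Comp = countTrailingZeros(0b1000) = 3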
11038 | Lane = SubIdx2Lane(I->getConstantOperandVal(1)); | ||||||
11039 | if (Lane == ~0u) | ||||||
11040 | return Node; | ||||||
11041 | |||||||
11042 | // Check if the use is for the TFE/LWE generated result at VGPRn+1. | ||||||
11043 | if (UsesTFC && Lane == TFCLane) { | ||||||
11044 | Users[Lane] = *I; | ||||||
11045 | } else { | ||||||
11046 | // Set which texture component corresponds to the lane. | ||||||
11047 | unsigned Comp; | ||||||
11048 | for (unsigned i = 0, Dmask = OldDmask; (i <= Lane) && (Dmask != 0); i++) { | ||||||
11049 | Comp = countTrailingZeros(Dmask); | ||||||
11050 | Dmask &= ~(1 << Comp); | ||||||
11051 | } | ||||||
11052 | |||||||
11053 | // Abort if we have more than one user per component. | ||||||
11054 | if (Users[Lane]) | ||||||
11055 | return Node; | ||||||
11056 | |||||||
11057 | Users[Lane] = *I; | ||||||
11058 | NewDmask |= 1 << Comp; | ||||||
11059 | } | ||||||
11060 | } | ||||||
11061 | |||||||
11062 | // Don't allow 0 dmask, as hardware assumes one channel enabled. | ||||||
11063 | bool NoChannels = !NewDmask; | ||||||
11064 | if (NoChannels) { | ||||||
11065 | if (!UsesTFC) { | ||||||
11066 | // No uses of the result and not using TFC. Then do nothing. | ||||||
11067 | return Node; | ||||||
11068 | } | ||||||
11069 | // If the original dmask has one channel - then nothing to do | ||||||
11070 | if (OldBitsSet == 1) | ||||||
11071 | return Node; | ||||||
11072 | // Use an arbitrary dmask - required for the instruction to work | ||||||
11073 | NewDmask = 1; | ||||||
11074 | } | ||||||
11075 | // Abort if there's no change | ||||||
11076 | if (NewDmask == OldDmask) | ||||||
11077 | return Node; | ||||||
11078 | |||||||
11079 | unsigned BitsSet = countPopulation(NewDmask); | ||||||
11080 | |||||||
11081 | // Check for TFE or LWE - increase the number of channels by one to account | ||||||
11082 | // for the extra return value | ||||||
11083 | // This will need adjustment for D16 if this is also included in | ||||||
11084 | // adjustWriteMask (this function) but at present D16 is excluded. | ||||||
11085 | unsigned NewChannels = BitsSet + UsesTFC; | ||||||
11086 | |||||||
11087 | int NewOpcode = | ||||||
11088 | AMDGPU::getMaskedMIMGOp(Node->getMachineOpcode(), NewChannels); | ||||||
11089 | assert(NewOpcode != -1 && | ||||||
11090 | NewOpcode != static_cast<int>(Node->getMachineOpcode()) && | ||||||
11091 | "failed to find equivalent MIMG op"); | ||||||
11092 | |||||||
11093 | // Adjust the writemask in the node | ||||||
11094 | SmallVector<SDValue, 12> Ops; | ||||||
11095 | Ops.insert(Ops.end(), Node->op_begin(), Node->op_begin() + DmaskIdx); | ||||||
11096 | Ops.push_back(DAG.getTargetConstant(NewDmask, SDLoc(Node), MVT::i32)); | ||||||
11097 | Ops.insert(Ops.end(), Node->op_begin() + DmaskIdx + 1, Node->op_end()); | ||||||
11098 | |||||||
11099 | MVT SVT = Node->getValueType(0).getVectorElementType().getSimpleVT(); | ||||||
11100 | |||||||
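// There is no 3- or 5-element result register tuple here, so those channel
// counts are rounded up to 4 and 8 elements respectively. For example, two
// dmask bits plus TFE gives NewChannels == 3 and a v4 result.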
11101 | MVT ResultVT = NewChannels == 1 ? | ||||||
11102 | SVT : MVT::getVectorVT(SVT, NewChannels == 3 ? 4 : | ||||||
11103 | NewChannels == 5 ? 8 : NewChannels); | ||||||
11104 | SDVTList NewVTList = HasChain ? | ||||||
11105 | DAG.getVTList(ResultVT, MVT::Other) : DAG.getVTList(ResultVT); | ||||||
11106 | |||||||
11107 | |||||||
11108 | MachineSDNode *NewNode = DAG.getMachineNode(NewOpcode, SDLoc(Node), | ||||||
11109 | NewVTList, Ops); | ||||||
11110 | |||||||
11111 | if (HasChain) { | ||||||
11112 | // Update chain. | ||||||
11113 | DAG.setNodeMemRefs(NewNode, Node->memoperands()); | ||||||
11114 | DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), SDValue(NewNode, 1)); | ||||||
11115 | } | ||||||
11116 | |||||||
11117 | if (NewChannels == 1) { | ||||||
11118 | assert(Node->hasNUsesOfValue(1, 0)); | ||||||
11119 | SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY, | ||||||
11120 | SDLoc(Node), Users[Lane]->getValueType(0), | ||||||
11121 | SDValue(NewNode, 0)); | ||||||
11122 | DAG.ReplaceAllUsesWith(Users[Lane], Copy); | ||||||
11123 | return nullptr; | ||||||
11124 | } | ||||||
11125 | |||||||
11126 | // Update the users of the node with the new indices | ||||||
11127 | for (unsigned i = 0, Idx = AMDGPU::sub0; i < 5; ++i) { | ||||||
11128 | SDNode *User = Users[i]; | ||||||
11129 | if (!User) { | ||||||
11130 | // Handle the special case of NoChannels. We set NewDmask to 1 above, but | ||||||
11131 | // Users[0] is still nullptr because channel 0 doesn't really have a use. | ||||||
11132 | if (i || !NoChannels) | ||||||
11133 | continue; | ||||||
11134 | } else { | ||||||
11135 | SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32); | ||||||
11136 | DAG.UpdateNodeOperands(User, SDValue(NewNode, 0), Op); | ||||||
11137 | } | ||||||
11138 | |||||||
11139 | switch (Idx) { | ||||||
11140 | default: break; | ||||||
11141 | case AMDGPU::sub0: Idx = AMDGPU::sub1; break; | ||||||
11142 | case AMDGPU::sub1: Idx = AMDGPU::sub2; break; | ||||||
11143 | case AMDGPU::sub2: Idx = AMDGPU::sub3; break; | ||||||
11144 | case AMDGPU::sub3: Idx = AMDGPU::sub4; break; | ||||||
11145 | } | ||||||
11146 | } | ||||||
11147 | |||||||
11148 | DAG.RemoveDeadNode(Node); | ||||||
11149 | return nullptr; | ||||||
11150 | } | ||||||
11151 | |||||||
11152 | static bool isFrameIndexOp(SDValue Op) { | ||||||
11153 | if (Op.getOpcode() == ISD::AssertZext) | ||||||
11154 | Op = Op.getOperand(0); | ||||||
11155 | |||||||
11156 | return isa<FrameIndexSDNode>(Op); | ||||||
11157 | } | ||||||
11158 | |||||||
11159 | /// Legalize target independent instructions (e.g. INSERT_SUBREG) | ||||||
11160 | /// with frame index operands. | ||||||
11161 | /// LLVM assumes that inputs to these instructions are registers. | ||||||
11162 | SDNode *SITargetLowering::legalizeTargetIndependentNode(SDNode *Node, | ||||||
11163 | SelectionDAG &DAG) const { | ||||||
11164 | if (Node->getOpcode() == ISD::CopyToReg) { | ||||||
11165 | RegisterSDNode *DestReg = cast<RegisterSDNode>(Node->getOperand(1)); | ||||||
11166 | SDValue SrcVal = Node->getOperand(2); | ||||||
11167 | |||||||
11168 | // Insert a copy to a VReg_1 virtual register so LowerI1Copies doesn't have | ||||||
11169 | // to try understanding copies to physical registers. | ||||||
11170 | if (SrcVal.getValueType() == MVT::i1 && DestReg->getReg().isPhysical()) { | ||||||
11171 | SDLoc SL(Node); | ||||||
11172 | MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); | ||||||
11173 | SDValue VReg = DAG.getRegister( | ||||||
11174 | MRI.createVirtualRegister(&AMDGPU::VReg_1RegClass), MVT::i1); | ||||||
11175 | |||||||
11176 | SDNode *Glued = Node->getGluedNode(); | ||||||
11177 | SDValue ToVReg | ||||||
11178 | = DAG.getCopyToReg(Node->getOperand(0), SL, VReg, SrcVal, | ||||||
11179 | SDValue(Glued, Glued ? Glued->getNumValues() - 1 : 0)); | ||||||
11180 | SDValue ToResultReg | ||||||
11181 | = DAG.getCopyToReg(ToVReg, SL, SDValue(DestReg, 0), | ||||||
11182 | VReg, ToVReg.getValue(1)); | ||||||
11183 | DAG.ReplaceAllUsesWith(Node, ToResultReg.getNode()); | ||||||
11184 | DAG.RemoveDeadNode(Node); | ||||||
11185 | return ToResultReg.getNode(); | ||||||
11186 | } | ||||||
11187 | } | ||||||
11188 | |||||||
11189 | SmallVector<SDValue, 8> Ops; | ||||||
11190 | for (unsigned i = 0; i < Node->getNumOperands(); ++i) { | ||||||
11191 | if (!isFrameIndexOp(Node->getOperand(i))) { | ||||||
11192 | Ops.push_back(Node->getOperand(i)); | ||||||
11193 | continue; | ||||||
11194 | } | ||||||
11195 | |||||||
11196 | SDLoc DL(Node); | ||||||
11197 | Ops.push_back(SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, | ||||||
11198 | Node->getOperand(i).getValueType(), | ||||||
11199 | Node->getOperand(i)), 0)); | ||||||
11200 | } | ||||||
11201 | |||||||
11202 | return DAG.UpdateNodeOperands(Node, Ops); | ||||||
11203 | } | ||||||
11204 | |||||||
11205 | /// Fold the instructions after selecting them. | ||||||
11206 | /// Returns null if users were already updated. | ||||||
11207 | SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node, | ||||||
11208 | SelectionDAG &DAG) const { | ||||||
11209 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); | ||||||
11210 | unsigned Opcode = Node->getMachineOpcode(); | ||||||
11211 | |||||||
11212 | if (TII->isMIMG(Opcode) && !TII->get(Opcode).mayStore() && | ||||||
11213 | !TII->isGather4(Opcode) && | ||||||
11214 | AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::dmask) != -1) { | ||||||
11215 | return adjustWritemask(Node, DAG); | ||||||
11216 | } | ||||||
11217 | |||||||
11218 | if (Opcode == AMDGPU::INSERT_SUBREG || | ||||||
11219 | Opcode == AMDGPU::REG_SEQUENCE) { | ||||||
11220 | legalizeTargetIndependentNode(Node, DAG); | ||||||
11221 | return Node; | ||||||
11222 | } | ||||||
11223 | |||||||
11224 | switch (Opcode) { | ||||||
11225 | case AMDGPU::V_DIV_SCALE_F32_e64: | ||||||
11226 | case AMDGPU::V_DIV_SCALE_F64_e64: { | ||||||
11227 | // Satisfy the operand register constraint when one of the inputs is | ||||||
11228 | // undefined. Ordinarily each undef value will have its own implicit_def of | ||||||
11229 | // a vreg, so force these to use a single register. | ||||||
11230 | SDValue Src0 = Node->getOperand(1); | ||||||
11231 | SDValue Src1 = Node->getOperand(3); | ||||||
11232 | SDValue Src2 = Node->getOperand(5); | ||||||
11233 | |||||||
11234 | if ((Src0.isMachineOpcode() && | ||||||
11235 | Src0.getMachineOpcode() != AMDGPU::IMPLICIT_DEF) && | ||||||
11236 | (Src0 == Src1 || Src0 == Src2)) | ||||||
11237 | break; | ||||||
11238 | |||||||
11239 | MVT VT = Src0.getValueType().getSimpleVT(); | ||||||
11240 | const TargetRegisterClass *RC = | ||||||
11241 | getRegClassFor(VT, Src0.getNode()->isDivergent()); | ||||||
11242 | |||||||
11243 | MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); | ||||||
11244 | SDValue UndefReg = DAG.getRegister(MRI.createVirtualRegister(RC), VT); | ||||||
11245 | |||||||
11246 | SDValue ImpDef = DAG.getCopyToReg(DAG.getEntryNode(), SDLoc(Node), | ||||||
11247 | UndefReg, Src0, SDValue()); | ||||||
11248 | |||||||
11249 | // src0 must be the same register as src1 or src2, even if the value is | ||||||
11250 | // undefined, so make sure we don't violate this constraint. | ||||||
11251 | if (Src0.isMachineOpcode() && | ||||||
11252 | Src0.getMachineOpcode() == AMDGPU::IMPLICIT_DEF) { | ||||||
11253 | if (Src1.isMachineOpcode() && | ||||||
11254 | Src1.getMachineOpcode() != AMDGPU::IMPLICIT_DEF) | ||||||
11255 | Src0 = Src1; | ||||||
11256 | else if (Src2.isMachineOpcode() && | ||||||
11257 | Src2.getMachineOpcode() != AMDGPU::IMPLICIT_DEF) | ||||||
11258 | Src0 = Src2; | ||||||
11259 | else { | ||||||
11260 | assert(Src1.getMachineOpcode() == AMDGPU::IMPLICIT_DEF); | ||||||
11261 | Src0 = UndefReg; | ||||||
11262 | Src1 = UndefReg; | ||||||
11263 | } | ||||||
11264 | } else | ||||||
11265 | break; | ||||||
11266 | |||||||
11267 | SmallVector<SDValue, 9> Ops(Node->op_begin(), Node->op_end()); | ||||||
11268 | Ops[1] = Src0; | ||||||
11269 | Ops[3] = Src1; | ||||||
11270 | Ops[5] = Src2; | ||||||
11271 | Ops.push_back(ImpDef.getValue(1)); | ||||||
11272 | return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops); | ||||||
11273 | } | ||||||
11274 | default: | ||||||
11275 | break; | ||||||
11276 | } | ||||||
11277 | |||||||
11278 | return Node; | ||||||
11279 | } | ||||||
11280 | |||||||
11281 | // Any MIMG instructions that use tfe or lwe require an initialization of the | ||||||
11282 | // result register that will be written in the case of a memory access failure. | ||||||
11283 | // The required code is also added to tie this init code to the result of the | ||||||
11284 | // img instruction. | ||||||
11285 | void SITargetLowering::AddIMGInit(MachineInstr &MI) const { | ||||||
11286 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); | ||||||
11287 | const SIRegisterInfo &TRI = TII->getRegisterInfo(); | ||||||
11288 | MachineRegisterInfo &MRI = MI.getMF()->getRegInfo(); | ||||||
11289 | MachineBasicBlock &MBB = *MI.getParent(); | ||||||
11290 | |||||||
11291 | MachineOperand *TFE = TII->getNamedOperand(MI, AMDGPU::OpName::tfe); | ||||||
11292 | MachineOperand *LWE = TII->getNamedOperand(MI, AMDGPU::OpName::lwe); | ||||||
11293 | MachineOperand *D16 = TII->getNamedOperand(MI, AMDGPU::OpName::d16); | ||||||
11294 | |||||||
11295 | if (!TFE && !LWE) // intersect_ray | ||||||
11296 | return; | ||||||
11297 | |||||||
11298 | unsigned TFEVal = TFE ? TFE->getImm() : 0; | ||||||
11299 | unsigned LWEVal = LWE ? LWE->getImm() : 0; // Guard against a null LWE when only TFE is present. | ||||||
11300 | unsigned D16Val = D16 ? D16->getImm() : 0; | ||||||
11301 | |||||||
11302 | if (!TFEVal && !LWEVal) | ||||||
11303 | return; | ||||||
11304 | |||||||
11305 | // At least one of TFE or LWE is non-zero | ||||||
11306 | // We have to insert a suitable initialization of the result value and | ||||||
11307 | // tie this to the dest of the image instruction. | ||||||
11308 | |||||||
11309 | const DebugLoc &DL = MI.getDebugLoc(); | ||||||
11310 | |||||||
11311 | int DstIdx = | ||||||
11312 | AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdata); | ||||||
11313 | |||||||
11314 | // Calculate which dword we have to initialize to 0. | ||||||
11315 | MachineOperand *MO_Dmask = TII->getNamedOperand(MI, AMDGPU::OpName::dmask); | ||||||
11316 | |||||||
11317 | // check that dmask operand is found. | ||||||
11318 | assert(MO_Dmask && "Expected dmask operand in instruction"); | ||||||
11319 | |||||||
11320 | unsigned dmask = MO_Dmask->getImm(); | ||||||
11321 | // Determine the number of active lanes taking into account the | ||||||
11322 | // Gather4 special case | ||||||
11323 | unsigned ActiveLanes = TII->isGather4(MI) ? 4 : countPopulation(dmask); | ||||||
11324 | |||||||
11325 | bool Packed = !Subtarget->hasUnpackedD16VMem(); | ||||||
11326 | |||||||
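// Number of dwords to initialize: the data dwords plus one for the TFE/LWE
// error word, where packed D16 holds two half-channels per dword. For
// example, ActiveLanes == 3 with packed D16 gives ((3 + 1) >> 1) + 1 == 3
// dwords, while the unpacked case gives 3 + 1 == 4.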
11327 | unsigned InitIdx = | ||||||
11328 | D16Val && Packed ? ((ActiveLanes + 1) >> 1) + 1 : ActiveLanes + 1; | ||||||
11329 | |||||||
11330 | // Abandon the attempt if the dst size isn't large enough | ||||||
11331 | // - this is in fact an error, but it is picked up elsewhere | ||||||
11332 | // and reported correctly. | ||||||
11333 | uint32_t DstSize = TRI.getRegSizeInBits(*TII->getOpRegClass(MI, DstIdx)) / 32; | ||||||
11334 | if (DstSize < InitIdx) | ||||||
11335 | return; | ||||||
11336 | |||||||
11337 | // Create a register for the initialization value. | ||||||
11338 | Register PrevDst = MRI.createVirtualRegister(TII->getOpRegClass(MI, DstIdx)); | ||||||
11339 | unsigned NewDst = 0; // Final initialized value will be in here | ||||||
11340 | |||||||
11341 | // If PRTStrictNull feature is enabled (the default) then initialize | ||||||
11342 | // all the result registers to 0, otherwise just the error indication | ||||||
11343 | // register (VGPRn+1) | ||||||
11344 | unsigned SizeLeft = Subtarget->usePRTStrictNull() ? InitIdx : 1; | ||||||
11345 | unsigned CurrIdx = Subtarget->usePRTStrictNull() ? 0 : (InitIdx - 1); | ||||||
11346 | |||||||
11347 | BuildMI(MBB, MI, DL, TII->get(AMDGPU::IMPLICIT_DEF), PrevDst); | ||||||
11348 | for (; SizeLeft; SizeLeft--, CurrIdx++) { | ||||||
11349 | NewDst = MRI.createVirtualRegister(TII->getOpRegClass(MI, DstIdx)); | ||||||
11350 | // Initialize dword | ||||||
11351 | Register SubReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); | ||||||
11352 | BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), SubReg) | ||||||
11353 | .addImm(0); | ||||||
11354 | // Insert into the super-reg | ||||||
11355 | BuildMI(MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), NewDst) | ||||||
11356 | .addReg(PrevDst) | ||||||
11357 | .addReg(SubReg) | ||||||
11358 | .addImm(SIRegisterInfo::getSubRegFromChannel(CurrIdx)); | ||||||
11359 | |||||||
11360 | PrevDst = NewDst; | ||||||
11361 | } | ||||||
11362 | |||||||
11363 | // Add as an implicit operand | ||||||
11364 | MI.addOperand(MachineOperand::CreateReg(NewDst, false, true)); | ||||||
11365 | |||||||
11366 | // Tie the just added implicit operand to the dst | ||||||
11367 | MI.tieOperands(DstIdx, MI.getNumOperands() - 1); | ||||||
11368 | } | ||||||
11369 | |||||||
11370 | /// Assign the register class depending on the number of | ||||||
11371 | /// bits set in the writemask | ||||||
11372 | void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI, | ||||||
11373 | SDNode *Node) const { | ||||||
11374 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); | ||||||
11375 | |||||||
11376 | MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); | ||||||
11377 | |||||||
11378 | if (TII->isVOP3(MI.getOpcode())) { | ||||||
11379 | // Make sure constant bus requirements are respected. | ||||||
11380 | TII->legalizeOperandsVOP3(MRI, MI); | ||||||
11381 | |||||||
11382 | // Prefer VGPRs over AGPRs in mAI instructions where possible. | ||||||
11383 | // This saves a chain-copy of registers and better balances register | ||||||
11384 | // use between VGPRs and AGPRs, as AGPR tuples tend to be big. | ||||||
11385 | if (const MCOperandInfo *OpInfo = MI.getDesc().OpInfo) { | ||||||
11386 | unsigned Opc = MI.getOpcode(); | ||||||
11387 | const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); | ||||||
11388 | for (auto I : { AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0), | ||||||
11389 | AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) }) { | ||||||
11390 | if (I == -1) | ||||||
11391 | break; | ||||||
11392 | MachineOperand &Op = MI.getOperand(I); | ||||||
11393 | if ((OpInfo[I].RegClass != llvm::AMDGPU::AV_64RegClassID && | ||||||
11394 | OpInfo[I].RegClass != llvm::AMDGPU::AV_32RegClassID) || | ||||||
11395 | !Op.getReg().isVirtual() || !TRI->isAGPR(MRI, Op.getReg())) | ||||||
11396 | continue; | ||||||
11397 | auto *Src = MRI.getUniqueVRegDef(Op.getReg()); | ||||||
11398 | if (!Src || !Src->isCopy() || | ||||||
11399 | !TRI->isSGPRReg(MRI, Src->getOperand(1).getReg())) | ||||||
11400 | continue; | ||||||
11401 | auto *RC = TRI->getRegClassForReg(MRI, Op.getReg()); | ||||||
11402 | auto *NewRC = TRI->getEquivalentVGPRClass(RC); | ||||||
11403 | // All uses of agpr64 and agpr32 can also accept vgpr except for | ||||||
11404 | // v_accvgpr_read, but we do not produce agpr reads during selection, | ||||||
11405 | // so no use checks are needed. | ||||||
11406 | MRI.setRegClass(Op.getReg(), NewRC); | ||||||
11407 | } | ||||||
11408 | } | ||||||
11409 | |||||||
11410 | return; | ||||||
11411 | } | ||||||
11412 | |||||||
11413 | // Replace unused atomics with the no return version. | ||||||
11414 | int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI.getOpcode()); | ||||||
11415 | if (NoRetAtomicOp != -1) { | ||||||
11416 | if (!Node->hasAnyUseOfValue(0)) { | ||||||
11417 | int CPolIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), | ||||||
11418 | AMDGPU::OpName::cpol); | ||||||
11419 | if (CPolIdx != -1) { | ||||||
11420 | MachineOperand &CPol = MI.getOperand(CPolIdx); | ||||||
11421 | CPol.setImm(CPol.getImm() & ~AMDGPU::CPol::GLC); | ||||||
11422 | } | ||||||
11423 | MI.RemoveOperand(0); | ||||||
11424 | MI.setDesc(TII->get(NoRetAtomicOp)); | ||||||
11425 | return; | ||||||
11426 | } | ||||||
11427 | |||||||
11428 | // For mubuf_atomic_cmpswap, we need to have tablegen use an extract_subreg | ||||||
11429 | // instruction, because the return type of these instructions is a vec2 of | ||||||
11430 | // the memory type, so it can be tied to the input operand. | ||||||
11431 | // This means these instructions always have a use, so we need to add a | ||||||
11432 | // special case to check if the atomic has only one extract_subreg use, | ||||||
11433 | // which itself has no uses. | ||||||
11434 | if ((Node->hasNUsesOfValue(1, 0) && | ||||||
11435 | Node->use_begin()->isMachineOpcode() && | ||||||
11436 | Node->use_begin()->getMachineOpcode() == AMDGPU::EXTRACT_SUBREG && | ||||||
11437 | !Node->use_begin()->hasAnyUseOfValue(0))) { | ||||||
11438 | Register Def = MI.getOperand(0).getReg(); | ||||||
11439 | |||||||
11440 | // Change this into a noret atomic. | ||||||
11441 | MI.setDesc(TII->get(NoRetAtomicOp)); | ||||||
11442 | MI.RemoveOperand(0); | ||||||
11443 | |||||||
11444 | // If we only remove the def operand from the atomic instruction, the | ||||||
11445 | // extract_subreg will be left with a use of a vreg without a def. | ||||||
11446 | // So we need to insert an implicit_def to avoid machine verifier | ||||||
11447 | // errors. | ||||||
11448 | BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), | ||||||
11449 | TII->get(AMDGPU::IMPLICIT_DEF), Def); | ||||||
11450 | } | ||||||
11451 | return; | ||||||
11452 | } | ||||||
11453 | |||||||
11454 | if (TII->isMIMG(MI) && !MI.mayStore()) | ||||||
11455 | AddIMGInit(MI); | ||||||
11456 | } | ||||||
11457 | |||||||
11458 | static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL, | ||||||
11459 | uint64_t Val) { | ||||||
11460 | SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32); | ||||||
11461 | return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0); | ||||||
11462 | } | ||||||
11463 | |||||||
11464 | MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG, | ||||||
11465 | const SDLoc &DL, | ||||||
11466 | SDValue Ptr) const { | ||||||
11467 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); | ||||||
11468 | |||||||
11469 | // Build the half of the subregister with the constants before building the | ||||||
11470 | // full 128-bit register. If we are building multiple resource descriptors, | ||||||
11471 | // this will allow CSEing of the 2-component register. | ||||||
11472 | const SDValue Ops0[] = { | ||||||
11473 | DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, DL, MVT::i32), | ||||||
11474 | buildSMovImm32(DAG, DL, 0), | ||||||
11475 | DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32), | ||||||
11476 | buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32), | ||||||
11477 | DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32) | ||||||
11478 | }; | ||||||
11479 | |||||||
11480 | SDValue SubRegHi = SDValue(DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, | ||||||
11481 | MVT::v2i32, Ops0), 0); | ||||||
11482 | |||||||
11483 | // Combine the constants and the pointer. | ||||||
11484 | const SDValue Ops1[] = { | ||||||
11485 | DAG.getTargetConstant(AMDGPU::SGPR_128RegClassID, DL, MVT::i32), | ||||||
11486 | Ptr, | ||||||
11487 | DAG.getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32), | ||||||
11488 | SubRegHi, | ||||||
11489 | DAG.getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32) | ||||||
11490 | }; | ||||||
11491 | |||||||
11492 | return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1); | ||||||
11493 | } | ||||||
11494 | |||||||
11495 | /// Return a resource descriptor with the 'Add TID' bit enabled | ||||||
11496 | /// The TID (Thread ID) is multiplied by the stride value (bits [61:48] | ||||||
11497 | /// of the resource descriptor) to create an offset, which is added to | ||||||
11498 | /// the resource pointer. | ||||||
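/// For example (illustrative only), with a stride of 16 in bits [61:48],
/// lane N of the wave effectively addresses Ptr + N * 16 before any
/// instruction offset is applied; the exact addressing depends on how the
/// caller fills in the rest of the descriptor.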
11499 | MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, const SDLoc &DL, | ||||||
11500 | SDValue Ptr, uint32_t RsrcDword1, | ||||||
11501 | uint64_t RsrcDword2And3) const { | ||||||
11502 | SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr); | ||||||
11503 | SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr); | ||||||
11504 | if (RsrcDword1) { | ||||||
11505 | PtrHi = SDValue(DAG.getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi, | ||||||
11506 | DAG.getConstant(RsrcDword1, DL, MVT::i32)), | ||||||
11507 | 0); | ||||||
11508 | } | ||||||
11509 | |||||||
11510 | SDValue DataLo = buildSMovImm32(DAG, DL, | ||||||
11511 | RsrcDword2And3 & UINT64_C(0xFFFFFFFF)); | ||||||
11512 | SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32); | ||||||
11513 | |||||||
11514 | const SDValue Ops[] = { | ||||||
11515 | DAG.getTargetConstant(AMDGPU::SGPR_128RegClassID, DL, MVT::i32), | ||||||
11516 | PtrLo, | ||||||
11517 | DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32), | ||||||
11518 | PtrHi, | ||||||
11519 | DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32), | ||||||
11520 | DataLo, | ||||||
11521 | DAG.getTargetConstant(AMDGPU::sub2, DL, MVT::i32), | ||||||
11522 | DataHi, | ||||||
11523 | DAG.getTargetConstant(AMDGPU::sub3, DL, MVT::i32) | ||||||
11524 | }; | ||||||
11525 | |||||||
11526 | return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops); | ||||||
11527 | } | ||||||
11528 | |||||||
11529 | //===----------------------------------------------------------------------===// | ||||||
11530 | // SI Inline Assembly Support | ||||||
11531 | //===----------------------------------------------------------------------===// | ||||||
11532 | |||||||
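// For reference, the 's', 'v' and 'a' register-class constraints handled
// below surface in user code roughly like this (illustrative only; the
// operands and instructions are arbitrary):
//   float r;
//   __asm__("v_add_f32 %0, %1, %2" : "=v"(r) : "v"(a), "v"(b));  // VGPRs
//   __asm__("s_mov_b32 %0, %1" : "=s"(SDst) : "s"(SSrc));        // SGPRs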
11533 | std::pair<unsigned, const TargetRegisterClass *> | ||||||
11534 | SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI_, | ||||||
11535 | StringRef Constraint, | ||||||
11536 | MVT VT) const { | ||||||
11537 | const SIRegisterInfo *TRI = static_cast<const SIRegisterInfo *>(TRI_); | ||||||
11538 | |||||||
11539 | const TargetRegisterClass *RC = nullptr; | ||||||
11540 | if (Constraint.size() == 1) { | ||||||
11541 | const unsigned BitWidth = VT.getSizeInBits(); | ||||||
11542 | switch (Constraint[0]) { | ||||||
11543 | default: | ||||||
11544 | return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); | ||||||
11545 | case 's': | ||||||
11546 | case 'r': | ||||||
11547 | switch (BitWidth) { | ||||||
11548 | case 16: | ||||||
11549 | RC = &AMDGPU::SReg_32RegClass; | ||||||
11550 | break; | ||||||
11551 | case 64: | ||||||
11552 | RC = &AMDGPU::SGPR_64RegClass; | ||||||
11553 | break; | ||||||
11554 | default: | ||||||
11555 | RC = SIRegisterInfo::getSGPRClassForBitWidth(BitWidth); | ||||||
11556 | if (!RC) | ||||||
11557 | return std::make_pair(0U, nullptr); | ||||||
11558 | break; | ||||||
11559 | } | ||||||
11560 | break; | ||||||
11561 | case 'v': | ||||||
11562 | switch (BitWidth) { | ||||||
11563 | case 16: | ||||||
11564 | RC = &AMDGPU::VGPR_32RegClass; | ||||||
11565 | break; | ||||||
11566 | default: | ||||||
11567 | RC = TRI->getVGPRClassForBitWidth(BitWidth); | ||||||
11568 | if (!RC) | ||||||
11569 | return std::make_pair(0U, nullptr); | ||||||
11570 | break; | ||||||
11571 | } | ||||||
11572 | break; | ||||||
11573 | case 'a': | ||||||
11574 | if (!Subtarget->hasMAIInsts()) | ||||||
11575 | break; | ||||||
11576 | switch (BitWidth) { | ||||||
11577 | case 16: | ||||||
11578 | RC = &AMDGPU::AGPR_32RegClass; | ||||||
11579 | break; | ||||||
11580 | default: | ||||||
11581 | RC = TRI->getAGPRClassForBitWidth(BitWidth); | ||||||
11582 | if (!RC) | ||||||
11583 | return std::make_pair(0U, nullptr); | ||||||
11584 | break; | ||||||
11585 | } | ||||||
11586 | break; | ||||||
11587 | } | ||||||
11588 | // We actually support i128, i16 and f16 as inline parameters | ||||||
11589 | // even if they are not reported as legal | ||||||
11590 | if (RC && (isTypeLegal(VT) || VT.SimpleTy == MVT::i128 || | ||||||
11591 | VT.SimpleTy == MVT::i16 || VT.SimpleTy == MVT::f16)) | ||||||
11592 | return std::make_pair(0U, RC); | ||||||
11593 | } | ||||||
11594 | |||||||
11595 | if (Constraint.size() > 1) { | ||||||
11596 | if (Constraint[1] == 'v') { | ||||||
11597 | RC = &AMDGPU::VGPR_32RegClass; | ||||||
11598 | } else if (Constraint[1] == 's') { | ||||||
11599 | RC = &AMDGPU::SGPR_32RegClass; | ||||||
11600 | } else if (Constraint[1] == 'a') { | ||||||
11601 | RC = &AMDGPU::AGPR_32RegClass; | ||||||
11602 | } | ||||||
11603 | |||||||
11604 | if (RC) { | ||||||
11605 | uint32_t Idx; | ||||||
11606 | bool Failed = Constraint.substr(2).getAsInteger(10, Idx); | ||||||
11607 | if (!Failed && Idx < RC->getNumRegs()) | ||||||
11608 | return std::make_pair(RC->getRegister(Idx), RC); | ||||||
11609 | } | ||||||
11610 | } | ||||||
11611 | |||||||
11612 | // FIXME: Returns VS_32 for physical SGPR constraints | ||||||
11613 | return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); | ||||||
11614 | } | ||||||
11615 | |||||||
11616 | static bool isImmConstraint(StringRef Constraint) { | ||||||
11617 | if (Constraint.size() == 1) { | ||||||
11618 | switch (Constraint[0]) { | ||||||
11619 | default: break; | ||||||
11620 | case 'I': | ||||||
11621 | case 'J': | ||||||
11622 | case 'A': | ||||||
11623 | case 'B': | ||||||
11624 | case 'C': | ||||||
11625 | return true; | ||||||
11626 | } | ||||||
11627 | } else if (Constraint == "DA" || | ||||||
11628 | Constraint == "DB") { | ||||||
11629 | return true; | ||||||
11630 | } | ||||||
11631 | return false; | ||||||
11632 | } | ||||||
11633 | |||||||
11634 | SITargetLowering::ConstraintType | ||||||
11635 | SITargetLowering::getConstraintType(StringRef Constraint) const { | ||||||
11636 | if (Constraint.size() == 1) { | ||||||
11637 | switch (Constraint[0]) { | ||||||
11638 | default: break; | ||||||
11639 | case 's': | ||||||
11640 | case 'v': | ||||||
11641 | case 'a': | ||||||
11642 | return C_RegisterClass; | ||||||
11643 | } | ||||||
11644 | } | ||||||
11645 | if (isImmConstraint(Constraint)) { | ||||||
11646 | return C_Other; | ||||||
11647 | } | ||||||
11648 | return TargetLowering::getConstraintType(Constraint); | ||||||
11649 | } | ||||||
11650 | |||||||
11651 | static uint64_t clearUnusedBits(uint64_t Val, unsigned Size) { | ||||||
11652 | if (!AMDGPU::isInlinableIntLiteral(Val)) { | ||||||
11653 | Val = Val & maskTrailingOnes<uint64_t>(Size); | ||||||
11654 | } | ||||||
11655 | return Val; | ||||||
11656 | } | ||||||
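// For example, clearUnusedBits(0x100000001, 32) yields 0x1, while an
// inlinable literal such as -1 is returned unchanged so it still matches the
// inline-constant encodings.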
11657 | |||||||
11658 | void SITargetLowering::LowerAsmOperandForConstraint(SDValue Op, | ||||||
11659 | std::string &Constraint, | ||||||
11660 | std::vector<SDValue> &Ops, | ||||||
11661 | SelectionDAG &DAG) const { | ||||||
11662 | if (isImmConstraint(Constraint)) { | ||||||
11663 | uint64_t Val; | ||||||
11664 | if (getAsmOperandConstVal(Op, Val) && | ||||||
11665 | checkAsmConstraintVal(Op, Constraint, Val)) { | ||||||
11666 | Val = clearUnusedBits(Val, Op.getScalarValueSizeInBits()); | ||||||
11667 | Ops.push_back(DAG.getTargetConstant(Val, SDLoc(Op), MVT::i64)); | ||||||
11668 | } | ||||||
11669 | } else { | ||||||
11670 | TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); | ||||||
11671 | } | ||||||
11672 | } | ||||||
11673 | |||||||
11674 | bool SITargetLowering::getAsmOperandConstVal(SDValue Op, uint64_t &Val) const { | ||||||
11675 | unsigned Size = Op.getScalarValueSizeInBits(); | ||||||
11676 | if (Size > 64) | ||||||
11677 | return false; | ||||||
11678 | |||||||
11679 | if (Size == 16 && !Subtarget->has16BitInsts()) | ||||||
11680 | return false; | ||||||
11681 | |||||||
11682 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { | ||||||
11683 | Val = C->getSExtValue(); | ||||||
11684 | return true; | ||||||
11685 | } | ||||||
11686 | if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) { | ||||||
11687 | Val = C->getValueAPF().bitcastToAPInt().getSExtValue(); | ||||||
11688 | return true; | ||||||
11689 | } | ||||||
11690 | if (BuildVectorSDNode *V = dyn_cast<BuildVectorSDNode>(Op)) { | ||||||
11691 | if (Size != 16 || Op.getNumOperands() != 2) | ||||||
11692 | return false; | ||||||
11693 | if (Op.getOperand(0).isUndef() || Op.getOperand(1).isUndef()) | ||||||
11694 | return false; | ||||||
11695 | if (ConstantSDNode *C = V->getConstantSplatNode()) { | ||||||
11696 | Val = C->getSExtValue(); | ||||||
11697 | return true; | ||||||
11698 | } | ||||||
11699 | if (ConstantFPSDNode *C = V->getConstantFPSplatNode()) { | ||||||
11700 | Val = C->getValueAPF().bitcastToAPInt().getSExtValue(); | ||||||
11701 | return true; | ||||||
11702 | } | ||||||
11703 | } | ||||||
11704 | |||||||
11705 | return false; | ||||||
11706 | } | ||||||
11707 | |||||||
11708 | bool SITargetLowering::checkAsmConstraintVal(SDValue Op, | ||||||
11709 | const std::string &Constraint, | ||||||
11710 | uint64_t Val) const { | ||||||
11711 | if (Constraint.size() == 1) { | ||||||
11712 | switch (Constraint[0]) { | ||||||
11713 | case 'I': | ||||||
11714 | return AMDGPU::isInlinableIntLiteral(Val); | ||||||
11715 | case 'J': | ||||||
11716 | return isInt<16>(Val); | ||||||
11717 | case 'A': | ||||||
11718 | return checkAsmConstraintValA(Op, Val); | ||||||
11719 | case 'B': | ||||||
11720 | return isInt<32>(Val); | ||||||
11721 | case 'C': | ||||||
11722 | return isUInt<32>(clearUnusedBits(Val, Op.getScalarValueSizeInBits())) || | ||||||
11723 | AMDGPU::isInlinableIntLiteral(Val); | ||||||
11724 | default: | ||||||
11725 | break; | ||||||
11726 | } | ||||||
11727 | } else if (Constraint.size() == 2) { | ||||||
11728 | if (Constraint == "DA") { | ||||||
11729 | int64_t HiBits = static_cast<int32_t>(Val >> 32); | ||||||
11730 | int64_t LoBits = static_cast<int32_t>(Val); | ||||||
11731 | return checkAsmConstraintValA(Op, HiBits, 32) && | ||||||
11732 | checkAsmConstraintValA(Op, LoBits, 32); | ||||||
11733 | } | ||||||
11734 | if (Constraint == "DB") { | ||||||
11735 | return true; | ||||||
11736 | } | ||||||
11737 | } | ||||||
11738 | llvm_unreachable("Invalid asm constraint"); | ||||||
11739 | } | ||||||
11740 | |||||||
11741 | bool SITargetLowering::checkAsmConstraintValA(SDValue Op, | ||||||
11742 | uint64_t Val, | ||||||
11743 | unsigned MaxSize) const { | ||||||
11744 | unsigned Size = std::min<unsigned>(Op.getScalarValueSizeInBits(), MaxSize); | ||||||
11745 | bool HasInv2Pi = Subtarget->hasInv2PiInlineImm(); | ||||||
11746 | if ((Size == 16 && AMDGPU::isInlinableLiteral16(Val, HasInv2Pi)) || | ||||||
11747 | (Size == 32 && AMDGPU::isInlinableLiteral32(Val, HasInv2Pi)) || | ||||||
11748 | (Size == 64 && AMDGPU::isInlinableLiteral64(Val, HasInv2Pi))) { | ||||||
11749 | return true; | ||||||
11750 | } | ||||||
11751 | return false; | ||||||
11752 | } | ||||||
11753 | |||||||
11754 | static int getAlignedAGPRClassID(unsigned UnalignedClassID) { | ||||||
11755 | switch (UnalignedClassID) { | ||||||
11756 | case AMDGPU::VReg_64RegClassID: | ||||||
11757 | return AMDGPU::VReg_64_Align2RegClassID; | ||||||
11758 | case AMDGPU::VReg_96RegClassID: | ||||||
11759 | return AMDGPU::VReg_96_Align2RegClassID; | ||||||
11760 | case AMDGPU::VReg_128RegClassID: | ||||||
11761 | return AMDGPU::VReg_128_Align2RegClassID; | ||||||
11762 | case AMDGPU::VReg_160RegClassID: | ||||||
11763 | return AMDGPU::VReg_160_Align2RegClassID; | ||||||
11764 | case AMDGPU::VReg_192RegClassID: | ||||||
11765 | return AMDGPU::VReg_192_Align2RegClassID; | ||||||
11766 | case AMDGPU::VReg_224RegClassID: | ||||||
11767 | return AMDGPU::VReg_224_Align2RegClassID; | ||||||
11768 | case AMDGPU::VReg_256RegClassID: | ||||||
11769 | return AMDGPU::VReg_256_Align2RegClassID; | ||||||
11770 | case AMDGPU::VReg_512RegClassID: | ||||||
11771 | return AMDGPU::VReg_512_Align2RegClassID; | ||||||
11772 | case AMDGPU::VReg_1024RegClassID: | ||||||
11773 | return AMDGPU::VReg_1024_Align2RegClassID; | ||||||
11774 | case AMDGPU::AReg_64RegClassID: | ||||||
11775 | return AMDGPU::AReg_64_Align2RegClassID; | ||||||
11776 | case AMDGPU::AReg_96RegClassID: | ||||||
11777 | return AMDGPU::AReg_96_Align2RegClassID; | ||||||
11778 | case AMDGPU::AReg_128RegClassID: | ||||||
11779 | return AMDGPU::AReg_128_Align2RegClassID; | ||||||
11780 | case AMDGPU::AReg_160RegClassID: | ||||||
11781 | return AMDGPU::AReg_160_Align2RegClassID; | ||||||
11782 | case AMDGPU::AReg_192RegClassID: | ||||||
11783 | return AMDGPU::AReg_192_Align2RegClassID; | ||||||
11784 | case AMDGPU::AReg_256RegClassID: | ||||||
11785 | return AMDGPU::AReg_256_Align2RegClassID; | ||||||
11786 | case AMDGPU::AReg_512RegClassID: | ||||||
11787 | return AMDGPU::AReg_512_Align2RegClassID; | ||||||
11788 | case AMDGPU::AReg_1024RegClassID: | ||||||
11789 | return AMDGPU::AReg_1024_Align2RegClassID; | ||||||
11790 | default: | ||||||
11791 | return -1; | ||||||
11792 | } | ||||||
11793 | } | ||||||
11794 | |||||||
11795 | // Figure out which registers should be reserved for stack access. Only after | ||||||
11796 | // the function is legalized do we know all of the non-spill stack objects or if | ||||||
11797 | // calls are present. | ||||||
11798 | void SITargetLowering::finalizeLowering(MachineFunction &MF) const { | ||||||
11799 | MachineRegisterInfo &MRI = MF.getRegInfo(); | ||||||
11800 | SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); | ||||||
11801 | const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); | ||||||
11802 | const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); | ||||||
11803 | const SIInstrInfo *TII = ST.getInstrInfo(); | ||||||
11804 | |||||||
11805 | if (Info->isEntryFunction()) { | ||||||
11806 | // Callable functions have fixed registers used for stack access. | ||||||
11807 | reservePrivateMemoryRegs(getTargetMachine(), MF, *TRI, *Info); | ||||||
11808 | } | ||||||
11809 | |||||||
11810 | assert(!TRI->isSubRegister(Info->getScratchRSrcReg(), | ||||||
11811 | Info->getStackPtrOffsetReg())); | ||||||
11812 | if (Info->getStackPtrOffsetReg() != AMDGPU::SP_REG) | ||||||
11813 | MRI.replaceRegWith(AMDGPU::SP_REG, Info->getStackPtrOffsetReg()); | ||||||
11814 | |||||||
11815 | // We need to worry about replacing the default register with itself in case | ||||||
11816 | // of MIR testcases missing the MFI. | ||||||
11817 | if (Info->getScratchRSrcReg() != AMDGPU::PRIVATE_RSRC_REG) | ||||||
11818 | MRI.replaceRegWith(AMDGPU::PRIVATE_RSRC_REG, Info->getScratchRSrcReg()); | ||||||
11819 | |||||||
11820 | if (Info->getFrameOffsetReg() != AMDGPU::FP_REG) | ||||||
11821 | MRI.replaceRegWith(AMDGPU::FP_REG, Info->getFrameOffsetReg()); | ||||||
11822 | |||||||
11823 | Info->limitOccupancy(MF); | ||||||
11824 | |||||||
11825 | if (ST.isWave32() && !MF.empty()) { | ||||||
11826 | for (auto &MBB : MF) { | ||||||
11827 | for (auto &MI : MBB) { | ||||||
11828 | TII->fixImplicitOperands(MI); | ||||||
11829 | } | ||||||
11830 | } | ||||||
11831 | } | ||||||
11832 | |||||||
11833 | // FIXME: This is a hack to fixup AGPR classes to use the properly aligned | ||||||
11834 | // classes if required. Ideally the register class constraints would differ | ||||||
11835 | // per-subtarget, but there's no easy way to achieve that right now. This is | ||||||
11836 | // not a problem for VGPRs because the correctly aligned VGPR class is implied | ||||||
11837 | // from using them as the register class for legal types. | ||||||
11838 | if (ST.needsAlignedVGPRs()) { | ||||||
11839 | for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) { | ||||||
11840 | const Register Reg = Register::index2VirtReg(I); | ||||||
11841 | const TargetRegisterClass *RC = MRI.getRegClassOrNull(Reg); | ||||||
11842 | if (!RC) | ||||||
11843 | continue; | ||||||
11844 | int NewClassID = getAlignedAGPRClassID(RC->getID()); | ||||||
11845 | if (NewClassID != -1) | ||||||
11846 | MRI.setRegClass(Reg, TRI->getRegClass(NewClassID)); | ||||||
11847 | } | ||||||
11848 | } | ||||||
11849 | |||||||
11850 | TargetLoweringBase::finalizeLowering(MF); | ||||||
11851 | |||||||
11852 | // Allocate a VGPR for future SGPR Spill if | ||||||
11853 | // "amdgpu-reserve-vgpr-for-sgpr-spill" option is used | ||||||
11854 | // FIXME: We won't need this hack if we split SGPR allocation from VGPR | ||||||
11855 | if (VGPRReserveforSGPRSpill && TRI->spillSGPRToVGPR() && | ||||||
11856 | !Info->VGPRReservedForSGPRSpill && !Info->isEntryFunction()) | ||||||
11857 | Info->reserveVGPRforSGPRSpills(MF); | ||||||
11858 | } | ||||||
11859 | |||||||
11860 | void SITargetLowering::computeKnownBitsForFrameIndex( | ||||||
11861 | const int FI, KnownBits &Known, const MachineFunction &MF) const { | ||||||
11862 | TargetLowering::computeKnownBitsForFrameIndex(FI, Known, MF); | ||||||
11863 | |||||||
11864 | // Set the high bits to zero based on the maximum allowed scratch size per | ||||||
11865 | // wave. We can't use vaddr in MUBUF instructions if we don't know the address | ||||||
11866 | // calculation won't overflow, so assume the sign bit is never set. | ||||||
11867 | Known.Zero.setHighBits(getSubtarget()->getKnownHighZeroBitsForFrameIndex()); | ||||||
11868 | } | ||||||
11869 | |||||||
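// Conservative known-zero bits for a workitem id: for example, a maximum
// workitem id of 1023 (a 1024-wide workgroup dimension) needs 10 bits, so
// countLeadingZeros(1023) == 22 and the top 22 bits of the 32-bit value are
// known to be zero.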
11870 | static void knownBitsForWorkitemID(const GCNSubtarget &ST, GISelKnownBits &KB, | ||||||
11871 | KnownBits &Known, unsigned Dim) { | ||||||
11872 | unsigned MaxValue = | ||||||
11873 | ST.getMaxWorkitemID(KB.getMachineFunction().getFunction(), Dim); | ||||||
11874 | Known.Zero.setHighBits(countLeadingZeros(MaxValue)); | ||||||
11875 | } | ||||||
11876 | |||||||
11877 | void SITargetLowering::computeKnownBitsForTargetInstr( | ||||||
11878 | GISelKnownBits &KB, Register R, KnownBits &Known, const APInt &DemandedElts, | ||||||
11879 | const MachineRegisterInfo &MRI, unsigned Depth) const { | ||||||
11880 | const MachineInstr *MI = MRI.getVRegDef(R); | ||||||
11881 | switch (MI->getOpcode()) { | ||||||
11882 | case AMDGPU::G_INTRINSIC: { | ||||||
11883 | switch (MI->getIntrinsicID()) { | ||||||
11884 | case Intrinsic::amdgcn_workitem_id_x: | ||||||
11885 | knownBitsForWorkitemID(*getSubtarget(), KB, Known, 0); | ||||||
11886 | break; | ||||||
11887 | case Intrinsic::amdgcn_workitem_id_y: | ||||||
11888 | knownBitsForWorkitemID(*getSubtarget(), KB, Known, 1); | ||||||
11889 | break; | ||||||
11890 | case Intrinsic::amdgcn_workitem_id_z: | ||||||
11891 | knownBitsForWorkitemID(*getSubtarget(), KB, Known, 2); | ||||||
11892 | break; | ||||||
11893 | case Intrinsic::amdgcn_mbcnt_lo: | ||||||
11894 | case Intrinsic::amdgcn_mbcnt_hi: { | ||||||
11895 | // These return at most the wavefront size - 1. | ||||||
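// For example, on a wave64 subtarget the result fits in 6 bits, so for a
// 32-bit register the top 32 - 6 = 26 bits are known to be zero.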
11896 | unsigned Size = MRI.getType(R).getSizeInBits(); | ||||||
11897 | Known.Zero.setHighBits(Size - getSubtarget()->getWavefrontSizeLog2()); | ||||||
11898 | break; | ||||||
11899 | } | ||||||
11900 | case Intrinsic::amdgcn_groupstaticsize: { | ||||||
11901 | // We can report everything over the maximum size as 0. We can't report | ||||||
11902 | // based on the actual size because we don't know if it's accurate or not | ||||||
11903 | // at any given point. | ||||||
11904 | Known.Zero.setHighBits(countLeadingZeros(getSubtarget()->getLocalMemorySize())); | ||||||
11905 | break; | ||||||
11906 | } | ||||||
11907 | } | ||||||
11908 | break; | ||||||
11909 | } | ||||||
11910 | case AMDGPU::G_AMDGPU_BUFFER_LOAD_UBYTE: | ||||||
11911 | Known.Zero.setHighBits(24); | ||||||
11912 | break; | ||||||
11913 | case AMDGPU::G_AMDGPU_BUFFER_LOAD_USHORT: | ||||||
11914 | Known.Zero.setHighBits(16); | ||||||
11915 | break; | ||||||
11916 | } | ||||||
11917 | } | ||||||
11918 | |||||||
11919 | Align SITargetLowering::computeKnownAlignForTargetInstr( | ||||||
11920 | GISelKnownBits &KB, Register R, const MachineRegisterInfo &MRI, | ||||||
11921 | unsigned Depth) const { | ||||||
11922 | const MachineInstr *MI = MRI.getVRegDef(R); | ||||||
11923 | switch (MI->getOpcode()) { | ||||||
11924 | case AMDGPU::G_INTRINSIC: | ||||||
11925 | case AMDGPU::G_INTRINSIC_W_SIDE_EFFECTS: { | ||||||
11926 | // FIXME: Can this move to generic code? What about the case where the call | ||||||
11927 | // site specifies a lower alignment? | ||||||
11928 | Intrinsic::ID IID = MI->getIntrinsicID(); | ||||||
11929 | LLVMContext &Ctx = KB.getMachineFunction().getFunction().getContext(); | ||||||
11930 | AttributeList Attrs = Intrinsic::getAttributes(Ctx, IID); | ||||||
11931 | if (MaybeAlign RetAlign = Attrs.getRetAlignment()) | ||||||
11932 | return *RetAlign; | ||||||
11933 | return Align(1); | ||||||
11934 | } | ||||||
11935 | default: | ||||||
11936 | return Align(1); | ||||||
11937 | } | ||||||
11938 | } | ||||||
11939 | |||||||
11940 | Align SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const { | ||||||
11941 | const Align PrefAlign = TargetLowering::getPrefLoopAlignment(ML); | ||||||
11942 | const Align CacheLineAlign = Align(64); | ||||||
11943 | |||||||
11944 | // Pre-GFX10 targets did not benefit from loop alignment | ||||||
11945 | if (!ML || DisableLoopAlignment || | ||||||
11946 | (getSubtarget()->getGeneration() < AMDGPUSubtarget::GFX10) || | ||||||
11947 | getSubtarget()->hasInstFwdPrefetchBug()) | ||||||
11948 | return PrefAlign; | ||||||
11949 | |||||||
11950 | // On GFX10 the I$ consists of 4 x 64-byte cache lines. | ||||||
11951 | // By default the prefetcher keeps one cache line behind and reads two | ||||||
11952 | // ahead. We can modify it with S_INST_PREFETCH so larger loops have two | ||||||
11953 | // lines behind and one ahead, so we can benefit from aligning loop headers | ||||||
11954 | // if the loop fits in 192 bytes. | ||||||
11955 | // If the loop fits in 64 bytes it always spans no more than two cache lines | ||||||
11956 | // and does not need an alignment. | ||||||
11957 | // Otherwise, a loop of at most 128 bytes does not need the prefetch | ||||||
11958 | // modified, while a loop of at most 192 bytes needs two lines behind. | ||||||
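// A concrete example (assuming a preheader and a single exit block exist):
// a 160-byte loop gets its header aligned to 64 bytes, S_INST_PREFETCH
// mode 1 (two lines behind) inserted in the preheader, and mode 2 (one line
// behind) restored at the exit block.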
11959 | |||||||
11960 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); | ||||||
11961 | const MachineBasicBlock *Header = ML->getHeader(); | ||||||
11962 | if (Header->getAlignment() != PrefAlign) | ||||||
11963 | return Header->getAlignment(); // Already processed. | ||||||
11964 | |||||||
11965 | unsigned LoopSize = 0; | ||||||
11966 | for (const MachineBasicBlock *MBB : ML->blocks()) { | ||||||
11967 | // If an inner loop block is aligned, assume on average half of the | ||||||
11968 | // alignment size is added as nops. | ||||||
11969 | if (MBB != Header) | ||||||
11970 | LoopSize += MBB->getAlignment().value() / 2; | ||||||
11971 | |||||||
11972 | for (const MachineInstr &MI : *MBB) { | ||||||
11973 | LoopSize += TII->getInstSizeInBytes(MI); | ||||||
11974 | if (LoopSize > 192) | ||||||
11975 | return PrefAlign; | ||||||
11976 | } | ||||||
11977 | } | ||||||
11978 | |||||||
11979 | if (LoopSize <= 64) | ||||||
11980 | return PrefAlign; | ||||||
11981 | |||||||
11982 | if (LoopSize <= 128) | ||||||
11983 | return CacheLineAlign; | ||||||
11984 | |||||||
11985 | // If any of the parent loops is surrounded by prefetch instructions, do not | ||||||
11986 | // insert new ones for the inner loop; that would reset the parent's settings. | ||||||
11987 | for (MachineLoop *P = ML->getParentLoop(); P; P = P->getParentLoop()) { | ||||||
11988 | if (MachineBasicBlock *Exit = P->getExitBlock()) { | ||||||
11989 | auto I = Exit->getFirstNonDebugInstr(); | ||||||
11990 | if (I != Exit->end() && I->getOpcode() == AMDGPU::S_INST_PREFETCH) | ||||||
11991 | return CacheLineAlign; | ||||||
11992 | } | ||||||
11993 | } | ||||||
11994 | |||||||
11995 | MachineBasicBlock *Pre = ML->getLoopPreheader(); | ||||||
11996 | MachineBasicBlock *Exit = ML->getExitBlock(); | ||||||
11997 | |||||||
11998 | if (Pre && Exit) { | ||||||
11999 | BuildMI(*Pre, Pre->getFirstTerminator(), DebugLoc(), | ||||||
12000 | TII->get(AMDGPU::S_INST_PREFETCH)) | ||||||
12001 | .addImm(1); // prefetch 2 lines behind PC | ||||||
12002 | |||||||
12003 | BuildMI(*Exit, Exit->getFirstNonDebugInstr(), DebugLoc(), | ||||||
12004 | TII->get(AMDGPU::S_INST_PREFETCH)) | ||||||
12005 | .addImm(2); // prefetch 1 line behind PC | ||||||
12006 | } | ||||||
12007 | |||||||
12008 | return CacheLineAlign; | ||||||
12009 | } | ||||||
12010 | |||||||
12011 | LLVM_ATTRIBUTE_UNUSED | ||||||
12012 | static bool isCopyFromRegOfInlineAsm(const SDNode *N) { | ||||||
12013 | assert(N->getOpcode() == ISD::CopyFromReg); | ||||||
12014 | do { | ||||||
12015 | // Follow the chain until we find an INLINEASM node. | ||||||
12016 | N = N->getOperand(0).getNode(); | ||||||
12017 | if (N->getOpcode() == ISD::INLINEASM || | ||||||
12018 | N->getOpcode() == ISD::INLINEASM_BR) | ||||||
12019 | return true; | ||||||
12020 | } while (N->getOpcode() == ISD::CopyFromReg); | ||||||
12021 | return false; | ||||||
12022 | } | ||||||
12023 | |||||||
12024 | bool SITargetLowering::isSDNodeSourceOfDivergence( | ||||||
12025 | const SDNode *N, FunctionLoweringInfo *FLI, | ||||||
12026 | LegacyDivergenceAnalysis *KDA) const { | ||||||
12027 | switch (N->getOpcode()) { | ||||||
12028 | case ISD::CopyFromReg: { | ||||||
12029 | const RegisterSDNode *R = cast<RegisterSDNode>(N->getOperand(1)); | ||||||
12030 | const MachineRegisterInfo &MRI = FLI->MF->getRegInfo(); | ||||||
12031 | const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); | ||||||
12032 | Register Reg = R->getReg(); | ||||||
12033 | |||||||
12034 | // FIXME: Why does this need to consider isLiveIn? | ||||||
12035 | if (Reg.isPhysical() || MRI.isLiveIn(Reg)) | ||||||
12036 | return !TRI->isSGPRReg(MRI, Reg); | ||||||
12037 | |||||||
12038 | if (const Value *V = FLI->getValueFromVirtualReg(R->getReg())) | ||||||
12039 | return KDA->isDivergent(V); | ||||||
12040 | |||||||
12041 | assert(Reg == FLI->DemoteRegister || isCopyFromRegOfInlineAsm(N)); | ||||||
12042 | return !TRI->isSGPRReg(MRI, Reg); | ||||||
12043 | } | ||||||
12044 | case ISD::LOAD: { | ||||||
12045 | const LoadSDNode *L = cast<LoadSDNode>(N); | ||||||
12046 | unsigned AS = L->getAddressSpace(); | ||||||
12047 | // A flat load may access private memory. | ||||||
12048 | return AS == AMDGPUAS::PRIVATE_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS; | ||||||
12049 | } | ||||||
12050 | case ISD::CALLSEQ_END: | ||||||
12051 | return true; | ||||||
12052 | case ISD::INTRINSIC_WO_CHAIN: | ||||||
12053 | return AMDGPU::isIntrinsicSourceOfDivergence( | ||||||
12054 | cast<ConstantSDNode>(N->getOperand(0))->getZExtValue()); | ||||||
12055 | case ISD::INTRINSIC_W_CHAIN: | ||||||
12056 | return AMDGPU::isIntrinsicSourceOfDivergence( | ||||||
12057 | cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()); | ||||||
12058 | case AMDGPUISD::ATOMIC_CMP_SWAP: | ||||||
12059 | case AMDGPUISD::ATOMIC_INC: | ||||||
12060 | case AMDGPUISD::ATOMIC_DEC: | ||||||
12061 | case AMDGPUISD::ATOMIC_LOAD_FMIN: | ||||||
12062 | case AMDGPUISD::ATOMIC_LOAD_FMAX: | ||||||
12063 | case AMDGPUISD::BUFFER_ATOMIC_SWAP: | ||||||
12064 | case AMDGPUISD::BUFFER_ATOMIC_ADD: | ||||||
12065 | case AMDGPUISD::BUFFER_ATOMIC_SUB: | ||||||
12066 | case AMDGPUISD::BUFFER_ATOMIC_SMIN: | ||||||
12067 | case AMDGPUISD::BUFFER_ATOMIC_UMIN: | ||||||
12068 | case AMDGPUISD::BUFFER_ATOMIC_SMAX: | ||||||
12069 | case AMDGPUISD::BUFFER_ATOMIC_UMAX: | ||||||
12070 | case AMDGPUISD::BUFFER_ATOMIC_AND: | ||||||
12071 | case AMDGPUISD::BUFFER_ATOMIC_OR: | ||||||
12072 | case AMDGPUISD::BUFFER_ATOMIC_XOR: | ||||||
12073 | case AMDGPUISD::BUFFER_ATOMIC_INC: | ||||||
12074 | case AMDGPUISD::BUFFER_ATOMIC_DEC: | ||||||
12075 | case AMDGPUISD::BUFFER_ATOMIC_CMPSWAP: | ||||||
12076 | case AMDGPUISD::BUFFER_ATOMIC_CSUB: | ||||||
12077 | case AMDGPUISD::BUFFER_ATOMIC_FADD: | ||||||
12078 | case AMDGPUISD::BUFFER_ATOMIC_FMIN: | ||||||
12079 | case AMDGPUISD::BUFFER_ATOMIC_FMAX: | ||||||
12080 | // Target-specific read-modify-write atomics are sources of divergence. | ||||||
12081 | return true; | ||||||
12082 | default: | ||||||
12083 | if (auto *A = dyn_cast<AtomicSDNode>(N)) { | ||||||
12084 | // Generic read-modify-write atomics are sources of divergence. | ||||||
12085 | return A->readMem() && A->writeMem(); | ||||||
12086 | } | ||||||
12087 | return false; | ||||||
12088 | } | ||||||
12089 | } | ||||||
12090 | |||||||
12091 | bool SITargetLowering::denormalsEnabledForType(const SelectionDAG &DAG, | ||||||
12092 | EVT VT) const { | ||||||
12093 | switch (VT.getScalarType().getSimpleVT().SimpleTy) { | ||||||
12094 | case MVT::f32: | ||||||
12095 | return hasFP32Denormals(DAG.getMachineFunction()); | ||||||
12096 | case MVT::f64: | ||||||
12097 | case MVT::f16: | ||||||
12098 | return hasFP64FP16Denormals(DAG.getMachineFunction()); | ||||||
12099 | default: | ||||||
12100 | return false; | ||||||
12101 | } | ||||||
12102 | } | ||||||
12103 | |||||||
12104 | bool SITargetLowering::denormalsEnabledForType(LLT Ty, | ||||||
12105 | MachineFunction &MF) const { | ||||||
12106 | switch (Ty.getScalarSizeInBits()) { | ||||||
12107 | case 32: | ||||||
12108 | return hasFP32Denormals(MF); | ||||||
12109 | case 64: | ||||||
12110 | case 16: | ||||||
12111 | return hasFP64FP16Denormals(MF); | ||||||
12112 | default: | ||||||
12113 | return false; | ||||||
12114 | } | ||||||
12115 | } | ||||||
12116 | |||||||
12117 | bool SITargetLowering::isKnownNeverNaNForTargetNode(SDValue Op, | ||||||
12118 | const SelectionDAG &DAG, | ||||||
12119 | bool SNaN, | ||||||
12120 | unsigned Depth) const { | ||||||
12121 | if (Op.getOpcode() == AMDGPUISD::CLAMP) { | ||||||
12122 | const MachineFunction &MF = DAG.getMachineFunction(); | ||||||
12123 | const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); | ||||||
12124 | |||||||
12125 | if (Info->getMode().DX10Clamp) | ||||||
12126 | return true; // Clamped to 0. | ||||||
12127 | return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1); | ||||||
12128 | } | ||||||
12129 | |||||||
12130 | return AMDGPUTargetLowering::isKnownNeverNaNForTargetNode(Op, DAG, | ||||||
12131 | SNaN, Depth); | ||||||
12132 | } | ||||||
12133 | |||||||
12134 | // Global FP atomic instructions have a hardcoded FP mode and do not support | ||||||
12135 | // FP32 denormals; v2f16 is the only type whose denormals they support. | ||||||
12136 | static bool fpModeMatchesGlobalFPAtomicMode(const AtomicRMWInst *RMW) { | ||||||
12137 | const fltSemantics &Flt = RMW->getType()->getScalarType()->getFltSemantics(); | ||||||
12138 | auto DenormMode = RMW->getParent()->getParent()->getDenormalMode(Flt); | ||||||
12139 | if (&Flt == &APFloat::IEEEsingle()) | ||||||
12140 | return DenormMode == DenormalMode::getPreserveSign(); | ||||||
12141 | return DenormMode == DenormalMode::getIEEE(); | ||||||
12142 | } | ||||||
12143 | |||||||
12144 | TargetLowering::AtomicExpansionKind | ||||||
12145 | SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const { | ||||||
12146 | |||||||
12147 | auto ReportUnsafeHWInst = [&](TargetLowering::AtomicExpansionKind Kind) { | ||||||
12148 | OptimizationRemarkEmitter ORE(RMW->getFunction()); | ||||||
12149 | LLVMContext &Ctx = RMW->getFunction()->getContext(); | ||||||
12150 | SmallVector<StringRef> SSNs; | ||||||
12151 | Ctx.getSyncScopeNames(SSNs); | ||||||
12152 | auto MemScope = SSNs[RMW->getSyncScopeID()].empty() | ||||||
12153 | ? "system" | ||||||
12154 | : SSNs[RMW->getSyncScopeID()]; | ||||||
12155 | ORE.emit([&]() { | ||||||
12156 | return OptimizationRemark(DEBUG_TYPE, "Passed", RMW) | ||||||
12157 | << "Hardware instruction generated for atomic " | ||||||
12158 | << RMW->getOperationName(RMW->getOperation()) | ||||||
12159 | << " operation at memory scope " << MemScope | ||||||
12160 | << " due to an unsafe request."; | ||||||
12161 | }); | ||||||
12162 | return Kind; | ||||||
12163 | }; | ||||||
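// [Editorial note] Remarks emitted here can typically be surfaced through the
// generic remark plumbing, e.g. `llc -pass-remarks=si-lower ...`; the exact
// flag spelling is an assumption based on the standard -pass-remarks=<regex>
// mechanism matching this file's DEBUG_TYPE.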
12164 | |||||||
12165 | switch (RMW->getOperation()) { | ||||||
12166 | case AtomicRMWInst::FAdd: { | ||||||
12167 | Type *Ty = RMW->getType(); | ||||||
12168 | |||||||
12169 | // We don't have a way to support 16-bit atomics now, so just leave them | ||||||
12170 | // as-is. | ||||||
12171 | if (Ty->isHalfTy()) | ||||||
12172 | return AtomicExpansionKind::None; | ||||||
12173 | |||||||
12174 | if (!Ty->isFloatTy() && (!Subtarget->hasGFX90AInsts() || !Ty->isDoubleTy())) | ||||||
12175 | return AtomicExpansionKind::CmpXChg; | ||||||
12176 | |||||||
12177 | unsigned AS = RMW->getPointerAddressSpace(); | ||||||
12178 | |||||||
12179 | if ((AS == AMDGPUAS::GLOBAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS) && | ||||||
12180 | Subtarget->hasAtomicFaddInsts()) { | ||||||
12181 | // The amdgpu-unsafe-fp-atomics attribute enables generation of unsafe | ||||||
12182 | // floating point atomic instructions. May generate more efficient code, | ||||||
12183 | // but may not respect rounding and denormal modes, and may give incorrect | ||||||
12184 | // results for certain memory destinations. | ||||||
12185 | if (RMW->getFunction() | ||||||
12186 | ->getFnAttribute("amdgpu-unsafe-fp-atomics") | ||||||
12187 | .getValueAsString() != "true") | ||||||
12188 | return AtomicExpansionKind::CmpXChg; | ||||||
12189 | |||||||
12190 | if (Subtarget->hasGFX90AInsts()) { | ||||||
12191 | if (Ty->isFloatTy() && AS == AMDGPUAS::FLAT_ADDRESS) | ||||||
12192 | return AtomicExpansionKind::CmpXChg; | ||||||
12193 | |||||||
12194 | auto SSID = RMW->getSyncScopeID(); | ||||||
12195 | if (SSID == SyncScope::System || | ||||||
12196 | SSID == RMW->getContext().getOrInsertSyncScopeID("one-as")) | ||||||
12197 | return AtomicExpansionKind::CmpXChg; | ||||||
12198 | |||||||
12199 | return ReportUnsafeHWInst(AtomicExpansionKind::None); | ||||||
12200 | } | ||||||
12201 | |||||||
12202 | if (AS == AMDGPUAS::FLAT_ADDRESS) | ||||||
12203 | return AtomicExpansionKind::CmpXChg; | ||||||
12204 | |||||||
12205 | return RMW->use_empty() ? ReportUnsafeHWInst(AtomicExpansionKind::None) | ||||||
12206 | : AtomicExpansionKind::CmpXChg; | ||||||
12207 | } | ||||||
12208 | |||||||
12209 | // DS FP atomics do respect the denormal mode, but the rounding mode is fixed | ||||||
12210 | // to round-to-nearest-even. | ||||||
12211 | // The only exception is DS_ADD_F64 which never flushes regardless of mode. | ||||||
12212 | if (AS == AMDGPUAS::LOCAL_ADDRESS && Subtarget->hasLDSFPAtomics()) { | ||||||
12213 | if (!Ty->isDoubleTy()) | ||||||
12214 | return AtomicExpansionKind::None; | ||||||
12215 | |||||||
12216 | if (fpModeMatchesGlobalFPAtomicMode(RMW)) | ||||||
12217 | return AtomicExpansionKind::None; | ||||||
12218 | |||||||
12219 | return RMW->getFunction() | ||||||
12220 | ->getFnAttribute("amdgpu-unsafe-fp-atomics") | ||||||
12221 | .getValueAsString() == "true" | ||||||
12222 | ? ReportUnsafeHWInst(AtomicExpansionKind::None) | ||||||
12223 | : AtomicExpansionKind::CmpXChg; | ||||||
12224 | } | ||||||
12225 | |||||||
12226 | return AtomicExpansionKind::CmpXChg; | ||||||
12227 | } | ||||||
12228 | default: | ||||||
12229 | break; | ||||||
12230 | } | ||||||
12231 | |||||||
12232 | return AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(RMW); | ||||||
12233 | } | ||||||
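// [Editorial sketch] A minimal illustration of how a frontend could opt a
// function into the unsafe-FP-atomic path tested above. The helper name is
// hypothetical; Function::addFnAttr(StringRef, StringRef) is the regular
// attribute API.
//
//   #include "llvm/IR/Function.h"
//
//   void allowUnsafeFPAtomics(llvm::Function &F) {
//     // After this, fadd atomics may lower to native instructions instead of
//     // a cmpxchg loop, trading rounding/denormal guarantees for speed.
//     F.addFnAttr("amdgpu-unsafe-fp-atomics", "true");
//   }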
12234 | |||||||
12235 | const TargetRegisterClass * | ||||||
12236 | SITargetLowering::getRegClassFor(MVT VT, bool isDivergent) const { | ||||||
12237 | const TargetRegisterClass *RC = TargetLoweringBase::getRegClassFor(VT, false); | ||||||
12238 | const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); | ||||||
12239 | if (RC == &AMDGPU::VReg_1RegClass && !isDivergent) | ||||||
12240 | return Subtarget->getWavefrontSize() == 64 ? &AMDGPU::SReg_64RegClass | ||||||
12241 | : &AMDGPU::SReg_32RegClass; | ||||||
12242 | if (!TRI->isSGPRClass(RC) && !isDivergent) | ||||||
12243 | return TRI->getEquivalentSGPRClass(RC); | ||||||
12244 | else if (TRI->isSGPRClass(RC) && isDivergent) | ||||||
12245 | return TRI->getEquivalentVGPRClass(RC); | ||||||
12246 | |||||||
12247 | return RC; | ||||||
12248 | } | ||||||
12249 | |||||||
12250 | // FIXME: This is a workaround for DivergenceAnalysis not understanding always | ||||||
12251 | // uniform values (as produced by the mask results of control flow intrinsics) | ||||||
12252 | // used outside of divergent blocks. The phi users need to also be treated as | ||||||
12253 | // always uniform. | ||||||
12254 | static bool hasCFUser(const Value *V, SmallPtrSet<const Value *, 16> &Visited, | ||||||
12255 | unsigned WaveSize) { | ||||||
12256 | // FIXME: We assume we never cast the mask results of a control flow | ||||||
12257 | // intrinsic. | ||||||
12258 | // Early exit if the type won't be consistent as a compile time hack. | ||||||
12259 | IntegerType *IT = dyn_cast<IntegerType>(V->getType()); | ||||||
12260 | if (!IT || IT->getBitWidth() != WaveSize) | ||||||
12261 | return false; | ||||||
12262 | |||||||
12263 | if (!isa<Instruction>(V)) | ||||||
12264 | return false; | ||||||
12265 | if (!Visited.insert(V).second) | ||||||
12266 | return false; | ||||||
12267 | bool Result = false; | ||||||
12268 | for (auto U : V->users()) { | ||||||
12269 | if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(U)) { | ||||||
12270 | if (V == U->getOperand(1)) { | ||||||
12271 | switch (Intrinsic->getIntrinsicID()) { | ||||||
12272 | default: | ||||||
12273 | Result = false; | ||||||
12274 | break; | ||||||
12275 | case Intrinsic::amdgcn_if_break: | ||||||
12276 | case Intrinsic::amdgcn_if: | ||||||
12277 | case Intrinsic::amdgcn_else: | ||||||
12278 | Result = true; | ||||||
12279 | break; | ||||||
12280 | } | ||||||
12281 | } | ||||||
12282 | if (V == U->getOperand(0)) { | ||||||
12283 | switch (Intrinsic->getIntrinsicID()) { | ||||||
12284 | default: | ||||||
12285 | Result = false; | ||||||
12286 | break; | ||||||
12287 | case Intrinsic::amdgcn_end_cf: | ||||||
12288 | case Intrinsic::amdgcn_loop: | ||||||
12289 | Result = true; | ||||||
12290 | break; | ||||||
12291 | } | ||||||
12292 | } | ||||||
12293 | } else { | ||||||
12294 | Result = hasCFUser(U, Visited, WaveSize); | ||||||
12295 | } | ||||||
12296 | if (Result) | ||||||
12297 | break; | ||||||
12298 | } | ||||||
12299 | return Result; | ||||||
12300 | } | ||||||
12301 | |||||||
12302 | bool SITargetLowering::requiresUniformRegister(MachineFunction &MF, | ||||||
12303 | const Value *V) const { | ||||||
12304 | if (const CallInst *CI = dyn_cast<CallInst>(V)) { | ||||||
12305 | if (CI->isInlineAsm()) { | ||||||
12306 | // FIXME: This cannot give a correct answer. This should only trigger in | ||||||
12307 | // the case where inline asm returns mixed SGPR and VGPR results, used | ||||||
12308 | // outside the defining block. We don't have a specific result to | ||||||
12309 | // consider, so this assumes if any value is SGPR, the overall register | ||||||
12310 | // also needs to be SGPR. | ||||||
12311 | const SIRegisterInfo *SIRI = Subtarget->getRegisterInfo(); | ||||||
12312 | TargetLowering::AsmOperandInfoVector TargetConstraints = ParseConstraints( | ||||||
12313 | MF.getDataLayout(), Subtarget->getRegisterInfo(), *CI); | ||||||
12314 | for (auto &TC : TargetConstraints) { | ||||||
12315 | if (TC.Type == InlineAsm::isOutput) { | ||||||
12316 | ComputeConstraintToUse(TC, SDValue()); | ||||||
12317 | unsigned AssignedReg; | ||||||
12318 | const TargetRegisterClass *RC; | ||||||
12319 | std::tie(AssignedReg, RC) = getRegForInlineAsmConstraint( | ||||||
12320 | SIRI, TC.ConstraintCode, TC.ConstraintVT); | ||||||
12321 | if (RC) { | ||||||
12322 | MachineRegisterInfo &MRI = MF.getRegInfo(); | ||||||
12323 | if (AssignedReg != 0 && SIRI->isSGPRReg(MRI, AssignedReg)) | ||||||
12324 | return true; | ||||||
12325 | else if (SIRI->isSGPRClass(RC)) | ||||||
12326 | return true; | ||||||
12327 | } | ||||||
12328 | } | ||||||
12329 | } | ||||||
12330 | } | ||||||
12331 | } | ||||||
12332 | SmallPtrSet<const Value *, 16> Visited; | ||||||
12333 | return hasCFUser(V, Visited, Subtarget->getWavefrontSize()); | ||||||
12334 | } | ||||||
12335 | |||||||
12336 | std::pair<InstructionCost, MVT> | ||||||
12337 | SITargetLowering::getTypeLegalizationCost(const DataLayout &DL, | ||||||
12338 | Type *Ty) const { | ||||||
12339 | std::pair<InstructionCost, MVT> Cost = | ||||||
12340 | TargetLoweringBase::getTypeLegalizationCost(DL, Ty); | ||||||
12341 | auto Size = DL.getTypeSizeInBits(Ty); | ||||||
12342 | // Maximum load or store can handle 8 dwords for scalar and 4 for | ||||||
12343 | // vector ALU. Let's assume anything above 8 dwords is expensive | ||||||
12344 | // even if legal. | ||||||
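// [Editorial example] E.g. a 16 x i64 vector is 1024 bits (32 dwords), so the
// cost below becomes (1024 + 255) / 256 = 4, while anything of 256 bits or
// less keeps the base legalization cost.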
12345 | if (Size <= 256) | ||||||
12346 | return Cost; | ||||||
12347 | |||||||
12348 | Cost.first = (Size + 255) / 256; | ||||||
12349 | return Cost; | ||||||
12350 | } |
1 | //===- llvm/Support/Casting.h - Allow flexible, checked, casts --*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file defines the isa<X>(), cast<X>(), dyn_cast<X>(), cast_or_null<X>(), |
10 | // and dyn_cast_or_null<X>() templates. |
11 | // |
12 | //===----------------------------------------------------------------------===// |
13 | |
14 | #ifndef LLVM_SUPPORT_CASTING_H |
15 | #define LLVM_SUPPORT_CASTING_H |
16 | |
17 | #include "llvm/Support/Compiler.h" |
18 | #include "llvm/Support/type_traits.h" |
19 | #include <cassert> |
20 | #include <memory> |
21 | #include <type_traits> |
22 | |
23 | namespace llvm { |
24 | |
25 | //===----------------------------------------------------------------------===// |
26 | // isa<x> Support Templates |
27 | //===----------------------------------------------------------------------===// |
28 | |
29 | // Define a template that can be specialized by smart pointers to reflect the |
30 | // fact that they are automatically dereferenced, and are not involved with the |
31 | // template selection process... the default implementation is a noop. |
32 | // |
33 | template<typename From> struct simplify_type { |
34 | using SimpleType = From; // The real type this represents... |
35 | |
36 | // An accessor to get the real value... |
37 | static SimpleType &getSimplifiedValue(From &Val) { return Val; } |
38 | }; |
39 | |
40 | template<typename From> struct simplify_type<const From> { |
41 | using NonConstSimpleType = typename simplify_type<From>::SimpleType; |
42 | using SimpleType = |
43 | typename add_const_past_pointer<NonConstSimpleType>::type; |
44 | using RetType = |
45 | typename add_lvalue_reference_if_not_pointer<SimpleType>::type; |
46 | |
47 | static RetType getSimplifiedValue(const From& Val) { |
48 | return simplify_type<From>::getSimplifiedValue(const_cast<From&>(Val)); |
49 | } |
50 | }; |
51 | |
52 | // The core of the implementation of isa<X> is here; To and From should be |
53 | // the names of classes. This template can be specialized to customize the |
54 | // implementation of isa<> without rewriting it from scratch. |
55 | template <typename To, typename From, typename Enabler = void> |
56 | struct isa_impl { |
57 | static inline bool doit(const From &Val) { |
58 | return To::classof(&Val); |
59 | } |
60 | }; |
61 | |
62 | /// Always allow upcasts, and perform no dynamic check for them. |
63 | template <typename To, typename From> |
64 | struct isa_impl<To, From, std::enable_if_t<std::is_base_of<To, From>::value>> { |
65 | static inline bool doit(const From &) { return true; } |
66 | }; |
67 | |
68 | template <typename To, typename From> struct isa_impl_cl { |
69 | static inline bool doit(const From &Val) { |
70 | return isa_impl<To, From>::doit(Val); |
71 | } |
72 | }; |
73 | |
74 | template <typename To, typename From> struct isa_impl_cl<To, const From> { |
75 | static inline bool doit(const From &Val) { |
76 | return isa_impl<To, From>::doit(Val); |
77 | } |
78 | }; |
79 | |
80 | template <typename To, typename From> |
81 | struct isa_impl_cl<To, const std::unique_ptr<From>> { |
82 | static inline bool doit(const std::unique_ptr<From> &Val) { |
83 | assert(Val && "isa<> used on a null pointer");
84 | return isa_impl_cl<To, From>::doit(*Val); |
85 | } |
86 | }; |
87 | |
88 | template <typename To, typename From> struct isa_impl_cl<To, From*> { |
89 | static inline bool doit(const From *Val) { |
90 | assert(Val && "isa<> used on a null pointer");
91 | return isa_impl<To, From>::doit(*Val); |
92 | } |
93 | }; |
94 | |
95 | template <typename To, typename From> struct isa_impl_cl<To, From*const> { |
96 | static inline bool doit(const From *Val) { |
97 | assert(Val && "isa<> used on a null pointer");
98 | return isa_impl<To, From>::doit(*Val); |
99 | } |
100 | }; |
101 | |
102 | template <typename To, typename From> struct isa_impl_cl<To, const From*> { |
103 | static inline bool doit(const From *Val) { |
104 | assert(Val && "isa<> used on a null pointer");
105 | return isa_impl<To, From>::doit(*Val); |
106 | } |
107 | }; |
108 | |
109 | template <typename To, typename From> struct isa_impl_cl<To, const From*const> { |
110 | static inline bool doit(const From *Val) { |
111 | assert(Val && "isa<> used on a null pointer");
112 | return isa_impl<To, From>::doit(*Val); |
113 | } |
114 | }; |
115 | |
116 | template<typename To, typename From, typename SimpleFrom> |
117 | struct isa_impl_wrap { |
118 | // When From != SimplifiedType, we can simplify the type some more by using |
119 | // the simplify_type template. |
120 | static bool doit(const From &Val) { |
121 | return isa_impl_wrap<To, SimpleFrom, |
122 | typename simplify_type<SimpleFrom>::SimpleType>::doit( |
123 | simplify_type<const From>::getSimplifiedValue(Val)); |
124 | } |
125 | }; |
126 | |
127 | template<typename To, typename FromTy> |
128 | struct isa_impl_wrap<To, FromTy, FromTy> { |
129 | // When From == SimpleType, we are as simple as we are going to get. |
130 | static bool doit(const FromTy &Val) { |
131 | return isa_impl_cl<To,FromTy>::doit(Val); |
132 | } |
133 | }; |
134 | |
135 | // isa<X> - Return true if the parameter to the template is an instance of one |
136 | // of the template type arguments. Used like this: |
137 | // |
138 | // if (isa<Type>(myVal)) { ... } |
139 | // if (isa<Type0, Type1, Type2>(myVal)) { ... } |
140 | // |
141 | template <class X, class Y> LLVM_NODISCARD inline bool isa(const Y &Val) {
142 | return isa_impl_wrap<X, const Y, |
143 | typename simplify_type<const Y>::SimpleType>::doit(Val); |
144 | } |
145 | |
146 | template <typename First, typename Second, typename... Rest, typename Y> |
147 | LLVM_NODISCARD inline bool isa(const Y &Val) {
148 | return isa<First>(Val) || isa<Second, Rest...>(Val); |
149 | } |
150 | |
151 | // isa_and_nonnull<X> - Functionally identical to isa, except that a null value |
152 | // is accepted. |
153 | // |
154 | template <typename... X, class Y> |
155 | LLVM_NODISCARD inline bool isa_and_nonnull(const Y &Val) {
156 | if (!Val) |
157 | return false; |
158 | return isa<X...>(Val); |
159 | } |
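// [Editorial sketch] isa_and_nonnull folds the null check into the query, so
// for a possibly-null pointer V (hypothetical) the two forms are equivalent:
//
//   if (V && isa<Instruction>(V)) { ... }
//   if (isa_and_nonnull<Instruction>(V)) { ... }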
160 | |
161 | //===----------------------------------------------------------------------===// |
162 | // cast<x> Support Templates |
163 | //===----------------------------------------------------------------------===// |
164 | |
165 | template<class To, class From> struct cast_retty; |
166 | |
167 | // Calculate what type the 'cast' function should return, based on a requested |
168 | // type of To and a source type of From. |
169 | template<class To, class From> struct cast_retty_impl { |
170 | using ret_type = To &; // Normal case, return Ty& |
171 | }; |
172 | template<class To, class From> struct cast_retty_impl<To, const From> { |
173 | using ret_type = const To &; // Normal case, return Ty& |
174 | }; |
175 | |
176 | template<class To, class From> struct cast_retty_impl<To, From*> { |
177 | using ret_type = To *; // Pointer arg case, return Ty* |
178 | }; |
179 | |
180 | template<class To, class From> struct cast_retty_impl<To, const From*> { |
181 | using ret_type = const To *; // Constant pointer arg case, return const Ty* |
182 | }; |
183 | |
184 | template<class To, class From> struct cast_retty_impl<To, const From*const> { |
185 | using ret_type = const To *; // Constant pointer arg case, return const Ty* |
186 | }; |
187 | |
188 | template <class To, class From> |
189 | struct cast_retty_impl<To, std::unique_ptr<From>> { |
190 | private: |
191 | using PointerType = typename cast_retty_impl<To, From *>::ret_type; |
192 | using ResultType = std::remove_pointer_t<PointerType>; |
193 | |
194 | public: |
195 | using ret_type = std::unique_ptr<ResultType>; |
196 | }; |
197 | |
198 | template<class To, class From, class SimpleFrom> |
199 | struct cast_retty_wrap { |
200 | // When the simplified type and the from type are not the same, use the type |
201 | // simplifier to reduce the type, then reuse cast_retty_impl to get the |
202 | // resultant type. |
203 | using ret_type = typename cast_retty<To, SimpleFrom>::ret_type; |
204 | }; |
205 | |
206 | template<class To, class FromTy> |
207 | struct cast_retty_wrap<To, FromTy, FromTy> { |
208 | // When the simplified type is equal to the from type, use it directly. |
209 | using ret_type = typename cast_retty_impl<To,FromTy>::ret_type; |
210 | }; |
211 | |
212 | template<class To, class From> |
213 | struct cast_retty { |
214 | using ret_type = typename cast_retty_wrap< |
215 | To, From, typename simplify_type<From>::SimpleType>::ret_type; |
216 | }; |
217 | |
218 | // Ensure the non-simple values are converted using the simplify_type template |
219 | // that may be specialized by smart pointers... |
220 | // |
221 | template<class To, class From, class SimpleFrom> struct cast_convert_val { |
222 | // This is not a simple type, use the template to simplify it... |
223 | static typename cast_retty<To, From>::ret_type doit(From &Val) { |
224 | return cast_convert_val<To, SimpleFrom, |
225 | typename simplify_type<SimpleFrom>::SimpleType>::doit( |
226 | simplify_type<From>::getSimplifiedValue(Val)); |
227 | } |
228 | }; |
229 | |
230 | template<class To, class FromTy> struct cast_convert_val<To,FromTy,FromTy> { |
231 | // This _is_ a simple type, just cast it. |
232 | static typename cast_retty<To, FromTy>::ret_type doit(const FromTy &Val) { |
233 | typename cast_retty<To, FromTy>::ret_type Res2 |
234 | = (typename cast_retty<To, FromTy>::ret_type)const_cast<FromTy&>(Val); |
235 | return Res2; |
236 | } |
237 | }; |
238 | |
239 | template <class X> struct is_simple_type { |
240 | static const bool value = |
241 | std::is_same<X, typename simplify_type<X>::SimpleType>::value; |
242 | }; |
243 | |
244 | // cast<X> - Return the argument parameter cast to the specified type. This |
245 | // casting operator asserts that the type is correct, so it does not return null |
246 | // on failure. It does not allow a null argument (use cast_or_null for that). |
247 | // It is typically used like this: |
248 | // |
249 | // cast<Instruction>(myVal)->getParent() |
250 | // |
251 | template <class X, class Y> |
252 | inline std::enable_if_t<!is_simple_type<Y>::value, |
253 | typename cast_retty<X, const Y>::ret_type> |
254 | cast(const Y &Val) { |
255 | assert(isa<X>(Val) && "cast<Ty>() argument of incompatible type!");
256 | return cast_convert_val< |
257 | X, const Y, typename simplify_type<const Y>::SimpleType>::doit(Val); |
258 | } |
259 | |
260 | template <class X, class Y> |
261 | inline typename cast_retty<X, Y>::ret_type cast(Y &Val) { |
262 | assert(isa<X>(Val) && "cast<Ty>() argument of incompatible type!");
263 | return cast_convert_val<X, Y, |
264 | typename simplify_type<Y>::SimpleType>::doit(Val); |
265 | } |
266 | |
267 | template <class X, class Y> |
268 | inline typename cast_retty<X, Y *>::ret_type cast(Y *Val) { |
269 | assert(isa<X>(Val) && "cast<Ty>() argument of incompatible type!");
270 | return cast_convert_val<X, Y*, |
271 | typename simplify_type<Y*>::SimpleType>::doit(Val); |
272 | } |
273 | |
274 | template <class X, class Y> |
275 | inline typename cast_retty<X, std::unique_ptr<Y>>::ret_type |
276 | cast(std::unique_ptr<Y> &&Val) { |
277 | assert(isa<X>(Val.get()) && "cast<Ty>() argument of incompatible type!");
278 | using ret_type = typename cast_retty<X, std::unique_ptr<Y>>::ret_type; |
279 | return ret_type( |
280 | cast_convert_val<X, Y *, typename simplify_type<Y *>::SimpleType>::doit( |
281 | Val.release())); |
282 | } |
283 | |
284 | // cast_or_null<X> - Functionally identical to cast, except that a null value is |
285 | // accepted. |
286 | // |
287 | template <class X, class Y> |
288 | LLVM_NODISCARD inline std::enable_if_t<
289 | !is_simple_type<Y>::value, typename cast_retty<X, const Y>::ret_type> |
290 | cast_or_null(const Y &Val) { |
291 | if (!Val) |
292 | return nullptr; |
293 | assert(isa<X>(Val) && "cast_or_null<Ty>() argument of incompatible type!");
294 | return cast<X>(Val); |
295 | } |
296 | |
297 | template <class X, class Y> |
298 | LLVM_NODISCARD inline std::enable_if_t<!is_simple_type<Y>::value,
299 | typename cast_retty<X, Y>::ret_type> |
300 | cast_or_null(Y &Val) { |
301 | if (!Val) |
302 | return nullptr; |
303 | assert(isa<X>(Val) && "cast_or_null<Ty>() argument of incompatible type!");
304 | return cast<X>(Val); |
305 | } |
306 | |
307 | template <class X, class Y> |
308 | LLVM_NODISCARD inline typename cast_retty<X, Y *>::ret_type
309 | cast_or_null(Y *Val) { |
310 | if (!Val) return nullptr; |
311 | assert(isa<X>(Val) && "cast_or_null<Ty>() argument of incompatible type!");
312 | return cast<X>(Val); |
313 | } |
314 | |
315 | template <class X, class Y> |
316 | inline typename cast_retty<X, std::unique_ptr<Y>>::ret_type |
317 | cast_or_null(std::unique_ptr<Y> &&Val) { |
318 | if (!Val) |
319 | return nullptr; |
320 | return cast<X>(std::move(Val)); |
321 | } |
322 | |
323 | // dyn_cast<X> - Return the argument parameter cast to the specified type. This |
324 | // casting operator returns null if the argument is of the wrong type, so it can |
325 | // be used to test for a type as well as cast if successful. This should be |
326 | // used in the context of an if statement like this: |
327 | // |
328 | // if (const Instruction *I = dyn_cast<Instruction>(myVal)) { ... } |
329 | // |
330 | |
331 | template <class X, class Y> |
332 | LLVM_NODISCARD inline std::enable_if_t<
333 | !is_simple_type<Y>::value, typename cast_retty<X, const Y>::ret_type> |
334 | dyn_cast(const Y &Val) { |
335 | return isa<X>(Val) ? cast<X>(Val) : nullptr; |
336 | } |
337 | |
338 | template <class X, class Y> |
339 | LLVM_NODISCARD inline typename cast_retty<X, Y>::ret_type dyn_cast(Y &Val) {
340 | return isa<X>(Val) ? cast<X>(Val) : nullptr; |
341 | } |
342 | |
343 | template <class X, class Y> |
344 | LLVM_NODISCARD inline typename cast_retty<X, Y *>::ret_type dyn_cast(Y *Val) {
345 | return isa<X>(Val) ? cast<X>(Val) : nullptr; |
346 | } |
347 | |
348 | // dyn_cast_or_null<X> - Functionally identical to dyn_cast, except that a null |
349 | // value is accepted. |
350 | // |
351 | template <class X, class Y> |
352 | LLVM_NODISCARD inline std::enable_if_t<
353 | !is_simple_type<Y>::value, typename cast_retty<X, const Y>::ret_type> |
354 | dyn_cast_or_null(const Y &Val) { |
355 | return (Val && isa<X>(Val)) ? cast<X>(Val) : nullptr; |
356 | } |
357 | |
358 | template <class X, class Y> |
359 | LLVM_NODISCARD inline std::enable_if_t<!is_simple_type<Y>::value,
360 | typename cast_retty<X, Y>::ret_type> |
361 | dyn_cast_or_null(Y &Val) { |
362 | return (Val && isa<X>(Val)) ? cast<X>(Val) : nullptr; |
363 | } |
364 | |
365 | template <class X, class Y> |
366 | LLVM_NODISCARD inline typename cast_retty<X, Y *>::ret_type
367 | dyn_cast_or_null(Y *Val) { |
368 | return (Val && isa<X>(Val)) ? cast<X>(Val) : nullptr; |
369 | } |
370 | |
371 | // unique_dyn_cast<X> - Given a unique_ptr<Y>, try to return a unique_ptr<X>, |
372 | // taking ownership of the input pointer iff isa<X>(Val) is true. If the |
373 | // cast is successful, From refers to nullptr on exit and the casted value |
374 | // is returned. If the cast is unsuccessful, the function returns nullptr |
375 | // and From is unchanged. |
376 | template <class X, class Y> |
377 | LLVM_NODISCARD inline auto unique_dyn_cast(std::unique_ptr<Y> &Val)
378 | -> decltype(cast<X>(Val)) { |
379 | if (!isa<X>(Val)) |
380 | return nullptr; |
381 | return cast<X>(std::move(Val)); |
382 | } |
383 | |
384 | template <class X, class Y> |
385 | LLVM_NODISCARD inline auto unique_dyn_cast(std::unique_ptr<Y> &&Val) {
386 | return unique_dyn_cast<X, Y>(Val); |
387 | } |
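// [Editorial sketch] The ownership contract documented above, shown with
// hypothetical types Base/Derived that provide LLVM-style classof() support:
//
//   std::unique_ptr<Base> B = makeDerived(); // hypothetical factory
//   if (auto D = unique_dyn_cast<Derived>(B)) {
//     // Cast succeeded: D owns the object, B is now null.
//   } else {
//     // Cast failed: B is unchanged and still owns the object.
//   }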
388 | |
389 | // unique_dyn_cast_or_null<X> - Functionally identical to unique_dyn_cast, except that
390 | // a null value is accepted. |
391 | template <class X, class Y> |
392 | LLVM_NODISCARD inline auto unique_dyn_cast_or_null(std::unique_ptr<Y> &Val)
393 | -> decltype(cast<X>(Val)) { |
394 | if (!Val) |
395 | return nullptr; |
396 | return unique_dyn_cast<X, Y>(Val); |
397 | } |
398 | |
399 | template <class X, class Y> |
400 | LLVM_NODISCARD inline auto unique_dyn_cast_or_null(std::unique_ptr<Y> &&Val) {
401 | return unique_dyn_cast_or_null<X, Y>(Val); |
402 | } |
403 | |
404 | } // end namespace llvm |
405 | |
406 | #endif // LLVM_SUPPORT_CASTING_H |
1 | //===- llvm/CodeGen/SelectionDAGNodes.h - SelectionDAG Nodes ----*- C++ -*-===// | |||
2 | // | |||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
4 | // See https://llvm.org/LICENSE.txt for license information. | |||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
6 | // | |||
7 | //===----------------------------------------------------------------------===// | |||
8 | // | |||
9 | // This file declares the SDNode class and derived classes, which are used to | |||
10 | // represent the nodes and operations present in a SelectionDAG. These nodes | |||
11 | // and operations are machine code level operations, with some similarities to | |||
12 | // the GCC RTL representation. | |||
13 | // | |||
14 | // Clients should include the SelectionDAG.h file instead of this file directly. | |||
15 | // | |||
16 | //===----------------------------------------------------------------------===// | |||
17 | ||||
18 | #ifndef LLVM_CODEGEN_SELECTIONDAGNODES_H | |||
19 | #define LLVM_CODEGEN_SELECTIONDAGNODES_H | |||
20 | ||||
21 | #include "llvm/ADT/APFloat.h" | |||
22 | #include "llvm/ADT/ArrayRef.h" | |||
23 | #include "llvm/ADT/BitVector.h" | |||
24 | #include "llvm/ADT/FoldingSet.h" | |||
25 | #include "llvm/ADT/GraphTraits.h" | |||
26 | #include "llvm/ADT/SmallPtrSet.h" | |||
27 | #include "llvm/ADT/SmallVector.h" | |||
28 | #include "llvm/ADT/ilist_node.h" | |||
29 | #include "llvm/ADT/iterator.h" | |||
30 | #include "llvm/ADT/iterator_range.h" | |||
31 | #include "llvm/CodeGen/ISDOpcodes.h" | |||
32 | #include "llvm/CodeGen/MachineMemOperand.h" | |||
33 | #include "llvm/CodeGen/Register.h" | |||
34 | #include "llvm/CodeGen/ValueTypes.h" | |||
35 | #include "llvm/IR/Constants.h" | |||
36 | #include "llvm/IR/DebugLoc.h" | |||
37 | #include "llvm/IR/Instruction.h" | |||
38 | #include "llvm/IR/Instructions.h" | |||
39 | #include "llvm/IR/Metadata.h" | |||
40 | #include "llvm/IR/Operator.h" | |||
41 | #include "llvm/Support/AlignOf.h" | |||
42 | #include "llvm/Support/AtomicOrdering.h" | |||
43 | #include "llvm/Support/Casting.h" | |||
44 | #include "llvm/Support/ErrorHandling.h" | |||
45 | #include "llvm/Support/MachineValueType.h" | |||
46 | #include "llvm/Support/TypeSize.h" | |||
47 | #include <algorithm> | |||
48 | #include <cassert> | |||
49 | #include <climits> | |||
50 | #include <cstddef> | |||
51 | #include <cstdint> | |||
52 | #include <cstring> | |||
53 | #include <iterator> | |||
54 | #include <string> | |||
55 | #include <tuple> | |||
56 | ||||
57 | namespace llvm { | |||
58 | ||||
59 | class APInt; | |||
60 | class Constant; | |||
61 | template <typename T> struct DenseMapInfo; | |||
62 | class GlobalValue; | |||
63 | class MachineBasicBlock; | |||
64 | class MachineConstantPoolValue; | |||
65 | class MCSymbol; | |||
66 | class raw_ostream; | |||
67 | class SDNode; | |||
68 | class SelectionDAG; | |||
69 | class Type; | |||
70 | class Value; | |||
71 | ||||
72 | void checkForCycles(const SDNode *N, const SelectionDAG *DAG = nullptr, | |||
73 | bool force = false); | |||
74 | ||||
75 | /// This represents a list of ValueType's that has been intern'd by | |||
76 | /// a SelectionDAG. Instances of this simple value class are returned by | |||
77 | /// SelectionDAG::getVTList(...). | |||
78 | /// | |||
79 | struct SDVTList { | |||
80 | const EVT *VTs; | |||
81 | unsigned int NumVTs; | |||
82 | }; | |||
83 | ||||
84 | namespace ISD { | |||
85 | ||||
86 | /// Node predicates | |||
87 | ||||
88 | /// If N is a BUILD_VECTOR or SPLAT_VECTOR node whose elements are all the | |||
89 | /// same constant or undefined, return true and return the constant value in | |||
90 | /// \p SplatValue. | |||
91 | bool isConstantSplatVector(const SDNode *N, APInt &SplatValue); | |||
92 | ||||
93 | /// Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where | |||
94 | /// all of the elements are ~0 or undef. If \p BuildVectorOnly is set to | |||
95 | /// true, it only checks BUILD_VECTOR. | |||
96 | bool isConstantSplatVectorAllOnes(const SDNode *N, | |||
97 | bool BuildVectorOnly = false); | |||
98 | ||||
99 | /// Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where | |||
100 | /// all of the elements are 0 or undef. If \p BuildVectorOnly is set to true, it | |||
101 | /// only checks BUILD_VECTOR. | |||
102 | bool isConstantSplatVectorAllZeros(const SDNode *N, | |||
103 | bool BuildVectorOnly = false); | |||
104 | ||||
105 | /// Return true if the specified node is a BUILD_VECTOR where all of the | |||
106 | /// elements are ~0 or undef. | |||
107 | bool isBuildVectorAllOnes(const SDNode *N); | |||
108 | ||||
109 | /// Return true if the specified node is a BUILD_VECTOR where all of the | |||
110 | /// elements are 0 or undef. | |||
111 | bool isBuildVectorAllZeros(const SDNode *N); | |||
112 | ||||
113 | /// Return true if the specified node is a BUILD_VECTOR node of all | |||
114 | /// ConstantSDNode or undef. | |||
115 | bool isBuildVectorOfConstantSDNodes(const SDNode *N); | |||
116 | ||||
117 | /// Return true if the specified node is a BUILD_VECTOR node of all | |||
118 | /// ConstantFPSDNode or undef. | |||
119 | bool isBuildVectorOfConstantFPSDNodes(const SDNode *N); | |||
120 | ||||
121 | /// Return true if the node has at least one operand and all operands of the | |||
122 | /// specified node are ISD::UNDEF. | |||
123 | bool allOperandsUndef(const SDNode *N); | |||
124 | ||||
125 | } // end namespace ISD | |||
126 | ||||
127 | //===----------------------------------------------------------------------===// | |||
128 | /// Unlike LLVM values, Selection DAG nodes may return multiple | |||
129 | /// values as the result of a computation. Many nodes return multiple values, | |||
130 | /// from loads (which define a token and a return value) to ADDC (which returns | |||
131 | /// a result and a carry value), to calls (which may return an arbitrary number | |||
132 | /// of values). | |||
133 | /// | |||
134 | /// As such, each use of a SelectionDAG computation must indicate the node that | |||
135 | /// computes it as well as which return value to use from that node. This pair | |||
136 | /// of information is represented with the SDValue value type. | |||
137 | /// | |||
138 | class SDValue { | |||
139 | friend struct DenseMapInfo<SDValue>; | |||
140 | ||||
141 | SDNode *Node = nullptr; // The node defining the value we are using. | |||
142 | unsigned ResNo = 0; // Which return value of the node we are using. | |||
143 | ||||
144 | public: | |||
145 | SDValue() = default; | |||
146 | SDValue(SDNode *node, unsigned resno); | |||
147 | ||||
148 | /// get the index which selects a specific result in the SDNode | |||
149 | unsigned getResNo() const { return ResNo; } | |||
150 | ||||
151 | /// get the SDNode which holds the desired result | |||
152 | SDNode *getNode() const { return Node; } | |||
153 | ||||
154 | /// set the SDNode | |||
155 | void setNode(SDNode *N) { Node = N; } | |||
156 | ||||
157 | inline SDNode *operator->() const { return Node; } | |||
158 | ||||
159 | bool operator==(const SDValue &O) const { | |||
160 | return Node == O.Node && ResNo == O.ResNo; | |||
161 | } | |||
162 | bool operator!=(const SDValue &O) const { | |||
163 | return !operator==(O); | |||
164 | } | |||
165 | bool operator<(const SDValue &O) const { | |||
166 | return std::tie(Node, ResNo) < std::tie(O.Node, O.ResNo); | |||
167 | } | |||
168 | explicit operator bool() const { | |||
169 | return Node != nullptr; | |||
170 | } | |||
171 | ||||
172 | SDValue getValue(unsigned R) const { | |||
173 | return SDValue(Node, R); | |||
174 | } | |||
175 | ||||
176 | /// Return true if this node is an operand of N. | |||
177 | bool isOperandOf(const SDNode *N) const; | |||
178 | ||||
179 | /// Return the ValueType of the referenced return value. | |||
180 | inline EVT getValueType() const; | |||
181 | ||||
182 | /// Return the simple ValueType of the referenced return value. | |||
183 | MVT getSimpleValueType() const { | |||
184 | return getValueType().getSimpleVT(); | |||
185 | } | |||
186 | ||||
187 | /// Returns the size of the value in bits. | |||
188 | /// | |||
189 | /// If the value type is a scalable vector type, the scalable property will | |||
190 | /// be set and the runtime size will be a positive integer multiple of the | |||
191 | /// base size. | |||
192 | TypeSize getValueSizeInBits() const { | |||
193 | return getValueType().getSizeInBits(); | |||
194 | } | |||
195 | ||||
196 | uint64_t getScalarValueSizeInBits() const { | |||
197 | return getValueType().getScalarType().getFixedSizeInBits(); | |||
198 | } | |||
199 | ||||
200 | // Forwarding methods - These forward to the corresponding methods in SDNode. | |||
201 | inline unsigned getOpcode() const; | |||
202 | inline unsigned getNumOperands() const; | |||
203 | inline const SDValue &getOperand(unsigned i) const; | |||
204 | inline uint64_t getConstantOperandVal(unsigned i) const; | |||
205 | inline const APInt &getConstantOperandAPInt(unsigned i) const; | |||
206 | inline bool isTargetMemoryOpcode() const; | |||
207 | inline bool isTargetOpcode() const; | |||
208 | inline bool isMachineOpcode() const; | |||
209 | inline bool isUndef() const; | |||
210 | inline unsigned getMachineOpcode() const; | |||
211 | inline const DebugLoc &getDebugLoc() const; | |||
212 | inline void dump() const; | |||
213 | inline void dump(const SelectionDAG *G) const; | |||
214 | inline void dumpr() const; | |||
215 | inline void dumpr(const SelectionDAG *G) const; | |||
216 | ||||
217 | /// Return true if this operand (which must be a chain) reaches the | |||
218 | /// specified operand without crossing any side-effecting instructions. | |||
219 | /// In practice, this looks through token factors and non-volatile loads. | |||
220 | /// In order to remain efficient, this only | |||
221 | /// looks a couple of nodes in; it does not do an exhaustive search. | |||
222 | bool reachesChainWithoutSideEffects(SDValue Dest, | |||
223 | unsigned Depth = 2) const; | |||
224 | ||||
225 | /// Return true if there are no nodes using value ResNo of Node. | |||
226 | inline bool use_empty() const; | |||
227 | ||||
228 | /// Return true if there is exactly one node using value ResNo of Node. | |||
229 | inline bool hasOneUse() const; | |||
230 | }; | |||
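// [Editorial sketch] An SDValue names one result of a node. For a load node N
// (which defines a loaded value and a chain, as noted above), the two results
// are distinct SDValues over the same SDNode*:
//
//   SDValue Loaded(N, 0);                // the loaded value
//   SDValue Chain = Loaded.getValue(1);  // the chain result of the same node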
231 | ||||
232 | template<> struct DenseMapInfo<SDValue> { | |||
233 | static inline SDValue getEmptyKey() { | |||
234 | SDValue V; | |||
235 | V.ResNo = -1U; | |||
236 | return V; | |||
237 | } | |||
238 | ||||
239 | static inline SDValue getTombstoneKey() { | |||
240 | SDValue V; | |||
241 | V.ResNo = -2U; | |||
242 | return V; | |||
243 | } | |||
244 | ||||
245 | static unsigned getHashValue(const SDValue &Val) { | |||
246 | return ((unsigned)((uintptr_t)Val.getNode() >> 4) ^ | |||
247 | (unsigned)((uintptr_t)Val.getNode() >> 9)) + Val.getResNo(); | |||
248 | } | |||
249 | ||||
250 | static bool isEqual(const SDValue &LHS, const SDValue &RHS) { | |||
251 | return LHS == RHS; | |||
252 | } | |||
253 | }; | |||
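// [Editorial sketch] With the DenseMapInfo specialization above, SDValue can
// key a DenseMap directly; the map name below is hypothetical:
//
//   llvm::DenseMap<SDValue, unsigned> SlotForValue;
//   SlotForValue[SDValue(N, 0)] = 0;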
254 | ||||
255 | /// Allow casting operators to work directly on | |||
256 | /// SDValues as if they were SDNode*'s. | |||
257 | template<> struct simplify_type<SDValue> { | |||
258 | using SimpleType = SDNode *; | |||
259 | ||||
260 | static SimpleType getSimplifiedValue(SDValue &Val) { | |||
261 | return Val.getNode(); | |||
262 | } | |||
263 | }; | |||
264 | template<> struct simplify_type<const SDValue> { | |||
265 | using SimpleType = /*const*/ SDNode *; | |||
266 | ||||
267 | static SimpleType getSimplifiedValue(const SDValue &Val) { | |||
268 | return Val.getNode(); | |||
269 | } | |||
270 | }; | |||
271 | ||||
272 | /// Represents a use of a SDNode. This class holds an SDValue, | |||
273 | /// which records the SDNode being used and the result number, a | |||
274 | /// pointer to the SDNode using the value, and Next and Prev pointers, | |||
275 | /// which link together all the uses of an SDNode. | |||
276 | /// | |||
277 | class SDUse { | |||
278 | /// Val - The value being used. | |||
279 | SDValue Val; | |||
280 | /// User - The user of this value. | |||
281 | SDNode *User = nullptr; | |||
282 | /// Prev, Next - Pointers to the uses list of the SDNode referred by | |||
283 | /// this operand. | |||
284 | SDUse **Prev = nullptr; | |||
285 | SDUse *Next = nullptr; | |||
286 | ||||
287 | public: | |||
288 | SDUse() = default; | |||
289 | SDUse(const SDUse &U) = delete; | |||
290 | SDUse &operator=(const SDUse &) = delete; | |||
291 | ||||
292 | /// Normally SDUse will just implicitly convert to an SDValue that it holds. | |||
293 | operator const SDValue&() const { return Val; } | |||
294 | ||||
295 | /// If implicit conversion to SDValue doesn't work, the get() method returns | |||
296 | /// the SDValue. | |||
297 | const SDValue &get() const { return Val; } | |||
298 | ||||
299 | /// This returns the SDNode that contains this Use. | |||
300 | SDNode *getUser() { return User; } | |||
301 | ||||
302 | /// Get the next SDUse in the use list. | |||
303 | SDUse *getNext() const { return Next; } | |||
304 | ||||
305 | /// Convenience function for get().getNode(). | |||
306 | SDNode *getNode() const { return Val.getNode(); } | |||
307 | /// Convenience function for get().getResNo(). | |||
308 | unsigned getResNo() const { return Val.getResNo(); } | |||
309 | /// Convenience function for get().getValueType(). | |||
310 | EVT getValueType() const { return Val.getValueType(); } | |||
311 | ||||
312 | /// Convenience function for get().operator== | |||
313 | bool operator==(const SDValue &V) const { | |||
314 | return Val == V; | |||
315 | } | |||
316 | ||||
317 | /// Convenience function for get().operator!= | |||
318 | bool operator!=(const SDValue &V) const { | |||
319 | return Val != V; | |||
320 | } | |||
321 | ||||
322 | /// Convenience function for get().operator< | |||
323 | bool operator<(const SDValue &V) const { | |||
324 | return Val < V; | |||
325 | } | |||
326 | ||||
327 | private: | |||
328 | friend class SelectionDAG; | |||
329 | friend class SDNode; | |||
330 | // TODO: unfriend HandleSDNode once we fix its operand handling. | |||
331 | friend class HandleSDNode; | |||
332 | ||||
333 | void setUser(SDNode *p) { User = p; } | |||
334 | ||||
335 | /// Remove this use from its existing use list, assign it the | |||
336 | /// given value, and add it to the new value's node's use list. | |||
337 | inline void set(const SDValue &V); | |||
338 | /// Like set, but only supports initializing a newly-allocated | |||
339 | /// SDUse with a non-null value. | |||
340 | inline void setInitial(const SDValue &V); | |||
341 | /// Like set, but only sets the Node portion of the value, | |||
342 | /// leaving the ResNo portion unmodified. | |||
343 | inline void setNode(SDNode *N); | |||
344 | ||||
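// [Editorial note] Prev points at whichever forward link currently references
// this use (the list head or the previous use's Next field), so addToList and
// removeFromList below can splice in O(1) without a dedicated head sentinel.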
345 | void addToList(SDUse **List) { | |||
346 | Next = *List; | |||
347 | if (Next) Next->Prev = &Next; | |||
348 | Prev = List; | |||
349 | *List = this; | |||
350 | } | |||
351 | ||||
352 | void removeFromList() { | |||
353 | *Prev = Next; | |||
354 | if (Next) Next->Prev = Prev; | |||
355 | } | |||
356 | }; | |||
357 | ||||
358 | /// simplify_type specializations - Allow casting operators to work directly on | |||
359 | /// SDValues as if they were SDNode*'s. | |||
360 | template<> struct simplify_type<SDUse> { | |||
361 | using SimpleType = SDNode *; | |||
362 | ||||
363 | static SimpleType getSimplifiedValue(SDUse &Val) { | |||
364 | return Val.getNode(); | |||
365 | } | |||
366 | }; | |||
367 | ||||
368 | /// These are IR-level optimization flags that may be propagated to SDNodes. | |||
369 | /// TODO: This data structure should be shared by the IR optimizer and the | |||
370 | /// backend. | |||
371 | struct SDNodeFlags { | |||
372 | private: | |||
373 | bool NoUnsignedWrap : 1; | |||
374 | bool NoSignedWrap : 1; | |||
375 | bool Exact : 1; | |||
376 | bool NoNaNs : 1; | |||
377 | bool NoInfs : 1; | |||
378 | bool NoSignedZeros : 1; | |||
379 | bool AllowReciprocal : 1; | |||
380 | bool AllowContract : 1; | |||
381 | bool ApproximateFuncs : 1; | |||
382 | bool AllowReassociation : 1; | |||
383 | ||||
384 | // We assume instructions do not raise floating-point exceptions by default, | |||
385 | // and only those marked explicitly may do so. We could choose to represent | |||
386 | // this via a positive "FPExcept" flag like on the MI level, but having a | |||
387 | // negative "NoFPExcept" flag here (cleared by default) makes the flag | |||
388 | // intersection logic more straightforward. | |||
389 | bool NoFPExcept : 1; | |||
390 | ||||
391 | public: | |||
392 | /// Default constructor turns off all optimization flags. | |||
393 | SDNodeFlags() | |||
394 | : NoUnsignedWrap(false), NoSignedWrap(false), Exact(false), NoNaNs(false), | |||
395 | NoInfs(false), NoSignedZeros(false), AllowReciprocal(false), | |||
396 | AllowContract(false), ApproximateFuncs(false), | |||
397 | AllowReassociation(false), NoFPExcept(false) {} | |||
398 | ||||
399 | /// Propagate the fast-math-flags from an IR FPMathOperator. | |||
400 | void copyFMF(const FPMathOperator &FPMO) { | |||
401 | setNoNaNs(FPMO.hasNoNaNs()); | |||
402 | setNoInfs(FPMO.hasNoInfs()); | |||
403 | setNoSignedZeros(FPMO.hasNoSignedZeros()); | |||
404 | setAllowReciprocal(FPMO.hasAllowReciprocal()); | |||
405 | setAllowContract(FPMO.hasAllowContract()); | |||
406 | setApproximateFuncs(FPMO.hasApproxFunc()); | |||
407 | setAllowReassociation(FPMO.hasAllowReassoc()); | |||
408 | } | |||
409 | ||||
410 | // These are mutators for each flag. | |||
411 | void setNoUnsignedWrap(bool b) { NoUnsignedWrap = b; } | |||
412 | void setNoSignedWrap(bool b) { NoSignedWrap = b; } | |||
413 | void setExact(bool b) { Exact = b; } | |||
414 | void setNoNaNs(bool b) { NoNaNs = b; } | |||
415 | void setNoInfs(bool b) { NoInfs = b; } | |||
416 | void setNoSignedZeros(bool b) { NoSignedZeros = b; } | |||
417 | void setAllowReciprocal(bool b) { AllowReciprocal = b; } | |||
418 | void setAllowContract(bool b) { AllowContract = b; } | |||
419 | void setApproximateFuncs(bool b) { ApproximateFuncs = b; } | |||
420 | void setAllowReassociation(bool b) { AllowReassociation = b; } | |||
421 | void setNoFPExcept(bool b) { NoFPExcept = b; } | |||
422 | ||||
423 | // These are accessors for each flag. | |||
424 | bool hasNoUnsignedWrap() const { return NoUnsignedWrap; } | |||
425 | bool hasNoSignedWrap() const { return NoSignedWrap; } | |||
426 | bool hasExact() const { return Exact; } | |||
427 | bool hasNoNaNs() const { return NoNaNs; } | |||
428 | bool hasNoInfs() const { return NoInfs; } | |||
429 | bool hasNoSignedZeros() const { return NoSignedZeros; } | |||
430 | bool hasAllowReciprocal() const { return AllowReciprocal; } | |||
431 | bool hasAllowContract() const { return AllowContract; } | |||
432 | bool hasApproximateFuncs() const { return ApproximateFuncs; } | |||
433 | bool hasAllowReassociation() const { return AllowReassociation; } | |||
434 | bool hasNoFPExcept() const { return NoFPExcept; } | |||
435 | ||||
436 | /// Clear any flags in this flag set that aren't also set in Flags. All | |||
437 | /// flags will be cleared if Flags are undefined. | |||
438 | void intersectWith(const SDNodeFlags Flags) { | |||
439 | NoUnsignedWrap &= Flags.NoUnsignedWrap; | |||
440 | NoSignedWrap &= Flags.NoSignedWrap; | |||
441 | Exact &= Flags.Exact; | |||
442 | NoNaNs &= Flags.NoNaNs; | |||
443 | NoInfs &= Flags.NoInfs; | |||
444 | NoSignedZeros &= Flags.NoSignedZeros; | |||
445 | AllowReciprocal &= Flags.AllowReciprocal; | |||
446 | AllowContract &= Flags.AllowContract; | |||
447 | ApproximateFuncs &= Flags.ApproximateFuncs; | |||
448 | AllowReassociation &= Flags.AllowReassociation; | |||
449 | NoFPExcept &= Flags.NoFPExcept; | |||
450 | } | |||
451 | }; | |||
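// [Editorial sketch] Typical flag propagation when building nodes from IR,
// where I is a hypothetical instruction known to be an FPMathOperator:
//
//   SDNodeFlags Flags;
//   Flags.copyFMF(cast<FPMathOperator>(I)); // import nnan/ninf/contract/...
//   Flags.intersectWith(OtherFlags);        // keep only flags both sets share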
452 | ||||
453 | /// Represents one node in the SelectionDAG. | |||
454 | /// | |||
455 | class SDNode : public FoldingSetNode, public ilist_node<SDNode> { | |||
456 | private: | |||
457 | /// The operation that this node performs. | |||
458 | int16_t NodeType; | |||
459 | ||||
460 | protected: | |||
461 | // We define a set of mini-helper classes to help us interpret the bits in our | |||
462 | // SubclassData. These are designed to fit within a uint16_t so they pack | |||
463 | // with NodeType. | |||
464 | ||||
465 | #if defined(_AIX) && (!defined(__GNUC__) || defined(__clang__)) | |||
466 | // Except for GCC; by default, AIX compilers store bit-fields in 4-byte words | |||
467 | // and give the `pack` pragma push semantics. | |||
468 | #define BEGIN_TWO_BYTE_PACK() _Pragma("pack(2)") | |||
469 | #define END_TWO_BYTE_PACK() _Pragma("pack(pop)") | |||
470 | #else | |||
471 | #define BEGIN_TWO_BYTE_PACK() | |||
472 | #define END_TWO_BYTE_PACK() | |||
473 | #endif | |||
474 | ||||
475 | BEGIN_TWO_BYTE_PACK() | |||
476 | class SDNodeBitfields { | |||
477 | friend class SDNode; | |||
478 | friend class MemIntrinsicSDNode; | |||
479 | friend class MemSDNode; | |||
480 | friend class SelectionDAG; | |||
481 | ||||
482 | uint16_t HasDebugValue : 1; | |||
483 | uint16_t IsMemIntrinsic : 1; | |||
484 | uint16_t IsDivergent : 1; | |||
485 | }; | |||
486 | enum { NumSDNodeBits = 3 }; | |||
487 | ||||
488 | class ConstantSDNodeBitfields { | |||
489 | friend class ConstantSDNode; | |||
490 | ||||
491 | uint16_t : NumSDNodeBits; | |||
492 | ||||
493 | uint16_t IsOpaque : 1; | |||
494 | }; | |||
495 | ||||
496 | class MemSDNodeBitfields { | |||
497 | friend class MemSDNode; | |||
498 | friend class MemIntrinsicSDNode; | |||
499 | friend class AtomicSDNode; | |||
500 | ||||
501 | uint16_t : NumSDNodeBits; | |||
502 | ||||
503 | uint16_t IsVolatile : 1; | |||
504 | uint16_t IsNonTemporal : 1; | |||
505 | uint16_t IsDereferenceable : 1; | |||
506 | uint16_t IsInvariant : 1; | |||
507 | }; | |||
508 | enum { NumMemSDNodeBits = NumSDNodeBits + 4 }; | |||
509 | ||||
510 | class LSBaseSDNodeBitfields { | |||
511 | friend class LSBaseSDNode; | |||
512 | friend class MaskedLoadStoreSDNode; | |||
513 | friend class MaskedGatherScatterSDNode; | |||
514 | ||||
515 | uint16_t : NumMemSDNodeBits; | |||
516 | ||||
517 | // This storage is shared between disparate class hierarchies to hold an | |||
518 | // enumeration specific to the class hierarchy in use. | |||
519 | // LSBaseSDNode => enum ISD::MemIndexedMode | |||
520 | // MaskedLoadStoreBaseSDNode => enum ISD::MemIndexedMode | |||
521 | // MaskedGatherScatterSDNode => enum ISD::MemIndexType | |||
522 | uint16_t AddressingMode : 3; | |||
523 | }; | |||
524 | enum { NumLSBaseSDNodeBits = NumMemSDNodeBits + 3 }; | |||
525 | ||||
526 | class LoadSDNodeBitfields { | |||
527 | friend class LoadSDNode; | |||
528 | friend class MaskedLoadSDNode; | |||
529 | friend class MaskedGatherSDNode; | |||
530 | ||||
531 | uint16_t : NumLSBaseSDNodeBits; | |||
532 | ||||
533 | uint16_t ExtTy : 2; // enum ISD::LoadExtType | |||
534 | uint16_t IsExpanding : 1; | |||
535 | }; | |||
536 | ||||
537 | class StoreSDNodeBitfields { | |||
538 | friend class StoreSDNode; | |||
539 | friend class MaskedStoreSDNode; | |||
540 | friend class MaskedScatterSDNode; | |||
541 | ||||
542 | uint16_t : NumLSBaseSDNodeBits; | |||
543 | ||||
544 | uint16_t IsTruncating : 1; | |||
545 | uint16_t IsCompressing : 1; | |||
546 | }; | |||
547 | ||||
548 | union { | |||
549 | char RawSDNodeBits[sizeof(uint16_t)]; | |||
550 | SDNodeBitfields SDNodeBits; | |||
551 | ConstantSDNodeBitfields ConstantSDNodeBits; | |||
552 | MemSDNodeBitfields MemSDNodeBits; | |||
553 | LSBaseSDNodeBitfields LSBaseSDNodeBits; | |||
554 | LoadSDNodeBitfields LoadSDNodeBits; | |||
555 | StoreSDNodeBitfields StoreSDNodeBits; | |||
556 | }; | |||
557 | END_TWO_BYTE_PACK() | |||
558 | #undef BEGIN_TWO_BYTE_PACK | |||
559 | #undef END_TWO_BYTE_PACK | |||
560 | ||||
561 | // RawSDNodeBits must cover the entirety of the union. This means that all of | |||
562 | // the union's members must have size <= RawSDNodeBits. We write the RHS as | |||
563 | // "2" instead of sizeof(RawSDNodeBits) because MSVC can't handle the latter. | |||
564 | static_assert(sizeof(SDNodeBitfields) <= 2, "field too wide"); | |||
565 | static_assert(sizeof(ConstantSDNodeBitfields) <= 2, "field too wide"); | |||
566 | static_assert(sizeof(MemSDNodeBitfields) <= 2, "field too wide"); | |||
567 | static_assert(sizeof(LSBaseSDNodeBitfields) <= 2, "field too wide"); | |||
568 | static_assert(sizeof(LoadSDNodeBitfields) <= 2, "field too wide"); | |||
569 | static_assert(sizeof(StoreSDNodeBitfields) <= 2, "field too wide"); | |||
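// Hedged illustration (not part of the original header) of the reservation
// pattern used by the bitfield classes above: each subclass view begins with
// an unnamed bitfield as wide as the bits its base owns, so overlaying the
// views in a union keeps the bit ranges disjoint:
//
//   struct Base    { uint16_t A : 1, B : 1, C : 1; }; // 3 base bits
//   struct Derived { uint16_t : 3; uint16_t D : 1; }; // skips those 3 bits
//   union  View    { Base B; Derived D; };            // A..C and D disjoint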
570 | ||||
571 | private: | |||
572 | friend class SelectionDAG; | |||
573 | // TODO: unfriend HandleSDNode once we fix its operand handling. | |||
574 | friend class HandleSDNode; | |||
575 | ||||
576 | /// Unique id per SDNode in the DAG. | |||
577 | int NodeId = -1; | |||
578 | ||||
579 | /// The values that are used by this operation. | |||
580 | SDUse *OperandList = nullptr; | |||
581 | ||||
582 | /// The types of the values this node defines. SDNodes may | |||
583 | /// define multiple values simultaneously. | |||
584 | const EVT *ValueList; | |||
585 | ||||
586 | /// List of uses for this SDNode. | |||
587 | SDUse *UseList = nullptr; | |||
588 | ||||
589 | /// The number of entries in the Operand/Value list. | |||
590 | unsigned short NumOperands = 0; | |||
591 | unsigned short NumValues; | |||
592 | ||||
593 | // The ordering of the SDNodes. It roughly corresponds to the ordering of the | |||
594 | // original LLVM instructions. | |||
595 | // This is used for turning off scheduling, because we'll forgo | |||
596 | // the normal scheduling algorithms and output the instructions according to | |||
597 | // this ordering. | |||
598 | unsigned IROrder; | |||
599 | ||||
600 | /// Source line information. | |||
601 | DebugLoc debugLoc; | |||
602 | ||||
603 | /// Return a pointer to the specified value type. | |||
604 | static const EVT *getValueTypeList(EVT VT); | |||
605 | ||||
606 | SDNodeFlags Flags; | |||
607 | ||||
608 | public: | |||
609 | /// Unique and persistent id per SDNode in the DAG. | |||
610 | /// Used for debug printing. | |||
611 | uint16_t PersistentId; | |||
612 | ||||
613 | //===--------------------------------------------------------------------===// | |||
614 | // Accessors | |||
615 | // | |||
616 | ||||
617 | /// Return the SelectionDAG opcode value for this node. For | |||
618 | /// pre-isel nodes (those for which isMachineOpcode returns false), these | |||
619 | /// are the opcode values in the ISD and <target>ISD namespaces. For | |||
620 | /// post-isel opcodes, see getMachineOpcode. | |||
621 | unsigned getOpcode() const { return (unsigned short)NodeType; } | |||
622 | ||||
623 | /// Test if this node has a target-specific opcode (in the | |||
624 | /// \<target\>ISD namespace). | |||
625 | bool isTargetOpcode() const { return NodeType >= ISD::BUILTIN_OP_END; } | |||
626 | ||||
627 | /// Test if this node has a target-specific opcode that may raise | |||
628 | /// FP exceptions (in the \<target\>ISD namespace and greater than | |||
629 | /// FIRST_TARGET_STRICTFP_OPCODE). Note that all target memory | |||
630 | /// opcodes are currently automatically considered to possibly raise | |||
631 | /// FP exceptions as well. | |||
632 | bool isTargetStrictFPOpcode() const { | |||
633 | return NodeType >= ISD::FIRST_TARGET_STRICTFP_OPCODE; | |||
634 | } | |||
635 | ||||
636 | /// Test if this node has a target-specific | |||
637 | /// memory-referencing opcode (in the \<target\>ISD namespace and | |||
638 | /// greater than FIRST_TARGET_MEMORY_OPCODE). | |||
639 | bool isTargetMemoryOpcode() const { | |||
640 | return NodeType >= ISD::FIRST_TARGET_MEMORY_OPCODE; | |||
641 | } | |||
642 | ||||
643 | /// Return true if the node's type is undefined. | |||
644 | bool isUndef() const { return NodeType == ISD::UNDEF; } | |||
645 | ||||
646 | /// Test if this node is a memory intrinsic (with valid pointer information). | |||
647 | /// INTRINSIC_W_CHAIN and INTRINSIC_VOID nodes are sometimes created for | |||
648 | /// non-memory intrinsics (with chains) that are not really instances of | |||
649 | /// MemSDNode. For such nodes, we need some extra state to determine the | |||
650 | /// proper classof relationship. | |||
651 | bool isMemIntrinsic() const { | |||
652 | return (NodeType == ISD::INTRINSIC_W_CHAIN || | |||
653 | NodeType == ISD::INTRINSIC_VOID) && | |||
654 | SDNodeBits.IsMemIntrinsic; | |||
655 | } | |||
656 | ||||
657 | /// Test if this node is a strict floating point pseudo-op. | |||
658 | bool isStrictFPOpcode() { | |||
659 | switch (NodeType) { | |||
660 | default: | |||
661 | return false; | |||
662 | case ISD::STRICT_FP16_TO_FP: | |||
663 | case ISD::STRICT_FP_TO_FP16: | |||
664 | #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \ | |||
665 | case ISD::STRICT_##DAGN: | |||
666 | #include "llvm/IR/ConstrainedOps.def" | |||
667 | return true; | |||
668 | } | |||
669 | } | |||
670 | ||||
671 | /// Test if this node has a post-isel opcode, directly | |||
672 | /// corresponding to a MachineInstr opcode. | |||
673 | bool isMachineOpcode() const { return NodeType < 0; } | |||
674 | ||||
675 | /// This may only be called if isMachineOpcode returns | |||
676 | /// true. It returns the MachineInstr opcode value that the node's opcode | |||
677 | /// corresponds to. | |||
678 | unsigned getMachineOpcode() const { | |||
679 | assert(isMachineOpcode() && "Not a MachineInstr opcode!"); | |||
680 | return ~NodeType; | |||
681 | } | |||
682 | ||||
683 | bool getHasDebugValue() const { return SDNodeBits.HasDebugValue; } | |||
684 | void setHasDebugValue(bool b) { SDNodeBits.HasDebugValue = b; } | |||
685 | ||||
686 | bool isDivergent() const { return SDNodeBits.IsDivergent; } | |||
687 | ||||
688 | /// Return true if there are no uses of this node. | |||
689 | bool use_empty() const { return UseList == nullptr; } | |||
690 | ||||
691 | /// Return true if there is exactly one use of this node. | |||
692 | bool hasOneUse() const { return hasSingleElement(uses()); } | |||
693 | ||||
694 | /// Return the number of uses of this node. This method takes | |||
695 | /// time proportional to the number of uses. | |||
696 | size_t use_size() const { return std::distance(use_begin(), use_end()); } | |||
697 | ||||
698 | /// Return the unique node id. | |||
699 | int getNodeId() const { return NodeId; } | |||
700 | ||||
701 | /// Set unique node id. | |||
702 | void setNodeId(int Id) { NodeId = Id; } | |||
703 | ||||
704 | /// Return the node ordering. | |||
705 | unsigned getIROrder() const { return IROrder; } | |||
706 | ||||
707 | /// Set the node ordering. | |||
708 | void setIROrder(unsigned Order) { IROrder = Order; } | |||
709 | ||||
710 | /// Return the source location info. | |||
711 | const DebugLoc &getDebugLoc() const { return debugLoc; } | |||
712 | ||||
713 | /// Set source location info. Try to avoid this; putting | |||
714 | /// it in the constructor is preferable. | |||
715 | void setDebugLoc(DebugLoc dl) { debugLoc = std::move(dl); } | |||
716 | ||||
717 | /// This class provides iterator support for SDUse | |||
718 | /// operands that use a specific SDNode. | |||
719 | class use_iterator { | |||
720 | friend class SDNode; | |||
721 | ||||
722 | SDUse *Op = nullptr; | |||
723 | ||||
724 | explicit use_iterator(SDUse *op) : Op(op) {} | |||
725 | ||||
726 | public: | |||
727 | using iterator_category = std::forward_iterator_tag; | |||
728 | using value_type = SDUse; | |||
729 | using difference_type = std::ptrdiff_t; | |||
730 | using pointer = value_type *; | |||
731 | using reference = value_type &; | |||
732 | ||||
733 | use_iterator() = default; | |||
734 | use_iterator(const use_iterator &I) : Op(I.Op) {} | |||
735 | ||||
736 | bool operator==(const use_iterator &x) const { | |||
737 | return Op == x.Op; | |||
738 | } | |||
739 | bool operator!=(const use_iterator &x) const { | |||
740 | return !operator==(x); | |||
741 | } | |||
742 | ||||
743 | /// Return true if this iterator is at the end of uses list. | |||
744 | bool atEnd() const { return Op == nullptr; } | |||
745 | ||||
746 | // Iterator traversal: forward iteration only. | |||
747 | use_iterator &operator++() { // Preincrement | |||
748 | assert(Op && "Cannot increment end iterator!"); | |||
749 | Op = Op->getNext(); | |||
750 | return *this; | |||
751 | } | |||
752 | ||||
753 | use_iterator operator++(int) { // Postincrement | |||
754 | use_iterator tmp = *this; ++*this; return tmp; | |||
755 | } | |||
756 | ||||
757 | /// Retrieve a pointer to the current user node. | |||
758 | SDNode *operator*() const { | |||
759 | assert(Op && "Cannot dereference end iterator!"); | |||
760 | return Op->getUser(); | |||
761 | } | |||
762 | ||||
763 | SDNode *operator->() const { return operator*(); } | |||
764 | ||||
765 | SDUse &getUse() const { return *Op; } | |||
766 | ||||
767 | /// Retrieve the operand # of this use in its user. | |||
768 | unsigned getOperandNo() const { | |||
769 | assert(Op && "Cannot dereference end iterator!"); | |||
770 | return (unsigned)(Op - Op->getUser()->OperandList); | |||
771 | } | |||
772 | }; | |||
773 | ||||
774 | /// Provide iteration support to walk over all uses of an SDNode. | |||
775 | use_iterator use_begin() const { | |||
776 | return use_iterator(UseList); | |||
777 | } | |||
778 | ||||
779 | static use_iterator use_end() { return use_iterator(nullptr); } | |||
780 | ||||
781 | inline iterator_range<use_iterator> uses() { | |||
782 | return make_range(use_begin(), use_end()); | |||
783 | } | |||
784 | inline iterator_range<use_iterator> uses() const { | |||
785 | return make_range(use_begin(), use_end()); | |||
786 | } | |||
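// Usage sketch (illustrative; N is an assumed valid SDNode*): the range form
// is the common way to visit users, while use_iterator additionally exposes
// which operand of each user refers to this node:
//
//   for (SDNode *User : N->uses())
//     (void)User; // visited once per use, not once per user
//
//   for (SDNode::use_iterator UI = N->use_begin(), UE = SDNode::use_end();
//        UI != UE; ++UI) {
//     unsigned OpNo = UI.getOperandNo(); // operand # of this use in *UI
//     (void)OpNo;
//   }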
787 | ||||
788 | /// Return true if there are exactly NUSES uses of the indicated value. | |||
789 | /// This method ignores uses of other values defined by this operation. | |||
790 | bool hasNUsesOfValue(unsigned NUses, unsigned Value) const; | |||
791 | ||||
792 | /// Return true if there are any uses of the indicated value. | |||
793 | /// This method ignores uses of other values defined by this operation. | |||
794 | bool hasAnyUseOfValue(unsigned Value) const; | |||
795 | ||||
796 | /// Return true if this node is the only use of N. | |||
797 | bool isOnlyUserOf(const SDNode *N) const; | |||
798 | ||||
799 | /// Return true if this node is an operand of N. | |||
800 | bool isOperandOf(const SDNode *N) const; | |||
801 | ||||
802 | /// Return true if this node is a predecessor of N. | |||
803 | /// NOTE: Implemented on top of hasPredecessor and every bit as | |||
804 | /// expensive. Use carefully. | |||
805 | bool isPredecessorOf(const SDNode *N) const { | |||
806 | return N->hasPredecessor(this); | |||
807 | } | |||
808 | ||||
809 | /// Return true if N is a predecessor of this node. | |||
810 | /// N is either an operand of this node, or can be reached by recursively | |||
811 | /// traversing up the operands. | |||
812 | /// NOTE: This is an expensive method. Use it carefully. | |||
813 | bool hasPredecessor(const SDNode *N) const; | |||
814 | ||||
815 | /// Returns true if N is a predecessor of any node in Worklist. This | |||
816 | /// helper keeps Visited and Worklist sets externally to allow union | |||
817 | /// searches to be performed in parallel, caching of results across | |||
818 | /// queries, and incremental addition to Worklist. Stops early if N is | |||
819 | /// found but will resume. Remember to clear Visited and Worklist | |||
820 | /// if DAG changes. MaxSteps gives a maximum number of nodes to visit before | |||
821 | /// giving up. The TopologicalPrune flag signals that positive NodeIds are | |||
822 | /// topologically ordered (Operands have strictly smaller node id) and search | |||
823 | /// can be pruned leveraging this. | |||
824 | static bool hasPredecessorHelper(const SDNode *N, | |||
825 | SmallPtrSetImpl<const SDNode *> &Visited, | |||
826 | SmallVectorImpl<const SDNode *> &Worklist, | |||
827 | unsigned int MaxSteps = 0, | |||
828 | bool TopologicalPrune = false) { | |||
829 | SmallVector<const SDNode *, 8> DeferredNodes; | |||
830 | if (Visited.count(N)) | |||
831 | return true; | |||
832 | ||||
833 | // Node Ids are assigned in three places: as a topological | |||
834 | // ordering (> 0), during legalization (values set to 0), and | |||
835 | // for new nodes (set to -1). If N has a topological id then we | |||
836 | // know that all nodes with ids smaller than it cannot be | |||
837 | // successors and we need not check them. Filter out all nodes | |||
838 | // that can't be matches. We add them to the worklist before exit | |||
839 | // in case of multiple calls. Note that during selection the topological id | |||
840 | // may be violated if a node's predecessor is selected before it. We mark | |||
841 | // this at selection by negating the id of unselected successors and | |||
842 | // restricting topological pruning to positive ids. | |||
843 | ||||
844 | int NId = N->getNodeId(); | |||
845 | // If we Invalidated the Id, reconstruct original NId. | |||
846 | if (NId < -1) | |||
847 | NId = -(NId + 1); | |||
848 | ||||
849 | bool Found = false; | |||
850 | while (!Worklist.empty()) { | |||
851 | const SDNode *M = Worklist.pop_back_val(); | |||
852 | int MId = M->getNodeId(); | |||
853 | if (TopologicalPrune && M->getOpcode() != ISD::TokenFactor && (NId > 0) && | |||
854 | (MId > 0) && (MId < NId)) { | |||
855 | DeferredNodes.push_back(M); | |||
856 | continue; | |||
857 | } | |||
858 | for (const SDValue &OpV : M->op_values()) { | |||
859 | SDNode *Op = OpV.getNode(); | |||
860 | if (Visited.insert(Op).second) | |||
861 | Worklist.push_back(Op); | |||
862 | if (Op == N) | |||
863 | Found = true; | |||
864 | } | |||
865 | if (Found) | |||
866 | break; | |||
867 | if (MaxSteps != 0 && Visited.size() >= MaxSteps) | |||
868 | break; | |||
869 | } | |||
870 | // Push deferred nodes back on worklist. | |||
871 | Worklist.append(DeferredNodes.begin(), DeferredNodes.end()); | |||
872 | // If we bailed early, conservatively return found. | |||
873 | if (MaxSteps != 0 && Visited.size() >= MaxSteps) | |||
874 | return true; | |||
875 | return Found; | |||
876 | } | |||
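// Hedged usage sketch (Root and N are illustrative names): callers seed
// Worklist with the starting node(s) and may reuse Visited across queries.
// A nonzero MaxSteps bounds the walk at the cost of conservatively reporting
// "found" when the limit is hit:
//
//   SmallPtrSet<const SDNode *, 16> Visited;
//   SmallVector<const SDNode *, 16> Worklist;
//   Worklist.push_back(Root);
//   bool MayReach =
//       SDNode::hasPredecessorHelper(N, Visited, Worklist, /*MaxSteps=*/8192);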
877 | ||||
878 | /// Return true if all the users of N are contained in Nodes. | |||
879 | /// NOTE: Requires at least one match, but doesn't require them all. | |||
880 | static bool areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N); | |||
881 | ||||
882 | /// Return the number of values used by this operation. | |||
883 | unsigned getNumOperands() const { return NumOperands; } | |||
884 | ||||
885 | /// Return the maximum number of operands that an SDNode can hold. | |||
886 | static constexpr size_t getMaxNumOperands() { | |||
887 | return std::numeric_limits<decltype(SDNode::NumOperands)>::max(); | |||
888 | } | |||
889 | ||||
890 | /// Helper method returns the integer value of a ConstantSDNode operand. | |||
891 | inline uint64_t getConstantOperandVal(unsigned Num) const; | |||
892 | ||||
893 | /// Helper method returns the APInt of a ConstantSDNode operand. | |||
894 | inline const APInt &getConstantOperandAPInt(unsigned Num) const; | |||
895 | ||||
896 | const SDValue &getOperand(unsigned Num) const { | |||
897 | assert(Num < NumOperands && "Invalid child # of SDNode!"); | |||
898 | return OperandList[Num]; | |||
899 | } | |||
900 | ||||
901 | using op_iterator = SDUse *; | |||
902 | ||||
903 | op_iterator op_begin() const { return OperandList; } | |||
904 | op_iterator op_end() const { return OperandList+NumOperands; } | |||
905 | ArrayRef<SDUse> ops() const { return makeArrayRef(op_begin(), op_end()); } | |||
906 | ||||
907 | /// Iterator for directly iterating over the operand SDValue's. | |||
908 | struct value_op_iterator | |||
909 | : iterator_adaptor_base<value_op_iterator, op_iterator, | |||
910 | std::random_access_iterator_tag, SDValue, | |||
911 | ptrdiff_t, value_op_iterator *, | |||
912 | value_op_iterator *> { | |||
913 | explicit value_op_iterator(SDUse *U = nullptr) | |||
914 | : iterator_adaptor_base(U) {} | |||
915 | ||||
916 | const SDValue &operator*() const { return I->get(); } | |||
917 | }; | |||
918 | ||||
919 | iterator_range<value_op_iterator> op_values() const { | |||
920 | return make_range(value_op_iterator(op_begin()), | |||
921 | value_op_iterator(op_end())); | |||
922 | } | |||
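// Illustrative sketch: op_values() yields the operands as SDValues, which is
// often more convenient than indexing with getOperand():
//
//   for (const SDValue &Op : N->op_values())
//     if (Op.getValueType() == MVT::Other)
//       ; // Op is a chain operand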
923 | ||||
924 | SDVTList getVTList() const { | |||
925 | SDVTList X = { ValueList, NumValues }; | |||
926 | return X; | |||
927 | } | |||
928 | ||||
929 | /// If this node has a glue operand, return the node | |||
930 | /// to which the glue operand points. Otherwise return NULL. | |||
931 | SDNode *getGluedNode() const { | |||
932 | if (getNumOperands() != 0 && | |||
933 | getOperand(getNumOperands()-1).getValueType() == MVT::Glue) | |||
934 | return getOperand(getNumOperands()-1).getNode(); | |||
935 | return nullptr; | |||
936 | } | |||
937 | ||||
938 | /// If this node has a glue value with a user, return | |||
939 | /// the user (there is at most one). Otherwise return NULL. | |||
940 | SDNode *getGluedUser() const { | |||
941 | for (use_iterator UI = use_begin(), UE = use_end(); UI != UE; ++UI) | |||
942 | if (UI.getUse().get().getValueType() == MVT::Glue) | |||
943 | return *UI; | |||
944 | return nullptr; | |||
945 | } | |||
946 | ||||
947 | SDNodeFlags getFlags() const { return Flags; } | |||
948 | void setFlags(SDNodeFlags NewFlags) { Flags = NewFlags; } | |||
949 | ||||
950 | /// Clear any flags in this node that aren't also set in Flags. | |||
951 | /// If Flags is not in a defined state then this has no effect. | |||
952 | void intersectFlagsWith(const SDNodeFlags Flags); | |||
953 | ||||
954 | /// Return the number of values defined/returned by this operator. | |||
955 | unsigned getNumValues() const { return NumValues; } | |||
956 | ||||
957 | /// Return the type of a specified result. | |||
958 | EVT getValueType(unsigned ResNo) const { | |||
959 | assert(ResNo < NumValues && "Illegal result number!"); | |||
960 | return ValueList[ResNo]; | |||
961 | } | |||
962 | ||||
963 | /// Return the type of a specified result as a simple type. | |||
964 | MVT getSimpleValueType(unsigned ResNo) const { | |||
965 | return getValueType(ResNo).getSimpleVT(); | |||
966 | } | |||
967 | ||||
968 | /// Returns MVT::getSizeInBits(getValueType(ResNo)). | |||
969 | /// | |||
970 | /// If the value type is a scalable vector type, the scalable property will | |||
971 | /// be set and the runtime size will be a positive integer multiple of the | |||
972 | /// base size. | |||
973 | TypeSize getValueSizeInBits(unsigned ResNo) const { | |||
974 | return getValueType(ResNo).getSizeInBits(); | |||
975 | } | |||
976 | ||||
977 | using value_iterator = const EVT *; | |||
978 | ||||
979 | value_iterator value_begin() const { return ValueList; } | |||
980 | value_iterator value_end() const { return ValueList+NumValues; } | |||
981 | iterator_range<value_iterator> values() const { | |||
982 | return llvm::make_range(value_begin(), value_end()); | |||
983 | } | |||
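// Illustrative sketch: values() iterates the EVTs this node defines, e.g. to
// ask whether any result is a vector type:
//
//   bool HasVectorResult =
//       llvm::any_of(N->values(), [](EVT VT) { return VT.isVector(); });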
984 | ||||
985 | /// Return the opcode of this operation for printing. | |||
986 | std::string getOperationName(const SelectionDAG *G = nullptr) const; | |||
987 | static const char* getIndexedModeName(ISD::MemIndexedMode AM); | |||
988 | void print_types(raw_ostream &OS, const SelectionDAG *G) const; | |||
989 | void print_details(raw_ostream &OS, const SelectionDAG *G) const; | |||
990 | void print(raw_ostream &OS, const SelectionDAG *G = nullptr) const; | |||
991 | void printr(raw_ostream &OS, const SelectionDAG *G = nullptr) const; | |||
992 | ||||
993 | /// Print a SelectionDAG node and all children down to | |||
994 | /// the leaves. The given SelectionDAG allows target-specific nodes | |||
995 | /// to be printed in human-readable form. Unlike printr, this will | |||
996 | /// print the whole DAG, including children that appear multiple | |||
997 | /// times. | |||
998 | /// | |||
999 | void printrFull(raw_ostream &O, const SelectionDAG *G = nullptr) const; | |||
1000 | ||||
1001 | /// Print a SelectionDAG node and children up to | |||
1002 | /// depth "depth." The given SelectionDAG allows target-specific | |||
1003 | /// nodes to be printed in human-readable form. Unlike printr, this | |||
1004 | /// will print children that appear multiple times wherever they are | |||
1005 | /// used. | |||
1006 | /// | |||
1007 | void printrWithDepth(raw_ostream &O, const SelectionDAG *G = nullptr, | |||
1008 | unsigned depth = 100) const; | |||
1009 | ||||
1010 | /// Dump this node, for debugging. | |||
1011 | void dump() const; | |||
1012 | ||||
1013 | /// Dump (recursively) this node and its use-def subgraph. | |||
1014 | void dumpr() const; | |||
1015 | ||||
1016 | /// Dump this node, for debugging. | |||
1017 | /// The given SelectionDAG allows target-specific nodes to be printed | |||
1018 | /// in human-readable form. | |||
1019 | void dump(const SelectionDAG *G) const; | |||
1020 | ||||
1021 | /// Dump (recursively) this node and its use-def subgraph. | |||
1022 | /// The given SelectionDAG allows target-specific nodes to be printed | |||
1023 | /// in human-readable form. | |||
1024 | void dumpr(const SelectionDAG *G) const; | |||
1025 | ||||
1026 | /// printrFull to dbgs(). The given SelectionDAG allows | |||
1027 | /// target-specific nodes to be printed in human-readable form. | |||
1028 | /// Unlike dumpr, this will print the whole DAG, including children | |||
1029 | /// that appear multiple times. | |||
1030 | void dumprFull(const SelectionDAG *G = nullptr) const; | |||
1031 | ||||
1032 | /// printrWithDepth to dbgs(). The given | |||
1033 | /// SelectionDAG allows target-specific nodes to be printed in | |||
1034 | /// human-readable form. Unlike dumpr, this will print children | |||
1035 | /// that appear multiple times wherever they are used. | |||
1036 | /// | |||
1037 | void dumprWithDepth(const SelectionDAG *G = nullptr, | |||
1038 | unsigned depth = 100) const; | |||
1039 | ||||
1040 | /// Gather unique data for the node. | |||
1041 | void Profile(FoldingSetNodeID &ID) const; | |||
1042 | ||||
1043 | /// This method should only be used by the SDUse class. | |||
1044 | void addUse(SDUse &U) { U.addToList(&UseList); } | |||
1045 | ||||
1046 | protected: | |||
1047 | static SDVTList getSDVTList(EVT VT) { | |||
1048 | SDVTList Ret = { getValueTypeList(VT), 1 }; | |||
1049 | return Ret; | |||
1050 | } | |||
1051 | ||||
1052 | /// Create an SDNode. | |||
1053 | /// | |||
1054 | /// SDNodes are created without any operands, and never own the operand | |||
1055 | /// storage. To add operands, see SelectionDAG::createOperands. | |||
1056 | SDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs) | |||
1057 | : NodeType(Opc), ValueList(VTs.VTs), NumValues(VTs.NumVTs), | |||
1058 | IROrder(Order), debugLoc(std::move(dl)) { | |||
1059 | memset(&RawSDNodeBits, 0, sizeof(RawSDNodeBits)); | |||
1060 | assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor"); | |||
1061 | assert(NumValues == VTs.NumVTs && | |||
1062 | "NumValues wasn't wide enough for its operands!"); | |||
1063 | } | |||
1064 | ||||
1065 | /// Release the operands and set this node to have zero operands. | |||
1066 | void DropOperands(); | |||
1067 | }; | |||
1068 | ||||
1069 | /// Wrapper class for IR location info (IR ordering and DebugLoc) to be passed | |||
1070 | /// into SDNode creation functions. | |||
1071 | /// When an SDNode is created from the DAGBuilder, the DebugLoc is extracted | |||
1072 | /// from the original Instruction, and IROrder is the ordinal position of | |||
1073 | /// the instruction. | |||
1074 | /// When an SDNode is created after the DAG has been built, both DebugLoc and | |||
1075 | /// the IROrder are propagated from the original SDNode. | |||
1076 | /// So the SDLoc class provides two constructors besides the default one: one | |||
1077 | /// to be used by the DAGBuilder, the other to be used by everyone else. | |||
1078 | class SDLoc { | |||
1079 | private: | |||
1080 | DebugLoc DL; | |||
1081 | int IROrder = 0; | |||
1082 | ||||
1083 | public: | |||
1084 | SDLoc() = default; | |||
1085 | SDLoc(const SDNode *N) : DL(N->getDebugLoc()), IROrder(N->getIROrder()) {} | |||
1086 | SDLoc(const SDValue V) : SDLoc(V.getNode()) {} | |||
1087 | SDLoc(const Instruction *I, int Order) : IROrder(Order) { | |||
1088 | assert(Order >= 0 && "bad IROrder"); | |||
1089 | if (I) | |||
1090 | DL = I->getDebugLoc(); | |||
1091 | } | |||
1092 | ||||
1093 | unsigned getIROrder() const { return IROrder; } | |||
1094 | const DebugLoc &getDebugLoc() const { return DL; } | |||
1095 | }; | |||
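// Hedged sketch of the two non-default constructors described above (Node, I,
// and Order are illustrative names):
//
//   SDLoc DL(Node);       // propagate DebugLoc and IROrder from an SDNode
//   SDLoc DL2(&I, Order); // DAGBuilder-style: DebugLoc from an Instruction,
//                         // IROrder from its ordinal position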
1096 | ||||
1097 | // Define inline functions from the SDValue class. | |||
1098 | ||||
1099 | inline SDValue::SDValue(SDNode *node, unsigned resno) | |||
1100 | : Node(node), ResNo(resno) { | |||
1101 | // Explicitly check for !ResNo to avoid use-after-free, because there are | |||
1102 | // callers that use SDValue(N, 0) with a deleted N to indicate successful | |||
1103 | // combines. | |||
1104 | assert((!Node || !ResNo || ResNo < Node->getNumValues()) && | |||
1105 | "Invalid result number for the given node!"); | |||
1106 | assert(ResNo < -2U && "Cannot use result numbers reserved for DenseMaps."); | |||
1107 | } | |||
1108 | ||||
1109 | inline unsigned SDValue::getOpcode() const { | |||
1110 | return Node->getOpcode(); | |||
1111 | } | |||
1112 | ||||
1113 | inline EVT SDValue::getValueType() const { | |||
1114 | return Node->getValueType(ResNo); | |||
1115 | } | |||
1116 | ||||
1117 | inline unsigned SDValue::getNumOperands() const { | |||
1118 | return Node->getNumOperands(); | |||
1119 | } | |||
1120 | ||||
1121 | inline const SDValue &SDValue::getOperand(unsigned i) const { | |||
1122 | return Node->getOperand(i); | |||
1123 | } | |||
1124 | ||||
1125 | inline uint64_t SDValue::getConstantOperandVal(unsigned i) const { | |||
1126 | return Node->getConstantOperandVal(i); | |||
1127 | } | |||
1128 | ||||
1129 | inline const APInt &SDValue::getConstantOperandAPInt(unsigned i) const { | |||
1130 | return Node->getConstantOperandAPInt(i); | |||
1131 | } | |||
1132 | ||||
1133 | inline bool SDValue::isTargetOpcode() const { | |||
1134 | return Node->isTargetOpcode(); | |||
1135 | } | |||
1136 | ||||
1137 | inline bool SDValue::isTargetMemoryOpcode() const { | |||
1138 | return Node->isTargetMemoryOpcode(); | |||
1139 | } | |||
1140 | ||||
1141 | inline bool SDValue::isMachineOpcode() const { | |||
1142 | return Node->isMachineOpcode(); | |||
1143 | } | |||
1144 | ||||
1145 | inline unsigned SDValue::getMachineOpcode() const { | |||
1146 | return Node->getMachineOpcode(); | |||
1147 | } | |||
1148 | ||||
1149 | inline bool SDValue::isUndef() const { | |||
1150 | return Node->isUndef(); | |||
| ||||
1151 | } | |||
1152 | ||||
1153 | inline bool SDValue::use_empty() const { | |||
1154 | return !Node->hasAnyUseOfValue(ResNo); | |||
1155 | } | |||
1156 | ||||
1157 | inline bool SDValue::hasOneUse() const { | |||
1158 | return Node->hasNUsesOfValue(1, ResNo); | |||
1159 | } | |||
1160 | ||||
1161 | inline const DebugLoc &SDValue::getDebugLoc() const { | |||
1162 | return Node->getDebugLoc(); | |||
1163 | } | |||
1164 | ||||
1165 | inline void SDValue::dump() const { | |||
1166 | return Node->dump(); | |||
1167 | } | |||
1168 | ||||
1169 | inline void SDValue::dump(const SelectionDAG *G) const { | |||
1170 | return Node->dump(G); | |||
1171 | } | |||
1172 | ||||
1173 | inline void SDValue::dumpr() const { | |||
1174 | return Node->dumpr(); | |||
1175 | } | |||
1176 | ||||
1177 | inline void SDValue::dumpr(const SelectionDAG *G) const { | |||
1178 | return Node->dumpr(G); | |||
1179 | } | |||
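// A small example helper (not part of the original header) built only from
// the inline SDValue accessors defined above; it tests whether a value is a
// chain, a common filter when walking operands.
inline bool isChainValue(SDValue V) {
  return V.getValueType() == MVT::Other; // chains use the MVT::Other type
}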
1180 | ||||
1181 | // Define inline functions from the SDUse class. | |||
1182 | ||||
1183 | inline void SDUse::set(const SDValue &V) { | |||
1184 | if (Val.getNode()) removeFromList(); | |||
1185 | Val = V; | |||
1186 | if (V.getNode()) V.getNode()->addUse(*this); | |||
1187 | } | |||
1188 | ||||
1189 | inline void SDUse::setInitial(const SDValue &V) { | |||
1190 | Val = V; | |||
1191 | V.getNode()->addUse(*this); | |||
1192 | } | |||
1193 | ||||
1194 | inline void SDUse::setNode(SDNode *N) { | |||
1195 | if (Val.getNode()) removeFromList(); | |||
1196 | Val.setNode(N); | |||
1197 | if (N) N->addUse(*this); | |||
1198 | } | |||
1199 | ||||
1200 | /// This class is used to form a handle around another node that | |||
1201 | /// is persistent and is updated across invocations of replaceAllUsesWith on its | |||
1202 | /// operand. This node should be directly created by end-users and not added to | |||
1203 | /// the AllNodes list. | |||
1204 | class HandleSDNode : public SDNode { | |||
1205 | SDUse Op; | |||
1206 | ||||
1207 | public: | |||
1208 | explicit HandleSDNode(SDValue X) | |||
1209 | : SDNode(ISD::HANDLENODE, 0, DebugLoc(), getSDVTList(MVT::Other)) { | |||
1210 | // HandleSDNodes are never inserted into the DAG, so they won't be | |||
1211 | // auto-numbered. Use ID 65535 as a sentinel. | |||
1212 | PersistentId = 0xffff; | |||
1213 | ||||
1214 | // Manually set up the operand list. This node type is special in that it's | |||
1215 | // always stack allocated and SelectionDAG does not manage its operands. | |||
1216 | // TODO: This should either (a) not be in the SDNode hierarchy, or (b) not | |||
1217 | // be so special. | |||
1218 | Op.setUser(this); | |||
1219 | Op.setInitial(X); | |||
1220 | NumOperands = 1; | |||
1221 | OperandList = &Op; | |||
1222 | } | |||
1223 | ~HandleSDNode(); | |||
1224 | ||||
1225 | const SDValue &getValue() const { return Op; } | |||
1226 | }; | |||
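// Hedged usage sketch: a HandleSDNode lives on the stack to keep a value up
// to date across replaceAllUsesWith, e.g. during a combine:
//
//   HandleSDNode Handle(SomeValue);
//   // ... transformations that may RAUW SomeValue's defining node ...
//   SDValue Preserved = Handle.getValue(); // reflects any replacement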
1227 | ||||
1228 | class AddrSpaceCastSDNode : public SDNode { | |||
1229 | private: | |||
1230 | unsigned SrcAddrSpace; | |||
1231 | unsigned DestAddrSpace; | |||
1232 | ||||
1233 | public: | |||
1234 | AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl, EVT VT, | |||
1235 | unsigned SrcAS, unsigned DestAS); | |||
1236 | ||||
1237 | unsigned getSrcAddressSpace() const { return SrcAddrSpace; } | |||
1238 | unsigned getDestAddressSpace() const { return DestAddrSpace; } | |||
1239 | ||||
1240 | static bool classof(const SDNode *N) { | |||
1241 | return N->getOpcode() == ISD::ADDRSPACECAST; | |||
1242 | } | |||
1243 | }; | |||
1244 | ||||
1245 | /// This is an abstract virtual class for memory operations. | |||
1246 | class MemSDNode : public SDNode { | |||
1247 | private: | |||
1248 | // VT of in-memory value. | |||
1249 | EVT MemoryVT; | |||
1250 | ||||
1251 | protected: | |||
1252 | /// Memory reference information. | |||
1253 | MachineMemOperand *MMO; | |||
1254 | ||||
1255 | public: | |||
1256 | MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTs, | |||
1257 | EVT memvt, MachineMemOperand *MMO); | |||
1258 | ||||
1259 | bool readMem() const { return MMO->isLoad(); } | |||
1260 | bool writeMem() const { return MMO->isStore(); } | |||
1261 | ||||
1262 | /// Returns alignment and volatility of the memory access | |||
1263 | Align getOriginalAlign() const { return MMO->getBaseAlign(); } | |||
1264 | Align getAlign() const { return MMO->getAlign(); } | |||
1265 | // FIXME: Remove once transition to getAlign is over. | |||
1266 | unsigned getAlignment() const { return MMO->getAlign().value(); } | |||
1267 | ||||
1268 | /// Return the SubclassData value, without HasDebugValue. This contains an | |||
1269 | /// encoding of the volatile flag, as well as bits used by subclasses. This | |||
1270 | /// function should only be used to compute a FoldingSetNodeID value. | |||
1271 | /// The HasDebugValue bit is masked out because the CSE map needs to match | |||
1272 | /// nodes with debug info against nodes without debug info. The same applies | |||
1273 | /// to the IsDivergent bit. | |||
1274 | unsigned getRawSubclassData() const { | |||
1275 | uint16_t Data; | |||
1276 | union { | |||
1277 | char RawSDNodeBits[sizeof(uint16_t)]; | |||
1278 | SDNodeBitfields SDNodeBits; | |||
1279 | }; | |||
1280 | memcpy(&RawSDNodeBits, &this->RawSDNodeBits, sizeof(this->RawSDNodeBits)); | |||
1281 | SDNodeBits.HasDebugValue = 0; | |||
1282 | SDNodeBits.IsDivergent = false; | |||
1283 | memcpy(&Data, &RawSDNodeBits, sizeof(RawSDNodeBits)); | |||
1284 | return Data; | |||
1285 | } | |||
1286 | ||||
1287 | bool isVolatile() const { return MemSDNodeBits.IsVolatile; } | |||
1288 | bool isNonTemporal() const { return MemSDNodeBits.IsNonTemporal; } | |||
1289 | bool isDereferenceable() const { return MemSDNodeBits.IsDereferenceable; } | |||
1290 | bool isInvariant() const { return MemSDNodeBits.IsInvariant; } | |||
1291 | ||||
1292 | // Returns the offset from the location of the access. | |||
1293 | int64_t getSrcValueOffset() const { return MMO->getOffset(); } | |||
1294 | ||||
1295 | /// Returns the AA info that describes the dereference. | |||
1296 | AAMDNodes getAAInfo() const { return MMO->getAAInfo(); } | |||
1297 | ||||
1298 | /// Returns the Ranges that describes the dereference. | |||
1299 | const MDNode *getRanges() const { return MMO->getRanges(); } | |||
1300 | ||||
1301 | /// Returns the synchronization scope ID for this memory operation. | |||
1302 | SyncScope::ID getSyncScopeID() const { return MMO->getSyncScopeID(); } | |||
1303 | ||||
1304 | /// Return the atomic ordering requirements for this memory operation. For | |||
1305 | /// cmpxchg atomic operations, return the atomic ordering requirements when | |||
1306 | /// store occurs. | |||
1307 | AtomicOrdering getSuccessOrdering() const { | |||
1308 | return MMO->getSuccessOrdering(); | |||
1309 | } | |||
1310 | ||||
1311 | /// Return a single atomic ordering that is at least as strong as both the | |||
1312 | /// success and failure orderings for an atomic operation. (For operations | |||
1313 | /// other than cmpxchg, this is equivalent to getSuccessOrdering().) | |||
1314 | AtomicOrdering getMergedOrdering() const { return MMO->getMergedOrdering(); } | |||
1315 | ||||
1316 | /// Return true if the memory operation ordering is Unordered or higher. | |||
1317 | bool isAtomic() const { return MMO->isAtomic(); } | |||
1318 | ||||
1319 | /// Returns true if the memory operation doesn't imply any ordering | |||
1320 | /// constraints on surrounding memory operations beyond the normal memory | |||
1321 | /// aliasing rules. | |||
1322 | bool isUnordered() const { return MMO->isUnordered(); } | |||
1323 | ||||
1324 | /// Returns true if the memory operation is neither atomic nor volatile. | |||
1325 | bool isSimple() const { return !isAtomic() && !isVolatile(); } | |||
1326 | ||||
1327 | /// Return the type of the in-memory value. | |||
1328 | EVT getMemoryVT() const { return MemoryVT; } | |||
1329 | ||||
1330 | /// Return a MachineMemOperand object describing the memory | |||
1331 | /// reference performed by operation. | |||
1332 | MachineMemOperand *getMemOperand() const { return MMO; } | |||
1333 | ||||
1334 | const MachinePointerInfo &getPointerInfo() const { | |||
1335 | return MMO->getPointerInfo(); | |||
1336 | } | |||
1337 | ||||
1338 | /// Return the address space for the associated pointer | |||
1339 | unsigned getAddressSpace() const { | |||
1340 | return getPointerInfo().getAddrSpace(); | |||
1341 | } | |||
1342 | ||||
1343 | /// Update this MemSDNode's MachineMemOperand information | |||
1344 | /// to reflect the alignment of NewMMO, if it has a greater alignment. | |||
1345 | /// This must only be used when the new alignment applies to all users of | |||
1346 | /// this MachineMemOperand. | |||
1347 | void refineAlignment(const MachineMemOperand *NewMMO) { | |||
1348 | MMO->refineAlignment(NewMMO); | |||
1349 | } | |||
1350 | ||||
1351 | const SDValue &getChain() const { return getOperand(0); } | |||
1352 | ||||
1353 | const SDValue &getBasePtr() const { | |||
1354 | switch (getOpcode()) { | |||
1355 | case ISD::STORE: | |||
1356 | case ISD::MSTORE: | |||
1357 | return getOperand(2); | |||
1358 | case ISD::MGATHER: | |||
1359 | case ISD::MSCATTER: | |||
1360 | return getOperand(3); | |||
1361 | default: | |||
1362 | return getOperand(1); | |||
1363 | } | |||
1364 | } | |||
1365 | ||||
1366 | // Methods to support isa and dyn_cast | |||
1367 | static bool classof(const SDNode *N) { | |||
1368 | // For some targets, we lower some target intrinsics to a MemIntrinsicNode | |||
1369 | // with either an intrinsic or a target opcode. | |||
1370 | switch (N->getOpcode()) { | |||
1371 | case ISD::LOAD: | |||
1372 | case ISD::STORE: | |||
1373 | case ISD::PREFETCH: | |||
1374 | case ISD::ATOMIC_CMP_SWAP: | |||
1375 | case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: | |||
1376 | case ISD::ATOMIC_SWAP: | |||
1377 | case ISD::ATOMIC_LOAD_ADD: | |||
1378 | case ISD::ATOMIC_LOAD_SUB: | |||
1379 | case ISD::ATOMIC_LOAD_AND: | |||
1380 | case ISD::ATOMIC_LOAD_CLR: | |||
1381 | case ISD::ATOMIC_LOAD_OR: | |||
1382 | case ISD::ATOMIC_LOAD_XOR: | |||
1383 | case ISD::ATOMIC_LOAD_NAND: | |||
1384 | case ISD::ATOMIC_LOAD_MIN: | |||
1385 | case ISD::ATOMIC_LOAD_MAX: | |||
1386 | case ISD::ATOMIC_LOAD_UMIN: | |||
1387 | case ISD::ATOMIC_LOAD_UMAX: | |||
1388 | case ISD::ATOMIC_LOAD_FADD: | |||
1389 | case ISD::ATOMIC_LOAD_FSUB: | |||
1390 | case ISD::ATOMIC_LOAD: | |||
1391 | case ISD::ATOMIC_STORE: | |||
1392 | case ISD::MLOAD: | |||
1393 | case ISD::MSTORE: | |||
1394 | case ISD::MGATHER: | |||
1395 | case ISD::MSCATTER: | |||
1396 | return true; | |||
1397 | default: | |||
1398 | return N->isMemIntrinsic() || N->isTargetMemoryOpcode(); | |||
1399 | } | |||
1400 | } | |||
1401 | }; | |||
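// Illustrative sketch of common MemSDNode queries once classof succeeds
// (N is an assumed SDNode*):
//
//   if (auto *Mem = dyn_cast<MemSDNode>(N)) {
//     Align A = Mem->getAlign();            // from the MachineMemOperand
//     unsigned AS = Mem->getAddressSpace(); // of the pointer operand
//     bool Simple = Mem->isSimple();        // neither atomic nor volatile
//   }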
1402 | ||||
1403 | /// This is an SDNode representing atomic operations. | |||
1404 | class AtomicSDNode : public MemSDNode { | |||
1405 | public: | |||
1406 | AtomicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTL, | |||
1407 | EVT MemVT, MachineMemOperand *MMO) | |||
1408 | : MemSDNode(Opc, Order, dl, VTL, MemVT, MMO) { | |||
1409 | assert(((Opc != ISD::ATOMIC_LOAD && Opc != ISD::ATOMIC_STORE) || | |||
1410 | MMO->isAtomic()) && "then why are we using an AtomicSDNode?"); | |||
1411 | } | |||
1412 | ||||
1413 | const SDValue &getBasePtr() const { return getOperand(1); } | |||
1414 | const SDValue &getVal() const { return getOperand(2); } | |||
1415 | ||||
1416 | /// Returns true if this SDNode represents cmpxchg atomic operation, false | |||
1417 | /// otherwise. | |||
1418 | bool isCompareAndSwap() const { | |||
1419 | unsigned Op = getOpcode(); | |||
1420 | return Op == ISD::ATOMIC_CMP_SWAP || | |||
1421 | Op == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS; | |||
1422 | } | |||
1423 | ||||
1424 | /// For cmpxchg atomic operations, return the atomic ordering requirements | |||
1425 | /// when store does not occur. | |||
1426 | AtomicOrdering getFailureOrdering() const { | |||
1427 | assert(isCompareAndSwap() && "Must be cmpxchg operation"); | |||
1428 | return MMO->getFailureOrdering(); | |||
1429 | } | |||
1430 | ||||
1431 | // Methods to support isa and dyn_cast | |||
1432 | static bool classof(const SDNode *N) { | |||
1433 | return N->getOpcode() == ISD::ATOMIC_CMP_SWAP || | |||
1434 | N->getOpcode() == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS || | |||
1435 | N->getOpcode() == ISD::ATOMIC_SWAP || | |||
1436 | N->getOpcode() == ISD::ATOMIC_LOAD_ADD || | |||
1437 | N->getOpcode() == ISD::ATOMIC_LOAD_SUB || | |||
1438 | N->getOpcode() == ISD::ATOMIC_LOAD_AND || | |||
1439 | N->getOpcode() == ISD::ATOMIC_LOAD_CLR || | |||
1440 | N->getOpcode() == ISD::ATOMIC_LOAD_OR || | |||
1441 | N->getOpcode() == ISD::ATOMIC_LOAD_XOR || | |||
1442 | N->getOpcode() == ISD::ATOMIC_LOAD_NAND || | |||
1443 | N->getOpcode() == ISD::ATOMIC_LOAD_MIN || | |||
1444 | N->getOpcode() == ISD::ATOMIC_LOAD_MAX || | |||
1445 | N->getOpcode() == ISD::ATOMIC_LOAD_UMIN || | |||
1446 | N->getOpcode() == ISD::ATOMIC_LOAD_UMAX || | |||
1447 | N->getOpcode() == ISD::ATOMIC_LOAD_FADD || | |||
1448 | N->getOpcode() == ISD::ATOMIC_LOAD_FSUB || | |||
1449 | N->getOpcode() == ISD::ATOMIC_LOAD || | |||
1450 | N->getOpcode() == ISD::ATOMIC_STORE; | |||
1451 | } | |||
1452 | }; | |||
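// Hedged sketch: identifying cmpxchg nodes and reading their failure
// ordering (only valid for compare-and-swap, per the assertion above):
//
//   if (auto *A = dyn_cast<AtomicSDNode>(N))
//     if (A->isCompareAndSwap() &&
//         A->getFailureOrdering() == AtomicOrdering::SequentiallyConsistent)
//       ; // e.g. lower with a full fence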
1453 | ||||
1454 | /// This SDNode is used for target intrinsics that touch | |||
1455 | /// memory and need an associated MachineMemOperand. Its opcode may be | |||
1456 | /// INTRINSIC_VOID, INTRINSIC_W_CHAIN, PREFETCH, or a target-specific opcode | |||
1457 | /// with a value not less than FIRST_TARGET_MEMORY_OPCODE. | |||
1458 | class MemIntrinsicSDNode : public MemSDNode { | |||
1459 | public: | |||
1460 | MemIntrinsicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, | |||
1461 | SDVTList VTs, EVT MemoryVT, MachineMemOperand *MMO) | |||
1462 | : MemSDNode(Opc, Order, dl, VTs, MemoryVT, MMO) { | |||
1463 | SDNodeBits.IsMemIntrinsic = true; | |||
1464 | } | |||
1465 | ||||
1466 | // Methods to support isa and dyn_cast | |||
1467 | static bool classof(const SDNode *N) { | |||
1468 | // We lower some target intrinsics to their target opcode early, so a | |||
1469 | // node with a target opcode can be of this class. | |||
1470 | return N->isMemIntrinsic() || | |||
1471 | N->getOpcode() == ISD::PREFETCH || | |||
1472 | N->isTargetMemoryOpcode(); | |||
1473 | } | |||
1474 | }; | |||
1475 | ||||
1476 | /// This SDNode is used to implement the code generator | |||
1477 | /// support for the llvm IR shufflevector instruction. It combines elements | |||
1478 | /// from two input vectors into a new input vector, with the selection and | |||
1479 | /// ordering of elements determined by an array of integers, referred to as | |||
1480 | /// the shuffle mask. For input vectors of width N, mask indices of 0..N-1 | |||
1481 | /// refer to elements from the LHS input, and indices from N to 2N-1 the RHS. | |||
1482 | /// An index of -1 is treated as undef, such that the code generator may put | |||
1483 | /// any value in the corresponding element of the result. | |||
1484 | class ShuffleVectorSDNode : public SDNode { | |||
1485 | // The memory for Mask is owned by the SelectionDAG's OperandAllocator, and | |||
1486 | // is freed when the SelectionDAG object is destroyed. | |||
1487 | const int *Mask; | |||
1488 | ||||
1489 | protected: | |||
1490 | friend class SelectionDAG; | |||
1491 | ||||
1492 | ShuffleVectorSDNode(EVT VT, unsigned Order, const DebugLoc &dl, const int *M) | |||
1493 | : SDNode(ISD::VECTOR_SHUFFLE, Order, dl, getSDVTList(VT)), Mask(M) {} | |||
1494 | ||||
1495 | public: | |||
1496 | ArrayRef<int> getMask() const { | |||
1497 | EVT VT = getValueType(0); | |||
1498 | return makeArrayRef(Mask, VT.getVectorNumElements()); | |||
1499 | } | |||
1500 | ||||
1501 | int getMaskElt(unsigned Idx) const { | |||
1502 | assert(Idx < getValueType(0).getVectorNumElements() && "Idx out of range!"); | |||
1503 | return Mask[Idx]; | |||
1504 | } | |||
1505 | ||||
1506 | bool isSplat() const { return isSplatMask(Mask, getValueType(0)); } | |||
1507 | ||||
1508 | int getSplatIndex() const { | |||
1509 | assert(isSplat() && "Cannot get splat index for non-splat!"); | |||
1510 | EVT VT = getValueType(0); | |||
1511 | for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) | |||
1512 | if (Mask[i] >= 0) | |||
1513 | return Mask[i]; | |||
1514 | ||||
1515 | // We can choose any index value here and be correct because all elements | |||
1516 | // are undefined. Return 0 for better potential for callers to simplify. | |||
1517 | return 0; | |||
1518 | } | |||
1519 | ||||
1520 | static bool isSplatMask(const int *Mask, EVT VT); | |||
1521 | ||||
1522 | /// Change values in a shuffle permute mask assuming | |||
1523 | /// the two vector operands have swapped position. | |||
1524 | static void commuteMask(MutableArrayRef<int> Mask) { | |||
1525 | unsigned NumElems = Mask.size(); | |||
1526 | for (unsigned i = 0; i != NumElems; ++i) { | |||
1527 | int idx = Mask[i]; | |||
1528 | if (idx < 0) | |||
1529 | continue; | |||
1530 | else if (idx < (int)NumElems) | |||
1531 | Mask[i] = idx + NumElems; | |||
1532 | else | |||
1533 | Mask[i] = idx - NumElems; | |||
1534 | } | |||
1535 | } | |||
1536 | ||||
1537 | static bool classof(const SDNode *N) { | |||
1538 | return N->getOpcode() == ISD::VECTOR_SHUFFLE; | |||
1539 | } | |||
1540 | }; | |||
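// Illustrative sketch of commuteMask: when the two shuffle inputs are
// swapped, the mask must be rewritten so the node still selects the same
// elements (SVN is an assumed ShuffleVectorSDNode*):
//
//   SmallVector<int, 16> NewMask(SVN->getMask().begin(),
//                                SVN->getMask().end());
//   ShuffleVectorSDNode::commuteMask(NewMask);
//   // Each non-undef entry has now moved across the NumElems boundary.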
1541 | ||||
1542 | class ConstantSDNode : public SDNode { | |||
1543 | friend class SelectionDAG; | |||
1544 | ||||
1545 | const ConstantInt *Value; | |||
1546 | ||||
1547 | ConstantSDNode(bool isTarget, bool isOpaque, const ConstantInt *val, EVT VT) | |||
1548 | : SDNode(isTarget ? ISD::TargetConstant : ISD::Constant, 0, DebugLoc(), | |||
1549 | getSDVTList(VT)), | |||
1550 | Value(val) { | |||
1551 | ConstantSDNodeBits.IsOpaque = isOpaque; | |||
1552 | } | |||
1553 | ||||
1554 | public: | |||
1555 | const ConstantInt *getConstantIntValue() const { return Value; } | |||
1556 | const APInt &getAPIntValue() const { return Value->getValue(); } | |||
1557 | uint64_t getZExtValue() const { return Value->getZExtValue(); } | |||
1558 | int64_t getSExtValue() const { return Value->getSExtValue(); } | |||
1559 | uint64_t getLimitedValue(uint64_t Limit = UINT64_MAX) { | |||
1560 | return Value->getLimitedValue(Limit); | |||
1561 | } | |||
1562 | MaybeAlign getMaybeAlignValue() const { return Value->getMaybeAlignValue(); } | |||
1563 | Align getAlignValue() const { return Value->getAlignValue(); } | |||
1564 | ||||
1565 | bool isOne() const { return Value->isOne(); } | |||
1566 | bool isNullValue() const { return Value->isZero(); } | |||
1567 | bool isAllOnesValue() const { return Value->isMinusOne(); } | |||
1568 | bool isMaxSignedValue() const { return Value->isMaxValue(true); } | |||
1569 | bool isMinSignedValue() const { return Value->isMinValue(true); } | |||
1570 | ||||
1571 | bool isOpaque() const { return ConstantSDNodeBits.IsOpaque; } | |||
1572 | ||||
1573 | static bool classof(const SDNode *N) { | |||
1574 | return N->getOpcode() == ISD::Constant || | |||
1575 | N->getOpcode() == ISD::TargetConstant; | |||
1576 | } | |||
1577 | }; | |||
1578 | ||||
1579 | uint64_t SDNode::getConstantOperandVal(unsigned Num) const { | |||
1580 | return cast<ConstantSDNode>(getOperand(Num))->getZExtValue(); | |||
1581 | } | |||
1582 | ||||
1583 | const APInt &SDNode::getConstantOperandAPInt(unsigned Num) const { | |||
1584 | return cast<ConstantSDNode>(getOperand(Num))->getAPIntValue(); | |||
1585 | } | |||
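// Illustrative sketch: the ConstantSDNode accessors above in a typical
// combine-style guard (N is an assumed SDNode*):
//
//   if (auto *C = dyn_cast<ConstantSDNode>(N->getOperand(1)))
//     if (C->getAPIntValue().isPowerOf2())
//       ; // e.g. replace a multiply with a shift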
1586 | ||||
1587 | class ConstantFPSDNode : public SDNode { | |||
1588 | friend class SelectionDAG; | |||
1589 | ||||
1590 | const ConstantFP *Value; | |||
1591 | ||||
1592 | ConstantFPSDNode(bool isTarget, const ConstantFP *val, EVT VT) | |||
1593 | : SDNode(isTarget ? ISD::TargetConstantFP : ISD::ConstantFP, 0, | |||
1594 | DebugLoc(), getSDVTList(VT)), | |||
1595 | Value(val) {} | |||
1596 | ||||
1597 | public: | |||
1598 | const APFloat& getValueAPF() const { return Value->getValueAPF(); } | |||
1599 | const ConstantFP *getConstantFPValue() const { return Value; } | |||
1600 | ||||
1601 | /// Return true if the value is positive or negative zero. | |||
1602 | bool isZero() const { return Value->isZero(); } | |||
1603 | ||||
1604 | /// Return true if the value is a NaN. | |||
1605 | bool isNaN() const { return Value->isNaN(); } | |||
1606 | ||||
1607 | /// Return true if the value is an infinity | |||
1608 | bool isInfinity() const { return Value->isInfinity(); } | |||
1609 | ||||
1610 | /// Return true if the value is negative. | |||
1611 | bool isNegative() const { return Value->isNegative(); } | |||
1612 | ||||
1613 | /// We don't rely on operator== working on double values, as | |||
1614 | /// it returns true for things that are clearly not equal, like -0.0 and 0.0. | |||
1615 | /// As such, this method can be used to do an exact bit-for-bit comparison of | |||
1616 | /// two floating point values. | |||
1617 | ||||
1618 | /// We leave the version with the double argument here because it's just so | |||
1619 | /// convenient to write "2.0" and the like. Without this function we'd | |||
1620 | /// have to duplicate its logic everywhere it's called. | |||
1621 | bool isExactlyValue(double V) const { | |||
1622 | return Value->getValueAPF().isExactlyValue(V); | |||
1623 | } | |||
1624 | bool isExactlyValue(const APFloat& V) const; | |||
1625 | ||||
1626 | static bool isValueValidForType(EVT VT, const APFloat& Val); | |||
1627 | ||||
1628 | static bool classof(const SDNode *N) { | |||
1629 | return N->getOpcode() == ISD::ConstantFP || | |||
1630 | N->getOpcode() == ISD::TargetConstantFP; | |||
1631 | } | |||
1632 | }; | |||
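// Hedged sketch of isExactlyValue: the comparison is bit-for-bit, unlike
// double operator==, so positive and negative zero are distinguished:
//
//   if (auto *CFP = dyn_cast<ConstantFPSDNode>(V))
//     if (CFP->isExactlyValue(-0.0)) // true only for negative zero
//       ; // fold away an FP add of -0.0, for example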
1633 | ||||
1634 | /// Returns true if \p V is a constant integer zero. | |||
1635 | bool isNullConstant(SDValue V); | |||
1636 | ||||
1637 | /// Returns true if \p V is an FP constant with a value of positive zero. | |||
1638 | bool isNullFPConstant(SDValue V); | |||
1639 | ||||
1640 | /// Returns true if \p V is an integer constant with all bits set. | |||
1641 | bool isAllOnesConstant(SDValue V); | |||
1642 | ||||
1643 | /// Returns true if \p V is a constant integer one. | |||
1644 | bool isOneConstant(SDValue V); | |||
1645 | ||||
1646 | /// Return the non-bitcasted source operand of \p V if it exists. | |||
1647 | /// If \p V is not a bitcasted value, it is returned as-is. | |||
1648 | SDValue peekThroughBitcasts(SDValue V); | |||
1649 | ||||
1650 | /// Return the non-bitcasted and one-use source operand of \p V if it exists. | |||
1651 | /// If \p V is not a bitcasted one-use value, it is returned as-is. | |||
1652 | SDValue peekThroughOneUseBitcasts(SDValue V); | |||
1653 | ||||
1654 | /// Return the non-extracted vector source operand of \p V if it exists. | |||
1655 | /// If \p V is not an extracted subvector, it is returned as-is. | |||
1656 | SDValue peekThroughExtractSubvectors(SDValue V); | |||
1657 | ||||
1658 | /// Returns true if \p V is a bitwise not operation. Assumes that an all ones | |||
1659 | /// constant is canonicalized to be operand 1. | |||
1660 | bool isBitwiseNot(SDValue V, bool AllowUndefs = false); | |||
1661 | ||||
1662 | /// Returns the SDNode if it is a constant splat BuildVector or constant int. | |||
1663 | ConstantSDNode *isConstOrConstSplat(SDValue N, bool AllowUndefs = false, | |||
1664 | bool AllowTruncation = false); | |||
1665 | ||||
1666 | /// Returns the SDNode if it is a demanded constant splat BuildVector or | |||
1667 | /// constant int. | |||
1668 | ConstantSDNode *isConstOrConstSplat(SDValue N, const APInt &DemandedElts, | |||
1669 | bool AllowUndefs = false, | |||
1670 | bool AllowTruncation = false); | |||
1671 | ||||
1672 | /// Returns the SDNode if it is a constant splat BuildVector or constant float. | |||
1673 | ConstantFPSDNode *isConstOrConstSplatFP(SDValue N, bool AllowUndefs = false); | |||
1674 | ||||
1675 | /// Returns the SDNode if it is a demanded constant splat BuildVector or | |||
1676 | /// constant float. | |||
1677 | ConstantFPSDNode *isConstOrConstSplatFP(SDValue N, const APInt &DemandedElts, | |||
1678 | bool AllowUndefs = false); | |||
1679 | ||||
1680 | /// Return true if the value is a constant 0 integer or a splatted vector of | |||
1681 | /// a constant 0 integer (with no undefs by default). | |||
1682 | /// Build vector implicit truncation is not an issue for null values. | |||
1683 | bool isNullOrNullSplat(SDValue V, bool AllowUndefs = false); | |||
1684 | ||||
1685 | /// Return true if the value is a constant 1 integer or a splatted vector of a | |||
1686 | /// constant 1 integer (with no undefs). | |||
1687 | /// Does not permit build vector implicit truncation. | |||
1688 | bool isOneOrOneSplat(SDValue V, bool AllowUndefs = false); | |||
1689 | ||||
1690 | /// Return true if the value is a constant -1 integer or a splatted vector of a | |||
1691 | /// constant -1 integer (with no undefs). | |||
1692 | /// Does not permit build vector implicit truncation. | |||
1693 | bool isAllOnesOrAllOnesSplat(SDValue V, bool AllowUndefs = false); | |||
1694 | ||||
1695 | /// Return true if \p V is either an integer or FP constant.
1696 | inline bool isIntOrFPConstant(SDValue V) { | |||
1697 | return isa<ConstantSDNode>(V) || isa<ConstantFPSDNode>(V); | |||
1698 | } | |||
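
// Editorial usage sketch, not part of the original header: a typical
// combine-time use of the constant predicates above; the helper name is
// hypothetical.
static inline bool isAddOfOne(SDValue V) {
  // Matches (add X, 1) whether the 1 is a scalar constant or a splatted
  // vector of 1s, per isOneOrOneSplat above.
  return V.getOpcode() == ISD::ADD && isOneOrOneSplat(V.getOperand(1));
}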
1699 | ||||
1700 | class GlobalAddressSDNode : public SDNode { | |||
1701 | friend class SelectionDAG; | |||
1702 | ||||
1703 | const GlobalValue *TheGlobal; | |||
1704 | int64_t Offset; | |||
1705 | unsigned TargetFlags; | |||
1706 | ||||
1707 | GlobalAddressSDNode(unsigned Opc, unsigned Order, const DebugLoc &DL, | |||
1708 | const GlobalValue *GA, EVT VT, int64_t o, | |||
1709 | unsigned TF); | |||
1710 | ||||
1711 | public: | |||
1712 | const GlobalValue *getGlobal() const { return TheGlobal; } | |||
1713 | int64_t getOffset() const { return Offset; } | |||
1714 | unsigned getTargetFlags() const { return TargetFlags; } | |||
1715 | // Return the address space this GlobalAddress belongs to. | |||
1716 | unsigned getAddressSpace() const; | |||
1717 | ||||
1718 | static bool classof(const SDNode *N) { | |||
1719 | return N->getOpcode() == ISD::GlobalAddress || | |||
1720 | N->getOpcode() == ISD::TargetGlobalAddress || | |||
1721 | N->getOpcode() == ISD::GlobalTLSAddress || | |||
1722 | N->getOpcode() == ISD::TargetGlobalTLSAddress; | |||
1723 | } | |||
1724 | }; | |||
1725 | ||||
1726 | class FrameIndexSDNode : public SDNode { | |||
1727 | friend class SelectionDAG; | |||
1728 | ||||
1729 | int FI; | |||
1730 | ||||
1731 | FrameIndexSDNode(int fi, EVT VT, bool isTarg) | |||
1732 | : SDNode(isTarg ? ISD::TargetFrameIndex : ISD::FrameIndex, | |||
1733 | 0, DebugLoc(), getSDVTList(VT)), FI(fi) { | |||
1734 | } | |||
1735 | ||||
1736 | public: | |||
1737 | int getIndex() const { return FI; } | |||
1738 | ||||
1739 | static bool classof(const SDNode *N) { | |||
1740 | return N->getOpcode() == ISD::FrameIndex || | |||
1741 | N->getOpcode() == ISD::TargetFrameIndex; | |||
1742 | } | |||
1743 | }; | |||
1744 | ||||
1745 | /// This SDNode is used for LIFETIME_START/LIFETIME_END values, which indicate | |||
1746 | /// the offset and size that are started/ended in the underlying FrameIndex.
1747 | class LifetimeSDNode : public SDNode { | |||
1748 | friend class SelectionDAG; | |||
1749 | int64_t Size; | |||
1750 | int64_t Offset; // -1 if offset is unknown. | |||
1751 | ||||
1752 | LifetimeSDNode(unsigned Opcode, unsigned Order, const DebugLoc &dl, | |||
1753 | SDVTList VTs, int64_t Size, int64_t Offset) | |||
1754 | : SDNode(Opcode, Order, dl, VTs), Size(Size), Offset(Offset) {} | |||
1755 | public: | |||
1756 | int64_t getFrameIndex() const { | |||
1757 | return cast<FrameIndexSDNode>(getOperand(1))->getIndex(); | |||
1758 | } | |||
1759 | ||||
1760 | bool hasOffset() const { return Offset >= 0; } | |||
1761 | int64_t getOffset() const { | |||
1762 | assert(hasOffset() && "offset is unknown");
1763 | return Offset; | |||
1764 | } | |||
1765 | int64_t getSize() const { | |||
1766 | assert(hasOffset() && "offset is unknown");
1767 | return Size; | |||
1768 | } | |||
1769 | ||||
1770 | // Methods to support isa and dyn_cast | |||
1771 | static bool classof(const SDNode *N) { | |||
1772 | return N->getOpcode() == ISD::LIFETIME_START || | |||
1773 | N->getOpcode() == ISD::LIFETIME_END; | |||
1774 | } | |||
1775 | }; | |||
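
// Editorial usage sketch, not part of the original header: getOffset() and
// getSize() assert hasOffset(), so callers are expected to test it first.
// The helper name and default value are hypothetical.
static inline int64_t lifetimeOffsetOrDefault(const LifetimeSDNode *LN,
                                              int64_t Default = -1) {
  return LN->hasOffset() ? LN->getOffset() : Default;
}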
1776 | ||||
1777 | /// This SDNode is used for PSEUDO_PROBE values, which carry the function GUID
1778 | /// and the index of the basic block being probed. A pseudo probe serves as a
1779 | /// placeholder and will be removed at the end of compilation. It has no
1780 | /// operands because we do not want instruction selection to deal with any.
1781 | class PseudoProbeSDNode : public SDNode { | |||
1782 | friend class SelectionDAG; | |||
1783 | uint64_t Guid; | |||
1784 | uint64_t Index; | |||
1785 | uint32_t Attributes; | |||
1786 | ||||
1787 | PseudoProbeSDNode(unsigned Opcode, unsigned Order, const DebugLoc &Dl, | |||
1788 | SDVTList VTs, uint64_t Guid, uint64_t Index, uint32_t Attr) | |||
1789 | : SDNode(Opcode, Order, Dl, VTs), Guid(Guid), Index(Index), | |||
1790 | Attributes(Attr) {} | |||
1791 | ||||
1792 | public: | |||
1793 | uint64_t getGuid() const { return Guid; } | |||
1794 | uint64_t getIndex() const { return Index; } | |||
1795 | uint32_t getAttributes() const { return Attributes; } | |||
1796 | ||||
1797 | // Methods to support isa and dyn_cast | |||
1798 | static bool classof(const SDNode *N) { | |||
1799 | return N->getOpcode() == ISD::PSEUDO_PROBE; | |||
1800 | } | |||
1801 | }; | |||
1802 | ||||
1803 | class JumpTableSDNode : public SDNode { | |||
1804 | friend class SelectionDAG; | |||
1805 | ||||
1806 | int JTI; | |||
1807 | unsigned TargetFlags; | |||
1808 | ||||
1809 | JumpTableSDNode(int jti, EVT VT, bool isTarg, unsigned TF) | |||
1810 | : SDNode(isTarg ? ISD::TargetJumpTable : ISD::JumpTable, | |||
1811 | 0, DebugLoc(), getSDVTList(VT)), JTI(jti), TargetFlags(TF) { | |||
1812 | } | |||
1813 | ||||
1814 | public: | |||
1815 | int getIndex() const { return JTI; } | |||
1816 | unsigned getTargetFlags() const { return TargetFlags; } | |||
1817 | ||||
1818 | static bool classof(const SDNode *N) { | |||
1819 | return N->getOpcode() == ISD::JumpTable || | |||
1820 | N->getOpcode() == ISD::TargetJumpTable; | |||
1821 | } | |||
1822 | }; | |||
1823 | ||||
1824 | class ConstantPoolSDNode : public SDNode { | |||
1825 | friend class SelectionDAG; | |||
1826 | ||||
1827 | union { | |||
1828 | const Constant *ConstVal; | |||
1829 | MachineConstantPoolValue *MachineCPVal; | |||
1830 | } Val; | |||
1831 | int Offset; // It's a MachineConstantPoolValue if top bit is set. | |||
1832 | Align Alignment; // Minimum alignment requirement of CP. | |||
1833 | unsigned TargetFlags; | |||
1834 | ||||
1835 | ConstantPoolSDNode(bool isTarget, const Constant *c, EVT VT, int o, | |||
1836 | Align Alignment, unsigned TF) | |||
1837 | : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, 0, | |||
1838 | DebugLoc(), getSDVTList(VT)), | |||
1839 | Offset(o), Alignment(Alignment), TargetFlags(TF) { | |||
1840 | assert(Offset >= 0 && "Offset is too large");
1841 | Val.ConstVal = c; | |||
1842 | } | |||
1843 | ||||
1844 | ConstantPoolSDNode(bool isTarget, MachineConstantPoolValue *v, EVT VT, int o, | |||
1845 | Align Alignment, unsigned TF) | |||
1846 | : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, 0, | |||
1847 | DebugLoc(), getSDVTList(VT)), | |||
1848 | Offset(o), Alignment(Alignment), TargetFlags(TF) { | |||
1849 | assert(Offset >= 0 && "Offset is too large");
1850 | Val.MachineCPVal = v; | |||
1851 | Offset |= 1 << (sizeof(unsigned)*CHAR_BIT-1);
1852 | } | |||
1853 | ||||
1854 | public: | |||
1855 | bool isMachineConstantPoolEntry() const { | |||
1856 | return Offset < 0; | |||
1857 | } | |||
1858 | ||||
1859 | const Constant *getConstVal() const { | |||
1860 | assert(!isMachineConstantPoolEntry() && "Wrong constantpool type");
1861 | return Val.ConstVal; | |||
1862 | } | |||
1863 | ||||
1864 | MachineConstantPoolValue *getMachineCPVal() const { | |||
1865 | assert(isMachineConstantPoolEntry() && "Wrong constantpool type");
1866 | return Val.MachineCPVal; | |||
1867 | } | |||
1868 | ||||
1869 | int getOffset() const { | |||
1870 | return Offset & ~(1 << (sizeof(unsigned)*CHAR_BIT-1));
1871 | } | |||
1872 | ||||
1873 | // Return the minimum alignment requirement of this constant pool
1874 | // object.
1875 | Align getAlign() const { return Alignment; } | |||
1876 | unsigned getTargetFlags() const { return TargetFlags; } | |||
1877 | ||||
1878 | Type *getType() const; | |||
1879 | ||||
1880 | static bool classof(const SDNode *N) { | |||
1881 | return N->getOpcode() == ISD::ConstantPool || | |||
1882 | N->getOpcode() == ISD::TargetConstantPool; | |||
1883 | } | |||
1884 | }; | |||
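
// Editorial usage sketch, not part of the original header: the Val union
// must be read through the accessor matching isMachineConstantPoolEntry(),
// which is encoded in the sign bit of Offset. The helper name is
// hypothetical.
static inline const void *rawConstantPoolEntry(const ConstantPoolSDNode *CP) {
  return CP->isMachineConstantPoolEntry()
             ? static_cast<const void *>(CP->getMachineCPVal())
             : static_cast<const void *>(CP->getConstVal());
}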
1885 | ||||
1886 | /// Completely target-dependent object reference. | |||
1887 | class TargetIndexSDNode : public SDNode { | |||
1888 | friend class SelectionDAG; | |||
1889 | ||||
1890 | unsigned TargetFlags; | |||
1891 | int Index; | |||
1892 | int64_t Offset; | |||
1893 | ||||
1894 | public: | |||
1895 | TargetIndexSDNode(int Idx, EVT VT, int64_t Ofs, unsigned TF) | |||
1896 | : SDNode(ISD::TargetIndex, 0, DebugLoc(), getSDVTList(VT)), | |||
1897 | TargetFlags(TF), Index(Idx), Offset(Ofs) {} | |||
1898 | ||||
1899 | unsigned getTargetFlags() const { return TargetFlags; } | |||
1900 | int getIndex() const { return Index; } | |||
1901 | int64_t getOffset() const { return Offset; } | |||
1902 | ||||
1903 | static bool classof(const SDNode *N) { | |||
1904 | return N->getOpcode() == ISD::TargetIndex; | |||
1905 | } | |||
1906 | }; | |||
1907 | ||||
1908 | class BasicBlockSDNode : public SDNode { | |||
1909 | friend class SelectionDAG; | |||
1910 | ||||
1911 | MachineBasicBlock *MBB; | |||
1912 | ||||
1913 | /// Debug info is meaningful and potentially useful here, but we create | |||
1914 | /// blocks out of order when they're jumped to, which makes it a bit | |||
1915 | /// harder. Let's see if we need it first. | |||
1916 | explicit BasicBlockSDNode(MachineBasicBlock *mbb) | |||
1917 | : SDNode(ISD::BasicBlock, 0, DebugLoc(), getSDVTList(MVT::Other)), MBB(mbb) | |||
1918 | {} | |||
1919 | ||||
1920 | public: | |||
1921 | MachineBasicBlock *getBasicBlock() const { return MBB; } | |||
1922 | ||||
1923 | static bool classof(const SDNode *N) { | |||
1924 | return N->getOpcode() == ISD::BasicBlock; | |||
1925 | } | |||
1926 | }; | |||
1927 | ||||
1928 | /// A "pseudo-class" with methods for operating on BUILD_VECTORs. | |||
1929 | class BuildVectorSDNode : public SDNode { | |||
1930 | public: | |||
1931 | // These are constructed as SDNodes and then cast to BuildVectorSDNodes. | |||
1932 | explicit BuildVectorSDNode() = delete; | |||
1933 | ||||
1934 | /// Check if this is a constant splat, and if so, find the | |||
1935 | /// smallest element size that splats the vector. If MinSplatBits is | |||
1936 | /// nonzero, the element size must be at least that large. Note that the | |||
1937 | /// splat element may be the entire vector (i.e., a one element vector). | |||
1938 | /// Returns the splat element value in SplatValue. Any undefined bits in | |||
1939 | /// that value are zero, and the corresponding bits in the SplatUndef mask | |||
1940 | /// are set. The SplatBitSize value is set to the splat element size in | |||
1941 | /// bits. HasAnyUndefs is set to true if any bits in the vector are | |||
1942 | /// undefined. isBigEndian describes the endianness of the target. | |||
1943 | bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef, | |||
1944 | unsigned &SplatBitSize, bool &HasAnyUndefs, | |||
1945 | unsigned MinSplatBits = 0, | |||
1946 | bool isBigEndian = false) const; | |||
1947 | ||||
1948 | /// Returns the demanded splatted value or a null value if this is not a | |||
1949 | /// splat. | |||
1950 | /// | |||
1951 | /// The DemandedElts mask indicates the elements that must be in the splat. | |||
1952 | /// If passed a non-null UndefElements bitvector, it will resize it to match | |||
1953 | /// the vector width and set the bits where elements are undef. | |||
1954 | SDValue getSplatValue(const APInt &DemandedElts, | |||
1955 | BitVector *UndefElements = nullptr) const; | |||
1956 | ||||
1957 | /// Returns the splatted value or a null value if this is not a splat. | |||
1958 | /// | |||
1959 | /// If passed a non-null UndefElements bitvector, it will resize it to match | |||
1960 | /// the vector width and set the bits where elements are undef. | |||
1961 | SDValue getSplatValue(BitVector *UndefElements = nullptr) const; | |||
1962 | ||||
1963 | /// Find the shortest repeating sequence of values in the build vector. | |||
1964 | /// | |||
1965 | /// e.g. { u, X, u, X, u, u, X, u } -> { X } | |||
1966 | /// { X, Y, u, Y, u, u, X, u } -> { X, Y } | |||
1967 | /// | |||
1968 | /// Currently this must be a power-of-2 build vector. | |||
1969 | /// The DemandedElts mask indicates the elements that must be present, | |||
1970 | /// undemanded elements in Sequence may be null (SDValue()). If passed a | |||
1971 | /// non-null UndefElements bitvector, it will resize it to match the original | |||
1972 | /// vector width and set the bits where elements are undef. If result is | |||
1973 | /// false, Sequence will be empty. | |||
1974 | bool getRepeatedSequence(const APInt &DemandedElts, | |||
1975 | SmallVectorImpl<SDValue> &Sequence, | |||
1976 | BitVector *UndefElements = nullptr) const; | |||
1977 | ||||
1978 | /// Find the shortest repeating sequence of values in the build vector. | |||
1979 | /// | |||
1980 | /// e.g. { u, X, u, X, u, u, X, u } -> { X } | |||
1981 | /// { X, Y, u, Y, u, u, X, u } -> { X, Y } | |||
1982 | /// | |||
1983 | /// Currently this must be a power-of-2 build vector. | |||
1984 | /// If passed a non-null UndefElements bitvector, it will resize it to match | |||
1985 | /// the original vector width and set the bits where elements are undef. | |||
1986 | /// If result is false, Sequence will be empty. | |||
1987 | bool getRepeatedSequence(SmallVectorImpl<SDValue> &Sequence, | |||
1988 | BitVector *UndefElements = nullptr) const; | |||
1989 | ||||
1990 | /// Returns the demanded splatted constant or null if this is not a constant | |||
1991 | /// splat. | |||
1992 | /// | |||
1993 | /// The DemandedElts mask indicates the elements that must be in the splat. | |||
1994 | /// If passed a non-null UndefElements bitvector, it will resize it to match | |||
1995 | /// the vector width and set the bits where elements are undef. | |||
1996 | ConstantSDNode * | |||
1997 | getConstantSplatNode(const APInt &DemandedElts, | |||
1998 | BitVector *UndefElements = nullptr) const; | |||
1999 | ||||
2000 | /// Returns the splatted constant or null if this is not a constant | |||
2001 | /// splat. | |||
2002 | /// | |||
2003 | /// If passed a non-null UndefElements bitvector, it will resize it to match | |||
2004 | /// the vector width and set the bits where elements are undef. | |||
2005 | ConstantSDNode * | |||
2006 | getConstantSplatNode(BitVector *UndefElements = nullptr) const; | |||
2007 | ||||
2008 | /// Returns the demanded splatted constant FP or null if this is not a | |||
2009 | /// constant FP splat. | |||
2010 | /// | |||
2011 | /// The DemandedElts mask indicates the elements that must be in the splat. | |||
2012 | /// If passed a non-null UndefElements bitvector, it will resize it to match | |||
2013 | /// the vector width and set the bits where elements are undef. | |||
2014 | ConstantFPSDNode * | |||
2015 | getConstantFPSplatNode(const APInt &DemandedElts, | |||
2016 | BitVector *UndefElements = nullptr) const; | |||
2017 | ||||
2018 | /// Returns the splatted constant FP or null if this is not a constant | |||
2019 | /// FP splat. | |||
2020 | /// | |||
2021 | /// If passed a non-null UndefElements bitvector, it will resize it to match | |||
2022 | /// the vector width and set the bits where elements are undef. | |||
2023 | ConstantFPSDNode * | |||
2024 | getConstantFPSplatNode(BitVector *UndefElements = nullptr) const; | |||
2025 | ||||
2026 | /// If this is a constant FP splat and the splatted constant FP is an | |||
2027 | /// exact power or 2, return the log base 2 integer value. Otherwise, | |||
2028 | /// return -1. | |||
2029 | /// | |||
2030 | /// The BitWidth specifies the necessary bit precision. | |||
2031 | int32_t getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements, | |||
2032 | uint32_t BitWidth) const; | |||
2033 | ||||
2034 | bool isConstant() const; | |||
2035 | ||||
2036 | static bool classof(const SDNode *N) { | |||
2037 | return N->getOpcode() == ISD::BUILD_VECTOR; | |||
2038 | } | |||
2039 | }; | |||
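
// Editorial usage sketch, not part of the original header: querying a
// BUILD_VECTOR for a constant splat via isConstantSplat(); the helper name
// is hypothetical.
static inline bool isSplatOfNonZeroConstant(const BuildVectorSDNode *BV) {
  APInt SplatValue, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  return BV->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
                             HasAnyUndefs) &&
         !SplatValue.isNullValue();
}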
2040 | ||||
2041 | /// An SDNode that holds an arbitrary LLVM IR Value. This is | |||
2042 | /// used when the SelectionDAG needs to make a simple reference to something | |||
2043 | /// in the LLVM IR representation. | |||
2044 | /// | |||
2045 | class SrcValueSDNode : public SDNode { | |||
2046 | friend class SelectionDAG; | |||
2047 | ||||
2048 | const Value *V; | |||
2049 | ||||
2050 | /// Create a SrcValue for a general value. | |||
2051 | explicit SrcValueSDNode(const Value *v) | |||
2052 | : SDNode(ISD::SRCVALUE, 0, DebugLoc(), getSDVTList(MVT::Other)), V(v) {} | |||
2053 | ||||
2054 | public: | |||
2055 | /// Return the contained Value. | |||
2056 | const Value *getValue() const { return V; } | |||
2057 | ||||
2058 | static bool classof(const SDNode *N) { | |||
2059 | return N->getOpcode() == ISD::SRCVALUE; | |||
2060 | } | |||
2061 | }; | |||
2062 | ||||
2063 | class MDNodeSDNode : public SDNode { | |||
2064 | friend class SelectionDAG; | |||
2065 | ||||
2066 | const MDNode *MD; | |||
2067 | ||||
2068 | explicit MDNodeSDNode(const MDNode *md) | |||
2069 | : SDNode(ISD::MDNODE_SDNODE, 0, DebugLoc(), getSDVTList(MVT::Other)), MD(md) | |||
2070 | {} | |||
2071 | ||||
2072 | public: | |||
2073 | const MDNode *getMD() const { return MD; } | |||
2074 | ||||
2075 | static bool classof(const SDNode *N) { | |||
2076 | return N->getOpcode() == ISD::MDNODE_SDNODE; | |||
2077 | } | |||
2078 | }; | |||
2079 | ||||
2080 | class RegisterSDNode : public SDNode { | |||
2081 | friend class SelectionDAG; | |||
2082 | ||||
2083 | Register Reg; | |||
2084 | ||||
2085 | RegisterSDNode(Register reg, EVT VT) | |||
2086 | : SDNode(ISD::Register, 0, DebugLoc(), getSDVTList(VT)), Reg(reg) {} | |||
2087 | ||||
2088 | public: | |||
2089 | Register getReg() const { return Reg; } | |||
2090 | ||||
2091 | static bool classof(const SDNode *N) { | |||
2092 | return N->getOpcode() == ISD::Register; | |||
2093 | } | |||
2094 | }; | |||
2095 | ||||
2096 | class RegisterMaskSDNode : public SDNode { | |||
2097 | friend class SelectionDAG; | |||
2098 | ||||
2099 | // The memory for RegMask is not owned by the node. | |||
2100 | const uint32_t *RegMask; | |||
2101 | ||||
2102 | RegisterMaskSDNode(const uint32_t *mask) | |||
2103 | : SDNode(ISD::RegisterMask, 0, DebugLoc(), getSDVTList(MVT::Untyped)), | |||
2104 | RegMask(mask) {} | |||
2105 | ||||
2106 | public: | |||
2107 | const uint32_t *getRegMask() const { return RegMask; } | |||
2108 | ||||
2109 | static bool classof(const SDNode *N) { | |||
2110 | return N->getOpcode() == ISD::RegisterMask; | |||
2111 | } | |||
2112 | }; | |||
2113 | ||||
2114 | class BlockAddressSDNode : public SDNode { | |||
2115 | friend class SelectionDAG; | |||
2116 | ||||
2117 | const BlockAddress *BA; | |||
2118 | int64_t Offset; | |||
2119 | unsigned TargetFlags; | |||
2120 | ||||
2121 | BlockAddressSDNode(unsigned NodeTy, EVT VT, const BlockAddress *ba, | |||
2122 | int64_t o, unsigned Flags) | |||
2123 | : SDNode(NodeTy, 0, DebugLoc(), getSDVTList(VT)), | |||
2124 | BA(ba), Offset(o), TargetFlags(Flags) {} | |||
2125 | ||||
2126 | public: | |||
2127 | const BlockAddress *getBlockAddress() const { return BA; } | |||
2128 | int64_t getOffset() const { return Offset; } | |||
2129 | unsigned getTargetFlags() const { return TargetFlags; } | |||
2130 | ||||
2131 | static bool classof(const SDNode *N) { | |||
2132 | return N->getOpcode() == ISD::BlockAddress || | |||
2133 | N->getOpcode() == ISD::TargetBlockAddress; | |||
2134 | } | |||
2135 | }; | |||
2136 | ||||
2137 | class LabelSDNode : public SDNode { | |||
2138 | friend class SelectionDAG; | |||
2139 | ||||
2140 | MCSymbol *Label; | |||
2141 | ||||
2142 | LabelSDNode(unsigned Opcode, unsigned Order, const DebugLoc &dl, MCSymbol *L) | |||
2143 | : SDNode(Opcode, Order, dl, getSDVTList(MVT::Other)), Label(L) { | |||
2144 | assert(LabelSDNode::classof(this) && "not a label opcode");
2145 | } | |||
2146 | ||||
2147 | public: | |||
2148 | MCSymbol *getLabel() const { return Label; } | |||
2149 | ||||
2150 | static bool classof(const SDNode *N) { | |||
2151 | return N->getOpcode() == ISD::EH_LABEL || | |||
2152 | N->getOpcode() == ISD::ANNOTATION_LABEL; | |||
2153 | } | |||
2154 | }; | |||
2155 | ||||
2156 | class ExternalSymbolSDNode : public SDNode { | |||
2157 | friend class SelectionDAG; | |||
2158 | ||||
2159 | const char *Symbol; | |||
2160 | unsigned TargetFlags; | |||
2161 | ||||
2162 | ExternalSymbolSDNode(bool isTarget, const char *Sym, unsigned TF, EVT VT) | |||
2163 | : SDNode(isTarget ? ISD::TargetExternalSymbol : ISD::ExternalSymbol, 0, | |||
2164 | DebugLoc(), getSDVTList(VT)), | |||
2165 | Symbol(Sym), TargetFlags(TF) {} | |||
2166 | ||||
2167 | public: | |||
2168 | const char *getSymbol() const { return Symbol; } | |||
2169 | unsigned getTargetFlags() const { return TargetFlags; } | |||
2170 | ||||
2171 | static bool classof(const SDNode *N) { | |||
2172 | return N->getOpcode() == ISD::ExternalSymbol || | |||
2173 | N->getOpcode() == ISD::TargetExternalSymbol; | |||
2174 | } | |||
2175 | }; | |||
2176 | ||||
2177 | class MCSymbolSDNode : public SDNode { | |||
2178 | friend class SelectionDAG; | |||
2179 | ||||
2180 | MCSymbol *Symbol; | |||
2181 | ||||
2182 | MCSymbolSDNode(MCSymbol *Symbol, EVT VT) | |||
2183 | : SDNode(ISD::MCSymbol, 0, DebugLoc(), getSDVTList(VT)), Symbol(Symbol) {} | |||
2184 | ||||
2185 | public: | |||
2186 | MCSymbol *getMCSymbol() const { return Symbol; } | |||
2187 | ||||
2188 | static bool classof(const SDNode *N) { | |||
2189 | return N->getOpcode() == ISD::MCSymbol; | |||
2190 | } | |||
2191 | }; | |||
2192 | ||||
2193 | class CondCodeSDNode : public SDNode { | |||
2194 | friend class SelectionDAG; | |||
2195 | ||||
2196 | ISD::CondCode Condition; | |||
2197 | ||||
2198 | explicit CondCodeSDNode(ISD::CondCode Cond) | |||
2199 | : SDNode(ISD::CONDCODE, 0, DebugLoc(), getSDVTList(MVT::Other)), | |||
2200 | Condition(Cond) {} | |||
2201 | ||||
2202 | public: | |||
2203 | ISD::CondCode get() const { return Condition; } | |||
2204 | ||||
2205 | static bool classof(const SDNode *N) { | |||
2206 | return N->getOpcode() == ISD::CONDCODE; | |||
2207 | } | |||
2208 | }; | |||
2209 | ||||
2210 | /// This class is used to represent EVT's, which are used | |||
2211 | /// to parameterize some operations. | |||
2212 | class VTSDNode : public SDNode { | |||
2213 | friend class SelectionDAG; | |||
2214 | ||||
2215 | EVT ValueType; | |||
2216 | ||||
2217 | explicit VTSDNode(EVT VT) | |||
2218 | : SDNode(ISD::VALUETYPE, 0, DebugLoc(), getSDVTList(MVT::Other)), | |||
2219 | ValueType(VT) {} | |||
2220 | ||||
2221 | public: | |||
2222 | EVT getVT() const { return ValueType; } | |||
2223 | ||||
2224 | static bool classof(const SDNode *N) { | |||
2225 | return N->getOpcode() == ISD::VALUETYPE; | |||
2226 | } | |||
2227 | }; | |||
2228 | ||||
2229 | /// Base class for LoadSDNode and StoreSDNode | |||
2230 | class LSBaseSDNode : public MemSDNode { | |||
2231 | public: | |||
2232 | LSBaseSDNode(ISD::NodeType NodeTy, unsigned Order, const DebugLoc &dl, | |||
2233 | SDVTList VTs, ISD::MemIndexedMode AM, EVT MemVT, | |||
2234 | MachineMemOperand *MMO) | |||
2235 | : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) { | |||
2236 | LSBaseSDNodeBits.AddressingMode = AM; | |||
2237 | assert(getAddressingMode() == AM && "Value truncated");
2238 | } | |||
2239 | ||||
2240 | const SDValue &getOffset() const { | |||
2241 | return getOperand(getOpcode() == ISD::LOAD ? 2 : 3); | |||
2242 | } | |||
2243 | ||||
2244 | /// Return the addressing mode for this load or store: | |||
2245 | /// unindexed, pre-inc, pre-dec, post-inc, or post-dec. | |||
2246 | ISD::MemIndexedMode getAddressingMode() const { | |||
2247 | return static_cast<ISD::MemIndexedMode>(LSBaseSDNodeBits.AddressingMode); | |||
2248 | } | |||
2249 | ||||
2250 | /// Return true if this is a pre/post inc/dec load/store. | |||
2251 | bool isIndexed() const { return getAddressingMode() != ISD::UNINDEXED; } | |||
2252 | ||||
2253 | /// Return true if this is NOT a pre/post inc/dec load/store. | |||
2254 | bool isUnindexed() const { return getAddressingMode() == ISD::UNINDEXED; } | |||
2255 | ||||
2256 | static bool classof(const SDNode *N) { | |||
2257 | return N->getOpcode() == ISD::LOAD || | |||
2258 | N->getOpcode() == ISD::STORE; | |||
2259 | } | |||
2260 | }; | |||
2261 | ||||
2262 | /// This class is used to represent ISD::LOAD nodes. | |||
2263 | class LoadSDNode : public LSBaseSDNode { | |||
2264 | friend class SelectionDAG; | |||
2265 | ||||
2266 | LoadSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs, | |||
2267 | ISD::MemIndexedMode AM, ISD::LoadExtType ETy, EVT MemVT, | |||
2268 | MachineMemOperand *MMO) | |||
2269 | : LSBaseSDNode(ISD::LOAD, Order, dl, VTs, AM, MemVT, MMO) { | |||
2270 | LoadSDNodeBits.ExtTy = ETy; | |||
2271 | assert(readMem() && "Load MachineMemOperand is not a load!");
2272 | assert(!writeMem() && "Load MachineMemOperand is a store!");
2273 | } | |||
2274 | ||||
2275 | public: | |||
2276 | /// Return whether this is a plain node, | |||
2277 | /// or one of the varieties of value-extending loads. | |||
2278 | ISD::LoadExtType getExtensionType() const { | |||
2279 | return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy); | |||
2280 | } | |||
2281 | ||||
2282 | const SDValue &getBasePtr() const { return getOperand(1); } | |||
2283 | const SDValue &getOffset() const { return getOperand(2); } | |||
2284 | ||||
2285 | static bool classof(const SDNode *N) { | |||
2286 | return N->getOpcode() == ISD::LOAD; | |||
2287 | } | |||
2288 | }; | |||
2289 | ||||
2290 | /// This class is used to represent ISD::STORE nodes. | |||
2291 | class StoreSDNode : public LSBaseSDNode { | |||
2292 | friend class SelectionDAG; | |||
2293 | ||||
2294 | StoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs, | |||
2295 | ISD::MemIndexedMode AM, bool isTrunc, EVT MemVT, | |||
2296 | MachineMemOperand *MMO) | |||
2297 | : LSBaseSDNode(ISD::STORE, Order, dl, VTs, AM, MemVT, MMO) { | |||
2298 | StoreSDNodeBits.IsTruncating = isTrunc; | |||
2299 | assert(!readMem() && "Store MachineMemOperand is a load!");
2300 | assert(writeMem() && "Store MachineMemOperand is not a store!");
2301 | } | |||
2302 | ||||
2303 | public: | |||
2304 | /// Return true if the op does a truncation before store. | |||
2305 | /// For integers this is the same as doing a TRUNCATE and storing the result. | |||
2306 | /// For floats, it is the same as doing an FP_ROUND and storing the result. | |||
2307 | bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; } | |||
2308 | void setTruncatingStore(bool Truncating) { | |||
2309 | StoreSDNodeBits.IsTruncating = Truncating; | |||
2310 | } | |||
2311 | ||||
2312 | const SDValue &getValue() const { return getOperand(1); } | |||
2313 | const SDValue &getBasePtr() const { return getOperand(2); } | |||
2314 | const SDValue &getOffset() const { return getOperand(3); } | |||
2315 | ||||
2316 | static bool classof(const SDNode *N) { | |||
2317 | return N->getOpcode() == ISD::STORE; | |||
2318 | } | |||
2319 | }; | |||
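
// Editorial usage sketch, not part of the original header: the
// isNormalLoad/isNormalStore queries in namespace ISD below reduce to checks
// like this one; the helper name is hypothetical.
static inline bool isSimpleNonTruncatingStore(const SDNode *N) {
  const auto *St = dyn_cast<StoreSDNode>(N);
  return St && St->isUnindexed() && !St->isTruncatingStore();
}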
2320 | ||||
2321 | /// This base class is used to represent MLOAD and MSTORE nodes | |||
2322 | class MaskedLoadStoreSDNode : public MemSDNode { | |||
2323 | public: | |||
2324 | friend class SelectionDAG; | |||
2325 | ||||
2326 | MaskedLoadStoreSDNode(ISD::NodeType NodeTy, unsigned Order, | |||
2327 | const DebugLoc &dl, SDVTList VTs, | |||
2328 | ISD::MemIndexedMode AM, EVT MemVT, | |||
2329 | MachineMemOperand *MMO) | |||
2330 | : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) { | |||
2331 | LSBaseSDNodeBits.AddressingMode = AM; | |||
2332 | assert(getAddressingMode() == AM && "Value truncated");
2333 | } | |||
2334 | ||||
2335 | // MaskedLoadSDNode (Chain, ptr, offset, mask, passthru) | |||
2336 | // MaskedStoreSDNode (Chain, data, ptr, offset, mask) | |||
2337 | // Mask is a vector of i1 elements | |||
2338 | const SDValue &getOffset() const { | |||
2339 | return getOperand(getOpcode() == ISD::MLOAD ? 2 : 3); | |||
2340 | } | |||
2341 | const SDValue &getMask() const { | |||
2342 | return getOperand(getOpcode() == ISD::MLOAD ? 3 : 4); | |||
2343 | } | |||
2344 | ||||
2345 | /// Return the addressing mode for this load or store: | |||
2346 | /// unindexed, pre-inc, pre-dec, post-inc, or post-dec. | |||
2347 | ISD::MemIndexedMode getAddressingMode() const { | |||
2348 | return static_cast<ISD::MemIndexedMode>(LSBaseSDNodeBits.AddressingMode); | |||
2349 | } | |||
2350 | ||||
2351 | /// Return true if this is a pre/post inc/dec load/store. | |||
2352 | bool isIndexed() const { return getAddressingMode() != ISD::UNINDEXED; } | |||
2353 | ||||
2354 | /// Return true if this is NOT a pre/post inc/dec load/store. | |||
2355 | bool isUnindexed() const { return getAddressingMode() == ISD::UNINDEXED; } | |||
2356 | ||||
2357 | static bool classof(const SDNode *N) { | |||
2358 | return N->getOpcode() == ISD::MLOAD || | |||
2359 | N->getOpcode() == ISD::MSTORE; | |||
2360 | } | |||
2361 | }; | |||
2362 | ||||
2363 | /// This class is used to represent an MLOAD node | |||
2364 | class MaskedLoadSDNode : public MaskedLoadStoreSDNode { | |||
2365 | public: | |||
2366 | friend class SelectionDAG; | |||
2367 | ||||
2368 | MaskedLoadSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs, | |||
2369 | ISD::MemIndexedMode AM, ISD::LoadExtType ETy, | |||
2370 | bool IsExpanding, EVT MemVT, MachineMemOperand *MMO) | |||
2371 | : MaskedLoadStoreSDNode(ISD::MLOAD, Order, dl, VTs, AM, MemVT, MMO) { | |||
2372 | LoadSDNodeBits.ExtTy = ETy; | |||
2373 | LoadSDNodeBits.IsExpanding = IsExpanding; | |||
2374 | } | |||
2375 | ||||
2376 | ISD::LoadExtType getExtensionType() const { | |||
2377 | return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy); | |||
2378 | } | |||
2379 | ||||
2380 | const SDValue &getBasePtr() const { return getOperand(1); } | |||
2381 | const SDValue &getOffset() const { return getOperand(2); } | |||
2382 | const SDValue &getMask() const { return getOperand(3); } | |||
2383 | const SDValue &getPassThru() const { return getOperand(4); } | |||
2384 | ||||
2385 | static bool classof(const SDNode *N) { | |||
2386 | return N->getOpcode() == ISD::MLOAD; | |||
2387 | } | |||
2388 | ||||
2389 | bool isExpandingLoad() const { return LoadSDNodeBits.IsExpanding; } | |||
2390 | }; | |||
2391 | ||||
2392 | /// This class is used to represent an MSTORE node | |||
2393 | class MaskedStoreSDNode : public MaskedLoadStoreSDNode { | |||
2394 | public: | |||
2395 | friend class SelectionDAG; | |||
2396 | ||||
2397 | MaskedStoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs, | |||
2398 | ISD::MemIndexedMode AM, bool isTrunc, bool isCompressing, | |||
2399 | EVT MemVT, MachineMemOperand *MMO) | |||
2400 | : MaskedLoadStoreSDNode(ISD::MSTORE, Order, dl, VTs, AM, MemVT, MMO) { | |||
2401 | StoreSDNodeBits.IsTruncating = isTrunc; | |||
2402 | StoreSDNodeBits.IsCompressing = isCompressing; | |||
2403 | } | |||
2404 | ||||
2405 | /// Return true if the op does a truncation before store. | |||
2406 | /// For integers this is the same as doing a TRUNCATE and storing the result. | |||
2407 | /// For floats, it is the same as doing an FP_ROUND and storing the result. | |||
2408 | bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; } | |||
2409 | ||||
2410 | /// Returns true if the op does a compression to the vector before storing. | |||
2411 | /// The node contiguously stores the active elements (integers or floats) | |||
2412 | /// in src (those with their respective bit set in writemask k) to unaligned | |||
2413 | /// memory at base_addr. | |||
2414 | bool isCompressingStore() const { return StoreSDNodeBits.IsCompressing; } | |||
2415 | ||||
2416 | const SDValue &getValue() const { return getOperand(1); } | |||
2417 | const SDValue &getBasePtr() const { return getOperand(2); } | |||
2418 | const SDValue &getOffset() const { return getOperand(3); } | |||
2419 | const SDValue &getMask() const { return getOperand(4); } | |||
2420 | ||||
2421 | static bool classof(const SDNode *N) { | |||
2422 | return N->getOpcode() == ISD::MSTORE; | |||
2423 | } | |||
2424 | }; | |||
2425 | ||||
2426 | /// This is a base class used to represent | |||
2427 | /// MGATHER and MSCATTER nodes | |||
2428 | /// | |||
2429 | class MaskedGatherScatterSDNode : public MemSDNode { | |||
2430 | public: | |||
2431 | friend class SelectionDAG; | |||
2432 | ||||
2433 | MaskedGatherScatterSDNode(ISD::NodeType NodeTy, unsigned Order, | |||
2434 | const DebugLoc &dl, SDVTList VTs, EVT MemVT, | |||
2435 | MachineMemOperand *MMO, ISD::MemIndexType IndexType) | |||
2436 | : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) { | |||
2437 | LSBaseSDNodeBits.AddressingMode = IndexType; | |||
2438 | assert(getIndexType() == IndexType && "Value truncated");
2439 | } | |||
2440 | ||||
2441 | /// How is Index applied to BasePtr when computing addresses. | |||
2442 | ISD::MemIndexType getIndexType() const { | |||
2443 | return static_cast<ISD::MemIndexType>(LSBaseSDNodeBits.AddressingMode); | |||
2444 | } | |||
2445 | void setIndexType(ISD::MemIndexType IndexType) { | |||
2446 | LSBaseSDNodeBits.AddressingMode = IndexType; | |||
2447 | } | |||
2448 | bool isIndexScaled() const { | |||
2449 | return (getIndexType() == ISD::SIGNED_SCALED) || | |||
2450 | (getIndexType() == ISD::UNSIGNED_SCALED); | |||
2451 | } | |||
2452 | bool isIndexSigned() const { | |||
2453 | return (getIndexType() == ISD::SIGNED_SCALED) || | |||
2454 | (getIndexType() == ISD::SIGNED_UNSCALED); | |||
2455 | } | |||
2456 | ||||
2457 | // In both nodes the mask is operand 2 and the base address is operand 3:
2458 | // MaskedGatherSDNode (Chain, passthru, mask, base, index, scale) | |||
2459 | // MaskedScatterSDNode (Chain, value, mask, base, index, scale) | |||
2460 | // Mask is a vector of i1 elements | |||
2461 | const SDValue &getBasePtr() const { return getOperand(3); } | |||
2462 | const SDValue &getIndex() const { return getOperand(4); } | |||
2463 | const SDValue &getMask() const { return getOperand(2); } | |||
2464 | const SDValue &getScale() const { return getOperand(5); } | |||
2465 | ||||
2466 | static bool classof(const SDNode *N) { | |||
2467 | return N->getOpcode() == ISD::MGATHER || | |||
2468 | N->getOpcode() == ISD::MSCATTER; | |||
2469 | } | |||
2470 | }; | |||
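
// Editorial usage sketch, not part of the original header: because gather
// and scatter share the mask/base/index/scale operand slots, index
// properties can be queried without dispatching on the opcode. The helper
// name is hypothetical.
static inline bool hasScaledSignedIndex(const MaskedGatherScatterSDNode *N) {
  return N->isIndexScaled() && N->isIndexSigned();
}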
2471 | ||||
2472 | /// This class is used to represent an MGATHER node | |||
2473 | /// | |||
2474 | class MaskedGatherSDNode : public MaskedGatherScatterSDNode { | |||
2475 | public: | |||
2476 | friend class SelectionDAG; | |||
2477 | ||||
2478 | MaskedGatherSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs, | |||
2479 | EVT MemVT, MachineMemOperand *MMO, | |||
2480 | ISD::MemIndexType IndexType, ISD::LoadExtType ETy) | |||
2481 | : MaskedGatherScatterSDNode(ISD::MGATHER, Order, dl, VTs, MemVT, MMO, | |||
2482 | IndexType) { | |||
2483 | LoadSDNodeBits.ExtTy = ETy; | |||
2484 | } | |||
2485 | ||||
2486 | const SDValue &getPassThru() const { return getOperand(1); } | |||
2487 | ||||
2488 | ISD::LoadExtType getExtensionType() const { | |||
2489 | return ISD::LoadExtType(LoadSDNodeBits.ExtTy); | |||
2490 | } | |||
2491 | ||||
2492 | static bool classof(const SDNode *N) { | |||
2493 | return N->getOpcode() == ISD::MGATHER; | |||
2494 | } | |||
2495 | }; | |||
2496 | ||||
2497 | /// This class is used to represent an MSCATTER node | |||
2498 | /// | |||
2499 | class MaskedScatterSDNode : public MaskedGatherScatterSDNode { | |||
2500 | public: | |||
2501 | friend class SelectionDAG; | |||
2502 | ||||
2503 | MaskedScatterSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs, | |||
2504 | EVT MemVT, MachineMemOperand *MMO, | |||
2505 | ISD::MemIndexType IndexType, bool IsTrunc) | |||
2506 | : MaskedGatherScatterSDNode(ISD::MSCATTER, Order, dl, VTs, MemVT, MMO, | |||
2507 | IndexType) { | |||
2508 | StoreSDNodeBits.IsTruncating = IsTrunc; | |||
2509 | } | |||
2510 | ||||
2511 | /// Return true if the op does a truncation before store. | |||
2512 | /// For integers this is the same as doing a TRUNCATE and storing the result. | |||
2513 | /// For floats, it is the same as doing an FP_ROUND and storing the result. | |||
2514 | bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; } | |||
2515 | ||||
2516 | const SDValue &getValue() const { return getOperand(1); } | |||
2517 | ||||
2518 | static bool classof(const SDNode *N) { | |||
2519 | return N->getOpcode() == ISD::MSCATTER; | |||
2520 | } | |||
2521 | }; | |||
2522 | ||||
2523 | /// An SDNode that represents everything that will be needed | |||
2524 | /// to construct a MachineInstr. These nodes are created during the | |||
2525 | /// instruction selection proper phase. | |||
2526 | /// | |||
2527 | /// Note that the only supported way to set the `memoperands` is by calling the | |||
2528 | /// `SelectionDAG::setNodeMemRefs` function as the memory management happens | |||
2529 | /// inside the DAG rather than in the node. | |||
2530 | class MachineSDNode : public SDNode { | |||
2531 | private: | |||
2532 | friend class SelectionDAG; | |||
2533 | ||||
2534 | MachineSDNode(unsigned Opc, unsigned Order, const DebugLoc &DL, SDVTList VTs) | |||
2535 | : SDNode(Opc, Order, DL, VTs) {} | |||
2536 | ||||
2537 | // We use a pointer union between a single `MachineMemOperand` pointer and | |||
2538 | // a pointer to an array of `MachineMemOperand` pointers. This is null when | |||
2539 | // the number of these is zero, the single pointer variant used when the | |||
2540 | // number is one, and the array is used for larger numbers. | |||
2541 | // | |||
2542 | // The array is allocated via the `SelectionDAG`'s allocator and so will | |||
2543 | // always live until the DAG is cleaned up and doesn't require ownership here. | |||
2544 | // | |||
2545 | // We can't use something simpler like `TinyPtrVector` here because `SDNode` | |||
2546 | // subclasses aren't managed in a conforming C++ manner. See the comments on | |||
2547 | // `SelectionDAG::MorphNodeTo` which details what all goes on, but the | |||
2548 | // constraint here is that these don't manage memory with their constructor or | |||
2549 | // destructor and can be initialized to a good state even if they start off | |||
2550 | // uninitialized. | |||
2551 | PointerUnion<MachineMemOperand *, MachineMemOperand **> MemRefs = {}; | |||
2552 | ||||
2553 | // Note that this could be folded into the above `MemRefs` member if doing so | |||
2554 | // is advantageous at some point. We don't need to store this in most cases. | |||
2555 | // However, at the moment this doesn't appear to make the allocation any | |||
2556 | // smaller and makes the code somewhat simpler to read. | |||
2557 | int NumMemRefs = 0; | |||
2558 | ||||
2559 | public: | |||
2560 | using mmo_iterator = ArrayRef<MachineMemOperand *>::const_iterator; | |||
2561 | ||||
2562 | ArrayRef<MachineMemOperand *> memoperands() const { | |||
2563 | // Special case the common cases. | |||
2564 | if (NumMemRefs == 0) | |||
2565 | return {}; | |||
2566 | if (NumMemRefs == 1) | |||
2567 | return makeArrayRef(MemRefs.getAddrOfPtr1(), 1); | |||
2568 | ||||
2569 | // Otherwise we have an actual array. | |||
2570 | return makeArrayRef(MemRefs.get<MachineMemOperand **>(), NumMemRefs); | |||
2571 | } | |||
2572 | mmo_iterator memoperands_begin() const { return memoperands().begin(); } | |||
2573 | mmo_iterator memoperands_end() const { return memoperands().end(); } | |||
2574 | bool memoperands_empty() const { return memoperands().empty(); } | |||
2575 | ||||
2576 | /// Clear out the memory reference descriptor list. | |||
2577 | void clearMemRefs() { | |||
2578 | MemRefs = nullptr; | |||
2579 | NumMemRefs = 0; | |||
2580 | } | |||
2581 | ||||
2582 | static bool classof(const SDNode *N) { | |||
2583 | return N->isMachineOpcode(); | |||
2584 | } | |||
2585 | }; | |||
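
// Editorial usage sketch, not part of the original header: memoperands()
// hides the PointerUnion representation behind an ArrayRef, so clients can
// iterate uniformly. The helper name is hypothetical.
static inline bool machineNodeTouchesMemory(const MachineSDNode *MN) {
  for (const MachineMemOperand *MMO : MN->memoperands())
    if (MMO->isLoad() || MMO->isStore())
      return true;
  return false;
}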
2586 | ||||
2587 | /// An SDNode that records if a register contains a value that is guaranteed to | |||
2588 | /// be aligned accordingly. | |||
2589 | class AssertAlignSDNode : public SDNode { | |||
2590 | Align Alignment; | |||
2591 | ||||
2592 | public: | |||
2593 | AssertAlignSDNode(unsigned Order, const DebugLoc &DL, EVT VT, Align A) | |||
2594 | : SDNode(ISD::AssertAlign, Order, DL, getSDVTList(VT)), Alignment(A) {} | |||
2595 | ||||
2596 | Align getAlign() const { return Alignment; } | |||
2597 | ||||
2598 | static bool classof(const SDNode *N) { | |||
2599 | return N->getOpcode() == ISD::AssertAlign; | |||
2600 | } | |||
2601 | }; | |||
2602 | ||||
2603 | class SDNodeIterator { | |||
2604 | const SDNode *Node; | |||
2605 | unsigned Operand; | |||
2606 | ||||
2607 | SDNodeIterator(const SDNode *N, unsigned Op) : Node(N), Operand(Op) {} | |||
2608 | ||||
2609 | public: | |||
2610 | using iterator_category = std::forward_iterator_tag; | |||
2611 | using value_type = SDNode; | |||
2612 | using difference_type = std::ptrdiff_t; | |||
2613 | using pointer = value_type *; | |||
2614 | using reference = value_type &; | |||
2615 | ||||
2616 | bool operator==(const SDNodeIterator& x) const { | |||
2617 | return Operand == x.Operand; | |||
2618 | } | |||
2619 | bool operator!=(const SDNodeIterator& x) const { return !operator==(x); } | |||
2620 | ||||
2621 | pointer operator*() const { | |||
2622 | return Node->getOperand(Operand).getNode(); | |||
2623 | } | |||
2624 | pointer operator->() const { return operator*(); } | |||
2625 | ||||
2626 | SDNodeIterator& operator++() { // Preincrement | |||
2627 | ++Operand; | |||
2628 | return *this; | |||
2629 | } | |||
2630 | SDNodeIterator operator++(int) { // Postincrement | |||
2631 | SDNodeIterator tmp = *this; ++*this; return tmp; | |||
2632 | } | |||
2633 | size_t operator-(SDNodeIterator Other) const { | |||
2634 | assert(Node == Other.Node &&
2635 |        "Cannot compare iterators of two different nodes!");
2636 | return Operand - Other.Operand; | |||
2637 | } | |||
2638 | ||||
2639 | static SDNodeIterator begin(const SDNode *N) { return SDNodeIterator(N, 0); } | |||
2640 | static SDNodeIterator end (const SDNode *N) { | |||
2641 | return SDNodeIterator(N, N->getNumOperands()); | |||
2642 | } | |||
2643 | ||||
2644 | unsigned getOperand() const { return Operand; } | |||
2645 | const SDNode *getNode() const { return Node; } | |||
2646 | }; | |||
2647 | ||||
2648 | template <> struct GraphTraits<SDNode*> { | |||
2649 | using NodeRef = SDNode *; | |||
2650 | using ChildIteratorType = SDNodeIterator; | |||
2651 | ||||
2652 | static NodeRef getEntryNode(SDNode *N) { return N; } | |||
2653 | ||||
2654 | static ChildIteratorType child_begin(NodeRef N) { | |||
2655 | return SDNodeIterator::begin(N); | |||
2656 | } | |||
2657 | ||||
2658 | static ChildIteratorType child_end(NodeRef N) { | |||
2659 | return SDNodeIterator::end(N); | |||
2660 | } | |||
2661 | }; | |||
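
// Editorial usage sketch, not part of the original header: SDNodeIterator
// walks a node's operand edges, which is what the GraphTraits specialization
// above exposes to generic graph algorithms (e.g. llvm::depth_first). The
// helper is hypothetical and equivalent to N->getNumOperands().
static inline unsigned countOperandEdges(const SDNode *N) {
  unsigned Edges = 0;
  for (SDNodeIterator I = SDNodeIterator::begin(N),
                      E = SDNodeIterator::end(N);
       I != E; ++I)
    ++Edges;
  return Edges;
}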
2662 | ||||
2663 | /// A representation of the largest SDNode, for use in sizeof(). | |||
2664 | /// | |||
2665 | /// This needs to be a union because the largest node differs on 32-bit systems
2666 | /// with 4- and 8-byte pointer alignment, respectively.
2667 | using LargestSDNode = AlignedCharArrayUnion<AtomicSDNode, TargetIndexSDNode, | |||
2668 | BlockAddressSDNode, | |||
2669 | GlobalAddressSDNode, | |||
2670 | PseudoProbeSDNode>; | |||
2671 | ||||
2672 | /// The SDNode class with the greatest alignment requirement. | |||
2673 | using MostAlignedSDNode = GlobalAddressSDNode; | |||
2674 | ||||
2675 | namespace ISD { | |||
2676 | ||||
2677 | /// Returns true if the specified node is a non-extending and unindexed load. | |||
2678 | inline bool isNormalLoad(const SDNode *N) { | |||
2679 | const LoadSDNode *Ld = dyn_cast<LoadSDNode>(N); | |||
2680 | return Ld && Ld->getExtensionType() == ISD::NON_EXTLOAD && | |||
2681 | Ld->getAddressingMode() == ISD::UNINDEXED; | |||
2682 | } | |||
2683 | ||||
2684 | /// Returns true if the specified node is a non-extending load. | |||
2685 | inline bool isNON_EXTLoad(const SDNode *N) { | |||
2686 | return isa<LoadSDNode>(N) && | |||
2687 | cast<LoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD; | |||
2688 | } | |||
2689 | ||||
2690 | /// Returns true if the specified node is an EXTLOAD.
2691 | inline bool isEXTLoad(const SDNode *N) { | |||
2692 | return isa<LoadSDNode>(N) && | |||
2693 | cast<LoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD; | |||
2694 | } | |||
2695 | ||||
2696 | /// Returns true if the specified node is a SEXTLOAD. | |||
2697 | inline bool isSEXTLoad(const SDNode *N) { | |||
2698 | return isa<LoadSDNode>(N) && | |||
2699 | cast<LoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD; | |||
2700 | } | |||
2701 | ||||
2702 | /// Returns true if the specified node is a ZEXTLOAD. | |||
2703 | inline bool isZEXTLoad(const SDNode *N) { | |||
2704 | return isa<LoadSDNode>(N) && | |||
2705 | cast<LoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD; | |||
2706 | } | |||
2707 | ||||
2708 | /// Returns true if the specified node is an unindexed load. | |||
2709 | inline bool isUNINDEXEDLoad(const SDNode *N) { | |||
2710 | return isa<LoadSDNode>(N) && | |||
2711 | cast<LoadSDNode>(N)->getAddressingMode() == ISD::UNINDEXED; | |||
2712 | } | |||
2713 | ||||
2714 | /// Returns true if the specified node is a non-truncating | |||
2715 | /// and unindexed store. | |||
2716 | inline bool isNormalStore(const SDNode *N) { | |||
2717 | const StoreSDNode *St = dyn_cast<StoreSDNode>(N); | |||
2718 | return St && !St->isTruncatingStore() && | |||
2719 | St->getAddressingMode() == ISD::UNINDEXED; | |||
2720 | } | |||
2721 | ||||
2722 | /// Returns true if the specified node is an unindexed store. | |||
2723 | inline bool isUNINDEXEDStore(const SDNode *N) { | |||
2724 | return isa<StoreSDNode>(N) && | |||
2725 | cast<StoreSDNode>(N)->getAddressingMode() == ISD::UNINDEXED; | |||
2726 | } | |||
2727 | ||||
2728 | /// Attempt to match a unary predicate against a scalar/splat constant or | |||
2729 | /// every element of a constant BUILD_VECTOR. | |||
2730 | /// If AllowUndefs is true, then UNDEF elements will pass nullptr to Match.
2731 | bool matchUnaryPredicate(SDValue Op, | |||
2732 | std::function<bool(ConstantSDNode *)> Match, | |||
2733 | bool AllowUndefs = false); | |||
2734 | ||||
2735 | /// Attempt to match a binary predicate against a pair of scalar/splat | |||
2736 | /// constants or every element of a pair of constant BUILD_VECTORs. | |||
2737 | /// If AllowUndefs is true, then UNDEF elements will pass nullptr to Match.
2738 | /// If AllowTypeMismatch is true then RetType + ArgTypes don't need to match. | |||
2739 | bool matchBinaryPredicate( | |||
2740 | SDValue LHS, SDValue RHS, | |||
2741 | std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match, | |||
2742 | bool AllowUndefs = false, bool AllowTypeMismatch = false); | |||
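
// Editorial usage sketch, not part of the original header: matchUnaryPredicate
// lets a combine treat a scalar constant and a constant BUILD_VECTOR
// uniformly; the helper name is hypothetical.
inline bool allElementsNonZero(SDValue Op) {
  return matchUnaryPredicate(
      Op, [](ConstantSDNode *C) { return C && !C->isNullValue(); });
}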
2743 | ||||
2744 | /// Returns true if the specified value is the overflow result from one | |||
2745 | /// of the overflow intrinsic nodes. | |||
2746 | inline bool isOverflowIntrOpRes(SDValue Op) { | |||
2747 | unsigned Opc = Op.getOpcode(); | |||
2748 | return (Op.getResNo() == 1 && | |||
2749 | (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO || | |||
2750 | Opc == ISD::USUBO || Opc == ISD::SMULO || Opc == ISD::UMULO)); | |||
2751 | } | |||
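
// Editorial usage sketch, not part of the original header: the overflow flag
// is result number 1 of the [SU]ADDO/[SU]SUBO/[SU]MULO nodes, so a user of
// the unsigned-add overflow bit can be recognized like this; the helper name
// is hypothetical.
inline bool isUAddOverflowFlag(SDValue Op) {
  return isOverflowIntrOpRes(Op) && Op.getOpcode() == ISD::UADDO;
}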
2752 | ||||
2753 | } // end namespace ISD | |||
2754 | ||||
2755 | } // end namespace llvm | |||
2756 | ||||
2757 | #endif // LLVM_CODEGEN_SELECTIONDAGNODES_H |