File: llvm/include/llvm/CodeGen/SelectionDAGNodes.h
Warning: line 1110, column 10: Called C++ object pointer is null
1 | //===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation --------===// | |||
2 | // | |||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
4 | // See https://llvm.org/LICENSE.txt for license information. | |||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
6 | // | |||
7 | //===----------------------------------------------------------------------===// | |||
8 | // | |||
9 | // This file defines the interfaces that RISCV uses to lower LLVM code into a | |||
10 | // selection DAG. | |||
11 | // | |||
12 | //===----------------------------------------------------------------------===// | |||
13 | ||||
14 | #include "RISCVISelLowering.h" | |||
15 | #include "MCTargetDesc/RISCVMatInt.h" | |||
16 | #include "RISCV.h" | |||
17 | #include "RISCVMachineFunctionInfo.h" | |||
18 | #include "RISCVRegisterInfo.h" | |||
19 | #include "RISCVSubtarget.h" | |||
20 | #include "RISCVTargetMachine.h" | |||
21 | #include "llvm/ADT/SmallSet.h" | |||
22 | #include "llvm/ADT/Statistic.h" | |||
23 | #include "llvm/CodeGen/CallingConvLower.h" | |||
24 | #include "llvm/CodeGen/MachineFrameInfo.h" | |||
25 | #include "llvm/CodeGen/MachineFunction.h" | |||
26 | #include "llvm/CodeGen/MachineInstrBuilder.h" | |||
27 | #include "llvm/CodeGen/MachineRegisterInfo.h" | |||
28 | #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" | |||
29 | #include "llvm/CodeGen/ValueTypes.h" | |||
30 | #include "llvm/IR/DiagnosticInfo.h" | |||
31 | #include "llvm/IR/DiagnosticPrinter.h" | |||
32 | #include "llvm/IR/IntrinsicsRISCV.h" | |||
33 | #include "llvm/Support/Debug.h" | |||
34 | #include "llvm/Support/ErrorHandling.h" | |||
35 | #include "llvm/Support/KnownBits.h" | |||
36 | #include "llvm/Support/MathExtras.h" | |||
37 | #include "llvm/Support/raw_ostream.h" | |||
38 | ||||
39 | using namespace llvm; | |||
40 | ||||
41 | #define DEBUG_TYPE "riscv-lower" | |||
42 | ||||
43 | STATISTIC(NumTailCalls, "Number of tail calls"); | |||
44 | ||||
45 | RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM, | |||
46 | const RISCVSubtarget &STI) | |||
47 | : TargetLowering(TM), Subtarget(STI) { | |||
48 | ||||
49 | if (Subtarget.isRV32E()) | |||
50 | report_fatal_error("Codegen not yet implemented for RV32E"); | |||
51 | ||||
52 | RISCVABI::ABI ABI = Subtarget.getTargetABI(); | |||
53 | assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI"); | |||
54 | ||||
55 | if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) && | |||
56 | !Subtarget.hasStdExtF()) { | |||
57 | errs() << "Hard-float 'f' ABI can't be used for a target that " | |||
58 | "doesn't support the F instruction set extension (ignoring " | |||
59 | "target-abi)\n"; | |||
60 | ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32; | |||
61 | } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) && | |||
62 | !Subtarget.hasStdExtD()) { | |||
63 | errs() << "Hard-float 'd' ABI can't be used for a target that " | |||
64 | "doesn't support the D instruction set extension (ignoring " | |||
65 | "target-abi)\n"; | |||
66 | ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32; | |||
67 | } | |||
68 | ||||
69 | switch (ABI) { | |||
70 | default: | |||
71 | report_fatal_error("Don't know how to lower this ABI"); | |||
72 | case RISCVABI::ABI_ILP32: | |||
73 | case RISCVABI::ABI_ILP32F: | |||
74 | case RISCVABI::ABI_ILP32D: | |||
75 | case RISCVABI::ABI_LP64: | |||
76 | case RISCVABI::ABI_LP64F: | |||
77 | case RISCVABI::ABI_LP64D: | |||
78 | break; | |||
79 | } | |||
80 | ||||
81 | MVT XLenVT = Subtarget.getXLenVT(); | |||
82 | ||||
83 | // Set up the register classes. | |||
84 | addRegisterClass(XLenVT, &RISCV::GPRRegClass); | |||
85 | ||||
86 | if (Subtarget.hasStdExtZfh()) | |||
87 | addRegisterClass(MVT::f16, &RISCV::FPR16RegClass); | |||
88 | if (Subtarget.hasStdExtF()) | |||
89 | addRegisterClass(MVT::f32, &RISCV::FPR32RegClass); | |||
90 | if (Subtarget.hasStdExtD()) | |||
91 | addRegisterClass(MVT::f64, &RISCV::FPR64RegClass); | |||
92 | ||||
93 | static const MVT::SimpleValueType BoolVecVTs[] = { | |||
94 | MVT::nxv1i1, MVT::nxv2i1, MVT::nxv4i1, MVT::nxv8i1, | |||
95 | MVT::nxv16i1, MVT::nxv32i1, MVT::nxv64i1}; | |||
96 | static const MVT::SimpleValueType IntVecVTs[] = { | |||
97 | MVT::nxv1i8, MVT::nxv2i8, MVT::nxv4i8, MVT::nxv8i8, MVT::nxv16i8, | |||
98 | MVT::nxv32i8, MVT::nxv64i8, MVT::nxv1i16, MVT::nxv2i16, MVT::nxv4i16, | |||
99 | MVT::nxv8i16, MVT::nxv16i16, MVT::nxv32i16, MVT::nxv1i32, MVT::nxv2i32, | |||
100 | MVT::nxv4i32, MVT::nxv8i32, MVT::nxv16i32, MVT::nxv1i64, MVT::nxv2i64, | |||
101 | MVT::nxv4i64, MVT::nxv8i64}; | |||
102 | static const MVT::SimpleValueType F16VecVTs[] = { | |||
103 | MVT::nxv1f16, MVT::nxv2f16, MVT::nxv4f16, | |||
104 | MVT::nxv8f16, MVT::nxv16f16, MVT::nxv32f16}; | |||
105 | static const MVT::SimpleValueType F32VecVTs[] = { | |||
106 | MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32}; | |||
107 | static const MVT::SimpleValueType F64VecVTs[] = { | |||
108 | MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64}; | |||
109 | ||||
110 | if (Subtarget.hasStdExtV()) { | |||
111 | auto addRegClassForRVV = [this](MVT VT) { | |||
112 | unsigned Size = VT.getSizeInBits().getKnownMinValue(); | |||
113 | assert(Size <= 512 && isPowerOf2_32(Size)); | |||
114 | const TargetRegisterClass *RC; | |||
115 | if (Size <= 64) | |||
116 | RC = &RISCV::VRRegClass; | |||
117 | else if (Size == 128) | |||
118 | RC = &RISCV::VRM2RegClass; | |||
119 | else if (Size == 256) | |||
120 | RC = &RISCV::VRM4RegClass; | |||
121 | else | |||
122 | RC = &RISCV::VRM8RegClass; | |||
123 | ||||
124 | addRegisterClass(VT, RC); | |||
125 | }; | |||
126 | ||||
127 | for (MVT VT : BoolVecVTs) | |||
128 | addRegClassForRVV(VT); | |||
129 | for (MVT VT : IntVecVTs) | |||
130 | addRegClassForRVV(VT); | |||
131 | ||||
132 | if (Subtarget.hasStdExtZfh()) | |||
133 | for (MVT VT : F16VecVTs) | |||
134 | addRegClassForRVV(VT); | |||
135 | ||||
136 | if (Subtarget.hasStdExtF()) | |||
137 | for (MVT VT : F32VecVTs) | |||
138 | addRegClassForRVV(VT); | |||
139 | ||||
140 | if (Subtarget.hasStdExtD()) | |||
141 | for (MVT VT : F64VecVTs) | |||
142 | addRegClassForRVV(VT); | |||
143 | ||||
144 | if (Subtarget.useRVVForFixedLengthVectors()) { | |||
145 | auto addRegClassForFixedVectors = [this](MVT VT) { | |||
146 | unsigned LMul = Subtarget.getLMULForFixedLengthVector(VT); | |||
147 | const TargetRegisterClass *RC; | |||
148 | if (LMul == 1) | |||
149 | RC = &RISCV::VRRegClass; | |||
150 | else if (LMul == 2) | |||
151 | RC = &RISCV::VRM2RegClass; | |||
152 | else if (LMul == 4) | |||
153 | RC = &RISCV::VRM4RegClass; | |||
154 | else if (LMul == 8) | |||
155 | RC = &RISCV::VRM8RegClass; | |||
156 | else | |||
157 | llvm_unreachable("Unexpected LMul!"); | |||
158 | ||||
159 | addRegisterClass(VT, RC); | |||
160 | }; | |||
161 | for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) | |||
162 | if (useRVVForFixedLengthVectorVT(VT)) | |||
163 | addRegClassForFixedVectors(VT); | |||
164 | ||||
165 | for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) | |||
166 | if (useRVVForFixedLengthVectorVT(VT)) | |||
167 | addRegClassForFixedVectors(VT); | |||
168 | } | |||
169 | } | |||
170 | ||||
171 | // Compute derived properties from the register classes. | |||
172 | computeRegisterProperties(STI.getRegisterInfo()); | |||
173 | ||||
174 | setStackPointerRegisterToSaveRestore(RISCV::X2); | |||
175 | ||||
176 | for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}) | |||
177 | setLoadExtAction(N, XLenVT, MVT::i1, Promote); | |||
178 | ||||
179 | // TODO: add all necessary setOperationAction calls. | |||
180 | setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand); | |||
181 | ||||
182 | setOperationAction(ISD::BR_JT, MVT::Other, Expand); | |||
183 | setOperationAction(ISD::BR_CC, XLenVT, Expand); | |||
184 | setOperationAction(ISD::SELECT_CC, XLenVT, Expand); | |||
185 | ||||
186 | setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); | |||
187 | setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); | |||
188 | ||||
189 | setOperationAction(ISD::VASTART, MVT::Other, Custom); | |||
190 | setOperationAction(ISD::VAARG, MVT::Other, Expand); | |||
191 | setOperationAction(ISD::VACOPY, MVT::Other, Expand); | |||
192 | setOperationAction(ISD::VAEND, MVT::Other, Expand); | |||
193 | ||||
194 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); | |||
195 | if (!Subtarget.hasStdExtZbb()) { | |||
196 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand); | |||
197 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand); | |||
198 | } | |||
199 | ||||
200 | if (Subtarget.is64Bit()) { | |||
201 | setOperationAction(ISD::ADD, MVT::i32, Custom); | |||
202 | setOperationAction(ISD::SUB, MVT::i32, Custom); | |||
203 | setOperationAction(ISD::SHL, MVT::i32, Custom); | |||
204 | setOperationAction(ISD::SRA, MVT::i32, Custom); | |||
205 | setOperationAction(ISD::SRL, MVT::i32, Custom); | |||
206 | } | |||
207 | ||||
208 | if (!Subtarget.hasStdExtM()) { | |||
209 | setOperationAction(ISD::MUL, XLenVT, Expand); | |||
210 | setOperationAction(ISD::MULHS, XLenVT, Expand); | |||
211 | setOperationAction(ISD::MULHU, XLenVT, Expand); | |||
212 | setOperationAction(ISD::SDIV, XLenVT, Expand); | |||
213 | setOperationAction(ISD::UDIV, XLenVT, Expand); | |||
214 | setOperationAction(ISD::SREM, XLenVT, Expand); | |||
215 | setOperationAction(ISD::UREM, XLenVT, Expand); | |||
216 | } | |||
217 | ||||
218 | if (Subtarget.is64Bit() && Subtarget.hasStdExtM()) { | |||
219 | setOperationAction(ISD::MUL, MVT::i32, Custom); | |||
220 | ||||
221 | setOperationAction(ISD::SDIV, MVT::i8, Custom); | |||
222 | setOperationAction(ISD::UDIV, MVT::i8, Custom); | |||
223 | setOperationAction(ISD::UREM, MVT::i8, Custom); | |||
224 | setOperationAction(ISD::SDIV, MVT::i16, Custom); | |||
225 | setOperationAction(ISD::UDIV, MVT::i16, Custom); | |||
226 | setOperationAction(ISD::UREM, MVT::i16, Custom); | |||
227 | setOperationAction(ISD::SDIV, MVT::i32, Custom); | |||
228 | setOperationAction(ISD::UDIV, MVT::i32, Custom); | |||
229 | setOperationAction(ISD::UREM, MVT::i32, Custom); | |||
230 | } | |||
231 | ||||
232 | setOperationAction(ISD::SDIVREM, XLenVT, Expand); | |||
233 | setOperationAction(ISD::UDIVREM, XLenVT, Expand); | |||
234 | setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand); | |||
235 | setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand); | |||
236 | ||||
237 | setOperationAction(ISD::SHL_PARTS, XLenVT, Custom); | |||
238 | setOperationAction(ISD::SRL_PARTS, XLenVT, Custom); | |||
239 | setOperationAction(ISD::SRA_PARTS, XLenVT, Custom); | |||
240 | ||||
241 | if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) { | |||
242 | if (Subtarget.is64Bit()) { | |||
243 | setOperationAction(ISD::ROTL, MVT::i32, Custom); | |||
244 | setOperationAction(ISD::ROTR, MVT::i32, Custom); | |||
245 | } | |||
246 | } else { | |||
247 | setOperationAction(ISD::ROTL, XLenVT, Expand); | |||
248 | setOperationAction(ISD::ROTR, XLenVT, Expand); | |||
249 | } | |||
250 | ||||
251 | if (Subtarget.hasStdExtZbp()) { | |||
252 | // Custom lower bswap/bitreverse so we can convert them to GREVI to enable | |||
253 | // more combining. | |||
254 | setOperationAction(ISD::BITREVERSE, XLenVT, Custom); | |||
255 | setOperationAction(ISD::BSWAP, XLenVT, Custom); | |||
256 | ||||
257 | if (Subtarget.is64Bit()) { | |||
258 | setOperationAction(ISD::BITREVERSE, MVT::i32, Custom); | |||
259 | setOperationAction(ISD::BSWAP, MVT::i32, Custom); | |||
260 | } | |||
261 | } else { | |||
262 | // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll | |||
263 | // pattern match it directly in isel. | |||
264 | setOperationAction(ISD::BSWAP, XLenVT, | |||
265 | Subtarget.hasStdExtZbb() ? Legal : Expand); | |||
266 | } | |||
267 | ||||
268 | if (Subtarget.hasStdExtZbb()) { | |||
269 | setOperationAction(ISD::SMIN, XLenVT, Legal); | |||
270 | setOperationAction(ISD::SMAX, XLenVT, Legal); | |||
271 | setOperationAction(ISD::UMIN, XLenVT, Legal); | |||
272 | setOperationAction(ISD::UMAX, XLenVT, Legal); | |||
273 | } else { | |||
274 | setOperationAction(ISD::CTTZ, XLenVT, Expand); | |||
275 | setOperationAction(ISD::CTLZ, XLenVT, Expand); | |||
276 | setOperationAction(ISD::CTPOP, XLenVT, Expand); | |||
277 | } | |||
278 | ||||
279 | if (Subtarget.hasStdExtZbt()) { | |||
280 | setOperationAction(ISD::FSHL, XLenVT, Custom); | |||
281 | setOperationAction(ISD::FSHR, XLenVT, Custom); | |||
282 | setOperationAction(ISD::SELECT, XLenVT, Legal); | |||
283 | ||||
284 | if (Subtarget.is64Bit()) { | |||
285 | setOperationAction(ISD::FSHL, MVT::i32, Custom); | |||
286 | setOperationAction(ISD::FSHR, MVT::i32, Custom); | |||
287 | } | |||
288 | } else { | |||
289 | setOperationAction(ISD::SELECT, XLenVT, Custom); | |||
290 | } | |||
291 | ||||
292 | ISD::CondCode FPCCToExpand[] = { | |||
293 | ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT, | |||
294 | ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT, | |||
295 | ISD::SETGE, ISD::SETNE, ISD::SETO, ISD::SETUO}; | |||
296 | ||||
297 | ISD::NodeType FPOpToExpand[] = { | |||
298 | ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FP16_TO_FP, | |||
299 | ISD::FP_TO_FP16}; | |||
300 | ||||
301 | if (Subtarget.hasStdExtZfh()) | |||
302 | setOperationAction(ISD::BITCAST, MVT::i16, Custom); | |||
303 | ||||
304 | if (Subtarget.hasStdExtZfh()) { | |||
305 | setOperationAction(ISD::FMINNUM, MVT::f16, Legal); | |||
306 | setOperationAction(ISD::FMAXNUM, MVT::f16, Legal); | |||
307 | for (auto CC : FPCCToExpand) | |||
308 | setCondCodeAction(CC, MVT::f16, Expand); | |||
309 | setOperationAction(ISD::SELECT_CC, MVT::f16, Expand); | |||
310 | setOperationAction(ISD::SELECT, MVT::f16, Custom); | |||
311 | setOperationAction(ISD::BR_CC, MVT::f16, Expand); | |||
312 | for (auto Op : FPOpToExpand) | |||
313 | setOperationAction(Op, MVT::f16, Expand); | |||
314 | } | |||
315 | ||||
316 | if (Subtarget.hasStdExtF()) { | |||
317 | setOperationAction(ISD::FMINNUM, MVT::f32, Legal); | |||
318 | setOperationAction(ISD::FMAXNUM, MVT::f32, Legal); | |||
319 | for (auto CC : FPCCToExpand) | |||
320 | setCondCodeAction(CC, MVT::f32, Expand); | |||
321 | setOperationAction(ISD::SELECT_CC, MVT::f32, Expand); | |||
322 | setOperationAction(ISD::SELECT, MVT::f32, Custom); | |||
323 | setOperationAction(ISD::BR_CC, MVT::f32, Expand); | |||
324 | for (auto Op : FPOpToExpand) | |||
325 | setOperationAction(Op, MVT::f32, Expand); | |||
326 | setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand); | |||
327 | setTruncStoreAction(MVT::f32, MVT::f16, Expand); | |||
328 | } | |||
329 | ||||
330 | if (Subtarget.hasStdExtF() && Subtarget.is64Bit()) | |||
331 | setOperationAction(ISD::BITCAST, MVT::i32, Custom); | |||
332 | ||||
333 | if (Subtarget.hasStdExtD()) { | |||
334 | setOperationAction(ISD::FMINNUM, MVT::f64, Legal); | |||
335 | setOperationAction(ISD::FMAXNUM, MVT::f64, Legal); | |||
336 | for (auto CC : FPCCToExpand) | |||
337 | setCondCodeAction(CC, MVT::f64, Expand); | |||
338 | setOperationAction(ISD::SELECT_CC, MVT::f64, Expand); | |||
339 | setOperationAction(ISD::SELECT, MVT::f64, Custom); | |||
340 | setOperationAction(ISD::BR_CC, MVT::f64, Expand); | |||
341 | setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand); | |||
342 | setTruncStoreAction(MVT::f64, MVT::f32, Expand); | |||
343 | for (auto Op : FPOpToExpand) | |||
344 | setOperationAction(Op, MVT::f64, Expand); | |||
345 | setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand); | |||
346 | setTruncStoreAction(MVT::f64, MVT::f16, Expand); | |||
347 | } | |||
348 | ||||
349 | if (Subtarget.is64Bit()) { | |||
350 | setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom); | |||
351 | setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom); | |||
352 | setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom); | |||
353 | setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom); | |||
354 | } | |||
355 | ||||
356 | setOperationAction(ISD::GlobalAddress, XLenVT, Custom); | |||
357 | setOperationAction(ISD::BlockAddress, XLenVT, Custom); | |||
358 | setOperationAction(ISD::ConstantPool, XLenVT, Custom); | |||
359 | setOperationAction(ISD::JumpTable, XLenVT, Custom); | |||
360 | ||||
361 | setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom); | |||
362 | ||||
363 | // TODO: On M-mode only targets, the cycle[h] CSR may not be present. | |||
364 | // Unfortunately this can't be determined just from the ISA naming string. | |||
365 | setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, | |||
366 | Subtarget.is64Bit() ? Legal : Custom); | |||
367 | ||||
368 | setOperationAction(ISD::TRAP, MVT::Other, Legal); | |||
369 | setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal); | |||
370 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); | |||
371 | ||||
372 | if (Subtarget.hasStdExtA()) { | |||
373 | setMaxAtomicSizeInBitsSupported(Subtarget.getXLen()); | |||
374 | setMinCmpXchgSizeInBits(32); | |||
375 | } else { | |||
376 | setMaxAtomicSizeInBitsSupported(0); | |||
377 | } | |||
378 | ||||
379 | setBooleanContents(ZeroOrOneBooleanContent); | |||
380 | ||||
381 | if (Subtarget.hasStdExtV()) { | |||
382 | setBooleanVectorContents(ZeroOrOneBooleanContent); | |||
383 | ||||
384 | setOperationAction(ISD::VSCALE, XLenVT, Custom); | |||
385 | ||||
386 | // RVV intrinsics may have illegal operands. | |||
387 | // We also need to custom legalize vmv.x.s. | |||
388 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom); | |||
389 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom); | |||
390 | setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom); | |||
391 | setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom); | |||
392 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom); | |||
393 | setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom); | |||
394 | ||||
395 | setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom); | |||
396 | ||||
397 | if (Subtarget.is64Bit()) { | |||
398 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom); | |||
399 | setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom); | |||
400 | } else { | |||
401 | // We must custom-lower certain vXi64 operations on RV32 due to the vector | |||
402 | // element type being illegal. | |||
403 | setOperationAction(ISD::SPLAT_VECTOR, MVT::i64, Custom); | |||
404 | setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::i64, Custom); | |||
405 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::i64, Custom); | |||
406 | ||||
407 | setOperationAction(ISD::VECREDUCE_ADD, MVT::i64, Custom); | |||
408 | setOperationAction(ISD::VECREDUCE_AND, MVT::i64, Custom); | |||
409 | setOperationAction(ISD::VECREDUCE_OR, MVT::i64, Custom); | |||
410 | setOperationAction(ISD::VECREDUCE_XOR, MVT::i64, Custom); | |||
411 | setOperationAction(ISD::VECREDUCE_SMAX, MVT::i64, Custom); | |||
412 | setOperationAction(ISD::VECREDUCE_SMIN, MVT::i64, Custom); | |||
413 | setOperationAction(ISD::VECREDUCE_UMAX, MVT::i64, Custom); | |||
414 | setOperationAction(ISD::VECREDUCE_UMIN, MVT::i64, Custom); | |||
415 | } | |||
416 | ||||
417 | for (MVT VT : BoolVecVTs) { | |||
418 | setOperationAction(ISD::SPLAT_VECTOR, VT, Legal); | |||
419 | ||||
420 | // Mask VTs are custom-expanded into a series of standard nodes | |||
421 | setOperationAction(ISD::TRUNCATE, VT, Custom); | |||
422 | setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); | |||
423 | setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); | |||
424 | } | |||
425 | ||||
426 | for (MVT VT : IntVecVTs) { | |||
427 | setOperationAction(ISD::SPLAT_VECTOR, VT, Legal); | |||
428 | ||||
429 | setOperationAction(ISD::SMIN, VT, Legal); | |||
430 | setOperationAction(ISD::SMAX, VT, Legal); | |||
431 | setOperationAction(ISD::UMIN, VT, Legal); | |||
432 | setOperationAction(ISD::UMAX, VT, Legal); | |||
433 | ||||
434 | setOperationAction(ISD::ROTL, VT, Expand); | |||
435 | setOperationAction(ISD::ROTR, VT, Expand); | |||
436 | ||||
437 | // Custom-lower extensions and truncations from/to mask types. | |||
438 | setOperationAction(ISD::ANY_EXTEND, VT, Custom); | |||
439 | setOperationAction(ISD::SIGN_EXTEND, VT, Custom); | |||
440 | setOperationAction(ISD::ZERO_EXTEND, VT, Custom); | |||
441 | ||||
442 | // RVV has native int->float & float->int conversions where the | |||
443 | // element type sizes are within one power-of-two of each other. Any | |||
444 | // wider distances between type sizes have to be lowered as sequences | |||
445 | // which progressively narrow the gap in stages. | |||
446 | setOperationAction(ISD::SINT_TO_FP, VT, Custom); | |||
447 | setOperationAction(ISD::UINT_TO_FP, VT, Custom); | |||
448 | setOperationAction(ISD::FP_TO_SINT, VT, Custom); | |||
449 | setOperationAction(ISD::FP_TO_UINT, VT, Custom); | |||
450 | ||||
451 | // Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL" | |||
452 | // nodes which truncate by one power of two at a time. | |||
453 | setOperationAction(ISD::TRUNCATE, VT, Custom); | |||
454 | ||||
455 | // Custom-lower insert/extract operations to simplify patterns. | |||
456 | setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); | |||
457 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); | |||
458 | ||||
459 | // Custom-lower reduction operations to set up the corresponding custom | |||
460 | // nodes' operands. | |||
461 | setOperationAction(ISD::VECREDUCE_ADD, VT, Custom); | |||
462 | setOperationAction(ISD::VECREDUCE_AND, VT, Custom); | |||
463 | setOperationAction(ISD::VECREDUCE_OR, VT, Custom); | |||
464 | setOperationAction(ISD::VECREDUCE_XOR, VT, Custom); | |||
465 | setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom); | |||
466 | setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom); | |||
467 | setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom); | |||
468 | setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom); | |||
469 | ||||
470 | setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); | |||
471 | setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); | |||
472 | } | |||
473 | ||||
474 | // Expand various CCs to best match the RVV ISA, which natively supports UNE | |||
475 | // but no other unordered comparisons, and supports all ordered comparisons | |||
476 | // except ONE. Additionally, we expand GT,OGT,GE,OGE for optimization | |||
477 | // purposes; they are expanded to their swapped-operand CCs (LT,OLT,LE,OLE), | |||
478 | // and we pattern-match those back to the "original", swapping operands once | |||
479 | // more. This way we catch both operations and both "vf" and "fv" forms with | |||
480 | // fewer patterns. | |||
481 | ISD::CondCode VFPCCToExpand[] = { | |||
482 | ISD::SETO, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT, | |||
483 | ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO, | |||
484 | ISD::SETGT, ISD::SETOGT, ISD::SETGE, ISD::SETOGE, | |||
485 | }; | |||
486 | ||||
487 | // Sets common operation actions on RVV floating-point vector types. | |||
488 | const auto SetCommonVFPActions = [&](MVT VT) { | |||
489 | setOperationAction(ISD::SPLAT_VECTOR, VT, Legal); | |||
490 | // RVV has native FP_ROUND & FP_EXTEND conversions where the element type | |||
491 | // sizes are within one power-of-two of each other. Therefore conversions | |||
492 | // between vXf16 and vXf64 must be lowered as sequences which convert via | |||
493 | // vXf32. | |||
494 | setOperationAction(ISD::FP_ROUND, VT, Custom); | |||
495 | setOperationAction(ISD::FP_EXTEND, VT, Custom); | |||
496 | // Custom-lower insert/extract operations to simplify patterns. | |||
497 | setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); | |||
498 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); | |||
499 | // Expand various condition codes (explained above). | |||
500 | for (auto CC : VFPCCToExpand) | |||
501 | setCondCodeAction(CC, VT, Expand); | |||
502 | ||||
503 | setOperationAction(ISD::VECREDUCE_FADD, VT, Custom); | |||
504 | setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom); | |||
505 | setOperationAction(ISD::FCOPYSIGN, VT, Legal); | |||
506 | ||||
507 | setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); | |||
508 | setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); | |||
509 | }; | |||
510 | ||||
511 | if (Subtarget.hasStdExtZfh()) | |||
512 | for (MVT VT : F16VecVTs) | |||
513 | SetCommonVFPActions(VT); | |||
514 | ||||
515 | if (Subtarget.hasStdExtF()) | |||
516 | for (MVT VT : F32VecVTs) | |||
517 | SetCommonVFPActions(VT); | |||
518 | ||||
519 | if (Subtarget.hasStdExtD()) | |||
520 | for (MVT VT : F64VecVTs) | |||
521 | SetCommonVFPActions(VT); | |||
522 | ||||
523 | if (Subtarget.useRVVForFixedLengthVectors()) { | |||
524 | for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) { | |||
525 | if (!useRVVForFixedLengthVectorVT(VT)) | |||
526 | continue; | |||
527 | ||||
528 | // By default everything must be expanded. | |||
529 | for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) | |||
530 | setOperationAction(Op, VT, Expand); | |||
531 | for (MVT OtherVT : MVT::fixedlen_vector_valuetypes()) | |||
532 | setTruncStoreAction(VT, OtherVT, Expand); | |||
533 | ||||
534 | // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed. | |||
535 | setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); | |||
536 | setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); | |||
537 | ||||
538 | setOperationAction(ISD::BUILD_VECTOR, VT, Custom); | |||
539 | setOperationAction(ISD::CONCAT_VECTORS, VT, Custom); | |||
540 | ||||
541 | setOperationAction(ISD::LOAD, VT, Custom); | |||
542 | setOperationAction(ISD::STORE, VT, Custom); | |||
543 | ||||
544 | setOperationAction(ISD::SETCC, VT, Custom); | |||
545 | ||||
546 | setOperationAction(ISD::TRUNCATE, VT, Custom); | |||
547 | ||||
548 | // Operations below differ between mask vectors and other vectors. | |||
549 | if (VT.getVectorElementType() == MVT::i1) { | |||
550 | setOperationAction(ISD::AND, VT, Custom); | |||
551 | setOperationAction(ISD::OR, VT, Custom); | |||
552 | setOperationAction(ISD::XOR, VT, Custom); | |||
553 | continue; | |||
554 | } | |||
555 | ||||
556 | setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); | |||
557 | setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); | |||
558 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); | |||
559 | ||||
560 | setOperationAction(ISD::ADD, VT, Custom); | |||
561 | setOperationAction(ISD::MUL, VT, Custom); | |||
562 | setOperationAction(ISD::SUB, VT, Custom); | |||
563 | setOperationAction(ISD::AND, VT, Custom); | |||
564 | setOperationAction(ISD::OR, VT, Custom); | |||
565 | setOperationAction(ISD::XOR, VT, Custom); | |||
566 | setOperationAction(ISD::SDIV, VT, Custom); | |||
567 | setOperationAction(ISD::SREM, VT, Custom); | |||
568 | setOperationAction(ISD::UDIV, VT, Custom); | |||
569 | setOperationAction(ISD::UREM, VT, Custom); | |||
570 | setOperationAction(ISD::SHL, VT, Custom); | |||
571 | setOperationAction(ISD::SRA, VT, Custom); | |||
572 | setOperationAction(ISD::SRL, VT, Custom); | |||
573 | ||||
574 | setOperationAction(ISD::SMIN, VT, Custom); | |||
575 | setOperationAction(ISD::SMAX, VT, Custom); | |||
576 | setOperationAction(ISD::UMIN, VT, Custom); | |||
577 | setOperationAction(ISD::UMAX, VT, Custom); | |||
578 | ||||
579 | setOperationAction(ISD::MULHS, VT, Custom); | |||
580 | setOperationAction(ISD::MULHU, VT, Custom); | |||
581 | ||||
582 | setOperationAction(ISD::SINT_TO_FP, VT, Custom); | |||
583 | setOperationAction(ISD::UINT_TO_FP, VT, Custom); | |||
584 | setOperationAction(ISD::FP_TO_SINT, VT, Custom); | |||
585 | setOperationAction(ISD::FP_TO_UINT, VT, Custom); | |||
586 | ||||
587 | setOperationAction(ISD::VSELECT, VT, Custom); | |||
588 | ||||
589 | setOperationAction(ISD::ANY_EXTEND, VT, Custom); | |||
590 | setOperationAction(ISD::SIGN_EXTEND, VT, Custom); | |||
591 | setOperationAction(ISD::ZERO_EXTEND, VT, Custom); | |||
592 | ||||
593 | setOperationAction(ISD::BITCAST, VT, Custom); | |||
594 | } | |||
595 | ||||
596 | for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) { | |||
597 | if (!useRVVForFixedLengthVectorVT(VT)) | |||
598 | continue; | |||
599 | ||||
600 | // By default everything must be expanded. | |||
601 | for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) | |||
602 | setOperationAction(Op, VT, Expand); | |||
603 | for (MVT OtherVT : MVT::fp_fixedlen_vector_valuetypes()) { | |||
604 | setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand); | |||
605 | setTruncStoreAction(VT, OtherVT, Expand); | |||
606 | } | |||
607 | ||||
608 | // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed. | |||
609 | setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); | |||
610 | setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); | |||
611 | ||||
612 | setOperationAction(ISD::BUILD_VECTOR, VT, Custom); | |||
613 | setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); | |||
614 | setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); | |||
615 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); | |||
616 | ||||
617 | setOperationAction(ISD::LOAD, VT, Custom); | |||
618 | setOperationAction(ISD::STORE, VT, Custom); | |||
619 | setOperationAction(ISD::FADD, VT, Custom); | |||
620 | setOperationAction(ISD::FSUB, VT, Custom); | |||
621 | setOperationAction(ISD::FMUL, VT, Custom); | |||
622 | setOperationAction(ISD::FDIV, VT, Custom); | |||
623 | setOperationAction(ISD::FNEG, VT, Custom); | |||
624 | setOperationAction(ISD::FABS, VT, Custom); | |||
625 | setOperationAction(ISD::FSQRT, VT, Custom); | |||
626 | setOperationAction(ISD::FMA, VT, Custom); | |||
627 | ||||
628 | setOperationAction(ISD::FP_ROUND, VT, Custom); | |||
629 | setOperationAction(ISD::FP_EXTEND, VT, Custom); | |||
630 | ||||
631 | for (auto CC : VFPCCToExpand) | |||
632 | setCondCodeAction(CC, VT, Expand); | |||
633 | ||||
634 | setOperationAction(ISD::VSELECT, VT, Custom); | |||
635 | ||||
636 | setOperationAction(ISD::BITCAST, VT, Custom); | |||
637 | } | |||
638 | } | |||
639 | } | |||
640 | ||||
641 | // Function alignments. | |||
642 | const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4); | |||
643 | setMinFunctionAlignment(FunctionAlignment); | |||
644 | setPrefFunctionAlignment(FunctionAlignment); | |||
645 | ||||
646 | setMinimumJumpTableEntries(5); | |||
647 | ||||
648 | // Jumps are expensive, compared to logic | |||
649 | setJumpIsExpensive(); | |||
650 | ||||
651 | // We can use any register for comparisons | |||
652 | setHasMultipleConditionRegisters(); | |||
653 | ||||
654 | setTargetDAGCombine(ISD::SETCC); | |||
655 | if (Subtarget.hasStdExtZbp()) { | |||
656 | setTargetDAGCombine(ISD::OR); | |||
657 | } | |||
658 | if (Subtarget.hasStdExtV()) | |||
659 | setTargetDAGCombine(ISD::FCOPYSIGN); | |||
660 | } | |||
661 | ||||
662 | EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL, | |||
663 | LLVMContext &Context, | |||
664 | EVT VT) const { | |||
665 | if (!VT.isVector()) | |||
666 | return getPointerTy(DL); | |||
667 | if (Subtarget.hasStdExtV() && | |||
668 | (VT.isScalableVector() || Subtarget.useRVVForFixedLengthVectors())) | |||
669 | return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount()); | |||
670 | return VT.changeVectorElementTypeToInteger(); | |||
671 | } | |||
672 | ||||
673 | bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, | |||
674 | const CallInst &I, | |||
675 | MachineFunction &MF, | |||
676 | unsigned Intrinsic) const { | |||
677 | switch (Intrinsic) { | |||
678 | default: | |||
679 | return false; | |||
680 | case Intrinsic::riscv_masked_atomicrmw_xchg_i32: | |||
681 | case Intrinsic::riscv_masked_atomicrmw_add_i32: | |||
682 | case Intrinsic::riscv_masked_atomicrmw_sub_i32: | |||
683 | case Intrinsic::riscv_masked_atomicrmw_nand_i32: | |||
684 | case Intrinsic::riscv_masked_atomicrmw_max_i32: | |||
685 | case Intrinsic::riscv_masked_atomicrmw_min_i32: | |||
686 | case Intrinsic::riscv_masked_atomicrmw_umax_i32: | |||
687 | case Intrinsic::riscv_masked_atomicrmw_umin_i32: | |||
688 | case Intrinsic::riscv_masked_cmpxchg_i32: | |||
689 | PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType()); | |||
690 | Info.opc = ISD::INTRINSIC_W_CHAIN; | |||
691 | Info.memVT = MVT::getVT(PtrTy->getElementType()); | |||
692 | Info.ptrVal = I.getArgOperand(0); | |||
693 | Info.offset = 0; | |||
694 | Info.align = Align(4); | |||
695 | Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore | | |||
696 | MachineMemOperand::MOVolatile; | |||
697 | return true; | |||
698 | } | |||
699 | } | |||
700 | ||||
701 | bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL, | |||
702 | const AddrMode &AM, Type *Ty, | |||
703 | unsigned AS, | |||
704 | Instruction *I) const { | |||
705 | // No global is ever allowed as a base. | |||
706 | if (AM.BaseGV) | |||
707 | return false; | |||
708 | ||||
709 | // Require a 12-bit signed offset. | |||
710 | if (!isInt<12>(AM.BaseOffs)) | |||
711 | return false; | |||
712 | ||||
713 | switch (AM.Scale) { | |||
714 | case 0: // "r+i" or just "i", depending on HasBaseReg. | |||
715 | break; | |||
716 | case 1: | |||
717 | if (!AM.HasBaseReg) // allow "r+i". | |||
718 | break; | |||
719 | return false; // disallow "r+r" or "r+r+i". | |||
720 | default: | |||
721 | return false; | |||
722 | } | |||
723 | ||||
724 | return true; | |||
725 | } | |||
726 | ||||
727 | bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const { | |||
728 | return isInt<12>(Imm); | |||
729 | } | |||
730 | ||||
731 | bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const { | |||
732 | return isInt<12>(Imm); | |||
733 | } | |||
734 | ||||
735 | // On RV32, 64-bit integers are split into their high and low parts and held | |||
736 | // in two different registers, so the trunc is free since the low register can | |||
737 | // just be used. | |||
738 | bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const { | |||
739 | if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy()) | |||
740 | return false; | |||
741 | unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); | |||
742 | unsigned DestBits = DstTy->getPrimitiveSizeInBits(); | |||
743 | return (SrcBits == 64 && DestBits == 32); | |||
744 | } | |||
745 | ||||
746 | bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const { | |||
747 | if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() || | |||
748 | !SrcVT.isInteger() || !DstVT.isInteger()) | |||
749 | return false; | |||
750 | unsigned SrcBits = SrcVT.getSizeInBits(); | |||
751 | unsigned DestBits = DstVT.getSizeInBits(); | |||
752 | return (SrcBits == 64 && DestBits == 32); | |||
753 | } | |||
754 | ||||
755 | bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const { | |||
756 | // Zexts are free if they can be combined with a load. | |||
757 | if (auto *LD = dyn_cast<LoadSDNode>(Val)) { | |||
758 | EVT MemVT = LD->getMemoryVT(); | |||
759 | if ((MemVT == MVT::i8 || MemVT == MVT::i16 || | |||
760 | (Subtarget.is64Bit() && MemVT == MVT::i32)) && | |||
761 | (LD->getExtensionType() == ISD::NON_EXTLOAD || | |||
762 | LD->getExtensionType() == ISD::ZEXTLOAD)) | |||
763 | return true; | |||
764 | } | |||
765 | ||||
766 | return TargetLowering::isZExtFree(Val, VT2); | |||
767 | } | |||
768 | ||||
769 | bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const { | |||
770 | return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64; | |||
771 | } | |||
772 | ||||
773 | bool RISCVTargetLowering::isCheapToSpeculateCttz() const { | |||
774 | return Subtarget.hasStdExtZbb(); | |||
775 | } | |||
776 | ||||
777 | bool RISCVTargetLowering::isCheapToSpeculateCtlz() const { | |||
778 | return Subtarget.hasStdExtZbb(); | |||
779 | } | |||
780 | ||||
781 | bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT, | |||
782 | bool ForCodeSize) const { | |||
783 | if (VT == MVT::f16 && !Subtarget.hasStdExtZfh()) | |||
784 | return false; | |||
785 | if (VT == MVT::f32 && !Subtarget.hasStdExtF()) | |||
786 | return false; | |||
787 | if (VT == MVT::f64 && !Subtarget.hasStdExtD()) | |||
788 | return false; | |||
789 | if (Imm.isNegZero()) | |||
790 | return false; | |||
791 | return Imm.isZero(); | |||
792 | } | |||
793 | ||||
794 | bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const { | |||
795 | return (VT == MVT::f16 && Subtarget.hasStdExtZfh()) || | |||
796 | (VT == MVT::f32 && Subtarget.hasStdExtF()) || | |||
797 | (VT == MVT::f64 && Subtarget.hasStdExtD()); | |||
798 | } | |||
799 | ||||
800 | // Changes the condition code and swaps operands if necessary, so the SetCC | |||
801 | // operation matches one of the comparisons supported directly in the RISC-V | |||
802 | // ISA. | |||
803 | static void normaliseSetCC(SDValue &LHS, SDValue &RHS, ISD::CondCode &CC) { | |||
804 | switch (CC) { | |||
805 | default: | |||
806 | break; | |||
807 | case ISD::SETGT: | |||
808 | case ISD::SETLE: | |||
809 | case ISD::SETUGT: | |||
810 | case ISD::SETULE: | |||
811 | CC = ISD::getSetCCSwappedOperands(CC); | |||
812 | std::swap(LHS, RHS); | |||
813 | break; | |||
814 | } | |||
815 | } | |||
816 | ||||
817 | // Return the RISC-V branch opcode that matches the given DAG integer | |||
818 | // condition code. The CondCode must be one of those supported by the RISC-V | |||
819 | // ISA (see normaliseSetCC). | |||
820 | static unsigned getBranchOpcodeForIntCondCode(ISD::CondCode CC) { | |||
821 | switch (CC) { | |||
822 | default: | |||
823 | llvm_unreachable("Unsupported CondCode"); | |||
824 | case ISD::SETEQ: | |||
825 | return RISCV::BEQ; | |||
826 | case ISD::SETNE: | |||
827 | return RISCV::BNE; | |||
828 | case ISD::SETLT: | |||
829 | return RISCV::BLT; | |||
830 | case ISD::SETGE: | |||
831 | return RISCV::BGE; | |||
832 | case ISD::SETULT: | |||
833 | return RISCV::BLTU; | |||
834 | case ISD::SETUGE: | |||
835 | return RISCV::BGEU; | |||
836 | } | |||
837 | } | |||
838 | ||||
839 | RISCVVLMUL RISCVTargetLowering::getLMUL(MVT VT) { | |||
840 | assert(VT.isScalableVector() && "Expecting a scalable vector type"); | |||
841 | unsigned KnownSize = VT.getSizeInBits().getKnownMinValue(); | |||
842 | if (VT.getVectorElementType() == MVT::i1) | |||
843 | KnownSize *= 8; | |||
844 | ||||
845 | switch (KnownSize) { | |||
846 | default: | |||
847 | llvm_unreachable("Invalid LMUL."); | |||
848 | case 8: | |||
849 | return RISCVVLMUL::LMUL_F8; | |||
850 | case 16: | |||
851 | return RISCVVLMUL::LMUL_F4; | |||
852 | case 32: | |||
853 | return RISCVVLMUL::LMUL_F2; | |||
854 | case 64: | |||
855 | return RISCVVLMUL::LMUL_1; | |||
856 | case 128: | |||
857 | return RISCVVLMUL::LMUL_2; | |||
858 | case 256: | |||
859 | return RISCVVLMUL::LMUL_4; | |||
860 | case 512: | |||
861 | return RISCVVLMUL::LMUL_8; | |||
862 | } | |||
863 | } | |||
864 | ||||
865 | unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVVLMUL LMul) { | |||
866 | switch (LMul) { | |||
867 | default: | |||
868 | llvm_unreachable("Invalid LMUL."); | |||
869 | case RISCVVLMUL::LMUL_F8: | |||
870 | case RISCVVLMUL::LMUL_F4: | |||
871 | case RISCVVLMUL::LMUL_F2: | |||
872 | case RISCVVLMUL::LMUL_1: | |||
873 | return RISCV::VRRegClassID; | |||
874 | case RISCVVLMUL::LMUL_2: | |||
875 | return RISCV::VRM2RegClassID; | |||
876 | case RISCVVLMUL::LMUL_4: | |||
877 | return RISCV::VRM4RegClassID; | |||
878 | case RISCVVLMUL::LMUL_8: | |||
879 | return RISCV::VRM8RegClassID; | |||
880 | } | |||
881 | } | |||
882 | ||||
883 | unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) { | |||
884 | RISCVVLMUL LMUL = getLMUL(VT); | |||
885 | if (LMUL == RISCVVLMUL::LMUL_F8 || LMUL == RISCVVLMUL::LMUL_F4 || | |||
886 | LMUL == RISCVVLMUL::LMUL_F2 || LMUL == RISCVVLMUL::LMUL_1) { | |||
887 | static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7, | |||
888 | "Unexpected subreg numbering"); | |||
889 | return RISCV::sub_vrm1_0 + Index; | |||
890 | } | |||
891 | if (LMUL == RISCVVLMUL::LMUL_2) { | |||
892 | static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3, | |||
893 | "Unexpected subreg numbering"); | |||
894 | return RISCV::sub_vrm2_0 + Index; | |||
895 | } | |||
896 | if (LMUL == RISCVVLMUL::LMUL_4) { | |||
897 | static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1, | |||
898 | "Unexpected subreg numbering"); | |||
899 | return RISCV::sub_vrm4_0 + Index; | |||
900 | } | |||
901 | llvm_unreachable("Invalid vector type."); | |||
902 | } | |||
903 | ||||
904 | unsigned RISCVTargetLowering::getRegClassIDForVecVT(MVT VT) { | |||
905 | if (VT.getVectorElementType() == MVT::i1) | |||
906 | return RISCV::VRRegClassID; | |||
907 | return getRegClassIDForLMUL(getLMUL(VT)); | |||
908 | } | |||
909 | ||||
910 | // Attempt to decompose a subvector insert/extract between VecVT and | |||
911 | // SubVecVT via subregister indices. Returns the subregister index that | |||
912 | // can perform the subvector insert/extract with the given element index, as | |||
913 | // well as the index corresponding to any leftover subvectors that must be | |||
914 | // further inserted/extracted within the register class for SubVecVT. | |||
915 | std::pair<unsigned, unsigned> | |||
916 | RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs( | |||
917 | MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx, | |||
918 | const RISCVRegisterInfo *TRI) { | |||
919 | static_assert((RISCV::VRM8RegClassID > RISCV::VRM4RegClassID && | |||
920 | RISCV::VRM4RegClassID > RISCV::VRM2RegClassID && | |||
921 | RISCV::VRM2RegClassID > RISCV::VRRegClassID), | |||
922 | "Register classes not ordered"); | |||
923 | unsigned VecRegClassID = getRegClassIDForVecVT(VecVT); | |||
924 | unsigned SubRegClassID = getRegClassIDForVecVT(SubVecVT); | |||
925 | // Try to compose a subregister index that takes us from the incoming | |||
926 | // LMUL>1 register class down to the outgoing one. At each step we halve | |||
927 | // the LMUL: | |||
928 | // nxv16i32@12 -> nxv2i32: sub_vrm4_1_then_sub_vrm2_1_then_sub_vrm1_0 | |||
929 | // Note that this is not guaranteed to find a subregister index, such as | |||
930 | // when we are extracting from one VR type to another. | |||
931 | unsigned SubRegIdx = RISCV::NoSubRegister; | |||
932 | for (const unsigned RCID : | |||
933 | {RISCV::VRM4RegClassID, RISCV::VRM2RegClassID, RISCV::VRRegClassID}) | |||
934 | if (VecRegClassID > RCID && SubRegClassID <= RCID) { | |||
935 | VecVT = VecVT.getHalfNumVectorElementsVT(); | |||
936 | bool IsHi = | |||
937 | InsertExtractIdx >= VecVT.getVectorElementCount().getKnownMinValue(); | |||
938 | SubRegIdx = TRI->composeSubRegIndices(SubRegIdx, | |||
939 | getSubregIndexByMVT(VecVT, IsHi)); | |||
940 | if (IsHi) | |||
941 | InsertExtractIdx -= VecVT.getVectorElementCount().getKnownMinValue(); | |||
942 | } | |||
943 | return {SubRegIdx, InsertExtractIdx}; | |||
944 | } | |||
945 | ||||
946 | // Return the largest legal scalable vector type that matches VT's element type. | |||
947 | MVT RISCVTargetLowering::getContainerForFixedLengthVector( | |||
948 | const TargetLowering &TLI, MVT VT, const RISCVSubtarget &Subtarget) { | |||
949 | assert(VT.isFixedLengthVector() && TLI.isTypeLegal(VT) && | |||
950 | "Expected legal fixed length vector!"); | |||
951 | ||||
952 | unsigned LMul = Subtarget.getLMULForFixedLengthVector(VT); | |||
953 | assert(LMul <= 8 && isPowerOf2_32(LMul) && "Unexpected LMUL!"); | |||
954 | ||||
955 | MVT EltVT = VT.getVectorElementType(); | |||
956 | switch (EltVT.SimpleTy) { | |||
957 | default: | |||
958 | llvm_unreachable("unexpected element type for RVV container"); | |||
959 | case MVT::i1: { | |||
960 | // Masks are calculated assuming 8-bit elements since that's when we need | |||
961 | // the most elements. | |||
962 | unsigned EltsPerBlock = RISCV::RVVBitsPerBlock / 8; | |||
963 | return MVT::getScalableVectorVT(MVT::i1, LMul * EltsPerBlock); | |||
964 | } | |||
965 | case MVT::i8: | |||
966 | case MVT::i16: | |||
967 | case MVT::i32: | |||
968 | case MVT::i64: | |||
969 | case MVT::f16: | |||
970 | case MVT::f32: | |||
971 | case MVT::f64: { | |||
972 | unsigned EltsPerBlock = RISCV::RVVBitsPerBlock / EltVT.getSizeInBits(); | |||
973 | return MVT::getScalableVectorVT(EltVT, LMul * EltsPerBlock); | |||
974 | } | |||
975 | } | |||
976 | } | |||
977 | ||||
978 | MVT RISCVTargetLowering::getContainerForFixedLengthVector( | |||
979 | SelectionDAG &DAG, MVT VT, const RISCVSubtarget &Subtarget) { | |||
980 | return getContainerForFixedLengthVector(DAG.getTargetLoweringInfo(), VT, | |||
981 | Subtarget); | |||
982 | } | |||
983 | ||||
984 | MVT RISCVTargetLowering::getContainerForFixedLengthVector(MVT VT) const { | |||
985 | return getContainerForFixedLengthVector(*this, VT, getSubtarget()); | |||
986 | } | |||
987 | ||||
988 | // Grow V to consume an entire RVV register. | |||
989 | static SDValue convertToScalableVector(EVT VT, SDValue V, SelectionDAG &DAG, | |||
990 | const RISCVSubtarget &Subtarget) { | |||
991 | assert(VT.isScalableVector() && | |||
992 | "Expected to convert into a scalable vector!"); | |||
993 | assert(V.getValueType().isFixedLengthVector() && | |||
994 | "Expected a fixed length vector operand!"); | |||
995 | SDLoc DL(V); | |||
996 | SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT()); | |||
997 | return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero); | |||
998 | } | |||
999 | ||||
1000 | // Shrink V so it's just big enough to maintain a VT's worth of data. | |||
1001 | static SDValue convertFromScalableVector(EVT VT, SDValue V, SelectionDAG &DAG, | |||
1002 | const RISCVSubtarget &Subtarget) { | |||
1003 | assert(VT.isFixedLengthVector() && | |||
1004 | "Expected to convert into a fixed length vector!"); | |||
1005 | assert(V.getValueType().isScalableVector() && | |||
1006 | "Expected a scalable vector operand!"); | |||
1007 | SDLoc DL(V); | |||
1008 | SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT()); | |||
1009 | return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero); | |||
1010 | } | |||
1011 | ||||
1012 | // Gets the two common "VL" operands: an all-ones mask and the vector length. | |||
1013 | // VecVT is a vector type, either fixed-length or scalable, and ContainerVT is | |||
1014 | // the vector type that it is contained in. | |||
1015 | static std::pair<SDValue, SDValue> | |||
1016 | getDefaultVLOps(MVT VecVT, MVT ContainerVT, SDLoc DL, SelectionDAG &DAG, | |||
1017 | const RISCVSubtarget &Subtarget) { | |||
1018 | assert(ContainerVT.isScalableVector() && "Expecting scalable container type"); | |||
1019 | MVT XLenVT = Subtarget.getXLenVT(); | |||
1020 | SDValue VL = VecVT.isFixedLengthVector() | |||
1021 | ? DAG.getConstant(VecVT.getVectorNumElements(), DL, XLenVT) | |||
1022 | : DAG.getRegister(RISCV::X0, XLenVT); | |||
1023 | MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount()); | |||
1024 | SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL); | |||
1025 | return {Mask, VL}; | |||
1026 | } | |||
1027 | ||||
1028 | // As above but assuming the given type is a scalable vector type. | |||
1029 | static std::pair<SDValue, SDValue> | |||
1030 | getDefaultScalableVLOps(MVT VecVT, SDLoc DL, SelectionDAG &DAG, | |||
1031 | const RISCVSubtarget &Subtarget) { | |||
1032 | assert(VecVT.isScalableVector() && "Expecting a scalable vector"); | |||
1033 | return getDefaultVLOps(VecVT, VecVT, DL, DAG, Subtarget); | |||
1034 | } | |||
1035 | ||||
1036 | // The state of RVV BUILD_VECTOR and VECTOR_SHUFFLE lowering is that very few | |||
1037 | // of either is (currently) supported. This can get us into an infinite loop | |||
1038 | // where we try to lower a BUILD_VECTOR as a VECTOR_SHUFFLE as a BUILD_VECTOR | |||
1039 | // as a ..., etc. | |||
1040 | // Until either (or both) of these can reliably lower any node, reporting that | |||
1041 | // we don't want to expand BUILD_VECTORs via VECTOR_SHUFFLEs at least breaks | |||
1042 | // the infinite loop. Note that this lowers BUILD_VECTOR through the stack, | |||
1043 | // which is not desirable. | |||
1044 | bool RISCVTargetLowering::shouldExpandBuildVectorWithShuffles( | |||
1045 | EVT VT, unsigned DefinedValues) const { | |||
1046 | return false; | |||
1047 | } | |||
1048 | ||||
1049 | static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, | |||
1050 | const RISCVSubtarget &Subtarget) { | |||
1051 | MVT VT = Op.getSimpleValueType(); | |||
1052 | assert(VT.isFixedLengthVector() && "Unexpected vector!"); | |||
1053 | ||||
1054 | MVT ContainerVT = | |||
1055 | RISCVTargetLowering::getContainerForFixedLengthVector(DAG, VT, Subtarget); | |||
1056 | ||||
1057 | SDLoc DL(Op); | |||
1058 | SDValue Mask, VL; | |||
1059 | std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); | |||
1060 | ||||
1061 | if (VT.getVectorElementType() == MVT::i1) { | |||
1062 | if (ISD::isBuildVectorAllZeros(Op.getNode())) { | |||
1063 | SDValue VMClr = DAG.getNode(RISCVISD::VMCLR_VL, DL, ContainerVT, VL); | |||
1064 | return convertFromScalableVector(VT, VMClr, DAG, Subtarget); | |||
1065 | } | |||
1066 | ||||
1067 | if (ISD::isBuildVectorAllOnes(Op.getNode())) { | |||
1068 | SDValue VMSet = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL); | |||
1069 | return convertFromScalableVector(VT, VMSet, DAG, Subtarget); | |||
1070 | } | |||
1071 | ||||
1072 | return SDValue(); | |||
1073 | } | |||
1074 | ||||
1075 | if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) { | |||
1076 | unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL | |||
1077 | : RISCVISD::VMV_V_X_VL; | |||
1078 | Splat = DAG.getNode(Opc, DL, ContainerVT, Splat, VL); | |||
1079 | return convertFromScalableVector(VT, Splat, DAG, Subtarget); | |||
1080 | } | |||
1081 | ||||
1082 | // Try and match an index sequence, which we can lower directly to the vid | |||
1083 | // instruction. An all-undef vector is matched by getSplatValue, above. | |||
1084 | if (VT.isInteger()) { | |||
1085 | bool IsVID = true; | |||
1086 | for (unsigned i = 0, e = Op.getNumOperands(); i < e && IsVID; i++) | |||
1087 | IsVID &= Op.getOperand(i).isUndef() || | |||
1088 | (isa<ConstantSDNode>(Op.getOperand(i)) && | |||
1089 | Op.getConstantOperandVal(i) == i); | |||
1090 | ||||
1091 | if (IsVID) { | |||
1092 | SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, ContainerVT, Mask, VL); | |||
1093 | return convertFromScalableVector(VT, VID, DAG, Subtarget); | |||
1094 | } | |||
1095 | } | |||
1096 | ||||
1097 | return SDValue(); | |||
1098 | } | |||
1099 | ||||
1100 | static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG, | |||
1101 | const RISCVSubtarget &Subtarget) { | |||
1102 | SDValue V1 = Op.getOperand(0); | |||
1103 | SDLoc DL(Op); | |||
1104 | MVT VT = Op.getSimpleValueType(); | |||
1105 | ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode()); | |||
1106 | ||||
1107 | if (SVN->isSplat()) { | |||
1108 | int Lane = SVN->getSplatIndex(); | |||
1109 | if (Lane >= 0) { | |||
1110 | MVT ContainerVT = RISCVTargetLowering::getContainerForFixedLengthVector( | |||
1111 | DAG, VT, Subtarget); | |||
1112 | ||||
1113 | V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget); | |||
1114 | assert(Lane < (int)VT.getVectorNumElements() && "Unexpected lane!"); | |||
1115 | ||||
1116 | SDValue Mask, VL; | |||
1117 | std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); | |||
1118 | MVT XLenVT = Subtarget.getXLenVT(); | |||
1119 | SDValue Gather = | |||
1120 | DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, V1, | |||
1121 | DAG.getConstant(Lane, DL, XLenVT), Mask, VL); | |||
1122 | return convertFromScalableVector(VT, Gather, DAG, Subtarget); | |||
1123 | } | |||
1124 | } | |||
1125 | ||||
1126 | return SDValue(); | |||
1127 | } | |||
1128 | ||||
1129 | static SDValue getRVVFPExtendOrRound(SDValue Op, MVT VT, MVT ContainerVT, | |||
1130 | SDLoc DL, SelectionDAG &DAG, | |||
1131 | const RISCVSubtarget &Subtarget) { | |||
1132 | if (VT.isScalableVector()) | |||
1133 | return DAG.getFPExtendOrRound(Op, DL, VT); | |||
1134 | assert(VT.isFixedLengthVector() && | |||
1135 | "Unexpected value type for RVV FP extend/round lowering"); | |||
1136 | SDValue Mask, VL; | |||
1137 | std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); | |||
1138 | unsigned RVVOpc = ContainerVT.bitsGT(Op.getSimpleValueType()) | |||
1139 | ? RISCVISD::FP_EXTEND_VL | |||
1140 | : RISCVISD::FP_ROUND_VL; | |||
1141 | return DAG.getNode(RVVOpc, DL, ContainerVT, Op, Mask, VL); | |||
1142 | } | |||
1143 | ||||
1144 | SDValue RISCVTargetLowering::LowerOperation(SDValue Op, | |||
1145 | SelectionDAG &DAG) const { | |||
1146 | switch (Op.getOpcode()) { | |||
| ||||
1147 | default: | |||
1148 | report_fatal_error("unimplemented operand"); | |||
1149 | case ISD::GlobalAddress: | |||
1150 | return lowerGlobalAddress(Op, DAG); | |||
1151 | case ISD::BlockAddress: | |||
1152 | return lowerBlockAddress(Op, DAG); | |||
1153 | case ISD::ConstantPool: | |||
1154 | return lowerConstantPool(Op, DAG); | |||
1155 | case ISD::JumpTable: | |||
1156 | return lowerJumpTable(Op, DAG); | |||
1157 | case ISD::GlobalTLSAddress: | |||
1158 | return lowerGlobalTLSAddress(Op, DAG); | |||
1159 | case ISD::SELECT: | |||
1160 | return lowerSELECT(Op, DAG); | |||
1161 | case ISD::VASTART: | |||
1162 | return lowerVASTART(Op, DAG); | |||
1163 | case ISD::FRAMEADDR: | |||
1164 | return lowerFRAMEADDR(Op, DAG); | |||
1165 | case ISD::RETURNADDR: | |||
1166 | return lowerRETURNADDR(Op, DAG); | |||
1167 | case ISD::SHL_PARTS: | |||
1168 | return lowerShiftLeftParts(Op, DAG); | |||
1169 | case ISD::SRA_PARTS: | |||
1170 | return lowerShiftRightParts(Op, DAG, true); | |||
1171 | case ISD::SRL_PARTS: | |||
1172 | return lowerShiftRightParts(Op, DAG, false); | |||
1173 | case ISD::BITCAST: { | |||
1174 | SDValue Op0 = Op.getOperand(0); | |||
1175 | // We can handle fixed length vector bitcasts with a simple replacement | |||
1176 | // in isel. | |||
1177 | if (Op.getValueType().isFixedLengthVector()) { | |||
1178 | if (Op0.getValueType().isFixedLengthVector()) | |||
1179 | return Op; | |||
1180 | return SDValue(); | |||
1181 | } | |||
1182 | assert(((Subtarget.is64Bit() && Subtarget.hasStdExtF()) || | |||
1183 | Subtarget.hasStdExtZfh()) && | |||
1184 | "Unexpected custom legalisation"); | |||
1185 | SDLoc DL(Op); | |||
1186 | if (Op.getValueType() == MVT::f16 && Subtarget.hasStdExtZfh()) { | |||
1187 | if (Op0.getValueType() != MVT::i16) | |||
1188 | return SDValue(); | |||
1189 | SDValue NewOp0 = | |||
1190 | DAG.getNode(ISD::ANY_EXTEND, DL, Subtarget.getXLenVT(), Op0); | |||
1191 | SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0); | |||
1192 | return FPConv; | |||
1193 | } else if (Op.getValueType() == MVT::f32 && Subtarget.is64Bit() && | |||
1194 | Subtarget.hasStdExtF()) { | |||
1195 | if (Op0.getValueType() != MVT::i32) | |||
1196 | return SDValue(); | |||
1197 | SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0); | |||
1198 | SDValue FPConv = | |||
1199 | DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0); | |||
1200 | return FPConv; | |||
1201 | } | |||
1202 | return SDValue(); | |||
1203 | } | |||
1204 | case ISD::INTRINSIC_WO_CHAIN: | |||
1205 | return LowerINTRINSIC_WO_CHAIN(Op, DAG); | |||
1206 | case ISD::INTRINSIC_W_CHAIN: | |||
1207 | return LowerINTRINSIC_W_CHAIN(Op, DAG); | |||
1208 | case ISD::BSWAP: | |||
1209 | case ISD::BITREVERSE: { | |||
1210 | // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combining. | |||
1211 | assert(Subtarget.hasStdExtZbp() && "Unexpected custom legalisation"); | |||
1212 | MVT VT = Op.getSimpleValueType(); | |||
1213 | SDLoc DL(Op); | |||
1214 | // Start with the maximum immediate value which is the bitwidth - 1. | |||
1215 | unsigned Imm = VT.getSizeInBits() - 1; | |||
1216 | // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits. | |||
1217 | if (Op.getOpcode() == ISD::BSWAP) | |||
1218 | Imm &= ~0x7U; | |||
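| // e.g. for i64: BITREVERSE uses GREVI imm 63 and BSWAP uses imm 56 (63 & ~7); for i32: 31 and 24. | |||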
1219 | return DAG.getNode(RISCVISD::GREVI, DL, VT, Op.getOperand(0), | |||
1220 | DAG.getTargetConstant(Imm, DL, Subtarget.getXLenVT())); | |||
1221 | } | |||
1222 | case ISD::FSHL: | |||
1223 | case ISD::FSHR: { | |||
1224 | MVT VT = Op.getSimpleValueType(); | |||
1225 | assert(VT == Subtarget.getXLenVT() && "Unexpected custom legalization"); | |||
1226 | SDLoc DL(Op); | |||
1227 | // FSL/FSR take a log2(XLen)+1 bit shift amount but XLenVT FSHL/FSHR only | |||
1228 | // use log2(XLen) bits. Mask the shift amount accordingly. | |||
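| // e.g. on RV64 the shift amount is ANDed with 63, keeping its low log2(64) = 6 bits. | |||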
1229 | unsigned ShAmtWidth = Subtarget.getXLen() - 1; | |||
1230 | SDValue ShAmt = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(2), | |||
1231 | DAG.getConstant(ShAmtWidth, DL, VT)); | |||
1232 | unsigned Opc = Op.getOpcode() == ISD::FSHL ? RISCVISD::FSL : RISCVISD::FSR; | |||
1233 | return DAG.getNode(Opc, DL, VT, Op.getOperand(0), Op.getOperand(1), ShAmt); | |||
1234 | } | |||
1235 | case ISD::TRUNCATE: { | |||
1236 | SDLoc DL(Op); | |||
1237 | MVT VT = Op.getSimpleValueType(); | |||
1238 | // Only custom-lower vector truncates | |||
1239 | if (!VT.isVector()) | |||
1240 | return Op; | |||
1241 | ||||
1242 | // Truncates to mask types are handled differently | |||
1243 | if (VT.getVectorElementType() == MVT::i1) | |||
1244 | return lowerVectorMaskTrunc(Op, DAG); | |||
1245 | ||||
1246 | // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary | |||
1247 | // truncates as a series of "RISCVISD::TRUNCATE_VECTOR_VL" nodes which | |||
1248 | // truncate by one power of two at a time. | |||
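| // e.g. a v4i64 -> v4i8 truncate becomes three such nodes: i64 -> i32 -> i16 -> i8. | |||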
1249 | MVT DstEltVT = VT.getVectorElementType(); | |||
1250 | ||||
1251 | SDValue Src = Op.getOperand(0); | |||
1252 | MVT SrcVT = Src.getSimpleValueType(); | |||
1253 | MVT SrcEltVT = SrcVT.getVectorElementType(); | |||
1254 | ||||
1255 | assert(DstEltVT.bitsLT(SrcEltVT) && | |||
1256 | isPowerOf2_64(DstEltVT.getSizeInBits()) && | |||
1257 | isPowerOf2_64(SrcEltVT.getSizeInBits()) && | |||
1258 | "Unexpected vector truncate lowering"); | |||
1259 | ||||
1260 | MVT ContainerVT = SrcVT; | |||
1261 | if (SrcVT.isFixedLengthVector()) { | |||
1262 | ContainerVT = getContainerForFixedLengthVector(SrcVT); | |||
1263 | Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget); | |||
1264 | } | |||
1265 | ||||
1266 | SDValue Result = Src; | |||
1267 | SDValue Mask, VL; | |||
1268 | std::tie(Mask, VL) = | |||
1269 | getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget); | |||
1270 | LLVMContext &Context = *DAG.getContext(); | |||
1271 | const ElementCount Count = ContainerVT.getVectorElementCount(); | |||
1272 | do { | |||
1273 | SrcEltVT = MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2); | |||
1274 | EVT ResultVT = EVT::getVectorVT(Context, SrcEltVT, Count); | |||
1275 | Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, ResultVT, Result, | |||
1276 | Mask, VL); | |||
1277 | } while (SrcEltVT != DstEltVT); | |||
1278 | ||||
1279 | if (SrcVT.isFixedLengthVector()) | |||
1280 | Result = convertFromScalableVector(VT, Result, DAG, Subtarget); | |||
1281 | ||||
1282 | return Result; | |||
1283 | } | |||
1284 | case ISD::ANY_EXTEND: | |||
1285 | case ISD::ZERO_EXTEND: | |||
1286 | if (Op.getOperand(0).getValueType().isVector() && | |||
1287 | Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1) | |||
1288 | return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ 1); | |||
1289 | return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VZEXT_VL); | |||
1290 | case ISD::SIGN_EXTEND: | |||
1291 | if (Op.getOperand(0).getValueType().isVector() && | |||
1292 | Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1) | |||
1293 | return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ -1); | |||
1294 | return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VSEXT_VL); | |||
1295 | case ISD::SPLAT_VECTOR: | |||
1296 | return lowerSPLATVECTOR(Op, DAG); | |||
1297 | case ISD::INSERT_VECTOR_ELT: | |||
1298 | return lowerINSERT_VECTOR_ELT(Op, DAG); | |||
1299 | case ISD::EXTRACT_VECTOR_ELT: | |||
1300 | return lowerEXTRACT_VECTOR_ELT(Op, DAG); | |||
1301 | case ISD::VSCALE: { | |||
1302 | MVT VT = Op.getSimpleValueType(); | |||
1303 | SDLoc DL(Op); | |||
1304 | SDValue VLENB = DAG.getNode(RISCVISD::READ_VLENB, DL, VT); | |||
1305 | // We define our scalable vector types for lmul=1 to use a 64 bit known | |||
1306 | // minimum size. e.g. <vscale x 2 x i32>. VLENB is in bytes so we calculate | |||
1307 | // vscale as VLENB / 8. | |||
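| // e.g. with VLEN=128, VLENB is 16 and vscale = 16 >> 3 = 2. | |||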
1308 | SDValue VScale = DAG.getNode(ISD::SRL, DL, VT, VLENB, | |||
1309 | DAG.getConstant(3, DL, VT)); | |||
1310 | return DAG.getNode(ISD::MUL, DL, VT, VScale, Op.getOperand(0)); | |||
1311 | } | |||
1312 | case ISD::FP_EXTEND: { | |||
1313 | // RVV can only do fp_extend to types double the size of the source. We | |||
1314 | // custom-lower f16->f64 extensions to two hops of ISD::FP_EXTEND, going | |||
1315 | // via f32. | |||
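| // e.g. a v2f16 -> v2f64 extend is emitted as v2f16 -> v2f32 -> v2f64. | |||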
1316 | SDLoc DL(Op); | |||
1317 | MVT VT = Op.getSimpleValueType(); | |||
1318 | SDValue Src = Op.getOperand(0); | |||
1319 | MVT SrcVT = Src.getSimpleValueType(); | |||
1320 | ||||
1321 | // Prepare any fixed-length vector operands. | |||
1322 | MVT ContainerVT = VT; | |||
1323 | if (SrcVT.isFixedLengthVector()) { | |||
1324 | ContainerVT = getContainerForFixedLengthVector(VT); | |||
1325 | MVT SrcContainerVT = | |||
1326 | ContainerVT.changeVectorElementType(SrcVT.getVectorElementType()); | |||
1327 | Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget); | |||
1328 | } | |||
1329 | ||||
1330 | if (!VT.isVector() || VT.getVectorElementType() != MVT::f64 || | |||
1331 | SrcVT.getVectorElementType() != MVT::f16) { | |||
1332 | // For scalable vectors, we only need to close the gap between | |||
1333 | // vXf16->vXf64. | |||
1334 | if (!VT.isFixedLengthVector()) | |||
1335 | return Op; | |||
1336 | // For fixed-length vectors, lower the FP_EXTEND to a custom "VL" version. | |||
1337 | Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget); | |||
1338 | return convertFromScalableVector(VT, Src, DAG, Subtarget); | |||
1339 | } | |||
1340 | ||||
1341 | MVT InterVT = VT.changeVectorElementType(MVT::f32); | |||
1342 | MVT InterContainerVT = ContainerVT.changeVectorElementType(MVT::f32); | |||
1343 | SDValue IntermediateExtend = getRVVFPExtendOrRound( | |||
1344 | Src, InterVT, InterContainerVT, DL, DAG, Subtarget); | |||
1345 | ||||
1346 | SDValue Extend = getRVVFPExtendOrRound(IntermediateExtend, VT, ContainerVT, | |||
1347 | DL, DAG, Subtarget); | |||
1348 | if (VT.isFixedLengthVector()) | |||
1349 | return convertFromScalableVector(VT, Extend, DAG, Subtarget); | |||
1350 | return Extend; | |||
1351 | } | |||
1352 | case ISD::FP_ROUND: { | |||
1353 | // RVV can only do fp_round to types half the size of the source. We | |||
1354 | // custom-lower f64->f16 rounds via RVV's round-to-odd float | |||
1355 | // conversion instruction. | |||
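| // e.g. a v2f64 -> v2f16 round is emitted as v2f64 -> v2f32 (round-to-odd) -> v2f16, | |||
| // where the round-to-odd first step avoids double rounding. | |||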
1356 | SDLoc DL(Op); | |||
1357 | MVT VT = Op.getSimpleValueType(); | |||
1358 | SDValue Src = Op.getOperand(0); | |||
1359 | MVT SrcVT = Src.getSimpleValueType(); | |||
1360 | ||||
1361 | // Prepare any fixed-length vector operands. | |||
1362 | MVT ContainerVT = VT; | |||
1363 | if (VT.isFixedLengthVector()) { | |||
1364 | MVT SrcContainerVT = getContainerForFixedLengthVector(SrcVT); | |||
1365 | ContainerVT = | |||
1366 | SrcContainerVT.changeVectorElementType(VT.getVectorElementType()); | |||
1367 | Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget); | |||
1368 | } | |||
1369 | ||||
1370 | if (!VT.isVector() || VT.getVectorElementType() != MVT::f16 || | |||
1371 | SrcVT.getVectorElementType() != MVT::f64) { | |||
1372 | // For scalable vectors, we only need to close the gap between | |||
1373 | // vXf64<->vXf16. | |||
1374 | if (!VT.isFixedLengthVector()) | |||
1375 | return Op; | |||
1376 | // For fixed-length vectors, lower the FP_ROUND to a custom "VL" version. | |||
1377 | Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget); | |||
1378 | return convertFromScalableVector(VT, Src, DAG, Subtarget); | |||
1379 | } | |||
1380 | ||||
1381 | SDValue Mask, VL; | |||
1382 | std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); | |||
1383 | ||||
1384 | MVT InterVT = ContainerVT.changeVectorElementType(MVT::f32); | |||
1385 | SDValue IntermediateRound = | |||
1386 | DAG.getNode(RISCVISD::VFNCVT_ROD_VL, DL, InterVT, Src, Mask, VL); | |||
1387 | SDValue Round = getRVVFPExtendOrRound(IntermediateRound, VT, ContainerVT, | |||
1388 | DL, DAG, Subtarget); | |||
1389 | ||||
1390 | if (VT.isFixedLengthVector()) | |||
1391 | return convertFromScalableVector(VT, Round, DAG, Subtarget); | |||
1392 | return Round; | |||
1393 | } | |||
1394 | case ISD::FP_TO_SINT: | |||
1395 | case ISD::FP_TO_UINT: | |||
1396 | case ISD::SINT_TO_FP: | |||
1397 | case ISD::UINT_TO_FP: { | |||
1398 | // RVV can only do fp<->int conversions to types half/double the size of | |||
1399 | // the source. We custom-lower any conversions that do two hops into | |||
1400 | // sequences. | |||
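| // e.g. v4i8 -> v4f32 is sign/zero-extended to v4i32 and then converted, while | |||
| // v4f32 -> v4i8 is converted to v4i16 and then truncated. | |||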
1401 | MVT VT = Op.getSimpleValueType(); | |||
1402 | if (!VT.isVector()) | |||
1403 | return Op; | |||
1404 | SDLoc DL(Op); | |||
1405 | SDValue Src = Op.getOperand(0); | |||
1406 | MVT EltVT = VT.getVectorElementType(); | |||
1407 | MVT SrcVT = Src.getSimpleValueType(); | |||
1408 | MVT SrcEltVT = SrcVT.getVectorElementType(); | |||
1409 | unsigned EltSize = EltVT.getSizeInBits(); | |||
1410 | unsigned SrcEltSize = SrcEltVT.getSizeInBits(); | |||
1411 | assert(isPowerOf2_32(EltSize) && isPowerOf2_32(SrcEltSize) && | |||
1412 | "Unexpected vector element types"); | |||
1413 | ||||
1414 | bool IsInt2FP = SrcEltVT.isInteger(); | |||
1415 | // Widening conversions | |||
1416 | if (EltSize > SrcEltSize && (EltSize / SrcEltSize >= 4)) { | |||
1417 | if (IsInt2FP) { | |||
1418 | // Do a regular integer sign/zero extension then convert to float. | |||
1419 | MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(EltVT.getSizeInBits()), | |||
1420 | VT.getVectorElementCount()); | |||
1421 | unsigned ExtOpcode = Op.getOpcode() == ISD::UINT_TO_FP | |||
1422 | ? ISD::ZERO_EXTEND | |||
1423 | : ISD::SIGN_EXTEND; | |||
1424 | SDValue Ext = DAG.getNode(ExtOpcode, DL, IVecVT, Src); | |||
1425 | return DAG.getNode(Op.getOpcode(), DL, VT, Ext); | |||
1426 | } | |||
1427 | // FP2Int | |||
1428 | assert(SrcEltVT == MVT::f16 && "Unexpected FP_TO_[US]INT lowering"); | |||
1429 | // Do one doubling fp_extend then complete the operation by converting | |||
1430 | // to int. | |||
1431 | MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount()); | |||
1432 | SDValue FExt = DAG.getFPExtendOrRound(Src, DL, InterimFVT); | |||
1433 | return DAG.getNode(Op.getOpcode(), DL, VT, FExt); | |||
1434 | } | |||
1435 | ||||
1436 | // Narrowing conversions | |||
1437 | if (SrcEltSize > EltSize && (SrcEltSize / EltSize >= 4)) { | |||
1438 | if (IsInt2FP) { | |||
1439 | // One narrowing int_to_fp, then an fp_round. | |||
1440 | assert(EltVT == MVT::f16 && "Unexpected [US]_TO_FP lowering"); | |||
1441 | MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount()); | |||
1442 | SDValue Int2FP = DAG.getNode(Op.getOpcode(), DL, InterimFVT, Src); | |||
1443 | return DAG.getFPExtendOrRound(Int2FP, DL, VT); | |||
1444 | } | |||
1445 | // FP2Int | |||
1446 | // One narrowing fp_to_int, then truncate the integer. If the float isn't | |||
1447 | // representable by the integer, the result is poison. | |||
1448 | MVT IVecVT = | |||
1449 | MVT::getVectorVT(MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2), | |||
1450 | VT.getVectorElementCount()); | |||
1451 | SDValue FP2Int = DAG.getNode(Op.getOpcode(), DL, IVecVT, Src); | |||
1452 | return DAG.getNode(ISD::TRUNCATE, DL, VT, FP2Int); | |||
1453 | } | |||
1454 | ||||
1455 | // Scalable vectors can exit here. Patterns will handle equally-sized | |||
1456 | // conversions and halving/doubling ones. | |||
1457 | if (!VT.isFixedLengthVector()) | |||
1458 | return Op; | |||
1459 | ||||
1460 | // For fixed-length vectors we lower to a custom "VL" node. | |||
1461 | unsigned RVVOpc = 0; | |||
1462 | switch (Op.getOpcode()) { | |||
1463 | default: | |||
1464 | llvm_unreachable("Impossible opcode"); | |||
1465 | case ISD::FP_TO_SINT: | |||
1466 | RVVOpc = RISCVISD::FP_TO_SINT_VL; | |||
1467 | break; | |||
1468 | case ISD::FP_TO_UINT: | |||
1469 | RVVOpc = RISCVISD::FP_TO_UINT_VL; | |||
1470 | break; | |||
1471 | case ISD::SINT_TO_FP: | |||
1472 | RVVOpc = RISCVISD::SINT_TO_FP_VL; | |||
1473 | break; | |||
1474 | case ISD::UINT_TO_FP: | |||
1475 | RVVOpc = RISCVISD::UINT_TO_FP_VL; | |||
1476 | break; | |||
1477 | } | |||
1478 | ||||
1479 | MVT ContainerVT, SrcContainerVT; | |||
1480 | // Derive the reference container type from the larger vector type. | |||
1481 | if (SrcEltSize > EltSize) { | |||
1482 | SrcContainerVT = getContainerForFixedLengthVector(SrcVT); | |||
1483 | ContainerVT = | |||
1484 | SrcContainerVT.changeVectorElementType(VT.getVectorElementType()); | |||
1485 | } else { | |||
1486 | ContainerVT = getContainerForFixedLengthVector(VT); | |||
1487 | SrcContainerVT = ContainerVT.changeVectorElementType(SrcEltVT); | |||
1488 | } | |||
1489 | ||||
1490 | SDValue Mask, VL; | |||
1491 | std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); | |||
1492 | ||||
1493 | Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget); | |||
1494 | Src = DAG.getNode(RVVOpc, DL, ContainerVT, Src, Mask, VL); | |||
1495 | return convertFromScalableVector(VT, Src, DAG, Subtarget); | |||
1496 | } | |||
1497 | case ISD::VECREDUCE_ADD: | |||
1498 | case ISD::VECREDUCE_UMAX: | |||
1499 | case ISD::VECREDUCE_SMAX: | |||
1500 | case ISD::VECREDUCE_UMIN: | |||
1501 | case ISD::VECREDUCE_SMIN: | |||
1502 | case ISD::VECREDUCE_AND: | |||
1503 | case ISD::VECREDUCE_OR: | |||
1504 | case ISD::VECREDUCE_XOR: | |||
1505 | return lowerVECREDUCE(Op, DAG); | |||
1506 | case ISD::VECREDUCE_FADD: | |||
1507 | case ISD::VECREDUCE_SEQ_FADD: | |||
1508 | return lowerFPVECREDUCE(Op, DAG); | |||
1509 | case ISD::INSERT_SUBVECTOR: | |||
1510 | return lowerINSERT_SUBVECTOR(Op, DAG); | |||
1511 | case ISD::EXTRACT_SUBVECTOR: | |||
1512 | return lowerEXTRACT_SUBVECTOR(Op, DAG); | |||
1513 | case ISD::BUILD_VECTOR: | |||
1514 | return lowerBUILD_VECTOR(Op, DAG, Subtarget); | |||
1515 | case ISD::VECTOR_SHUFFLE: | |||
1516 | return lowerVECTOR_SHUFFLE(Op, DAG, Subtarget); | |||
1517 | case ISD::CONCAT_VECTORS: { | |||
1518 | // Split CONCAT_VECTORS into a series of INSERT_SUBVECTOR nodes. This is | |||
1519 | // better than going through the stack, as the default expansion does. | |||
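| // e.g. concatenating two v4i32 operands inserts them at element indices 0 and 4 of a v8i32 undef. | |||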
1520 | SDLoc DL(Op); | |||
1521 | MVT VT = Op.getSimpleValueType(); | |||
1522 | assert(VT.isFixedLengthVector() && "Unexpected CONCAT_VECTORS lowering"); | |||
1523 | unsigned NumOpElts = | |||
1524 | Op.getOperand(0).getSimpleValueType().getVectorNumElements(); | |||
1525 | SDValue Vec = DAG.getUNDEF(VT); | |||
1526 | for (const auto &OpIdx : enumerate(Op->ops())) | |||
1527 | Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, OpIdx.value(), | |||
1528 | DAG.getIntPtrConstant(OpIdx.index() * NumOpElts, DL)); | |||
1529 | return Vec; | |||
1530 | } | |||
1531 | case ISD::LOAD: | |||
1532 | return lowerFixedLengthVectorLoadToRVV(Op, DAG); | |||
1533 | case ISD::STORE: | |||
1534 | return lowerFixedLengthVectorStoreToRVV(Op, DAG); | |||
1535 | case ISD::SETCC: | |||
1536 | return lowerFixedLengthVectorSetccToRVV(Op, DAG); | |||
1537 | case ISD::ADD: | |||
1538 | return lowerToScalableOp(Op, DAG, RISCVISD::ADD_VL); | |||
1539 | case ISD::SUB: | |||
1540 | return lowerToScalableOp(Op, DAG, RISCVISD::SUB_VL); | |||
1541 | case ISD::MUL: | |||
1542 | return lowerToScalableOp(Op, DAG, RISCVISD::MUL_VL); | |||
1543 | case ISD::MULHS: | |||
1544 | return lowerToScalableOp(Op, DAG, RISCVISD::MULHS_VL); | |||
1545 | case ISD::MULHU: | |||
1546 | return lowerToScalableOp(Op, DAG, RISCVISD::MULHU_VL); | |||
1547 | case ISD::AND: | |||
1548 | return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMAND_VL, | |||
1549 | RISCVISD::AND_VL); | |||
1550 | case ISD::OR: | |||
1551 | return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMOR_VL, | |||
1552 | RISCVISD::OR_VL); | |||
1553 | case ISD::XOR: | |||
1554 | return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMXOR_VL, | |||
1555 | RISCVISD::XOR_VL); | |||
1556 | case ISD::SDIV: | |||
1557 | return lowerToScalableOp(Op, DAG, RISCVISD::SDIV_VL); | |||
1558 | case ISD::SREM: | |||
1559 | return lowerToScalableOp(Op, DAG, RISCVISD::SREM_VL); | |||
1560 | case ISD::UDIV: | |||
1561 | return lowerToScalableOp(Op, DAG, RISCVISD::UDIV_VL); | |||
1562 | case ISD::UREM: | |||
1563 | return lowerToScalableOp(Op, DAG, RISCVISD::UREM_VL); | |||
1564 | case ISD::SHL: | |||
1565 | return lowerToScalableOp(Op, DAG, RISCVISD::SHL_VL); | |||
1566 | case ISD::SRA: | |||
1567 | return lowerToScalableOp(Op, DAG, RISCVISD::SRA_VL); | |||
1568 | case ISD::SRL: | |||
1569 | return lowerToScalableOp(Op, DAG, RISCVISD::SRL_VL); | |||
1570 | case ISD::FADD: | |||
1571 | return lowerToScalableOp(Op, DAG, RISCVISD::FADD_VL); | |||
1572 | case ISD::FSUB: | |||
1573 | return lowerToScalableOp(Op, DAG, RISCVISD::FSUB_VL); | |||
1574 | case ISD::FMUL: | |||
1575 | return lowerToScalableOp(Op, DAG, RISCVISD::FMUL_VL); | |||
1576 | case ISD::FDIV: | |||
1577 | return lowerToScalableOp(Op, DAG, RISCVISD::FDIV_VL); | |||
1578 | case ISD::FNEG: | |||
1579 | return lowerToScalableOp(Op, DAG, RISCVISD::FNEG_VL); | |||
1580 | case ISD::FABS: | |||
1581 | return lowerToScalableOp(Op, DAG, RISCVISD::FABS_VL); | |||
1582 | case ISD::FSQRT: | |||
1583 | return lowerToScalableOp(Op, DAG, RISCVISD::FSQRT_VL); | |||
1584 | case ISD::FMA: | |||
1585 | return lowerToScalableOp(Op, DAG, RISCVISD::FMA_VL); | |||
1586 | case ISD::SMIN: | |||
1587 | return lowerToScalableOp(Op, DAG, RISCVISD::SMIN_VL); | |||
1588 | case ISD::SMAX: | |||
1589 | return lowerToScalableOp(Op, DAG, RISCVISD::SMAX_VL); | |||
1590 | case ISD::UMIN: | |||
1591 | return lowerToScalableOp(Op, DAG, RISCVISD::UMIN_VL); | |||
1592 | case ISD::UMAX: | |||
1593 | return lowerToScalableOp(Op, DAG, RISCVISD::UMAX_VL); | |||
1594 | case ISD::VSELECT: | |||
1595 | return lowerFixedLengthVectorSelectToRVV(Op, DAG); | |||
1596 | } | |||
1597 | } | |||
1598 | ||||
1599 | static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty, | |||
1600 | SelectionDAG &DAG, unsigned Flags) { | |||
1601 | return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags); | |||
1602 | } | |||
1603 | ||||
1604 | static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty, | |||
1605 | SelectionDAG &DAG, unsigned Flags) { | |||
1606 | return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(), | |||
1607 | Flags); | |||
1608 | } | |||
1609 | ||||
1610 | static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty, | |||
1611 | SelectionDAG &DAG, unsigned Flags) { | |||
1612 | return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(), | |||
1613 | N->getOffset(), Flags); | |||
1614 | } | |||
1615 | ||||
1616 | static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty, | |||
1617 | SelectionDAG &DAG, unsigned Flags) { | |||
1618 | return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags); | |||
1619 | } | |||
1620 | ||||
1621 | template <class NodeTy> | |||
1622 | SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG, | |||
1623 | bool IsLocal) const { | |||
1624 | SDLoc DL(N); | |||
1625 | EVT Ty = getPointerTy(DAG.getDataLayout()); | |||
1626 | ||||
1627 | if (isPositionIndependent()) { | |||
1628 | SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0); | |||
1629 | if (IsLocal) | |||
1630 | // Use PC-relative addressing to access the symbol. This generates the | |||
1631 | // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym)) | |||
1632 | // %pcrel_lo(auipc)). | |||
1633 | return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0); | |||
1634 | ||||
1635 | // Use PC-relative addressing to access the GOT for this symbol, then load | |||
1636 | // the address from the GOT. This generates the pattern (PseudoLA sym), | |||
1637 | // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))). | |||
1638 | return SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0); | |||
1639 | } | |||
1640 | ||||
1641 | switch (getTargetMachine().getCodeModel()) { | |||
1642 | default: | |||
1643 | report_fatal_error("Unsupported code model for lowering"); | |||
1644 | case CodeModel::Small: { | |||
1645 | // Generate a sequence for accessing addresses within the first 2 GiB of | |||
1646 | // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)). | |||
1647 | SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI); | |||
1648 | SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO); | |||
1649 | SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0); | |||
1650 | return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0); | |||
1651 | } | |||
1652 | case CodeModel::Medium: { | |||
1653 | // Generate a sequence for accessing addresses within any 2 GiB range of | |||
1654 | // the address space. This generates the pattern (PseudoLLA sym), which | |||
1655 | // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)). | |||
1656 | SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0); | |||
1657 | return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0); | |||
1658 | } | |||
1659 | } | |||
1660 | } | |||
1661 | ||||
1662 | SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op, | |||
1663 | SelectionDAG &DAG) const { | |||
1664 | SDLoc DL(Op); | |||
1665 | EVT Ty = Op.getValueType(); | |||
1666 | GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op); | |||
1667 | int64_t Offset = N->getOffset(); | |||
1668 | MVT XLenVT = Subtarget.getXLenVT(); | |||
1669 | ||||
1670 | const GlobalValue *GV = N->getGlobal(); | |||
1671 | bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV); | |||
1672 | SDValue Addr = getAddr(N, DAG, IsLocal); | |||
1673 | ||||
1674 | // In order to maximise the opportunity for common subexpression elimination, | |||
1675 | // emit a separate ADD node for the global address offset instead of folding | |||
1676 | // it in the global address node. Later peephole optimisations may choose to | |||
1677 | // fold it back in when profitable. | |||
1678 | if (Offset != 0) | |||
1679 | return DAG.getNode(ISD::ADD, DL, Ty, Addr, | |||
1680 | DAG.getConstant(Offset, DL, XLenVT)); | |||
1681 | return Addr; | |||
1682 | } | |||
1683 | ||||
1684 | SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op, | |||
1685 | SelectionDAG &DAG) const { | |||
1686 | BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op); | |||
1687 | ||||
1688 | return getAddr(N, DAG); | |||
1689 | } | |||
1690 | ||||
1691 | SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op, | |||
1692 | SelectionDAG &DAG) const { | |||
1693 | ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op); | |||
1694 | ||||
1695 | return getAddr(N, DAG); | |||
1696 | } | |||
1697 | ||||
1698 | SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op, | |||
1699 | SelectionDAG &DAG) const { | |||
1700 | JumpTableSDNode *N = cast<JumpTableSDNode>(Op); | |||
1701 | ||||
1702 | return getAddr(N, DAG); | |||
1703 | } | |||
1704 | ||||
1705 | SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N, | |||
1706 | SelectionDAG &DAG, | |||
1707 | bool UseGOT) const { | |||
1708 | SDLoc DL(N); | |||
1709 | EVT Ty = getPointerTy(DAG.getDataLayout()); | |||
1710 | const GlobalValue *GV = N->getGlobal(); | |||
1711 | MVT XLenVT = Subtarget.getXLenVT(); | |||
1712 | ||||
1713 | if (UseGOT) { | |||
1714 | // Use PC-relative addressing to access the GOT for this TLS symbol, then | |||
1715 | // load the address from the GOT and add the thread pointer. This generates | |||
1716 | // the pattern (PseudoLA_TLS_IE sym), which expands to | |||
1717 | // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)). | |||
1718 | SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0); | |||
1719 | SDValue Load = | |||
1720 | SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0); | |||
1721 | ||||
1722 | // Add the thread pointer. | |||
1723 | SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT); | |||
1724 | return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg); | |||
1725 | } | |||
1726 | ||||
1727 | // Generate a sequence for accessing the address relative to the thread | |||
1728 | // pointer, with the appropriate adjustment for the thread pointer offset. | |||
1729 | // This generates the pattern | |||
1730 | // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym)) | |||
1731 | SDValue AddrHi = | |||
1732 | DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI); | |||
1733 | SDValue AddrAdd = | |||
1734 | DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD); | |||
1735 | SDValue AddrLo = | |||
1736 | DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO); | |||
1737 | ||||
1738 | SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0); | |||
1739 | SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT); | |||
1740 | SDValue MNAdd = SDValue( | |||
1741 | DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd), | |||
1742 | 0); | |||
1743 | return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0); | |||
1744 | } | |||
1745 | ||||
1746 | SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N, | |||
1747 | SelectionDAG &DAG) const { | |||
1748 | SDLoc DL(N); | |||
1749 | EVT Ty = getPointerTy(DAG.getDataLayout()); | |||
1750 | IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits()); | |||
1751 | const GlobalValue *GV = N->getGlobal(); | |||
1752 | ||||
1753 | // Use a PC-relative addressing mode to access the global dynamic GOT address. | |||
1754 | // This generates the pattern (PseudoLA_TLS_GD sym), which expands to | |||
1755 | // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)). | |||
1756 | SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0); | |||
1757 | SDValue Load = | |||
1758 | SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0); | |||
1759 | ||||
1760 | // Prepare argument list to generate call. | |||
1761 | ArgListTy Args; | |||
1762 | ArgListEntry Entry; | |||
1763 | Entry.Node = Load; | |||
1764 | Entry.Ty = CallTy; | |||
1765 | Args.push_back(Entry); | |||
1766 | ||||
1767 | // Setup call to __tls_get_addr. | |||
1768 | TargetLowering::CallLoweringInfo CLI(DAG); | |||
1769 | CLI.setDebugLoc(DL) | |||
1770 | .setChain(DAG.getEntryNode()) | |||
1771 | .setLibCallee(CallingConv::C, CallTy, | |||
1772 | DAG.getExternalSymbol("__tls_get_addr", Ty), | |||
1773 | std::move(Args)); | |||
1774 | ||||
1775 | return LowerCallTo(CLI).first; | |||
1776 | } | |||
1777 | ||||
1778 | SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op, | |||
1779 | SelectionDAG &DAG) const { | |||
1780 | SDLoc DL(Op); | |||
1781 | EVT Ty = Op.getValueType(); | |||
1782 | GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op); | |||
1783 | int64_t Offset = N->getOffset(); | |||
1784 | MVT XLenVT = Subtarget.getXLenVT(); | |||
1785 | ||||
1786 | TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal()); | |||
1787 | ||||
1788 | if (DAG.getMachineFunction().getFunction().getCallingConv() == | |||
1789 | CallingConv::GHC) | |||
1790 | report_fatal_error("In GHC calling convention TLS is not supported"); | |||
1791 | ||||
1792 | SDValue Addr; | |||
1793 | switch (Model) { | |||
1794 | case TLSModel::LocalExec: | |||
1795 | Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false); | |||
1796 | break; | |||
1797 | case TLSModel::InitialExec: | |||
1798 | Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true); | |||
1799 | break; | |||
1800 | case TLSModel::LocalDynamic: | |||
1801 | case TLSModel::GeneralDynamic: | |||
1802 | Addr = getDynamicTLSAddr(N, DAG); | |||
1803 | break; | |||
1804 | } | |||
1805 | ||||
1806 | // In order to maximise the opportunity for common subexpression elimination, | |||
1807 | // emit a separate ADD node for the global address offset instead of folding | |||
1808 | // it in the global address node. Later peephole optimisations may choose to | |||
1809 | // fold it back in when profitable. | |||
1810 | if (Offset != 0) | |||
1811 | return DAG.getNode(ISD::ADD, DL, Ty, Addr, | |||
1812 | DAG.getConstant(Offset, DL, XLenVT)); | |||
1813 | return Addr; | |||
1814 | } | |||
1815 | ||||
1816 | SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const { | |||
1817 | SDValue CondV = Op.getOperand(0); | |||
1818 | SDValue TrueV = Op.getOperand(1); | |||
1819 | SDValue FalseV = Op.getOperand(2); | |||
1820 | SDLoc DL(Op); | |||
1821 | MVT XLenVT = Subtarget.getXLenVT(); | |||
1822 | ||||
1823 | // If the result type is XLenVT and CondV is the output of a SETCC node | |||
1824 | // which also operated on XLenVT inputs, then merge the SETCC node into the | |||
1825 | // lowered RISCVISD::SELECT_CC to take advantage of the integer | |||
1826 | // compare+branch instructions. i.e.: | |||
1827 | // (select (setcc lhs, rhs, cc), truev, falsev) | |||
1828 | // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev) | |||
1829 | if (Op.getSimpleValueType() == XLenVT && CondV.getOpcode() == ISD::SETCC && | |||
1830 | CondV.getOperand(0).getSimpleValueType() == XLenVT) { | |||
1831 | SDValue LHS = CondV.getOperand(0); | |||
1832 | SDValue RHS = CondV.getOperand(1); | |||
1833 | auto CC = cast<CondCodeSDNode>(CondV.getOperand(2)); | |||
1834 | ISD::CondCode CCVal = CC->get(); | |||
1835 | ||||
1836 | normaliseSetCC(LHS, RHS, CCVal); | |||
1837 | ||||
1838 | SDValue TargetCC = DAG.getConstant(CCVal, DL, XLenVT); | |||
1839 | SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV}; | |||
1840 | return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops); | |||
1841 | } | |||
1842 | ||||
1843 | // Otherwise: | |||
1844 | // (select condv, truev, falsev) | |||
1845 | // -> (riscvisd::select_cc condv, zero, setne, truev, falsev) | |||
1846 | SDValue Zero = DAG.getConstant(0, DL, XLenVT); | |||
1847 | SDValue SetNE = DAG.getConstant(ISD::SETNE, DL, XLenVT); | |||
1848 | ||||
1849 | SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV}; | |||
1850 | ||||
1851 | return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops); | |||
1852 | } | |||
1853 | ||||
1854 | SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const { | |||
1855 | MachineFunction &MF = DAG.getMachineFunction(); | |||
1856 | RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>(); | |||
1857 | ||||
1858 | SDLoc DL(Op); | |||
1859 | SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), | |||
1860 | getPointerTy(MF.getDataLayout())); | |||
1861 | ||||
1862 | // vastart just stores the address of the VarArgsFrameIndex slot into the | |||
1863 | // memory location argument. | |||
1864 | const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); | |||
1865 | return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1), | |||
1866 | MachinePointerInfo(SV)); | |||
1867 | } | |||
1868 | ||||
1869 | SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op, | |||
1870 | SelectionDAG &DAG) const { | |||
1871 | const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo(); | |||
1872 | MachineFunction &MF = DAG.getMachineFunction(); | |||
1873 | MachineFrameInfo &MFI = MF.getFrameInfo(); | |||
1874 | MFI.setFrameAddressIsTaken(true); | |||
1875 | Register FrameReg = RI.getFrameRegister(MF); | |||
1876 | int XLenInBytes = Subtarget.getXLen() / 8; | |||
1877 | ||||
1878 | EVT VT = Op.getValueType(); | |||
1879 | SDLoc DL(Op); | |||
1880 | SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT); | |||
1881 | unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); | |||
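| // Each level of depth follows the frame chain: the caller's frame pointer is spilled at FrameAddr - 2*XLenInBytes. | |||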
1882 | while (Depth--) { | |||
1883 | int Offset = -(XLenInBytes * 2); | |||
1884 | SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr, | |||
1885 | DAG.getIntPtrConstant(Offset, DL)); | |||
1886 | FrameAddr = | |||
1887 | DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo()); | |||
1888 | } | |||
1889 | return FrameAddr; | |||
1890 | } | |||
1891 | ||||
1892 | SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op, | |||
1893 | SelectionDAG &DAG) const { | |||
1894 | const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo(); | |||
1895 | MachineFunction &MF = DAG.getMachineFunction(); | |||
1896 | MachineFrameInfo &MFI = MF.getFrameInfo(); | |||
1897 | MFI.setReturnAddressIsTaken(true); | |||
1898 | MVT XLenVT = Subtarget.getXLenVT(); | |||
1899 | int XLenInBytes = Subtarget.getXLen() / 8; | |||
1900 | ||||
1901 | if (verifyReturnAddressArgumentIsConstant(Op, DAG)) | |||
1902 | return SDValue(); | |||
1903 | ||||
1904 | EVT VT = Op.getValueType(); | |||
1905 | SDLoc DL(Op); | |||
1906 | unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); | |||
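| // For non-zero depths, the return address of the requested frame was spilled at its frame address minus XLenInBytes. | |||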
1907 | if (Depth) { | |||
1908 | int Off = -XLenInBytes; | |||
1909 | SDValue FrameAddr = lowerFRAMEADDR(Op, DAG); | |||
1910 | SDValue Offset = DAG.getConstant(Off, DL, VT); | |||
1911 | return DAG.getLoad(VT, DL, DAG.getEntryNode(), | |||
1912 | DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset), | |||
1913 | MachinePointerInfo()); | |||
1914 | } | |||
1915 | ||||
1916 | // Return the value of the return address register, marking it an implicit | |||
1917 | // live-in. | |||
1918 | Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT)); | |||
1919 | return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT); | |||
1920 | } | |||
1921 | ||||
1922 | SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op, | |||
1923 | SelectionDAG &DAG) const { | |||
1924 | SDLoc DL(Op); | |||
1925 | SDValue Lo = Op.getOperand(0); | |||
1926 | SDValue Hi = Op.getOperand(1); | |||
1927 | SDValue Shamt = Op.getOperand(2); | |||
1928 | EVT VT = Lo.getValueType(); | |||
1929 | ||||
1930 | // if Shamt-XLEN < 0: // Shamt < XLEN | |||
1931 | // Lo = Lo << Shamt | |||
1932 | // Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 - Shamt)) | |||
1933 | // else: | |||
1934 | // Lo = 0 | |||
1935 | // Hi = Lo << (Shamt-XLEN) | |||
1936 | ||||
1937 | SDValue Zero = DAG.getConstant(0, DL, VT); | |||
1938 | SDValue One = DAG.getConstant(1, DL, VT); | |||
1939 | SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT); | |||
1940 | SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT); | |||
1941 | SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen); | |||
1942 | SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt); | |||
1943 | ||||
1944 | SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt); | |||
1945 | SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One); | |||
1946 | SDValue ShiftRightLo = | |||
1947 | DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt); | |||
1948 | SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt); | |||
1949 | SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo); | |||
1950 | SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen); | |||
1951 | ||||
1952 | SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT); | |||
1953 | ||||
1954 | Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero); | |||
1955 | Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse); | |||
1956 | ||||
1957 | SDValue Parts[2] = {Lo, Hi}; | |||
1958 | return DAG.getMergeValues(Parts, DL); | |||
1959 | } | |||
1960 | ||||
1961 | SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG, | |||
1962 | bool IsSRA) const { | |||
1963 | SDLoc DL(Op); | |||
1964 | SDValue Lo = Op.getOperand(0); | |||
1965 | SDValue Hi = Op.getOperand(1); | |||
1966 | SDValue Shamt = Op.getOperand(2); | |||
1967 | EVT VT = Lo.getValueType(); | |||
1968 | ||||
1969 | // SRA expansion: | |||
1970 | // if Shamt-XLEN < 0: // Shamt < XLEN | |||
1971 | // Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt)) | |||
1972 | // Hi = Hi >>s Shamt | |||
1973 | // else: | |||
1974 | // Lo = Hi >>s (Shamt-XLEN); | |||
1975 | // Hi = Hi >>s (XLEN-1) | |||
1976 | // | |||
1977 | // SRL expansion: | |||
1978 | // if Shamt-XLEN < 0: // Shamt < XLEN | |||
1979 | // Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt)) | |||
1980 | // Hi = Hi >>u Shamt | |||
1981 | // else: | |||
1982 | // Lo = Hi >>u (Shamt-XLEN); | |||
1983 | // Hi = 0; | |||
1984 | ||||
1985 | unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL; | |||
1986 | ||||
1987 | SDValue Zero = DAG.getConstant(0, DL, VT); | |||
1988 | SDValue One = DAG.getConstant(1, DL, VT); | |||
1989 | SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT); | |||
1990 | SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT); | |||
1991 | SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen); | |||
1992 | SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt); | |||
1993 | ||||
1994 | SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt); | |||
1995 | SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One); | |||
1996 | SDValue ShiftLeftHi = | |||
1997 | DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt); | |||
1998 | SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi); | |||
1999 | SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt); | |||
2000 | SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen); | |||
2001 | SDValue HiFalse = | |||
2002 | IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero; | |||
2003 | ||||
2004 | SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT); | |||
2005 | ||||
2006 | Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse); | |||
2007 | Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse); | |||
2008 | ||||
2009 | SDValue Parts[2] = {Lo, Hi}; | |||
2010 | return DAG.getMergeValues(Parts, DL); | |||
2011 | } | |||
2012 | ||||
2013 | // Custom-lower a SPLAT_VECTOR where XLEN<SEW, as the SEW element type is | |||
2014 | // illegal (currently only vXi64 RV32). | |||
2015 | // FIXME: We could also catch non-constant sign-extended i32 values and lower | |||
2016 | // them to SPLAT_VECTOR_I64 | |||
2017 | SDValue RISCVTargetLowering::lowerSPLATVECTOR(SDValue Op, | |||
2018 | SelectionDAG &DAG) const { | |||
2019 | SDLoc DL(Op); | |||
2020 | EVT VecVT = Op.getValueType(); | |||
2021 | assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 && | |||
2022 | "Unexpected SPLAT_VECTOR lowering"); | |||
2023 | SDValue SplatVal = Op.getOperand(0); | |||
2024 | ||||
2025 | // If we can prove that the value is a sign-extended 32-bit value, lower this | |||
2026 | // as a custom node in order to try and match RVV vector/scalar instructions. | |||
2027 | if (auto *CVal = dyn_cast<ConstantSDNode>(SplatVal)) { | |||
2028 | if (isInt<32>(CVal->getSExtValue())) | |||
2029 | return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, | |||
2030 | DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32)); | |||
2031 | } | |||
2032 | ||||
2033 | if (SplatVal.getOpcode() == ISD::SIGN_EXTEND && | |||
2034 | SplatVal.getOperand(0).getValueType() == MVT::i32) { | |||
2035 | return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, | |||
2036 | SplatVal.getOperand(0)); | |||
2037 | } | |||
2038 | ||||
2039 | // Else, on RV32 we lower an i64-element SPLAT_VECTOR thus, being careful not | |||
2040 | // to accidentally sign-extend the 32-bit halves to the e64 SEW: | |||
2041 | // vmv.v.x vX, hi | |||
2042 | // vsll.vx vX, vX, /*32*/ | |||
2043 | // vmv.v.x vY, lo | |||
2044 | // vsll.vx vY, vY, /*32*/ | |||
2045 | // vsrl.vx vY, vY, /*32*/ | |||
2046 | // vor.vv vX, vX, vY | |||
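| // i.e. splat hi and lo separately, clear the (sign-extended) upper half of the lo splat, and OR the results. | |||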
2047 | SDValue One = DAG.getConstant(1, DL, MVT::i32); | |||
2048 | SDValue Zero = DAG.getConstant(0, DL, MVT::i32); | |||
2049 | SDValue ThirtyTwoV = DAG.getConstant(32, DL, VecVT); | |||
2050 | SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, SplatVal, Zero); | |||
2051 | SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, SplatVal, One); | |||
2052 | ||||
2053 | Lo = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo); | |||
2054 | Lo = DAG.getNode(ISD::SHL, DL, VecVT, Lo, ThirtyTwoV); | |||
2055 | Lo = DAG.getNode(ISD::SRL, DL, VecVT, Lo, ThirtyTwoV); | |||
2056 | ||||
2057 | if (isNullConstant(Hi)) | |||
2058 | return Lo; | |||
2059 | ||||
2060 | Hi = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Hi); | |||
2061 | Hi = DAG.getNode(ISD::SHL, DL, VecVT, Hi, ThirtyTwoV); | |||
2062 | ||||
2063 | return DAG.getNode(ISD::OR, DL, VecVT, Lo, Hi); | |||
2064 | } | |||
2065 | ||||
2066 | // Custom-lower extensions from mask vectors by using a vselect either with 1 | |||
2067 | // for zero/any-extension or -1 for sign-extension: | |||
2068 | // (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0) | |||
2069 | // Note that any-extension is lowered identically to zero-extension. | |||
2070 | SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG, | |||
2071 | int64_t ExtTrueVal) const { | |||
2072 | SDLoc DL(Op); | |||
2073 | MVT VecVT = Op.getSimpleValueType(); | |||
2074 | SDValue Src = Op.getOperand(0); | |||
2075 | // Only custom-lower extensions from mask types | |||
2076 | assert(Src.getValueType().isVector() && | |||
2077 | Src.getValueType().getVectorElementType() == MVT::i1); | |||
2078 | ||||
2079 | MVT XLenVT = Subtarget.getXLenVT(); | |||
2080 | SDValue SplatZero = DAG.getConstant(0, DL, XLenVT); | |||
2081 | SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, XLenVT); | |||
2082 | ||||
2083 | if (VecVT.isScalableVector()) { | |||
2084 | // Be careful not to introduce illegal scalar types at this stage, and be | |||
2085 | // careful also about splatting constants as on RV32, vXi64 SPLAT_VECTOR is | |||
2086 | // illegal and must be expanded. Since we know that the constants are | |||
2087 | // sign-extended 32-bit values, we use SPLAT_VECTOR_I64 directly. | |||
2088 | bool IsRV32E64 = | |||
2089 | !Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64; | |||
2090 | ||||
2091 | if (!IsRV32E64) { | |||
2092 | SplatZero = DAG.getSplatVector(VecVT, DL, SplatZero); | |||
2093 | SplatTrueVal = DAG.getSplatVector(VecVT, DL, SplatTrueVal); | |||
2094 | } else { | |||
2095 | SplatZero = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatZero); | |||
2096 | SplatTrueVal = | |||
2097 | DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatTrueVal); | |||
2098 | } | |||
2099 | ||||
2100 | return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero); | |||
2101 | } | |||
2102 | ||||
2103 | MVT ContainerVT = getContainerForFixedLengthVector(VecVT); | |||
2104 | MVT I1ContainerVT = | |||
2105 | MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount()); | |||
2106 | ||||
2107 | SDValue CC = convertToScalableVector(I1ContainerVT, Src, DAG, Subtarget); | |||
2108 | ||||
2109 | SDValue Mask, VL; | |||
2110 | std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget); | |||
2111 | ||||
2112 | SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero, VL); | |||
2113 | SplatTrueVal = | |||
2114 | DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatTrueVal, VL); | |||
2115 | SDValue Select = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, | |||
2116 | SplatTrueVal, SplatZero, VL); | |||
2117 | ||||
2118 | return convertFromScalableVector(VecVT, Select, DAG, Subtarget); | |||
2119 | } | |||
2120 | ||||
2121 | SDValue RISCVTargetLowering::lowerFixedLengthVectorExtendToRVV( | |||
2122 | SDValue Op, SelectionDAG &DAG, unsigned ExtendOpc) const { | |||
2123 | MVT ExtVT = Op.getSimpleValueType(); | |||
2124 | // Only custom-lower extensions from fixed-length vector types. | |||
2125 | if (!ExtVT.isFixedLengthVector()) | |||
2126 | return Op; | |||
2127 | MVT VT = Op.getOperand(0).getSimpleValueType(); | |||
2128 | // Grab the canonical container type for the extended type. Infer the smaller | |||
2129 | // type from that to ensure the same number of vector elements, as we know | |||
2130 | // the LMUL will be sufficient to hold the smaller type. | |||
2131 | MVT ContainerExtVT = getContainerForFixedLengthVector(ExtVT); | |||
2132 | // Build the container type for the narrower source type manually, so that it | |||
2133 | // has the same number of vector elements as the extended container type. | |||
2134 | MVT ContainerVT = MVT::getVectorVT(VT.getVectorElementType(), | |||
2135 | ContainerExtVT.getVectorElementCount()); | |||
2136 | ||||
2137 | SDValue Op1 = | |||
2138 | convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget); | |||
2139 | ||||
2140 | SDLoc DL(Op); | |||
2141 | SDValue Mask, VL; | |||
2142 | std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); | |||
2143 | ||||
2144 | SDValue Ext = DAG.getNode(ExtendOpc, DL, ContainerExtVT, Op1, Mask, VL); | |||
2145 | ||||
2146 | return convertFromScalableVector(ExtVT, Ext, DAG, Subtarget); | |||
2147 | } | |||
2148 | ||||
2149 | // Custom-lower truncations from vectors to mask vectors by using a mask and a | |||
2150 | // setcc operation: | |||
2151 | // (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne) | |||
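     | // For example, %m = trunc <vscale x 4 x i32> %v to <vscale x 4 x i1> is | |||
     | // expected to select to roughly (illustrative registers): | |||
     | //   vand.vi  v8, v8, 1 | |||
     | //   vmsne.vi v0, v8, 0 | |||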
2152 | SDValue RISCVTargetLowering::lowerVectorMaskTrunc(SDValue Op, | |||
2153 | SelectionDAG &DAG) const { | |||
2154 | SDLoc DL(Op); | |||
2155 | EVT MaskVT = Op.getValueType(); | |||
2156 | // Only expect to custom-lower truncations to mask types | |||
2157 | assert(MaskVT.isVector() && MaskVT.getVectorElementType() == MVT::i1 && | |||
2158 | "Unexpected type for vector mask lowering"); | |||
2159 | SDValue Src = Op.getOperand(0); | |||
2160 | MVT VecVT = Src.getSimpleValueType(); | |||
2161 | ||||
2162 | // If this is a fixed vector, we need to convert it to a scalable vector. | |||
2163 | MVT ContainerVT = VecVT; | |||
2164 | if (VecVT.isFixedLengthVector()) { | |||
2165 | ContainerVT = getContainerForFixedLengthVector(VecVT); | |||
2166 | Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget); | |||
2167 | } | |||
2168 | ||||
2169 | SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT()); | |||
2170 | SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT()); | |||
2171 | ||||
2172 | SplatOne = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatOne); | |||
2173 | SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero); | |||
2174 | ||||
2175 | if (VecVT.isScalableVector()) { | |||
2176 | SDValue Trunc = DAG.getNode(ISD::AND, DL, VecVT, Src, SplatOne); | |||
2177 | return DAG.getSetCC(DL, MaskVT, Trunc, SplatZero, ISD::SETNE); | |||
2178 | } | |||
2179 | ||||
2180 | SDValue Mask, VL; | |||
2181 | std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget); | |||
2182 | ||||
2183 | MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1); | |||
2184 | SDValue Trunc = | |||
2185 | DAG.getNode(RISCVISD::AND_VL, DL, ContainerVT, Src, SplatOne, Mask, VL); | |||
2186 | Trunc = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskContainerVT, Trunc, SplatZero, | |||
2187 | DAG.getCondCode(ISD::SETNE), Mask, VL); | |||
2188 | return convertFromScalableVector(MaskVT, Trunc, DAG, Subtarget); | |||
2189 | } | |||
2190 | ||||
2191 | SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op, | |||
2192 | SelectionDAG &DAG) const { | |||
2193 | SDLoc DL(Op); | |||
2194 | MVT VecVT = Op.getSimpleValueType(); | |||
2195 | SDValue Vec = Op.getOperand(0); | |||
2196 | SDValue Val = Op.getOperand(1); | |||
2197 | SDValue Idx = Op.getOperand(2); | |||
2198 | ||||
2199 | MVT ContainerVT = VecVT; | |||
2200 | // If the operand is a fixed-length vector, convert to a scalable one. | |||
2201 | if (VecVT.isFixedLengthVector()) { | |||
2202 | ContainerVT = getContainerForFixedLengthVector(VecVT); | |||
2203 | Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget); | |||
2204 | } | |||
2205 | ||||
2206 | SDValue Mask, VL; | |||
2207 | std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget); | |||
2208 | ||||
2209 | // Custom-legalize INSERT_VECTOR_ELT where XLEN>=SEW, so that the vector is | |||
2210 | // first slid down into position, the value is inserted into the first | |||
2211 | // position, and the vector is slid back up. We do this to simplify patterns. | |||
2212 | // (slideup vec, (insertelt (slidedown impdef, vec, idx), val, 0), idx), | |||
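     | // For a non-zero index this is expected to select to roughly the following | |||
     | // (registers illustrative only; vsetvli setup omitted): | |||
     | //   vslidedown.vx v9, v8, a1 | |||
     | //   vmv.s.x       v9, a0 | |||
     | //   vslideup.vx   v8, v9, a1 | |||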
2213 | if (Subtarget.is64Bit() || Val.getValueType() != MVT::i64) { | |||
2214 | if (isNullConstant(Idx)) | |||
2215 | return DAG.getNode(RISCVISD::VMV_S_XF_VL, DL, ContainerVT, Vec, Val, VL); | |||
2216 | SDValue Slidedown = | |||
2217 | DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT, | |||
2218 | DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL); | |||
2219 | SDValue InsertElt0 = | |||
2220 | DAG.getNode(RISCVISD::VMV_S_XF_VL, DL, ContainerVT, Slidedown, Val, VL); | |||
2221 | return DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec, InsertElt0, | |||
2222 | Idx, Mask, VL); | |||
2223 | } | |||
2224 | ||||
2225 | // Custom-legalize INSERT_VECTOR_ELT where XLEN<SEW, as the SEW element type | |||
2226 | // is illegal (currently only vXi64 RV32). | |||
2227 | // Since there is no easy way of getting a single element into a vector when | |||
2228 | // XLEN<SEW, we lower the operation to the following sequence: | |||
2229 | // splat vVal, rVal | |||
2230 | // vid.v vVid | |||
2231 | // vmseq.vx mMask, vVid, rIdx | |||
2232 | // vmerge.vvm vDest, vSrc, vVal, mMask | |||
2233 | // This essentially merges the original vector with the inserted element by | |||
2234 | // using a mask whose only set bit is that corresponding to the insert | |||
2235 | // index. | |||
2236 | SDValue SplattedVal = DAG.getSplatVector(ContainerVT, DL, Val); | |||
2237 | SDValue SplattedIdx = | |||
2238 | DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, Idx, VL); | |||
2239 | ||||
2240 | SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, ContainerVT, Mask, VL); | |||
2241 | auto SetCCVT = | |||
2242 | getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), ContainerVT); | |||
2243 | SDValue SelectCond = | |||
2244 | DAG.getNode(RISCVISD::SETCC_VL, DL, SetCCVT, VID, SplattedIdx, | |||
2245 | DAG.getCondCode(ISD::SETEQ), Mask, VL); | |||
2246 | SDValue Select = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, | |||
2247 | SelectCond, SplattedVal, Vec, VL); | |||
2248 | if (!VecVT.isFixedLengthVector()) | |||
2249 | return Select; | |||
2250 | return convertFromScalableVector(VecVT, Select, DAG, Subtarget); | |||
2251 | } | |||
2252 | ||||
2253 | // Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then | |||
2254 | // extract the first element: (extractelt (slidedown vec, idx), 0). For integer | |||
2255 | // types this is done using VMV_X_S to allow us to glean information about the | |||
2256 | // sign bits of the result. | |||
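     | // For example, extracting the element at index a0 from an integer vector is | |||
     | // expected to select to roughly (illustrative registers; vsetvli omitted): | |||
     | //   vslidedown.vx v8, v8, a0 | |||
     | //   vmv.x.s       a0, v8 | |||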
2257 | SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op, | |||
2258 | SelectionDAG &DAG) const { | |||
2259 | SDLoc DL(Op); | |||
2260 | SDValue Idx = Op.getOperand(1); | |||
2261 | SDValue Vec = Op.getOperand(0); | |||
2262 | EVT EltVT = Op.getValueType(); | |||
2263 | MVT VecVT = Vec.getSimpleValueType(); | |||
2264 | MVT XLenVT = Subtarget.getXLenVT(); | |||
2265 | ||||
2266 | // If this is a fixed vector, we need to convert it to a scalable vector. | |||
2267 | MVT ContainerVT = VecVT; | |||
2268 | if (VecVT.isFixedLengthVector()) { | |||
2269 | ContainerVT = getContainerForFixedLengthVector(VecVT); | |||
2270 | Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget); | |||
2271 | } | |||
2272 | ||||
2273 | // If the index is 0, the vector is already in the right position. | |||
2274 | if (!isNullConstant(Idx)) { | |||
2275 | // Use a VL of 1 to avoid processing more elements than we need. | |||
2276 | SDValue VL = DAG.getConstant(1, DL, XLenVT); | |||
2277 | MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount()); | |||
2278 | SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL); | |||
2279 | Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT, | |||
2280 | DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL); | |||
2281 | } | |||
2282 | ||||
2283 | if (!EltVT.isInteger()) { | |||
2284 | // Floating-point extracts are handled in TableGen. | |||
2285 | return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, | |||
2286 | DAG.getConstant(0, DL, XLenVT)); | |||
2287 | } | |||
2288 | ||||
2289 | SDValue Elt0 = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec); | |||
2290 | return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Elt0); | |||
2291 | } | |||
2292 | ||||
2293 | SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, | |||
2294 | SelectionDAG &DAG) const { | |||
2295 | unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); | |||
2296 | SDLoc DL(Op); | |||
2297 | ||||
2298 | if (Subtarget.hasStdExtV()) { | |||
2299 | // Some RVV intrinsics may claim that they want an integer operand to be | |||
2300 | // extended. | |||
2301 | if (const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II = | |||
2302 | RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo)) { | |||
2303 | if (II->ExtendedOperand) { | |||
2304 | assert(II->ExtendedOperand < Op.getNumOperands()); | |||
2305 | SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end()); | |||
2306 | SDValue &ScalarOp = Operands[II->ExtendedOperand]; | |||
2307 | EVT OpVT = ScalarOp.getValueType(); | |||
2308 | if (OpVT == MVT::i8 || OpVT == MVT::i16 || | |||
2309 | (OpVT == MVT::i32 && Subtarget.is64Bit())) { | |||
2310 | // If the operand is a constant, sign extend to increase our chances | |||
2311 | // of being able to use a .vi instruction. ANY_EXTEND would become a | |||
2312 | // zero extend and the simm5 check in isel would fail. | |||
2313 | // FIXME: Should we ignore the upper bits in isel instead? | |||
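     | // For example, an i8 constant -1 sign-extends to -1 in XLenVT and still | |||
     | // fits simm5, so isel can pick the .vi form; any-extending it would yield | |||
     | // 255, which fails the simm5 check and forces the .vx form. | |||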
2314 | unsigned ExtOpc = isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND | |||
2315 | : ISD::ANY_EXTEND; | |||
2316 | ScalarOp = DAG.getNode(ExtOpc, DL, Subtarget.getXLenVT(), ScalarOp); | |||
2317 | return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(), | |||
2318 | Operands); | |||
2319 | } | |||
2320 | } | |||
2321 | } | |||
2322 | } | |||
2323 | ||||
2324 | switch (IntNo) { | |||
2325 | default: | |||
2326 | return SDValue(); // Don't custom lower most intrinsics. | |||
2327 | case Intrinsic::thread_pointer: { | |||
2328 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); | |||
2329 | return DAG.getRegister(RISCV::X4, PtrVT); | |||
2330 | } | |||
2331 | case Intrinsic::riscv_vmv_x_s: | |||
2332 | assert(Op.getValueType() == Subtarget.getXLenVT() && "Unexpected VT!"); | |||
2333 | return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(), | |||
2334 | Op.getOperand(1)); | |||
2335 | case Intrinsic::riscv_vmv_v_x: { | |||
2336 | SDValue Scalar = DAG.getNode(ISD::ANY_EXTEND, DL, Subtarget.getXLenVT(), | |||
2337 | Op.getOperand(1)); | |||
2338 | return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, Op.getValueType(), | |||
2339 | Scalar, Op.getOperand(2)); | |||
2340 | } | |||
2341 | case Intrinsic::riscv_vfmv_v_f: | |||
2342 | return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, Op.getValueType(), | |||
2343 | Op.getOperand(1), Op.getOperand(2)); | |||
2344 | } | |||
2345 | } | |||
2346 | ||||
2347 | SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op, | |||
2348 | SelectionDAG &DAG) const { | |||
2349 | unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); | |||
2350 | SDLoc DL(Op); | |||
2351 | ||||
2352 | if (Subtarget.hasStdExtV()) { | |||
2353 | // Some RVV intrinsics may claim that they want an integer operand to be | |||
2354 | // extended. | |||
2355 | if (const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II = | |||
2356 | RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo)) { | |||
2357 | if (II->ExtendedOperand) { | |||
2358 | // The operands start from the second argument in INTRINSIC_W_CHAIN. | |||
2359 | unsigned ExtendOp = II->ExtendedOperand + 1; | |||
2360 | assert(ExtendOp < Op.getNumOperands()); | |||
2361 | SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end()); | |||
2362 | SDValue &ScalarOp = Operands[ExtendOp]; | |||
2363 | EVT OpVT = ScalarOp.getValueType(); | |||
2364 | if (OpVT == MVT::i8 || OpVT == MVT::i16 || | |||
2365 | (OpVT == MVT::i32 && Subtarget.is64Bit())) { | |||
2366 | // If the operand is a constant, sign extend to increase our chances | |||
2367 | // of being able to use a .vi instruction. ANY_EXTEND would become a | |||
2368 | // zero extend and the simm5 check in isel would fail. | |||
2369 | // FIXME: Should we ignore the upper bits in isel instead? | |||
2370 | unsigned ExtOpc = isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND | |||
2371 | : ISD::ANY_EXTEND; | |||
2372 | ScalarOp = DAG.getNode(ExtOpc, DL, Subtarget.getXLenVT(), ScalarOp); | |||
2373 | return DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, Op->getVTList(), | |||
2374 | Operands); | |||
2375 | } | |||
2376 | } | |||
2377 | } | |||
2378 | } | |||
2379 | ||||
2380 | return SDValue(); // Don't custom lower most intrinsics. | |||
2381 | } | |||
2382 | ||||
2383 | static MVT getLMUL1VT(MVT VT) { | |||
2384 | assert(VT.getVectorElementType().getSizeInBits() <= 64 && | |||
2385 | "Unexpected vector MVT"); | |||
2386 | return MVT::getScalableVectorVT( | |||
2387 | VT.getVectorElementType(), | |||
2388 | RISCV::RVVBitsPerBlock / VT.getVectorElementType().getSizeInBits()); | |||
2389 | } | |||
2390 | ||||
2391 | static std::pair<unsigned, uint64_t> | |||
2392 | getRVVReductionOpAndIdentityVal(unsigned ISDOpcode, unsigned EltSizeBits) { | |||
2393 | switch (ISDOpcode) { | |||
2394 | default: | |||
2395 | llvm_unreachable("Unhandled reduction")::llvm::llvm_unreachable_internal("Unhandled reduction", "/build/llvm-toolchain-snapshot-13~++20210308111132+66e3a4abe99c/llvm/lib/Target/RISCV/RISCVISelLowering.cpp" , 2395); | |||
2396 | case ISD::VECREDUCE_ADD: | |||
2397 | return {RISCVISD::VECREDUCE_ADD, 0}; | |||
2398 | case ISD::VECREDUCE_UMAX: | |||
2399 | return {RISCVISD::VECREDUCE_UMAX, 0}; | |||
2400 | case ISD::VECREDUCE_SMAX: | |||
2401 | return {RISCVISD::VECREDUCE_SMAX, minIntN(EltSizeBits)}; | |||
2402 | case ISD::VECREDUCE_UMIN: | |||
2403 | return {RISCVISD::VECREDUCE_UMIN, maxUIntN(EltSizeBits)}; | |||
2404 | case ISD::VECREDUCE_SMIN: | |||
2405 | return {RISCVISD::VECREDUCE_SMIN, maxIntN(EltSizeBits)}; | |||
2406 | case ISD::VECREDUCE_AND: | |||
2407 | return {RISCVISD::VECREDUCE_AND, -1}; | |||
2408 | case ISD::VECREDUCE_OR: | |||
2409 | return {RISCVISD::VECREDUCE_OR, 0}; | |||
2410 | case ISD::VECREDUCE_XOR: | |||
2411 | return {RISCVISD::VECREDUCE_XOR, 0}; | |||
2412 | } | |||
2413 | } | |||
2414 | ||||
2415 | // Take a (supported) standard ISD reduction opcode and transform it to a RISCV | |||
2416 | // reduction opcode. Note that this returns a vector type, which must be | |||
2417 | // further processed to access the scalar result in element 0. | |||
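     | // For example, a vecreduce_add of an i32 vector splats the identity 0 into | |||
     | // an LMUL=1 register and is expected to select to roughly (illustrative | |||
     | // registers): | |||
     | //   vmv.v.i    v9, 0 | |||
     | //   vredsum.vs v9, v8, v9 | |||
     | //   vmv.x.s    a0, v9 | |||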
2418 | SDValue RISCVTargetLowering::lowerVECREDUCE(SDValue Op, | |||
2419 | SelectionDAG &DAG) const { | |||
2420 | SDLoc DL(Op); | |||
2421 | assert(Op.getValueType().isSimple() && | |||
2422 | Op.getOperand(0).getValueType().isSimple() && | |||
2423 | "Unexpected vector-reduce lowering"); | |||
2424 | MVT VecVT = Op.getOperand(0).getSimpleValueType(); | |||
2425 | MVT VecEltVT = VecVT.getVectorElementType(); | |||
2426 | unsigned RVVOpcode; | |||
2427 | uint64_t IdentityVal; | |||
2428 | std::tie(RVVOpcode, IdentityVal) = | |||
2429 | getRVVReductionOpAndIdentityVal(Op.getOpcode(), VecEltVT.getSizeInBits()); | |||
2430 | MVT M1VT = getLMUL1VT(VecVT); | |||
2431 | SDValue IdentitySplat = | |||
2432 | DAG.getSplatVector(M1VT, DL, DAG.getConstant(IdentityVal, DL, VecEltVT)); | |||
2433 | SDValue Reduction = | |||
2434 | DAG.getNode(RVVOpcode, DL, M1VT, Op.getOperand(0), IdentitySplat); | |||
2435 | SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction, | |||
2436 | DAG.getConstant(0, DL, Subtarget.getXLenVT())); | |||
2437 | return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType()); | |||
2438 | } | |||
2439 | ||||
2440 | // Given a reduction op, this function returns the matching reduction opcode, | |||
2441 | // the vector SDValue and the scalar SDValue required to lower this to a | |||
2442 | // RISCVISD node. | |||
2443 | static std::tuple<unsigned, SDValue, SDValue> | |||
2444 | getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT) { | |||
2445 | SDLoc DL(Op); | |||
2446 | switch (Op.getOpcode()) { | |||
2447 | default: | |||
2448 | llvm_unreachable("Unhandled reduction"); | |||
2449 | case ISD::VECREDUCE_FADD: | |||
2450 | return std::make_tuple(RISCVISD::VECREDUCE_FADD, Op.getOperand(0), | |||
2451 | DAG.getConstantFP(0.0, DL, EltVT)); | |||
2452 | case ISD::VECREDUCE_SEQ_FADD: | |||
2453 | return std::make_tuple(RISCVISD::VECREDUCE_SEQ_FADD, Op.getOperand(1), | |||
2454 | Op.getOperand(0)); | |||
2455 | } | |||
2456 | } | |||
2457 | ||||
2458 | SDValue RISCVTargetLowering::lowerFPVECREDUCE(SDValue Op, | |||
2459 | SelectionDAG &DAG) const { | |||
2460 | SDLoc DL(Op); | |||
2461 | MVT VecEltVT = Op.getSimpleValueType(); | |||
2462 | ||||
2463 | unsigned RVVOpcode; | |||
2464 | SDValue VectorVal, ScalarVal; | |||
2465 | std::tie(RVVOpcode, VectorVal, ScalarVal) = | |||
2466 | getRVVFPReductionOpAndOperands(Op, DAG, VecEltVT); | |||
2467 | ||||
2468 | MVT M1VT = getLMUL1VT(VectorVal.getSimpleValueType()); | |||
2469 | SDValue ScalarSplat = DAG.getSplatVector(M1VT, DL, ScalarVal); | |||
2470 | SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, VectorVal, ScalarSplat); | |||
2471 | return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction, | |||
2472 | DAG.getConstant(0, DL, Subtarget.getXLenVT())); | |||
2473 | } | |||
2474 | ||||
2475 | SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op, | |||
2476 | SelectionDAG &DAG) const { | |||
2477 | SDValue Vec = Op.getOperand(0); | |||
2478 | SDValue SubVec = Op.getOperand(1); | |||
2479 | MVT VecVT = Vec.getSimpleValueType(); | |||
2480 | MVT SubVecVT = SubVec.getSimpleValueType(); | |||
2481 | ||||
2482 | SDLoc DL(Op); | |||
2483 | MVT XLenVT = Subtarget.getXLenVT(); | |||
2484 | unsigned OrigIdx = Op.getConstantOperandVal(2); | |||
2485 | const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo(); | |||
2486 | ||||
2487 | // We don't have the ability to slide mask vectors up indexed by their i1 | |||
2488 | // elements; the smallest we can do is i8. Often we are able to bitcast to | |||
2489 | // equivalent i8 vectors. Note that when inserting a fixed-length vector | |||
2490 | // into a scalable one, we might not necessarily have enough scalable | |||
2491 | // elements to safely divide by 8: nxv1i1 = insert nxv1i1, v4i1 is valid. | |||
2492 | if (SubVecVT.getVectorElementType() == MVT::i1 && | |||
2493 | (OrigIdx != 0 || !Vec.isUndef())) { | |||
2494 | if (VecVT.getVectorMinNumElements() >= 8 && | |||
2495 | SubVecVT.getVectorMinNumElements() >= 8) { | |||
2496 | assert(OrigIdx % 8 == 0 && "Invalid index"); | |||
2497 | assert(VecVT.getVectorMinNumElements() % 8 == 0 && | |||
2498 | SubVecVT.getVectorMinNumElements() % 8 == 0 && | |||
2499 | "Unexpected mask vector lowering"); | |||
2500 | OrigIdx /= 8; | |||
2501 | SubVecVT = | |||
2502 | MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8, | |||
2503 | SubVecVT.isScalableVector()); | |||
2504 | VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8, | |||
2505 | VecVT.isScalableVector()); | |||
2506 | Vec = DAG.getBitcast(VecVT, Vec); | |||
2507 | SubVec = DAG.getBitcast(SubVecVT, SubVec); | |||
2508 | } else { | |||
2509 | // We can't slide this mask vector up indexed by its i1 elements. | |||
2510 | // This poses a problem when we wish to insert a scalable vector which | |||
2511 | // can't be re-expressed as a larger type. Just choose the slow path and | |||
2512 | // extend to a larger type, then truncate back down. | |||
2513 | MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8); | |||
2514 | MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8); | |||
2515 | Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec); | |||
2516 | SubVec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtSubVecVT, SubVec); | |||
2517 | Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ExtVecVT, Vec, SubVec, | |||
2518 | Op.getOperand(2)); | |||
2519 | SDValue SplatZero = DAG.getConstant(0, DL, ExtVecVT); | |||
2520 | return DAG.getSetCC(DL, VecVT, Vec, SplatZero, ISD::SETNE); | |||
2521 | } | |||
2522 | } | |||
2523 | ||||
2524 | // If the subvector is a fixed-length type, we cannot use subregister | |||
2525 | // manipulation to simplify the codegen; we don't know which register of a | |||
2526 | // LMUL group contains the specific subvector as we only know the minimum | |||
2527 | // register size. Therefore we must slide the vector group up the full | |||
2528 | // amount. | |||
2529 | if (SubVecVT.isFixedLengthVector()) { | |||
2530 | if (OrigIdx == 0 && Vec.isUndef()) | |||
2531 | return Op; | |||
2532 | MVT ContainerVT = VecVT; | |||
2533 | if (VecVT.isFixedLengthVector()) { | |||
2534 | ContainerVT = getContainerForFixedLengthVector(VecVT); | |||
2535 | Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget); | |||
2536 | } | |||
2537 | SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT, | |||
2538 | DAG.getUNDEF(ContainerVT), SubVec, | |||
2539 | DAG.getConstant(0, DL, XLenVT)); | |||
2540 | SDValue Mask = | |||
2541 | getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first; | |||
2542 | // Set the vector length to only the number of elements we care about. Note | |||
2543 | // that for slideup this includes the offset. | |||
2544 | SDValue VL = | |||
2545 | DAG.getConstant(OrigIdx + SubVecVT.getVectorNumElements(), DL, XLenVT); | |||
2546 | SDValue SlideupAmt = DAG.getConstant(OrigIdx, DL, XLenVT); | |||
2547 | SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec, | |||
2548 | SubVec, SlideupAmt, Mask, VL); | |||
2549 | if (!VecVT.isFixedLengthVector()) | |||
2550 | return Slideup; | |||
2551 | return convertFromScalableVector(VecVT, Slideup, DAG, Subtarget); | |||
2552 | } | |||
2553 | ||||
2554 | unsigned SubRegIdx, RemIdx; | |||
2555 | std::tie(SubRegIdx, RemIdx) = | |||
2556 | RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs( | |||
2557 | VecVT, SubVecVT, OrigIdx, TRI); | |||
2558 | ||||
2559 | RISCVVLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecVT); | |||
2560 | bool IsSubVecPartReg = SubVecLMUL == RISCVVLMUL::LMUL_F2 || | |||
2561 | SubVecLMUL == RISCVVLMUL::LMUL_F4 || | |||
2562 | SubVecLMUL == RISCVVLMUL::LMUL_F8; | |||
2563 | ||||
2564 | // 1. If the Idx has been completely eliminated and this subvector's size is | |||
2565 | // a vector register or a multiple thereof, or the surrounding elements are | |||
2566 | // undef, then this is a subvector insert which naturally aligns to a vector | |||
2567 | // register. These can easily be handled using subregister manipulation. | |||
2568 | // 2. If the subvector is smaller than a vector register, then the insertion | |||
2569 | // must preserve the undisturbed elements of the register. We do this by | |||
2570 | // lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1 vector type | |||
2571 | // (which resolves to a subregister copy), performing a VSLIDEUP to place the | |||
2572 | // subvector within the vector register, and an INSERT_SUBVECTOR of that | |||
2573 | // LMUL=1 type back into the larger vector (resolving to another subregister | |||
2574 | // operation). See below for how our VSLIDEUP works. We go via a LMUL=1 type | |||
2575 | // to avoid allocating a large register group to hold our subvector. | |||
2576 | if (RemIdx == 0 && (!IsSubVecPartReg || Vec.isUndef())) | |||
2577 | return Op; | |||
2578 | ||||
2579 | // VSLIDEUP works by leaving elements 0<=i<OFFSET undisturbed, elements | |||
2580 | // OFFSET<=i<VL set to the "subvector" and VL<=i<VLMAX set to the tail policy | |||
2581 | // (in our case undisturbed). This means we can set up a subvector insertion | |||
2582 | // where OFFSET is the insertion offset, and the VL is the OFFSET plus the | |||
2583 | // size of the subvector. | |||
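     | // As a sketch: inserting an nxv1i32 subvector into an nxv2i32 register at | |||
     | // a remainder index of 1 gives OFFSET = 1*vscale and VL = 2*vscale, so | |||
     | // elements [0, vscale) are untouched and elements [vscale, 2*vscale) | |||
     | // receive the subvector. | |||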
2584 | MVT InterSubVT = VecVT; | |||
2585 | SDValue AlignedExtract = Vec; | |||
2586 | unsigned AlignedIdx = OrigIdx - RemIdx; | |||
2587 | if (VecVT.bitsGT(getLMUL1VT(VecVT))) { | |||
2588 | InterSubVT = getLMUL1VT(VecVT); | |||
2589 | // Extract a subvector equal to the nearest full vector register type. This | |||
2590 | // should resolve to a EXTRACT_SUBREG instruction. | |||
2591 | AlignedExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec, | |||
2592 | DAG.getConstant(AlignedIdx, DL, XLenVT)); | |||
2593 | } | |||
2594 | ||||
2595 | SDValue SlideupAmt = DAG.getConstant(RemIdx, DL, XLenVT); | |||
2596 | // For scalable vectors this must be further multiplied by vscale. | |||
2597 | SlideupAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlideupAmt); | |||
2598 | ||||
2599 | SDValue Mask, VL; | |||
2600 | std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget); | |||
2601 | ||||
2602 | // Construct the vector length corresponding to RemIdx + length(SubVecVT). | |||
2603 | VL = DAG.getConstant(SubVecVT.getVectorMinNumElements(), DL, XLenVT); | |||
2604 | VL = DAG.getNode(ISD::VSCALE, DL, XLenVT, VL); | |||
2605 | VL = DAG.getNode(ISD::ADD, DL, XLenVT, SlideupAmt, VL); | |||
2606 | ||||
2607 | SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InterSubVT, | |||
2608 | DAG.getUNDEF(InterSubVT), SubVec, | |||
2609 | DAG.getConstant(0, DL, XLenVT)); | |||
2610 | ||||
2611 | SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, InterSubVT, | |||
2612 | AlignedExtract, SubVec, SlideupAmt, Mask, VL); | |||
2613 | ||||
2614 | // If required, insert this subvector back into the correct vector register. | |||
2615 | // This should resolve to an INSERT_SUBREG instruction. | |||
2616 | if (VecVT.bitsGT(InterSubVT)) | |||
2617 | Slideup = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, Vec, Slideup, | |||
2618 | DAG.getConstant(AlignedIdx, DL, XLenVT)); | |||
2619 | ||||
2620 | // We might have bitcast from a mask type: cast back to the original type if | |||
2621 | // required. | |||
2622 | return DAG.getBitcast(Op.getSimpleValueType(), Slideup); | |||
2623 | } | |||
2624 | ||||
2625 | SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op, | |||
2626 | SelectionDAG &DAG) const { | |||
2627 | SDValue Vec = Op.getOperand(0); | |||
2628 | MVT SubVecVT = Op.getSimpleValueType(); | |||
2629 | MVT VecVT = Vec.getSimpleValueType(); | |||
2630 | ||||
2631 | SDLoc DL(Op); | |||
2632 | MVT XLenVT = Subtarget.getXLenVT(); | |||
2633 | unsigned OrigIdx = Op.getConstantOperandVal(1); | |||
2634 | const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo(); | |||
2635 | ||||
2636 | // We don't have the ability to slide mask vectors down indexed by their i1 | |||
2637 | // elements; the smallest we can do is i8. Often we are able to bitcast to | |||
2638 | // equivalent i8 vectors. Note that when extracting a fixed-length vector | |||
2639 | // from a scalable one, we might not necessarily have enough scalable | |||
2640 | // elements to safely divide by 8: v8i1 = extract nxv1i1 is valid. | |||
2641 | if (SubVecVT.getVectorElementType() == MVT::i1 && OrigIdx != 0) { | |||
2642 | if (VecVT.getVectorMinNumElements() >= 8 && | |||
2643 | SubVecVT.getVectorMinNumElements() >= 8) { | |||
2644 | assert(OrigIdx % 8 == 0 && "Invalid index"); | |||
2645 | assert(VecVT.getVectorMinNumElements() % 8 == 0 && | |||
2646 | SubVecVT.getVectorMinNumElements() % 8 == 0 && | |||
2647 | "Unexpected mask vector lowering"); | |||
2648 | OrigIdx /= 8; | |||
2649 | SubVecVT = | |||
2650 | MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8, | |||
2651 | SubVecVT.isScalableVector()); | |||
2652 | VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8, | |||
2653 | VecVT.isScalableVector()); | |||
2654 | Vec = DAG.getBitcast(VecVT, Vec); | |||
2655 | } else { | |||
2656 | // We can't slide this mask vector down, indexed by its i1 elements. | |||
2657 | // This poses a problem when we wish to extract a scalable vector which | |||
2658 | // can't be re-expressed as a larger type. Just choose the slow path and | |||
2659 | // extend to a larger type, then truncate back down. | |||
2660 | // TODO: We could probably improve this when extracting certain fixed-length | |||
2661 | // vectors from fixed-length vectors, where we can extract as i8 and shift | |||
2662 | // the correct element right to reach the desired subvector? | |||
2663 | MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8); | |||
2664 | MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8); | |||
2665 | Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec); | |||
2666 | Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtSubVecVT, Vec, | |||
2667 | Op.getOperand(1)); | |||
2668 | SDValue SplatZero = DAG.getConstant(0, DL, ExtSubVecVT); | |||
2669 | return DAG.getSetCC(DL, SubVecVT, Vec, SplatZero, ISD::SETNE); | |||
2670 | } | |||
2671 | } | |||
2672 | ||||
2673 | // If the subvector is a fixed-length type, we cannot use subregister | |||
2674 | // manipulation to simplify the codegen; we don't know which register of a | |||
2675 | // LMUL group contains the specific subvector as we only know the minimum | |||
2676 | // register size. Therefore we must slide the vector group down the full | |||
2677 | // amount. | |||
2678 | if (SubVecVT.isFixedLengthVector()) { | |||
2679 | // With an index of 0 this is a cast-like subvector, which can be performed | |||
2680 | // with subregister operations. | |||
2681 | if (OrigIdx == 0) | |||
2682 | return Op; | |||
2683 | MVT ContainerVT = VecVT; | |||
2684 | if (VecVT.isFixedLengthVector()) { | |||
2685 | ContainerVT = getContainerForFixedLengthVector(VecVT); | |||
2686 | Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget); | |||
2687 | } | |||
2688 | SDValue Mask = | |||
2689 | getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first; | |||
2690 | // Set the vector length to only the number of elements we care about. This | |||
2691 | // avoids sliding down elements we're going to discard straight away. | |||
2692 | SDValue VL = DAG.getConstant(SubVecVT.getVectorNumElements(), DL, XLenVT); | |||
2693 | SDValue SlidedownAmt = DAG.getConstant(OrigIdx, DL, XLenVT); | |||
2694 | SDValue Slidedown = | |||
2695 | DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT, | |||
2696 | DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL); | |||
2697 | // Now we can use a cast-like subvector extract to get the result. | |||
2698 | return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown, | |||
2699 | DAG.getConstant(0, DL, XLenVT)); | |||
2700 | } | |||
2701 | ||||
2702 | unsigned SubRegIdx, RemIdx; | |||
2703 | std::tie(SubRegIdx, RemIdx) = | |||
2704 | RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs( | |||
2705 | VecVT, SubVecVT, OrigIdx, TRI); | |||
2706 | ||||
2707 | // If the Idx has been completely eliminated then this is a subvector extract | |||
2708 | // which naturally aligns to a vector register. These can easily be handled | |||
2709 | // using subregister manipulation. | |||
2710 | if (RemIdx == 0) | |||
2711 | return Op; | |||
2712 | ||||
2713 | // Else we must shift our vector register directly to extract the subvector. | |||
2714 | // Do this using VSLIDEDOWN. | |||
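     | // As a sketch, with a0 holding RemIdx scaled by vscale this is expected to | |||
     | // produce roughly: | |||
     | //   vslidedown.vx v8, v8, a0 | |||
     | // followed by a subregister copy of the low LMUL=1 part as the result. | |||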
2715 | ||||
2716 | // If the vector type is an LMUL-group type, extract a subvector equal to the | |||
2717 | // nearest full vector register type. This should resolve to a EXTRACT_SUBREG | |||
2718 | // instruction. | |||
2719 | MVT InterSubVT = VecVT; | |||
2720 | if (VecVT.bitsGT(getLMUL1VT(VecVT))) { | |||
2721 | InterSubVT = getLMUL1VT(VecVT); | |||
2722 | Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec, | |||
2723 | DAG.getConstant(OrigIdx - RemIdx, DL, XLenVT)); | |||
2724 | } | |||
2725 | ||||
2726 | // Slide this vector register down by the desired number of elements in order | |||
2727 | // to place the desired subvector starting at element 0. | |||
2728 | SDValue SlidedownAmt = DAG.getConstant(RemIdx, DL, XLenVT); | |||
2729 | // For scalable vectors this must be further multiplied by vscale. | |||
2730 | SlidedownAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlidedownAmt); | |||
2731 | ||||
2732 | SDValue Mask, VL; | |||
2733 | std::tie(Mask, VL) = getDefaultScalableVLOps(InterSubVT, DL, DAG, Subtarget); | |||
2734 | SDValue Slidedown = | |||
2735 | DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, InterSubVT, | |||
2736 | DAG.getUNDEF(InterSubVT), Vec, SlidedownAmt, Mask, VL); | |||
2737 | ||||
2738 | // Now the vector is in the right position, extract our final subvector. This | |||
2739 | // should resolve to a COPY. | |||
2740 | Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown, | |||
2741 | DAG.getConstant(0, DL, XLenVT)); | |||
2742 | ||||
2743 | // We might have bitcast from a mask type: cast back to the original type if | |||
2744 | // required. | |||
2745 | return DAG.getBitcast(Op.getSimpleValueType(), Slidedown); | |||
2746 | } | |||
2747 | ||||
2748 | SDValue | |||
2749 | RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op, | |||
2750 | SelectionDAG &DAG) const { | |||
2751 | auto *Load = cast<LoadSDNode>(Op); | |||
2752 | ||||
2753 | SDLoc DL(Op); | |||
2754 | MVT VT = Op.getSimpleValueType(); | |||
2755 | MVT ContainerVT = getContainerForFixedLengthVector(VT); | |||
2756 | ||||
2757 | SDValue VL = | |||
2758 | DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT()); | |||
2759 | ||||
2760 | SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other}); | |||
2761 | SDValue NewLoad = DAG.getMemIntrinsicNode( | |||
2762 | RISCVISD::VLE_VL, DL, VTs, {Load->getChain(), Load->getBasePtr(), VL}, | |||
2763 | Load->getMemoryVT(), Load->getMemOperand()); | |||
2764 | ||||
2765 | SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget); | |||
2766 | return DAG.getMergeValues({Result, Load->getChain()}, DL); | |||
2767 | } | |||
2768 | ||||
2769 | SDValue | |||
2770 | RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op, | |||
2771 | SelectionDAG &DAG) const { | |||
2772 | auto *Store = cast<StoreSDNode>(Op); | |||
2773 | ||||
2774 | SDLoc DL(Op); | |||
2775 | MVT VT = Store->getValue().getSimpleValueType(); | |||
2776 | ||||
2777 | // FIXME: We probably need to zero any extra bits in a byte for mask stores. | |||
2778 | // This is tricky to do. | |||
2779 | ||||
2780 | MVT ContainerVT = getContainerForFixedLengthVector(VT); | |||
2781 | ||||
2782 | SDValue VL = | |||
2783 | DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT()); | |||
2784 | ||||
2785 | SDValue NewValue = | |||
2786 | convertToScalableVector(ContainerVT, Store->getValue(), DAG, Subtarget); | |||
2787 | return DAG.getMemIntrinsicNode( | |||
2788 | RISCVISD::VSE_VL, DL, DAG.getVTList(MVT::Other), | |||
2789 | {Store->getChain(), NewValue, Store->getBasePtr(), VL}, | |||
2790 | Store->getMemoryVT(), Store->getMemOperand()); | |||
2791 | } | |||
2792 | ||||
2793 | SDValue | |||
2794 | RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op, | |||
2795 | SelectionDAG &DAG) const { | |||
2796 | MVT InVT = Op.getOperand(0).getSimpleValueType(); | |||
2797 | MVT ContainerVT = getContainerForFixedLengthVector(InVT); | |||
2798 | ||||
2799 | MVT VT = Op.getSimpleValueType(); | |||
2800 | ||||
2801 | SDValue Op1 = | |||
2802 | convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget); | |||
2803 | SDValue Op2 = | |||
2804 | convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget); | |||
2805 | ||||
2806 | SDLoc DL(Op); | |||
2807 | SDValue VL = | |||
2808 | DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT()); | |||
2809 | ||||
2810 | ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); | |||
2811 | ||||
2812 | bool Invert = false; | |||
2813 | Optional<unsigned> LogicOpc; | |||
2814 | if (ContainerVT.isFloatingPoint()) { | |||
2815 | bool Swap = false; | |||
2816 | switch (CC) { | |||
2817 | default: | |||
2818 | break; | |||
2819 | case ISD::SETULE: | |||
2820 | case ISD::SETULT: | |||
2821 | Swap = true; | |||
2822 | LLVM_FALLTHROUGH; | |||
2823 | case ISD::SETUGE: | |||
2824 | case ISD::SETUGT: | |||
2825 | CC = getSetCCInverse(CC, ContainerVT); | |||
2826 | Invert = true; | |||
2827 | break; | |||
2828 | case ISD::SETOGE: | |||
2829 | case ISD::SETOGT: | |||
2830 | case ISD::SETGE: | |||
2831 | case ISD::SETGT: | |||
2832 | Swap = true; | |||
2833 | break; | |||
2834 | case ISD::SETUEQ: | |||
2835 | // Use !((OLT Op1, Op2) || (OLT Op2, Op1)) | |||
2836 | Invert = true; | |||
2837 | LogicOpc = RISCVISD::VMOR_VL; | |||
2838 | CC = ISD::SETOLT; | |||
2839 | break; | |||
2840 | case ISD::SETONE: | |||
2841 | // Use ((OLT Op1, Op2) || (OLT Op2, Op1)) | |||
2842 | LogicOpc = RISCVISD::VMOR_VL; | |||
2843 | CC = ISD::SETOLT; | |||
2844 | break; | |||
2845 | case ISD::SETO: | |||
2846 | // Use (OEQ Op1, Op1) && (OEQ Op2, Op2) | |||
2847 | LogicOpc = RISCVISD::VMAND_VL; | |||
2848 | CC = ISD::SETOEQ; | |||
2849 | break; | |||
2850 | case ISD::SETUO: | |||
2851 | // Use (UNE Op1, Op1) || (UNE Op2, Op2) | |||
2852 | LogicOpc = RISCVISD::VMOR_VL; | |||
2853 | CC = ISD::SETUNE; | |||
2854 | break; | |||
2855 | } | |||
2856 | ||||
2857 | if (Swap) { | |||
2858 | CC = getSetCCSwappedOperands(CC); | |||
2859 | std::swap(Op1, Op2); | |||
2860 | } | |||
2861 | } | |||
2862 | ||||
2863 | MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount()); | |||
2864 | SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL); | |||
2865 | ||||
2866 | // There are 3 cases we need to emit. | |||
2867 | // 1. For (OEQ Op1, Op1) && (OEQ Op2, Op2) or (UNE Op1, Op1) || (UNE Op2, Op2) | |||
2868 | // we need to compare each operand with itself. | |||
2869 | // 2. For (OLT Op1, Op2) || (OLT Op2, Op1) we need to compare Op1 and Op2 in | |||
2870 | // both orders. | |||
2871 | // 3. For any other case we just need one compare with Op1 and Op2. | |||
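     | // For example, SETUEQ falls under case 2 with an inverted result and is | |||
     | // expected to select to roughly (illustrative registers): | |||
     | //   vmflt.vv v10, v8, v9 | |||
     | //   vmflt.vv v11, v9, v8 | |||
     | //   vmor.mm  v8, v10, v11 | |||
     | //   vmset.m  v12 | |||
     | //   vmxor.mm v0, v8, v12 | |||
     | // (the trailing vmset/vmxor pair may later be folded into a vmnot.m). | |||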
2872 | SDValue Cmp; | |||
2873 | if (LogicOpc && (CC == ISD::SETOEQ || CC == ISD::SETUNE)) { | |||
2874 | Cmp = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op1, Op1, | |||
2875 | DAG.getCondCode(CC), Mask, VL); | |||
2876 | SDValue Cmp2 = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op2, Op2, | |||
2877 | DAG.getCondCode(CC), Mask, VL); | |||
2878 | Cmp = DAG.getNode(*LogicOpc, DL, MaskVT, Cmp, Cmp2, VL); | |||
2879 | } else { | |||
2880 | Cmp = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op1, Op2, | |||
2881 | DAG.getCondCode(CC), Mask, VL); | |||
2882 | if (LogicOpc) { | |||
2883 | SDValue Cmp2 = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op2, Op1, | |||
2884 | DAG.getCondCode(CC), Mask, VL); | |||
2885 | Cmp = DAG.getNode(*LogicOpc, DL, MaskVT, Cmp, Cmp2, VL); | |||
2886 | } | |||
2887 | } | |||
2888 | ||||
2889 | if (Invert) { | |||
2890 | SDValue AllOnes = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL); | |||
2891 | Cmp = DAG.getNode(RISCVISD::VMXOR_VL, DL, MaskVT, Cmp, AllOnes, VL); | |||
2892 | } | |||
2893 | ||||
2894 | return convertFromScalableVector(VT, Cmp, DAG, Subtarget); | |||
2895 | } | |||
2896 | ||||
2897 | SDValue RISCVTargetLowering::lowerFixedLengthVectorLogicOpToRVV( | |||
2898 | SDValue Op, SelectionDAG &DAG, unsigned MaskOpc, unsigned VecOpc) const { | |||
2899 | MVT VT = Op.getSimpleValueType(); | |||
2900 | ||||
2901 | if (VT.getVectorElementType() == MVT::i1) | |||
2902 | return lowerToScalableOp(Op, DAG, MaskOpc, /*HasMask*/ false); | |||
2903 | ||||
2904 | return lowerToScalableOp(Op, DAG, VecOpc, /*HasMask*/ true); | |||
2905 | } | |||
2906 | ||||
2907 | SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV( | |||
2908 | SDValue Op, SelectionDAG &DAG) const { | |||
2909 | MVT VT = Op.getSimpleValueType(); | |||
2910 | MVT ContainerVT = getContainerForFixedLengthVector(VT); | |||
2911 | ||||
2912 | MVT I1ContainerVT = | |||
2913 | MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount()); | |||
2914 | ||||
2915 | SDValue CC = | |||
2916 | convertToScalableVector(I1ContainerVT, Op.getOperand(0), DAG, Subtarget); | |||
2917 | SDValue Op1 = | |||
2918 | convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget); | |||
2919 | SDValue Op2 = | |||
2920 | convertToScalableVector(ContainerVT, Op.getOperand(2), DAG, Subtarget); | |||
2921 | ||||
2922 | SDLoc DL(Op); | |||
2923 | SDValue Mask, VL; | |||
2924 | std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); | |||
2925 | ||||
2926 | SDValue Select = | |||
2927 | DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, Op1, Op2, VL); | |||
2928 | ||||
2929 | return convertFromScalableVector(VT, Select, DAG, Subtarget); | |||
2930 | } | |||
2931 | ||||
2932 | SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG, | |||
2933 | unsigned NewOpc, | |||
2934 | bool HasMask) const { | |||
2935 | MVT VT = Op.getSimpleValueType(); | |||
2936 | assert(useRVVForFixedLengthVectorVT(VT) && | |||
2937 | "Only expected to lower fixed length vector operation!"); | |||
2938 | MVT ContainerVT = getContainerForFixedLengthVector(VT); | |||
2939 | ||||
2940 | // Create list of operands by converting existing ones to scalable types. | |||
2941 | SmallVector<SDValue, 6> Ops; | |||
2942 | for (const SDValue &V : Op->op_values()) { | |||
2943 | assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!"); | |||
2944 | ||||
2945 | // Pass through non-vector operands. | |||
2946 | if (!V.getValueType().isVector()) { | |||
2947 | Ops.push_back(V); | |||
2948 | continue; | |||
2949 | } | |||
2950 | ||||
2951 | // "cast" fixed length vector to a scalable vector. | |||
2952 | assert(useRVVForFixedLengthVectorVT(V.getSimpleValueType()) && | |||
2953 | "Only fixed length vectors are supported!"); | |||
2954 | Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget)); | |||
2955 | } | |||
2956 | ||||
2957 | SDLoc DL(Op); | |||
2958 | SDValue Mask, VL; | |||
2959 | std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); | |||
2960 | if (HasMask) | |||
2961 | Ops.push_back(Mask); | |||
2962 | Ops.push_back(VL); | |||
2963 | ||||
2964 | SDValue ScalableRes = DAG.getNode(NewOpc, DL, ContainerVT, Ops); | |||
2965 | return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget); | |||
2966 | } | |||
2967 | ||||
2968 | // Returns the opcode of the target-specific SDNode that implements the 32-bit | |||
2969 | // form of the given Opcode. | |||
2970 | static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) { | |||
2971 | switch (Opcode) { | |||
2972 | default: | |||
2973 | llvm_unreachable("Unexpected opcode")::llvm::llvm_unreachable_internal("Unexpected opcode", "/build/llvm-toolchain-snapshot-13~++20210308111132+66e3a4abe99c/llvm/lib/Target/RISCV/RISCVISelLowering.cpp" , 2973); | |||
2974 | case ISD::SHL: | |||
2975 | return RISCVISD::SLLW; | |||
2976 | case ISD::SRA: | |||
2977 | return RISCVISD::SRAW; | |||
2978 | case ISD::SRL: | |||
2979 | return RISCVISD::SRLW; | |||
2980 | case ISD::SDIV: | |||
2981 | return RISCVISD::DIVW; | |||
2982 | case ISD::UDIV: | |||
2983 | return RISCVISD::DIVUW; | |||
2984 | case ISD::UREM: | |||
2985 | return RISCVISD::REMUW; | |||
2986 | case ISD::ROTL: | |||
2987 | return RISCVISD::ROLW; | |||
2988 | case ISD::ROTR: | |||
2989 | return RISCVISD::RORW; | |||
2990 | case RISCVISD::GREVI: | |||
2991 | return RISCVISD::GREVIW; | |||
2992 | case RISCVISD::GORCI: | |||
2993 | return RISCVISD::GORCIW; | |||
2994 | } | |||
2995 | } | |||
2996 | ||||
2997 | // Converts the given 32-bit operation to a target-specific SelectionDAG node. | |||
2998 | // Because i32 isn't a legal type for RV64, these operations would otherwise | |||
2999 | // be promoted to i64, making it difficult to select the SLLW/DIVUW/.../*W | |||
3000 | // instructions later, because the fact that the operation was originally of | |||
3001 | // type i32 is lost. | |||
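     | // For example, an i32 srl on RV64 has its operands any-extended to i64, a | |||
     | // RISCVISD::SRLW node is emitted, and the result is truncated back to i32; | |||
     | // this is expected to select to a single srlw instruction. | |||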
3002 | static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG, | |||
3003 | unsigned ExtOpc = ISD::ANY_EXTEND) { | |||
3004 | SDLoc DL(N); | |||
3005 | RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode()); | |||
3006 | SDValue NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0)); | |||
3007 | SDValue NewOp1 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(1)); | |||
3008 | SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1); | |||
3009 | // ReplaceNodeResults requires we maintain the same type for the return value. | |||
3010 | return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes); | |||
3011 | } | |||
3012 | ||||
3013 | // Converts the given 32-bit operation to an i64 operation with sign-extension | |||
3014 | // semantics, reducing the number of sign-extension instructions needed. | |||
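     | // For example, an i32 add becomes (trunc (sext_inreg (add (anyext a), | |||
     | // (anyext b)), i32)), which is expected to select to a single addw. | |||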
3015 | static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) { | |||
3016 | SDLoc DL(N); | |||
3017 | SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0)); | |||
3018 | SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1)); | |||
3019 | SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1); | |||
3020 | SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp, | |||
3021 | DAG.getValueType(MVT::i32)); | |||
3022 | return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes); | |||
3023 | } | |||
3024 | ||||
3025 | void RISCVTargetLowering::ReplaceNodeResults(SDNode *N, | |||
3026 | SmallVectorImpl<SDValue> &Results, | |||
3027 | SelectionDAG &DAG) const { | |||
3028 | SDLoc DL(N); | |||
3029 | switch (N->getOpcode()) { | |||
3030 | default: | |||
3031 | llvm_unreachable("Don't know how to custom type legalize this operation!")::llvm::llvm_unreachable_internal("Don't know how to custom type legalize this operation!" , "/build/llvm-toolchain-snapshot-13~++20210308111132+66e3a4abe99c/llvm/lib/Target/RISCV/RISCVISelLowering.cpp" , 3031); | |||
3032 | case ISD::STRICT_FP_TO_SINT: | |||
3033 | case ISD::STRICT_FP_TO_UINT: | |||
3034 | case ISD::FP_TO_SINT: | |||
3035 | case ISD::FP_TO_UINT: { | |||
3036 | bool IsStrict = N->isStrictFPOpcode(); | |||
3037 | assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && | |||
3038 | "Unexpected custom legalisation"); | |||
3039 | SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0); | |||
3040 | // If the FP type needs to be softened, emit a library call using the 'si' | |||
3041 | // version. If we left it to default legalization we'd end up with 'di'. If | |||
3042 | // the FP type doesn't need to be softened just let generic type | |||
3043 | // legalization promote the result type. | |||
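// For example (assuming the usual compiler-rt naming), an i32 fptosi from an
// fp128 source emitted here calls __fixtfsi, whereas promoting the result to
// i64 first would have produced a call to __fixtfdi instead.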
3044 | if (getTypeAction(*DAG.getContext(), Op0.getValueType()) != | |||
3045 | TargetLowering::TypeSoftenFloat) | |||
3046 | return; | |||
3047 | RTLIB::Libcall LC; | |||
3048 | if (N->getOpcode() == ISD::FP_TO_SINT || | |||
3049 | N->getOpcode() == ISD::STRICT_FP_TO_SINT) | |||
3050 | LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0)); | |||
3051 | else | |||
3052 | LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0)); | |||
3053 | MakeLibCallOptions CallOptions; | |||
3054 | EVT OpVT = Op0.getValueType(); | |||
3055 | CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true); | |||
3056 | SDValue Chain = IsStrict ? N->getOperand(0) : SDValue(); | |||
3057 | SDValue Result; | |||
3058 | std::tie(Result, Chain) = | |||
3059 | makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain); | |||
3060 | Results.push_back(Result); | |||
3061 | if (IsStrict) | |||
3062 | Results.push_back(Chain); | |||
3063 | break; | |||
3064 | } | |||
3065 | case ISD::READCYCLECOUNTER: { | |||
3066 | assert(!Subtarget.is64Bit() && | |||
3067 | "READCYCLECOUNTER only has custom type legalization on riscv32"); | |||
3068 | ||||
3069 | SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); | |||
3070 | SDValue RCW = | |||
3071 | DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0)); | |||
3072 | ||||
3073 | Results.push_back( | |||
3074 | DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1))); | |||
3075 | Results.push_back(RCW.getValue(2)); | |||
3076 | break; | |||
3077 | } | |||
3078 | case ISD::ADD: | |||
3079 | case ISD::SUB: | |||
3080 | case ISD::MUL: | |||
3081 | assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && | |||
3082 | "Unexpected custom legalisation"); | |||
3083 | if (N->getOperand(1).getOpcode() == ISD::Constant) | |||
3084 | return; | |||
3085 | Results.push_back(customLegalizeToWOpWithSExt(N, DAG)); | |||
3086 | break; | |||
3087 | case ISD::SHL: | |||
3088 | case ISD::SRA: | |||
3089 | case ISD::SRL: | |||
3090 | assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && | |||
3091 | "Unexpected custom legalisation"); | |||
3092 | if (N->getOperand(1).getOpcode() == ISD::Constant) | |||
3093 | return; | |||
3094 | Results.push_back(customLegalizeToWOp(N, DAG)); | |||
3095 | break; | |||
3096 | case ISD::ROTL: | |||
3097 | case ISD::ROTR: | |||
3098 | assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && | |||
3099 | "Unexpected custom legalisation"); | |||
3100 | Results.push_back(customLegalizeToWOp(N, DAG)); | |||
3101 | break; | |||
3102 | case ISD::SDIV: | |||
3103 | case ISD::UDIV: | |||
3104 | case ISD::UREM: { | |||
3105 | MVT VT = N->getSimpleValueType(0); | |||
3106 | assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) && | |||
3107 | Subtarget.is64Bit() && Subtarget.hasStdExtM() && | |||
3108 | "Unexpected custom legalisation"); | |||
3109 | if (N->getOperand(0).getOpcode() == ISD::Constant || | |||
3110 | N->getOperand(1).getOpcode() == ISD::Constant) | |||
3111 | return; | |||
3112 | ||||
3113 | // If the input is i32, use ANY_EXTEND since the W instructions don't read | |||
3114 | // the upper 32 bits. For other types we need to sign or zero extend | |||
3115 | // based on the opcode. | |||
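// E.g. an i8 UDIV must zero-extend its operands so DIVUW sees the correct
// 8-bit values in the low 32 bits, and an i8 SDIV must sign-extend them; a
// plain any_extend is only safe for i32, where the W instructions ignore
// bits 63:32 anyway.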
3116 | unsigned ExtOpc = ISD::ANY_EXTEND; | |||
3117 | if (VT != MVT::i32) | |||
3118 | ExtOpc = N->getOpcode() == ISD::SDIV ? ISD::SIGN_EXTEND | |||
3119 | : ISD::ZERO_EXTEND; | |||
3120 | ||||
3121 | Results.push_back(customLegalizeToWOp(N, DAG, ExtOpc)); | |||
3122 | break; | |||
3123 | } | |||
3124 | case ISD::BITCAST: { | |||
3125 | assert(((N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && | |||
3126 | Subtarget.hasStdExtF()) || | |||
3127 | (N->getValueType(0) == MVT::i16 && Subtarget.hasStdExtZfh())) && | |||
3128 | "Unexpected custom legalisation"); | |||
3129 | SDValue Op0 = N->getOperand(0); | |||
3130 | if (N->getValueType(0) == MVT::i16 && Subtarget.hasStdExtZfh()) { | |||
3131 | if (Op0.getValueType() != MVT::f16) | |||
3132 | return; | |||
3133 | SDValue FPConv = | |||
3134 | DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, Subtarget.getXLenVT(), Op0); | |||
3135 | Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv)); | |||
3136 | } else if (N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && | |||
3137 | Subtarget.hasStdExtF()) { | |||
3138 | if (Op0.getValueType() != MVT::f32) | |||
3139 | return; | |||
3140 | SDValue FPConv = | |||
3141 | DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0); | |||
3142 | Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv)); | |||
3143 | } | |||
3144 | break; | |||
3145 | } | |||
3146 | case RISCVISD::GREVI: | |||
3147 | case RISCVISD::GORCI: { | |||
3148 | assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && | |||
3149 | "Unexpected custom legalisation"); | |||
3150 | // This is similar to customLegalizeToWOp, except that we pass the second | |||
3151 | // operand (a TargetConstant) straight through: it is already of type | |||
3152 | // XLenVT. | |||
3153 | SDLoc DL(N); | |||
3154 | RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode()); | |||
3155 | SDValue NewOp0 = | |||
3156 | DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0)); | |||
3157 | SDValue NewRes = | |||
3158 | DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, N->getOperand(1)); | |||
3159 | // ReplaceNodeResults requires we maintain the same type for the return | |||
3160 | // value. | |||
3161 | Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes)); | |||
3162 | break; | |||
3163 | } | |||
3164 | case RISCVISD::SHFLI: { | |||
3165 | // There is no SHFLIW instruction, but we can just promote the operation. | |||
3166 | assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && | |||
3167 | "Unexpected custom legalisation"); | |||
3168 | SDLoc DL(N); | |||
3169 | SDValue NewOp0 = | |||
3170 | DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0)); | |||
3171 | SDValue NewRes = | |||
3172 | DAG.getNode(RISCVISD::SHFLI, DL, MVT::i64, NewOp0, N->getOperand(1)); | |||
3173 | // ReplaceNodeResults requires we maintain the same type for the return | |||
3174 | // value. | |||
3175 | Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes)); | |||
3176 | break; | |||
3177 | } | |||
3178 | case ISD::BSWAP: | |||
3179 | case ISD::BITREVERSE: { | |||
3180 | assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && | |||
3181 | Subtarget.hasStdExtZbp() && "Unexpected custom legalisation"); | |||
3182 | SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, | |||
3183 | N->getOperand(0)); | |||
3184 | unsigned Imm = N->getOpcode() == ISD::BITREVERSE ? 31 : 24; | |||
3185 | SDValue GREVIW = DAG.getNode(RISCVISD::GREVIW, DL, MVT::i64, NewOp0, | |||
3186 | DAG.getTargetConstant(Imm, DL, | |||
3187 | Subtarget.getXLenVT())); | |||
3188 | // ReplaceNodeResults requires we maintain the same type for the return | |||
3189 | // value. | |||
3190 | Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, GREVIW)); | |||
3191 | break; | |||
3192 | } | |||
3193 | case ISD::FSHL: | |||
3194 | case ISD::FSHR: { | |||
3195 | assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && | |||
3196 | Subtarget.hasStdExtZbt() && "Unexpected custom legalisation"); | |||
3197 | SDValue NewOp0 = | |||
3198 | DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0)); | |||
3199 | SDValue NewOp1 = | |||
3200 | DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1)); | |||
3201 | SDValue NewOp2 = | |||
3202 | DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2)); | |||
3203 | // FSLW/FSRW take a 6 bit shift amount but i32 FSHL/FSHR only use 5 bits. | |||
3204 | // Mask the shift amount to 5 bits. | |||
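// E.g. an i32 funnel shift by 33 is defined to behave like a shift by 1 (the
// amount is taken modulo 32), but FSLW/FSRW would honour all 6 bits; the AND
// below keeps only bits 4:0 so the two agree.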
3205 | NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2, | |||
3206 | DAG.getConstant(0x1f, DL, MVT::i64)); | |||
3207 | unsigned Opc = | |||
3208 | N->getOpcode() == ISD::FSHL ? RISCVISD::FSLW : RISCVISD::FSRW; | |||
3209 | SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewOp2); | |||
3210 | Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp)); | |||
3211 | break; | |||
3212 | } | |||
3213 | case ISD::EXTRACT_VECTOR_ELT: { | |||
3214 | // Custom-legalize an EXTRACT_VECTOR_ELT where XLEN<SEW, as the SEW element | |||
3215 | // type is illegal (currently only vXi64 RV32). | |||
3216 | // With vmv.x.s, when SEW > XLEN, only the least-significant XLEN bits are | |||
3217 | // transferred to the destination register. We issue two of these from the | |||
3218 | // upper- and lower- halves of the SEW-bit vector element, slid down to the | |||
3219 | // first element. | |||
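// Sketch of the node sequence built below (DAG level, not final assembly):
// the vector is slid down by Idx (unless Idx is 0), VMV_X_S reads the low
// XLEN bits of element 0, the element is shifted right by a splat of 32 via
// SRL_VL, a second VMV_X_S reads the upper half, and BUILD_PAIR glues the
// two i32 halves back into the i64 result.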
3220 | SDLoc DL(N); | |||
3221 | SDValue Vec = N->getOperand(0); | |||
3222 | SDValue Idx = N->getOperand(1); | |||
3223 | ||||
3224 | // The vector type hasn't been legalized yet so we can't issue target | |||
3225 | // specific nodes if it needs legalization. | |||
3226 | // FIXME: We would need to manually legalize it if that turns out to matter. | |||
3227 | if (!isTypeLegal(Vec.getValueType())) | |||
3228 | return; | |||
3229 | ||||
3230 | MVT VecVT = Vec.getSimpleValueType(); | |||
3231 | ||||
3232 | assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 && | |||
3233 | VecVT.getVectorElementType() == MVT::i64 && | |||
3234 | "Unexpected EXTRACT_VECTOR_ELT legalization"); | |||
3235 | ||||
3236 | // If this is a fixed vector, we need to convert it to a scalable vector. | |||
3237 | MVT ContainerVT = VecVT; | |||
3238 | if (VecVT.isFixedLengthVector()) { | |||
3239 | ContainerVT = getContainerForFixedLengthVector(VecVT); | |||
3240 | Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget); | |||
3241 | } | |||
3242 | ||||
3243 | MVT XLenVT = Subtarget.getXLenVT(); | |||
3244 | ||||
3245 | // Use a VL of 1 to avoid processing more elements than we need. | |||
3246 | MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount()); | |||
3247 | SDValue VL = DAG.getConstant(1, DL, XLenVT); | |||
3248 | SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL); | |||
3249 | ||||
3250 | // Unless the index is known to be 0, we must slide the vector down to get | |||
3251 | // the desired element into index 0. | |||
3252 | if (!isNullConstant(Idx)) { | |||
3253 | Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT, | |||
3254 | DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL); | |||
3255 | } | |||
3256 | ||||
3257 | // Extract the lower XLEN bits of the correct vector element. | |||
3258 | SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec); | |||
3259 | ||||
3260 | // To extract the upper XLEN bits of the vector element, shift the first | |||
3261 | // element right by 32 bits and re-extract the lower XLEN bits. | |||
3262 | SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, | |||
3263 | DAG.getConstant(32, DL, XLenVT), VL); | |||
3264 | SDValue LShr32 = DAG.getNode(RISCVISD::SRL_VL, DL, ContainerVT, Vec, | |||
3265 | ThirtyTwoV, Mask, VL); | |||
3266 | ||||
3267 | SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32); | |||
3268 | ||||
3269 | Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi)); | |||
3270 | break; | |||
3271 | } | |||
3272 | case ISD::INTRINSIC_WO_CHAIN: { | |||
3273 | unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); | |||
3274 | switch (IntNo) { | |||
3275 | default: | |||
3276 | llvm_unreachable( | |||
3277 | "Don't know how to custom type legalize this intrinsic!"); | |||
3278 | case Intrinsic::riscv_vmv_x_s: { | |||
3279 | EVT VT = N->getValueType(0); | |||
3280 | assert((VT == MVT::i8 || VT == MVT::i16 || | |||
3281 | (Subtarget.is64Bit() && VT == MVT::i32)) && | |||
3282 | "Unexpected custom legalisation!"); | |||
3283 | SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL, | |||
3284 | Subtarget.getXLenVT(), N->getOperand(1)); | |||
3285 | Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract)); | |||
3286 | break; | |||
3287 | } | |||
3288 | } | |||
3289 | break; | |||
3290 | } | |||
3291 | case ISD::VECREDUCE_ADD: | |||
3292 | case ISD::VECREDUCE_AND: | |||
3293 | case ISD::VECREDUCE_OR: | |||
3294 | case ISD::VECREDUCE_XOR: | |||
3295 | case ISD::VECREDUCE_SMAX: | |||
3296 | case ISD::VECREDUCE_UMAX: | |||
3297 | case ISD::VECREDUCE_SMIN: | |||
3298 | case ISD::VECREDUCE_UMIN: | |||
3299 | // The custom-lowering for these nodes returns a vector whose first element | |||
3300 | // is the result of the reduction. Extract its first element and let the | |||
3301 | // legalization for EXTRACT_VECTOR_ELT do the rest of the job. | |||
3302 | Results.push_back(lowerVECREDUCE(SDValue(N, 0), DAG)); | |||
3303 | break; | |||
3304 | } | |||
3305 | } | |||
3306 | ||||
3307 | // A structure to hold one of the bit-manipulation patterns below. Together, a | |||
3308 | // SHL and non-SHL pattern may form a bit-manipulation pair on a single source: | |||
3309 | // (or (and (shl x, 1), 0xAAAAAAAA), | |||
3310 | // (and (srl x, 1), 0x55555555)) | |||
3311 | struct RISCVBitmanipPat { | |||
3312 | SDValue Op; | |||
3313 | unsigned ShAmt; | |||
3314 | bool IsSHL; | |||
3315 | ||||
3316 | bool formsPairWith(const RISCVBitmanipPat &Other) const { | |||
3317 | return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL; | |||
3318 | } | |||
3319 | }; | |||
3320 | ||||
3321 | // Matches patterns of the form | |||
3322 | // (and (shl x, C2), (C1 << C2)) | |||
3323 | // (and (srl x, C2), C1) | |||
3324 | // (shl (and x, C1), C2) | |||
3325 | // (srl (and x, (C1 << C2)), C2) | |||
3326 | // Where C2 is a power of 2 and C1 has at least that many leading zeroes. | |||
3327 | // The expected masks for each shift amount are specified in BitmanipMasks where | |||
3328 | // BitmanipMasks[log2(C2)] specifies the expected C1 value. | |||
3329 | // The maximum allowed shift amount is either XLen/2 or XLen/4, determined by | |||
3330 | // whether BitmanipMasks contains 6 or 5 entries, assuming that the maximum | |||
3331 | // possible XLen is 64. | |||
3332 | static Optional<RISCVBitmanipPat> | |||
3333 | matchRISCVBitmanipPat(SDValue Op, ArrayRef<uint64_t> BitmanipMasks) { | |||
3334 | assert((BitmanipMasks.size() == 5 || BitmanipMasks.size() == 6) && | |||
3335 | "Unexpected number of masks"); | |||
3336 | Optional<uint64_t> Mask; | |||
3337 | // Optionally consume a mask around the shift operation. | |||
3338 | if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) { | |||
3339 | Mask = Op.getConstantOperandVal(1); | |||
3340 | Op = Op.getOperand(0); | |||
3341 | } | |||
3342 | if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL) | |||
3343 | return None; | |||
3344 | bool IsSHL = Op.getOpcode() == ISD::SHL; | |||
3345 | ||||
3346 | if (!isa<ConstantSDNode>(Op.getOperand(1))) | |||
3347 | return None; | |||
3348 | uint64_t ShAmt = Op.getConstantOperandVal(1); | |||
3349 | ||||
3350 | unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32; | |||
3351 | if (ShAmt >= Width || !isPowerOf2_64(ShAmt)) | |||
3352 | return None; | |||
3353 | // If we don't have enough masks for 64 bit, then we must be trying to | |||
3354 | // match SHFL so we're only allowed to shift 1/4 of the width. | |||
3355 | if (BitmanipMasks.size() == 5 && ShAmt >= (Width / 2)) | |||
3356 | return None; | |||
3357 | ||||
3358 | SDValue Src = Op.getOperand(0); | |||
3359 | ||||
3360 | // The expected mask is shifted left when the AND is found around SHL | |||
3361 | // patterns. | |||
3362 | // ((x >> 1) & 0x55555555) | |||
3363 | // ((x << 1) & 0xAAAAAAAA) | |||
3364 | bool SHLExpMask = IsSHL; | |||
3365 | ||||
3366 | if (!Mask) { | |||
3367 | // Sometimes LLVM keeps the mask as an operand of the shift, typically when | |||
3368 | // the mask is all ones: consume that now. | |||
3369 | if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) { | |||
3370 | Mask = Src.getConstantOperandVal(1); | |||
3371 | Src = Src.getOperand(0); | |||
3372 | // The expected mask is now in fact shifted left for SRL, so reverse the | |||
3373 | // decision. | |||
3374 | // ((x & 0xAAAAAAAA) >> 1) | |||
3375 | // ((x & 0x55555555) << 1) | |||
3376 | SHLExpMask = !SHLExpMask; | |||
3377 | } else { | |||
3378 | // Use a default shifted mask of all-ones if there's no AND, truncated | |||
3379 | // down to the expected width. This simplifies the logic later on. | |||
3380 | Mask = maskTrailingOnes<uint64_t>(Width); | |||
3381 | *Mask &= (IsSHL ? *Mask << ShAmt : *Mask >> ShAmt); | |||
3382 | } | |||
3383 | } | |||
3384 | ||||
3385 | unsigned MaskIdx = Log2_32(ShAmt); | |||
3386 | uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width); | |||
3387 | ||||
3388 | if (SHLExpMask) | |||
3389 | ExpMask <<= ShAmt; | |||
3390 | ||||
3391 | if (Mask != ExpMask) | |||
3392 | return None; | |||
3393 | ||||
3394 | return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL}; | |||
3395 | } | |||
3396 | ||||
3397 | // Matches any of the following bit-manipulation patterns: | |||
3398 | // (and (shl x, 1), (0x55555555 << 1)) | |||
3399 | // (and (srl x, 1), 0x55555555) | |||
3400 | // (shl (and x, 0x55555555), 1) | |||
3401 | // (srl (and x, (0x55555555 << 1)), 1) | |||
3402 | // where the shift amount and mask may vary thus: | |||
3403 | // [1] = 0x55555555 / 0xAAAAAAAA | |||
3404 | // [2] = 0x33333333 / 0xCCCCCCCC | |||
3405 | // [4] = 0x0F0F0F0F / 0xF0F0F0F0 | |||
3406 | // [8] = 0x00FF00FF / 0xFF00FF00 | |||
3407 | // [16] = 0x0000FFFF / 0xFFFF0000 | |||
3408 | // [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64) | |||
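// As a reference point, composing the [8] and [16] stages gives shamt 24,
// which byte-swaps a 32-bit value (BSWAP), and enabling all five 32-bit
// stages gives shamt 31, a full bit reversal (BITREVERSE); the
// BSWAP/BITREVERSE legalization above uses exactly those amounts.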
3409 | static Optional<RISCVBitmanipPat> matchGREVIPat(SDValue Op) { | |||
3410 | // These are the unshifted masks which we use to match bit-manipulation | |||
3411 | // patterns. They may be shifted left in certain circumstances. | |||
3412 | static const uint64_t BitmanipMasks[] = { | |||
3413 | 0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL, | |||
3414 | 0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL}; | |||
3415 | ||||
3416 | return matchRISCVBitmanipPat(Op, BitmanipMasks); | |||
3417 | } | |||
3418 | ||||
3419 | // Match the following pattern as a GREVI(W) operation | |||
3420 | // (or (BITMANIP_SHL x), (BITMANIP_SRL x)) | |||
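// For example, matching shift amount 1 on both sides:
//   (or (and (shl x, 1), 0xAAAAAAAA),
//       (and (srl x, 1), 0x55555555))
// pairs up via formsPairWith and is rewritten as (GREVI x, 1), which swaps
// adjacent bits.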
3421 | static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG, | |||
3422 | const RISCVSubtarget &Subtarget) { | |||
3423 | assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension"); | |||
3424 | EVT VT = Op.getValueType(); | |||
3425 | ||||
3426 | if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) { | |||
3427 | auto LHS = matchGREVIPat(Op.getOperand(0)); | |||
3428 | auto RHS = matchGREVIPat(Op.getOperand(1)); | |||
3429 | if (LHS && RHS && LHS->formsPairWith(*RHS)) { | |||
3430 | SDLoc DL(Op); | |||
3431 | return DAG.getNode( | |||
3432 | RISCVISD::GREVI, DL, VT, LHS->Op, | |||
3433 | DAG.getTargetConstant(LHS->ShAmt, DL, Subtarget.getXLenVT())); | |||
3434 | } | |||
3435 | } | |||
3436 | return SDValue(); | |||
3437 | } | |||
3438 | ||||
3439 | // Matches any of the following patterns as a GORCI(W) operation | |||
3440 | // 1. (or (GREVI x, shamt), x) if shamt is a power of 2 | |||
3441 | // 2. (or x, (GREVI x, shamt)) if shamt is a power of 2 | |||
3442 | // 3. (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x)) | |||
3443 | // Note that with the variant of 3., | |||
3444 | // (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x) | |||
3445 | // the inner pattern will first be matched as GREVI and then the outer | |||
3446 | // pattern will be matched to GORC via the first rule above. | |||
3447 | // 4. (or (rotl/rotr x, bitwidth/2), x) | |||
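// For example, on i32 both (or (rotl x, 16), x) and (or x, (rotr x, 16))
// match rule 4 (RotAmt == bitwidth/2) and are rewritten as (GORCI x, 16),
// which ORs each 16-bit half of x with the opposite half.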
3448 | static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG, | |||
3449 | const RISCVSubtarget &Subtarget) { | |||
3450 | assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension"); | |||
3451 | EVT VT = Op.getValueType(); | |||
3452 | ||||
3453 | if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) { | |||
3454 | SDLoc DL(Op); | |||
3455 | SDValue Op0 = Op.getOperand(0); | |||
3456 | SDValue Op1 = Op.getOperand(1); | |||
3457 | ||||
3458 | auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) { | |||
3459 | if (Reverse.getOpcode() == RISCVISD::GREVI && Reverse.getOperand(0) == X && | |||
3460 | isPowerOf2_32(Reverse.getConstantOperandVal(1))) | |||
3461 | return DAG.getNode(RISCVISD::GORCI, DL, VT, X, Reverse.getOperand(1)); | |||
3462 | // We can also form GORCI from ROTL/ROTR by half the bitwidth. | |||
3463 | if ((Reverse.getOpcode() == ISD::ROTL || | |||
3464 | Reverse.getOpcode() == ISD::ROTR) && | |||
3465 | Reverse.getOperand(0) == X && | |||
3466 | isa<ConstantSDNode>(Reverse.getOperand(1))) { | |||
3467 | uint64_t RotAmt = Reverse.getConstantOperandVal(1); | |||
3468 | if (RotAmt == (VT.getSizeInBits() / 2)) | |||
3469 | return DAG.getNode( | |||
3470 | RISCVISD::GORCI, DL, VT, X, | |||
3471 | DAG.getTargetConstant(RotAmt, DL, Subtarget.getXLenVT())); | |||
3472 | } | |||
3473 | return SDValue(); | |||
3474 | }; | |||
3475 | ||||
3476 | // Check for either commutable permutation of (or (GREVI x, shamt), x) | |||
3477 | if (SDValue V = MatchOROfReverse(Op0, Op1)) | |||
3478 | return V; | |||
3479 | if (SDValue V = MatchOROfReverse(Op1, Op0)) | |||
3480 | return V; | |||
3481 | ||||
3482 | // OR is commutable so canonicalize its OR operand to the left | |||
3483 | if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR) | |||
3484 | std::swap(Op0, Op1); | |||
3485 | if (Op0.getOpcode() != ISD::OR) | |||
3486 | return SDValue(); | |||
3487 | SDValue OrOp0 = Op0.getOperand(0); | |||
3488 | SDValue OrOp1 = Op0.getOperand(1); | |||
3489 | auto LHS = matchGREVIPat(OrOp0); | |||
3490 | // OR is commutable so swap the operands and try again: x might have been | |||
3491 | // on the left | |||
3492 | if (!LHS) { | |||
3493 | std::swap(OrOp0, OrOp1); | |||
3494 | LHS = matchGREVIPat(OrOp0); | |||
3495 | } | |||
3496 | auto RHS = matchGREVIPat(Op1); | |||
3497 | if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) { | |||
3498 | return DAG.getNode( | |||
3499 | RISCVISD::GORCI, DL, VT, LHS->Op, | |||
3500 | DAG.getTargetConstant(LHS->ShAmt, DL, Subtarget.getXLenVT())); | |||
3501 | } | |||
3502 | } | |||
3503 | return SDValue(); | |||
3504 | } | |||
3505 | ||||
3506 | // Matches any of the following bit-manipulation patterns: | |||
3507 | // (and (shl x, 1), (0x22222222 << 1)) | |||
3508 | // (and (srl x, 1), 0x22222222) | |||
3509 | // (shl (and x, 0x22222222), 1) | |||
3510 | // (srl (and x, (0x22222222 << 1)), 1) | |||
3511 | // where the shift amount and mask may vary thus: | |||
3512 | // [1] = 0x22222222 / 0x44444444 | |||
3513 | // [2] = 0x0C0C0C0C / 0x30303030 | |||
3514 | // [4] = 0x00F000F0 / 0x0F000F00 | |||
3515 | // [8] = 0x0000FF00 / 0x00FF0000 | |||
3516 | // [16] = 0x00000000FFFF0000 / 0x0000FFFF00000000 (for RV64) | |||
3517 | static Optional<RISCVBitmanipPat> matchSHFLPat(SDValue Op) { | |||
3518 | // These are the unshifted masks which we use to match bit-manipulation | |||
3519 | // patterns. They may be shifted left in certain circumstances. | |||
3520 | static const uint64_t BitmanipMasks[] = { | |||
3521 | 0x2222222222222222ULL, 0x0C0C0C0C0C0C0C0CULL, 0x00F000F000F000F0ULL, | |||
3522 | 0x0000FF000000FF00ULL, 0x00000000FFFF0000ULL}; | |||
3523 | ||||
3524 | return matchRISCVBitmanipPat(Op, BitmanipMasks); | |||
3525 | } | |||
3526 | ||||
3527 | // Match (or (or (SHFL_SHL x), (SHFL_SHR x)), (SHFL_AND x)) | |||
3528 | static SDValue combineORToSHFL(SDValue Op, SelectionDAG &DAG, | |||
3529 | const RISCVSubtarget &Subtarget) { | |||
3530 | assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension"); | |||
3531 | EVT VT = Op.getValueType(); | |||
3532 | ||||
3533 | if (VT != MVT::i32 && VT != Subtarget.getXLenVT()) | |||
3534 | return SDValue(); | |||
3535 | ||||
3536 | SDValue Op0 = Op.getOperand(0); | |||
3537 | SDValue Op1 = Op.getOperand(1); | |||
3538 | ||||
3539 | // Or is commutable so canonicalize the second OR to the LHS. | |||
3540 | if (Op0.getOpcode() != ISD::OR) | |||
3541 | std::swap(Op0, Op1); | |||
3542 | if (Op0.getOpcode() != ISD::OR) | |||
3543 | return SDValue(); | |||
3544 | ||||
3545 | // We found an inner OR, so our operands are the operands of the inner OR | |||
3546 | // and the other operand of the outer OR. | |||
3547 | SDValue A = Op0.getOperand(0); | |||
3548 | SDValue B = Op0.getOperand(1); | |||
3549 | SDValue C = Op1; | |||
3550 | ||||
3551 | auto Match1 = matchSHFLPat(A); | |||
3552 | auto Match2 = matchSHFLPat(B); | |||
3553 | ||||
3554 | // If neither matched, we failed. | |||
3555 | if (!Match1 && !Match2) | |||
3556 | return SDValue(); | |||
3557 | ||||
3558 | // We had at least one match. If one failed, try the remaining C operand. | |||
3559 | if (!Match1) { | |||
3560 | std::swap(A, C); | |||
3561 | Match1 = matchSHFLPat(A); | |||
3562 | if (!Match1) | |||
3563 | return SDValue(); | |||
3564 | } else if (!Match2) { | |||
3565 | std::swap(B, C); | |||
3566 | Match2 = matchSHFLPat(B); | |||
3567 | if (!Match2) | |||
3568 | return SDValue(); | |||
3569 | } | |||
3570 | assert(Match1 && Match2); | |||
3571 | ||||
3572 | // Make sure our matches pair up. | |||
3573 | if (!Match1->formsPairWith(*Match2)) | |||
3574 | return SDValue(); | |||
3575 | ||||
3576 | // All that remains is to make sure C is an AND with the same input, one that | |||
3577 | // masks out the bits that are being shuffled. | |||
3578 | if (C.getOpcode() != ISD::AND || !isa<ConstantSDNode>(C.getOperand(1)) || | |||
3579 | C.getOperand(0) != Match1->Op) | |||
3580 | return SDValue(); | |||
3581 | ||||
3582 | uint64_t Mask = C.getConstantOperandVal(1); | |||
3583 | ||||
3584 | static const uint64_t BitmanipMasks[] = { | |||
3585 | 0x9999999999999999ULL, 0xC3C3C3C3C3C3C3C3ULL, 0xF00FF00FF00FF00FULL, | |||
3586 | 0xFF0000FFFF0000FFULL, 0xFFFF00000000FFFFULL, | |||
3587 | }; | |||
3588 | ||||
3589 | unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32; | |||
3590 | unsigned MaskIdx = Log2_32(Match1->ShAmt); | |||
3591 | uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width); | |||
3592 | ||||
3593 | if (Mask != ExpMask) | |||
3594 | return SDValue(); | |||
3595 | ||||
3596 | SDLoc DL(Op); | |||
3597 | return DAG.getNode( | |||
3598 | RISCVISD::SHFLI, DL, VT, Match1->Op, | |||
3599 | DAG.getTargetConstant(Match1->ShAmt, DL, Subtarget.getXLenVT())); | |||
3600 | } | |||
3601 | ||||
3602 | // Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is | |||
3603 | // non-zero, and to x when it is. Any repeated GREVI stage undoes itself. | |||
3604 | // Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). A repeated stage does | |||
3605 | // not undo itself, but it is redundant. | |||
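// For example, (GREVI (GREVI x, 1), 2) becomes (GREVI x, 3), while
// (GREVI (GREVI x, 2), 2) folds away to x; (GORCI (GORCI x, 1), 3) becomes
// (GORCI x, 3) because repeated GORC stages are idempotent.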
3606 | static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) { | |||
3607 | unsigned ShAmt1 = N->getConstantOperandVal(1); | |||
3608 | SDValue Src = N->getOperand(0); | |||
3609 | ||||
3610 | if (Src.getOpcode() != N->getOpcode()) | |||
3611 | return SDValue(); | |||
3612 | ||||
3613 | unsigned ShAmt2 = Src.getConstantOperandVal(1); | |||
3614 | Src = Src.getOperand(0); | |||
3615 | ||||
3616 | unsigned CombinedShAmt; | |||
3617 | if (N->getOpcode() == RISCVISD::GORCI || N->getOpcode() == RISCVISD::GORCIW) | |||
3618 | CombinedShAmt = ShAmt1 | ShAmt2; | |||
3619 | else | |||
3620 | CombinedShAmt = ShAmt1 ^ ShAmt2; | |||
3621 | ||||
3622 | if (CombinedShAmt == 0) | |||
3623 | return Src; | |||
3624 | ||||
3625 | SDLoc DL(N); | |||
3626 | return DAG.getNode(N->getOpcode(), DL, N->getValueType(0), Src, | |||
3627 | DAG.getTargetConstant(CombinedShAmt, DL, | |||
3628 | N->getOperand(1).getValueType())); | |||
3629 | } | |||
3630 | ||||
3631 | SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N, | |||
3632 | DAGCombinerInfo &DCI) const { | |||
3633 | SelectionDAG &DAG = DCI.DAG; | |||
3634 | ||||
3635 | switch (N->getOpcode()) { | |||
3636 | default: | |||
3637 | break; | |||
3638 | case RISCVISD::SplitF64: { | |||
3639 | SDValue Op0 = N->getOperand(0); | |||
3640 | // If the input to SplitF64 is just BuildPairF64 then the operation is | |||
3641 | // redundant. Instead, use BuildPairF64's operands directly. | |||
3642 | if (Op0->getOpcode() == RISCVISD::BuildPairF64) | |||
3643 | return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1)); | |||
3644 | ||||
3645 | SDLoc DL(N); | |||
3646 | ||||
3647 | // It's cheaper to materialise two 32-bit integers than to load a double | |||
3648 | // from the constant pool and transfer it to integer registers through the | |||
3649 | // stack. | |||
3650 | if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) { | |||
3651 | APInt V = C->getValueAPF().bitcastToAPInt(); | |||
3652 | SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32); | |||
3653 | SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32); | |||
3654 | return DCI.CombineTo(N, Lo, Hi); | |||
3655 | } | |||
3656 | ||||
3657 | // This is a target-specific version of a DAGCombine performed in | |||
3658 | // DAGCombiner::visitBITCAST. It performs the equivalent of: | |||
3659 | // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit) | |||
3660 | // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit)) | |||
3661 | if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) || | |||
3662 | !Op0.getNode()->hasOneUse()) | |||
3663 | break; | |||
3664 | SDValue NewSplitF64 = | |||
3665 | DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), | |||
3666 | Op0.getOperand(0)); | |||
3667 | SDValue Lo = NewSplitF64.getValue(0); | |||
3668 | SDValue Hi = NewSplitF64.getValue(1); | |||
3669 | APInt SignBit = APInt::getSignMask(32); | |||
3670 | if (Op0.getOpcode() == ISD::FNEG) { | |||
3671 | SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi, | |||
3672 | DAG.getConstant(SignBit, DL, MVT::i32)); | |||
3673 | return DCI.CombineTo(N, Lo, NewHi); | |||
3674 | } | |||
3675 | assert(Op0.getOpcode() == ISD::FABS); | |||
3676 | SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi, | |||
3677 | DAG.getConstant(~SignBit, DL, MVT::i32)); | |||
3678 | return DCI.CombineTo(N, Lo, NewHi); | |||
3679 | } | |||
3680 | case RISCVISD::SLLW: | |||
3681 | case RISCVISD::SRAW: | |||
3682 | case RISCVISD::SRLW: | |||
3683 | case RISCVISD::ROLW: | |||
3684 | case RISCVISD::RORW: { | |||
3685 | // Only the lower 32 bits of LHS and lower 5 bits of RHS are read. | |||
3686 | SDValue LHS = N->getOperand(0); | |||
3687 | SDValue RHS = N->getOperand(1); | |||
3688 | APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32); | |||
3689 | APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 5); | |||
3690 | if (SimplifyDemandedBits(N->getOperand(0), LHSMask, DCI) || | |||
3691 | SimplifyDemandedBits(N->getOperand(1), RHSMask, DCI)) { | |||
3692 | if (N->getOpcode() != ISD::DELETED_NODE) | |||
3693 | DCI.AddToWorklist(N); | |||
3694 | return SDValue(N, 0); | |||
3695 | } | |||
3696 | break; | |||
3697 | } | |||
3698 | case RISCVISD::FSL: | |||
3699 | case RISCVISD::FSR: { | |||
3700 | // Only the lower log2(Bitwidth)+1 bits of the shift amount are read. | |||
3701 | SDValue ShAmt = N->getOperand(2); | |||
3702 | unsigned BitWidth = ShAmt.getValueSizeInBits(); | |||
3703 | assert(isPowerOf2_32(BitWidth) && "Unexpected bit width"); | |||
3704 | APInt ShAmtMask(BitWidth, (BitWidth * 2) - 1); | |||
3705 | if (SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) { | |||
3706 | if (N->getOpcode() != ISD::DELETED_NODE) | |||
3707 | DCI.AddToWorklist(N); | |||
3708 | return SDValue(N, 0); | |||
3709 | } | |||
3710 | break; | |||
3711 | } | |||
3712 | case RISCVISD::FSLW: | |||
3713 | case RISCVISD::FSRW: { | |||
3714 | // Only the lower 32 bits of the value operands and the lower 6 bits of the | |||
3715 | // shift amount are read. | |||
3716 | SDValue Op0 = N->getOperand(0); | |||
3717 | SDValue Op1 = N->getOperand(1); | |||
3718 | SDValue ShAmt = N->getOperand(2); | |||
3719 | APInt OpMask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32); | |||
3720 | APInt ShAmtMask = APInt::getLowBitsSet(ShAmt.getValueSizeInBits(), 6); | |||
3721 | if (SimplifyDemandedBits(Op0, OpMask, DCI) || | |||
3722 | SimplifyDemandedBits(Op1, OpMask, DCI) || | |||
3723 | SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) { | |||
3724 | if (N->getOpcode() != ISD::DELETED_NODE) | |||
3725 | DCI.AddToWorklist(N); | |||
3726 | return SDValue(N, 0); | |||
3727 | } | |||
3728 | break; | |||
3729 | } | |||
3730 | case RISCVISD::GREVIW: | |||
3731 | case RISCVISD::GORCIW: { | |||
3732 | // Only the lower 32 bits of the first operand are read | |||
3733 | SDValue Op0 = N->getOperand(0); | |||
3734 | APInt Mask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32); | |||
3735 | if (SimplifyDemandedBits(Op0, Mask, DCI)) { | |||
3736 | if (N->getOpcode() != ISD::DELETED_NODE) | |||
3737 | DCI.AddToWorklist(N); | |||
3738 | return SDValue(N, 0); | |||
3739 | } | |||
3740 | ||||
3741 | return combineGREVI_GORCI(N, DCI.DAG); | |||
3742 | } | |||
3743 | case RISCVISD::FMV_X_ANYEXTW_RV64: { | |||
3744 | SDLoc DL(N); | |||
3745 | SDValue Op0 = N->getOperand(0); | |||
3746 | // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the | |||
3747 | // conversion is unnecessary: the FMV_W_X_RV64 operand is already an i64 and | |||
3748 | // can be used directly. | |||
3749 | if (Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) { | |||
3750 | assert(Op0.getOperand(0).getValueType() == MVT::i64 && | |||
3751 | "Unexpected value type!"); | |||
3752 | return Op0.getOperand(0); | |||
3753 | } | |||
3754 | ||||
3755 | // This is a target-specific version of a DAGCombine performed in | |||
3756 | // DAGCombiner::visitBITCAST. It performs the equivalent of: | |||
3757 | // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit) | |||
3758 | // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit)) | |||
3759 | if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) || | |||
3760 | !Op0.getNode()->hasOneUse()) | |||
3761 | break; | |||
3762 | SDValue NewFMV = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, | |||
3763 | Op0.getOperand(0)); | |||
3764 | APInt SignBit = APInt::getSignMask(32).sext(64); | |||
3765 | if (Op0.getOpcode() == ISD::FNEG) | |||
3766 | return DAG.getNode(ISD::XOR, DL, MVT::i64, NewFMV, | |||
3767 | DAG.getConstant(SignBit, DL, MVT::i64)); | |||
3768 | ||||
3769 | assert(Op0.getOpcode() == ISD::FABS); | |||
3770 | return DAG.getNode(ISD::AND, DL, MVT::i64, NewFMV, | |||
3771 | DAG.getConstant(~SignBit, DL, MVT::i64)); | |||
3772 | } | |||
3773 | case RISCVISD::GREVI: | |||
3774 | case RISCVISD::GORCI: | |||
3775 | return combineGREVI_GORCI(N, DCI.DAG); | |||
3776 | case ISD::OR: | |||
3777 | if (auto GREV = combineORToGREV(SDValue(N, 0), DCI.DAG, Subtarget)) | |||
3778 | return GREV; | |||
3779 | if (auto GORC = combineORToGORC(SDValue(N, 0), DCI.DAG, Subtarget)) | |||
3780 | return GORC; | |||
3781 | if (auto SHFL = combineORToSHFL(SDValue(N, 0), DCI.DAG, Subtarget)) | |||
3782 | return SHFL; | |||
3783 | break; | |||
3784 | case RISCVISD::SELECT_CC: { | |||
3785 | // Transform | |||
3786 | SDValue LHS = N->getOperand(0); | |||
3787 | SDValue RHS = N->getOperand(1); | |||
3788 | auto CCVal = static_cast<ISD::CondCode>(N->getConstantOperandVal(2)); | |||
3789 | if (!ISD::isIntEqualitySetCC(CCVal)) | |||
3790 | break; | |||
3791 | ||||
3792 | // Fold (select_cc (setlt X, Y), 0, ne, trueV, falseV) -> | |||
3793 | // (select_cc X, Y, lt, trueV, falseV) | |||
3794 | // Sometimes the setcc is introduced after select_cc has been formed. | |||
3795 | if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) && | |||
3796 | LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) { | |||
3797 | // If we're looking for eq 0 instead of ne 0, we need to invert the | |||
3798 | // condition. | |||
3799 | bool Invert = CCVal == ISD::SETEQ; | |||
3800 | CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get(); | |||
3801 | if (Invert) | |||
3802 | CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType()); | |||
3803 | ||||
3804 | RHS = LHS.getOperand(1); | |||
3805 | LHS = LHS.getOperand(0); | |||
3806 | normaliseSetCC(LHS, RHS, CCVal); | |||
3807 | ||||
3808 | SDLoc DL(N); | |||
3809 | SDValue TargetCC = DAG.getConstant(CCVal, DL, Subtarget.getXLenVT()); | |||
3810 | return DAG.getNode( | |||
3811 | RISCVISD::SELECT_CC, DL, N->getValueType(0), | |||
3812 | {LHS, RHS, TargetCC, N->getOperand(3), N->getOperand(4)}); | |||
3813 | } | |||
3814 | ||||
3815 | // Fold (select_cc (xor X, Y), 0, eq/ne, trueV, falseV) -> | |||
3816 | // (select_cc X, Y, eq/ne, trueV, falseV) | |||
3817 | if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS)) | |||
3818 | return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), N->getValueType(0), | |||
3819 | {LHS.getOperand(0), LHS.getOperand(1), | |||
3820 | N->getOperand(2), N->getOperand(3), | |||
3821 | N->getOperand(4)}); | |||
3822 | // (select_cc X, 1, setne, trueV, falseV) -> | |||
3823 | // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1. | |||
3824 | // This can occur when legalizing some floating point comparisons. | |||
3825 | APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1); | |||
3826 | if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) { | |||
3827 | SDLoc DL(N); | |||
3828 | CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType()); | |||
3829 | SDValue TargetCC = DAG.getConstant(CCVal, DL, Subtarget.getXLenVT()); | |||
3830 | RHS = DAG.getConstant(0, DL, LHS.getValueType()); | |||
3831 | return DAG.getNode( | |||
3832 | RISCVISD::SELECT_CC, DL, N->getValueType(0), | |||
3833 | {LHS, RHS, TargetCC, N->getOperand(3), N->getOperand(4)}); | |||
3834 | } | |||
3835 | ||||
3836 | break; | |||
3837 | } | |||
3838 | case ISD::SETCC: { | |||
3839 | // (setcc X, 1, setne) -> (setcc X, 0, seteq) if we can prove X is 0/1. | |||
3840 | // Comparing with 0 may allow us to fold into bnez/beqz. | |||
3841 | SDValue LHS = N->getOperand(0); | |||
3842 | SDValue RHS = N->getOperand(1); | |||
3843 | if (LHS.getValueType().isScalableVector()) | |||
3844 | break; | |||
3845 | auto CC = cast<CondCodeSDNode>(N->getOperand(2))->get(); | |||
3846 | APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1); | |||
3847 | if (isOneConstant(RHS) && ISD::isIntEqualitySetCC(CC) && | |||
3848 | DAG.MaskedValueIsZero(LHS, Mask)) { | |||
3849 | SDLoc DL(N); | |||
3850 | SDValue Zero = DAG.getConstant(0, DL, LHS.getValueType()); | |||
3851 | CC = ISD::getSetCCInverse(CC, LHS.getValueType()); | |||
3852 | return DAG.getSetCC(DL, N->getValueType(0), LHS, Zero, CC); | |||
3853 | } | |||
3854 | break; | |||
3855 | } | |||
3856 | case ISD::FCOPYSIGN: { | |||
3857 | EVT VT = N->getValueType(0); | |||
3858 | if (!VT.isVector()) | |||
3859 | break; | |||
3860 | // There is a form of VFSGNJ which injects the negated sign of its second | |||
3861 | // operand. Try to bubble any FNEG up after the extend/round to produce | |||
3862 | // this optimized pattern. Avoid modifying cases where the FP_ROUND has | |||
3863 | // TRUNC=1. | |||
3864 | SDValue In2 = N->getOperand(1); | |||
3865 | // Avoid cases where the extend/round has multiple uses, as duplicating | |||
3866 | // those is typically more expensive than removing a fneg. | |||
3867 | if (!In2.hasOneUse()) | |||
3868 | break; | |||
3869 | if (In2.getOpcode() != ISD::FP_EXTEND && | |||
3870 | (In2.getOpcode() != ISD::FP_ROUND || In2.getConstantOperandVal(1) != 0)) | |||
3871 | break; | |||
3872 | In2 = In2.getOperand(0); | |||
3873 | if (In2.getOpcode() != ISD::FNEG) | |||
3874 | break; | |||
3875 | SDLoc DL(N); | |||
3876 | SDValue NewFPExtRound = DAG.getFPExtendOrRound(In2.getOperand(0), DL, VT); | |||
3877 | return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N->getOperand(0), | |||
3878 | DAG.getNode(ISD::FNEG, DL, VT, NewFPExtRound)); | |||
3879 | } | |||
3880 | } | |||
3881 | ||||
3882 | return SDValue(); | |||
3883 | } | |||
3884 | ||||
3885 | bool RISCVTargetLowering::isDesirableToCommuteWithShift( | |||
3886 | const SDNode *N, CombineLevel Level) const { | |||
3887 | // The following folds are only desirable if `(OP _, c1 << c2)` can be | |||
3888 | // materialised in fewer instructions than `(OP _, c1)`: | |||
3889 | // | |||
3890 | // (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2) | |||
3891 | // (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2) | |||
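// A rough numeric illustration (constants chosen for exposition only): with
// c1 = 2047 and c2 = 4, c1 fits a 12-bit add immediate but c1 << c2 = 32752
// does not, so the code below rejects the fold and keeps the cheap ADDI on
// the original constant; if c1 << c2 were still materialisable as an add
// immediate, the shift would be commuted to enable further combines.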
3892 | SDValue N0 = N->getOperand(0); | |||
3893 | EVT Ty = N0.getValueType(); | |||
3894 | if (Ty.isScalarInteger() && | |||
3895 | (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) { | |||
3896 | auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1)); | |||
3897 | auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1)); | |||
3898 | if (C1 && C2) { | |||
3899 | const APInt &C1Int = C1->getAPIntValue(); | |||
3900 | APInt ShiftedC1Int = C1Int << C2->getAPIntValue(); | |||
3901 | ||||
3902 | // We can materialise `c1 << c2` into an add immediate, so it's "free", | |||
3903 | // and the combine should happen, to potentially allow further combines | |||
3904 | // later. | |||
3905 | if (ShiftedC1Int.getMinSignedBits() <= 64 && | |||
3906 | isLegalAddImmediate(ShiftedC1Int.getSExtValue())) | |||
3907 | return true; | |||
3908 | ||||
3909 | // We can materialise `c1` in an add immediate, so it's "free", and the | |||
3910 | // combine should be prevented. | |||
3911 | if (C1Int.getMinSignedBits() <= 64 && | |||
3912 | isLegalAddImmediate(C1Int.getSExtValue())) | |||
3913 | return false; | |||
3914 | ||||
3915 | // Neither constant will fit into an immediate, so find materialisation | |||
3916 | // costs. | |||
3917 | int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(), | |||
3918 | Subtarget.is64Bit()); | |||
3919 | int ShiftedC1Cost = RISCVMatInt::getIntMatCost( | |||
3920 | ShiftedC1Int, Ty.getSizeInBits(), Subtarget.is64Bit()); | |||
3921 | ||||
3922 | // Materialising `c1` is cheaper than materialising `c1 << c2`, so the | |||
3923 | // combine should be prevented. | |||
3924 | if (C1Cost < ShiftedC1Cost) | |||
3925 | return false; | |||
3926 | } | |||
3927 | } | |||
3928 | return true; | |||
3929 | } | |||
3930 | ||||
3931 | bool RISCVTargetLowering::targetShrinkDemandedConstant( | |||
3932 | SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, | |||
3933 | TargetLoweringOpt &TLO) const { | |||
3934 | // Delay this optimization as late as possible. | |||
3935 | if (!TLO.LegalOps) | |||
3936 | return false; | |||
3937 | ||||
3938 | EVT VT = Op.getValueType(); | |||
3939 | if (VT.isVector()) | |||
3940 | return false; | |||
3941 | ||||
3942 | // Only handle AND for now. | |||
3943 | if (Op.getOpcode() != ISD::AND) | |||
3944 | return false; | |||
3945 | ||||
3946 | ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)); | |||
3947 | if (!C) | |||
3948 | return false; | |||
3949 | ||||
3950 | const APInt &Mask = C->getAPIntValue(); | |||
3951 | ||||
3952 | // Clear all non-demanded bits initially. | |||
3953 | APInt ShrunkMask = Mask & DemandedBits; | |||
3954 | ||||
3955 | // If the shrunk mask fits in sign extended 12 bits, let the target | |||
3956 | // independent code apply it. | |||
3957 | if (ShrunkMask.isSignedIntN(12)) | |||
3958 | return false; | |||
3959 | ||||
3960 | // Try to make a smaller immediate by setting undemanded bits. | |||
3961 | ||||
3962 | // We need to be able to make a negative number through a combination of mask | |||
3963 | // and undemanded bits. | |||
3964 | APInt ExpandedMask = Mask | ~DemandedBits; | |||
3965 | if (!ExpandedMask.isNegative()) | |||
3966 | return false; | |||
3967 | ||||
3968 | // Determine the fewest number of bits needed to represent the negative number. | |||
3969 | unsigned MinSignedBits = ExpandedMask.getMinSignedBits(); | |||
3970 | ||||
3971 | // Try to make a 12 bit negative immediate. If that fails try to make a 32 | |||
3972 | // bit negative immediate unless the shrunk immediate already fits in 32 bits. | |||
3973 | APInt NewMask = ShrunkMask; | |||
3974 | if (MinSignedBits <= 12) | |||
3975 | NewMask.setBitsFrom(11); | |||
3976 | else if (MinSignedBits <= 32 && !ShrunkMask.isSignedIntN(32)) | |||
3977 | NewMask.setBitsFrom(31); | |||
3978 | else | |||
3979 | return false; | |||
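// Worked example (illustrative, assuming a 32-bit type): DemandedBits =
// 0x0000FFFF and Mask = 0x0000FF00 give ShrunkMask = 0x0000FF00, which does
// not fit a signed 12-bit immediate, but ExpandedMask = 0xFFFFFF00 is
// negative and needs only 9 signed bits, so NewMask becomes 0xFFFFFF00 (-256)
// and the AND can then be encoded as a single ANDI.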
3980 | ||||
3981 | // Sanity check that our new mask is a subset of the demanded mask. | |||
3982 | assert(NewMask.isSubsetOf(ExpandedMask)); | |||
3983 | ||||
3984 | // If we aren't changing the mask, just return true to keep it and prevent | |||
3985 | // the caller from optimizing. | |||
3986 | if (NewMask == Mask) | |||
3987 | return true; | |||
3988 | ||||
3989 | // Replace the constant with the new mask. | |||
3990 | SDLoc DL(Op); | |||
3991 | SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT); | |||
3992 | SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC); | |||
3993 | return TLO.CombineTo(Op, NewOp); | |||
3994 | } | |||
3995 | ||||
3996 | void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op, | |||
3997 | KnownBits &Known, | |||
3998 | const APInt &DemandedElts, | |||
3999 | const SelectionDAG &DAG, | |||
4000 | unsigned Depth) const { | |||
4001 | unsigned BitWidth = Known.getBitWidth(); | |||
4002 | unsigned Opc = Op.getOpcode(); | |||
4003 | assert((Opc >= ISD::BUILTIN_OP_END || | |||
4004 | Opc == ISD::INTRINSIC_WO_CHAIN || | |||
4005 | Opc == ISD::INTRINSIC_W_CHAIN || | |||
4006 | Opc == ISD::INTRINSIC_VOID) && | |||
4007 | "Should use MaskedValueIsZero if you don't know whether Op" | |||
4008 | " is a target node!"); | |||
4009 | ||||
4010 | Known.resetAll(); | |||
4011 | switch (Opc) { | |||
4012 | default: break; | |||
4013 | case RISCVISD::REMUW: { | |||
4014 | KnownBits Known2; | |||
4015 | Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); | |||
4016 | Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); | |||
4017 | // We only care about the lower 32 bits. | |||
4018 | Known = KnownBits::urem(Known.trunc(32), Known2.trunc(32)); | |||
4019 | // Restore the original width by sign extending. | |||
4020 | Known = Known.sext(BitWidth); | |||
4021 | break; | |||
4022 | } | |||
4023 | case RISCVISD::DIVUW: { | |||
4024 | KnownBits Known2; | |||
4025 | Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); | |||
4026 | Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); | |||
4027 | // We only care about the lower 32 bits. | |||
4028 | Known = KnownBits::udiv(Known.trunc(32), Known2.trunc(32)); | |||
4029 | // Restore the original width by sign extending. | |||
4030 | Known = Known.sext(BitWidth); | |||
4031 | break; | |||
4032 | } | |||
4033 | case RISCVISD::READ_VLENB: | |||
4034 | // We assume VLENB is at least 8 bytes. | |||
4035 | // FIXME: The 1.0 draft spec defines minimum VLEN as 128 bits. | |||
4036 | Known.Zero.setLowBits(3); | |||
4037 | break; | |||
4038 | } | |||
4039 | } | |||
4040 | ||||
4041 | unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode( | |||
4042 | SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, | |||
4043 | unsigned Depth) const { | |||
4044 | switch (Op.getOpcode()) { | |||
4045 | default: | |||
4046 | break; | |||
4047 | case RISCVISD::SLLW: | |||
4048 | case RISCVISD::SRAW: | |||
4049 | case RISCVISD::SRLW: | |||
4050 | case RISCVISD::DIVW: | |||
4051 | case RISCVISD::DIVUW: | |||
4052 | case RISCVISD::REMUW: | |||
4053 | case RISCVISD::ROLW: | |||
4054 | case RISCVISD::RORW: | |||
4055 | case RISCVISD::GREVIW: | |||
4056 | case RISCVISD::GORCIW: | |||
4057 | case RISCVISD::FSLW: | |||
4058 | case RISCVISD::FSRW: | |||
4059 | // TODO: As the result is sign-extended, this is conservatively correct. A | |||
4060 | // more precise answer could be calculated for SRAW depending on known | |||
4061 | // bits in the shift amount. | |||
4062 | return 33; | |||
4063 | case RISCVISD::SHFLI: { | |||
4064 | // There is no SHFLIW, but an i64 SHFLI with bit 4 of the control word | |||
4065 | // cleared doesn't affect bit 31. The upper 32 bits will be shuffled, but | |||
4066 | // will stay within the upper 32 bits. If there were more than 32 sign bits | |||
4067 | // before there will be at least 33 sign bits after. | |||
4068 | if (Op.getValueType() == MVT::i64 && | |||
4069 | (Op.getConstantOperandVal(1) & 0x10) == 0) { | |||
4070 | unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1); | |||
4071 | if (Tmp > 32) | |||
4072 | return 33; | |||
4073 | } | |||
4074 | break; | |||
4075 | } | |||
4076 | case RISCVISD::VMV_X_S: | |||
4077 | // The number of sign bits of the scalar result is computed by obtaining the | |||
4078 | // element type of the input vector operand, subtracting its width from the | |||
4079 | // XLEN, and then adding one (sign bit within the element type). If the | |||
4080 | // element type is wider than XLen, the least-significant XLEN bits are | |||
4081 | // taken. | |||
4082 | if (Op.getOperand(0).getScalarValueSizeInBits() > Subtarget.getXLen()) | |||
4083 | return 1; | |||
4084 | return Subtarget.getXLen() - Op.getOperand(0).getScalarValueSizeInBits() + 1; | |||
4085 | } | |||
4086 | ||||
4087 | return 1; | |||
4088 | } | |||
4089 | ||||
4090 | static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI, | |||
4091 | MachineBasicBlock *BB) { | |||
4092 | assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction"); | |||
4093 | ||||
4094 | // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves. | |||
4095 | // Should the count have wrapped while it was being read, we need to try | |||
4096 | // again. | |||
4097 | // ... | |||
4098 | // read: | |||
4099 | // rdcycleh x3 # load high word of cycle | |||
4100 | // rdcycle x2 # load low word of cycle | |||
4101 | // rdcycleh x4 # load high word of cycle | |||
4102 | // bne x3, x4, read # check if high word reads match, otherwise try again | |||
4103 | // ... | |||
4104 | ||||
4105 | MachineFunction &MF = *BB->getParent(); | |||
4106 | const BasicBlock *LLVM_BB = BB->getBasicBlock(); | |||
4107 | MachineFunction::iterator It = ++BB->getIterator(); | |||
4108 | ||||
4109 | MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB); | |||
4110 | MF.insert(It, LoopMBB); | |||
4111 | ||||
4112 | MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB); | |||
4113 | MF.insert(It, DoneMBB); | |||
4114 | ||||
4115 | // Transfer the remainder of BB and its successor edges to DoneMBB. | |||
4116 | DoneMBB->splice(DoneMBB->begin(), BB, | |||
4117 | std::next(MachineBasicBlock::iterator(MI)), BB->end()); | |||
4118 | DoneMBB->transferSuccessorsAndUpdatePHIs(BB); | |||
4119 | ||||
4120 | BB->addSuccessor(LoopMBB); | |||
4121 | ||||
4122 | MachineRegisterInfo &RegInfo = MF.getRegInfo(); | |||
4123 | Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass); | |||
4124 | Register LoReg = MI.getOperand(0).getReg(); | |||
4125 | Register HiReg = MI.getOperand(1).getReg(); | |||
4126 | DebugLoc DL = MI.getDebugLoc(); | |||
4127 | ||||
4128 | const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo(); | |||
4129 | BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg) | |||
4130 | .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding) | |||
4131 | .addReg(RISCV::X0); | |||
4132 | BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg) | |||
4133 | .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding) | |||
4134 | .addReg(RISCV::X0); | |||
4135 | BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg) | |||
4136 | .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding) | |||
4137 | .addReg(RISCV::X0); | |||
4138 | ||||
4139 | BuildMI(LoopMBB, DL, TII->get(RISCV::BNE)) | |||
4140 | .addReg(HiReg) | |||
4141 | .addReg(ReadAgainReg) | |||
4142 | .addMBB(LoopMBB); | |||
4143 | ||||
4144 | LoopMBB->addSuccessor(LoopMBB); | |||
4145 | LoopMBB->addSuccessor(DoneMBB); | |||
4146 | ||||
4147 | MI.eraseFromParent(); | |||
4148 | ||||
4149 | return DoneMBB; | |||
4150 | } | |||
4151 | ||||
4152 | static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI, | |||
4153 | MachineBasicBlock *BB) { | |||
4154 | assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction"); | |||
4155 | ||||
4156 | MachineFunction &MF = *BB->getParent(); | |||
4157 | DebugLoc DL = MI.getDebugLoc(); | |||
4158 | const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); | |||
4159 | const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo(); | |||
4160 | Register LoReg = MI.getOperand(0).getReg(); | |||
4161 | Register HiReg = MI.getOperand(1).getReg(); | |||
4162 | Register SrcReg = MI.getOperand(2).getReg(); | |||
4163 | const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass; | |||
4164 | int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF); | |||
4165 | ||||
4166 | TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC, | |||
4167 | RI); | |||
4168 | MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI); | |||
4169 | MachineMemOperand *MMOLo = | |||
4170 | MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8)); | |||
4171 | MachineMemOperand *MMOHi = MF.getMachineMemOperand( | |||
4172 | MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8)); | |||
4173 | BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg) | |||
4174 | .addFrameIndex(FI) | |||
4175 | .addImm(0) | |||
4176 | .addMemOperand(MMOLo); | |||
4177 | BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg) | |||
4178 | .addFrameIndex(FI) | |||
4179 | .addImm(4) | |||
4180 | .addMemOperand(MMOHi); | |||
4181 | MI.eraseFromParent(); // The pseudo instruction is gone now. | |||
4182 | return BB; | |||
4183 | } | |||
4184 | ||||
4185 | static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI, | |||
4186 | MachineBasicBlock *BB) { | |||
4187 | assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo && | |||
4188 | "Unexpected instruction"); | |||
4189 | ||||
4190 | MachineFunction &MF = *BB->getParent(); | |||
4191 | DebugLoc DL = MI.getDebugLoc(); | |||
4192 | const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); | |||
4193 | const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo(); | |||
4194 | Register DstReg = MI.getOperand(0).getReg(); | |||
4195 | Register LoReg = MI.getOperand(1).getReg(); | |||
4196 | Register HiReg = MI.getOperand(2).getReg(); | |||
4197 | const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass; | |||
4198 | int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF); | |||
4199 | ||||
4200 | MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI); | |||
4201 | MachineMemOperand *MMOLo = | |||
4202 | MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8)); | |||
4203 | MachineMemOperand *MMOHi = MF.getMachineMemOperand( | |||
4204 | MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8)); | |||
4205 | BuildMI(*BB, MI, DL, TII.get(RISCV::SW)) | |||
4206 | .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill())) | |||
4207 | .addFrameIndex(FI) | |||
4208 | .addImm(0) | |||
4209 | .addMemOperand(MMOLo); | |||
4210 | BuildMI(*BB, MI, DL, TII.get(RISCV::SW)) | |||
4211 | .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill())) | |||
4212 | .addFrameIndex(FI) | |||
4213 | .addImm(4) | |||
4214 | .addMemOperand(MMOHi); | |||
4215 | TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI); | |||
4216 | MI.eraseFromParent(); // The pseudo instruction is gone now. | |||
4217 | return BB; | |||
4218 | } | |||
4219 | ||||
4220 | static bool isSelectPseudo(MachineInstr &MI) { | |||
4221 | switch (MI.getOpcode()) { | |||
4222 | default: | |||
4223 | return false; | |||
4224 | case RISCV::Select_GPR_Using_CC_GPR: | |||
4225 | case RISCV::Select_FPR16_Using_CC_GPR: | |||
4226 | case RISCV::Select_FPR32_Using_CC_GPR: | |||
4227 | case RISCV::Select_FPR64_Using_CC_GPR: | |||
4228 | return true; | |||
4229 | } | |||
4230 | } | |||
4231 | ||||
4232 | static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI, | |||
4233 | MachineBasicBlock *BB) { | |||
4234 | // To "insert" Select_* instructions, we actually have to insert the triangle | |||
4235 | // control-flow pattern. The incoming instructions know the destination vreg | |||
4236 | // to set, the condition code register to branch on, the true/false values to | |||
4237 | // select between, and the condcode to use to select the appropriate branch. | |||
4238 | // | |||
4239 | // We produce the following control flow: | |||
4240 | // HeadMBB | |||
4241 | // | \ | |||
4242 | // | IfFalseMBB | |||
4243 | // | / | |||
4244 | // TailMBB | |||
4245 | // | |||
4246 | // When we find a sequence of selects we attempt to optimize their emission | |||
4247 | // by sharing the control flow. Currently we only handle cases where we have | |||
4248 | // multiple selects with the exact same condition (same LHS, RHS and CC). | |||
4249 | // The selects may be interleaved with other instructions if the other | |||
4250 | // instructions meet some requirements we deem safe: | |||
4251 | // - They are debug instructions. Otherwise, | |||
4252 | // - They do not have side-effects, do not access memory and their inputs do | |||
4253 | // not depend on the results of the select pseudo-instructions. | |||
4254 | // The TrueV/FalseV operands of the selects cannot depend on the result of | |||
4255 | // previous selects in the sequence. | |||
4256 | // These conditions could be further relaxed. See the X86 target for a | |||
4257 | // related approach and more information. | |||
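// Sketch of the emitted structure for a single select (virtual register and
// value names hypothetical):
//   HeadMBB:    BLT %lhs, %rhs, TailMBB     ; branch opcode derived from CC
//   IfFalseMBB:                             ; empty, falls through
//   TailMBB:    %res = PHI [ %trueV, HeadMBB ], [ %falseV, IfFalseMBB ]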
4258 | Register LHS = MI.getOperand(1).getReg(); | |||
4259 | Register RHS = MI.getOperand(2).getReg(); | |||
4260 | auto CC = static_cast<ISD::CondCode>(MI.getOperand(3).getImm()); | |||
4261 | ||||
4262 | SmallVector<MachineInstr *, 4> SelectDebugValues; | |||
4263 | SmallSet<Register, 4> SelectDests; | |||
4264 | SelectDests.insert(MI.getOperand(0).getReg()); | |||
4265 | ||||
4266 | MachineInstr *LastSelectPseudo = &MI; | |||
4267 | ||||
4268 | for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI); | |||
4269 | SequenceMBBI != E; ++SequenceMBBI) { | |||
4270 | if (SequenceMBBI->isDebugInstr()) | |||
4271 | continue; | |||
4272 | else if (isSelectPseudo(*SequenceMBBI)) { | |||
4273 | if (SequenceMBBI->getOperand(1).getReg() != LHS || | |||
4274 | SequenceMBBI->getOperand(2).getReg() != RHS || | |||
4275 | SequenceMBBI->getOperand(3).getImm() != CC || | |||
4276 | SelectDests.count(SequenceMBBI->getOperand(4).getReg()) || | |||
4277 | SelectDests.count(SequenceMBBI->getOperand(5).getReg())) | |||
4278 | break; | |||
4279 | LastSelectPseudo = &*SequenceMBBI; | |||
4280 | SequenceMBBI->collectDebugValues(SelectDebugValues); | |||
4281 | SelectDests.insert(SequenceMBBI->getOperand(0).getReg()); | |||
4282 | } else { | |||
4283 | if (SequenceMBBI->hasUnmodeledSideEffects() || | |||
4284 | SequenceMBBI->mayLoadOrStore()) | |||
4285 | break; | |||
4286 | if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) { | |||
4287 | return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg()); | |||
4288 | })) | |||
4289 | break; | |||
4290 | } | |||
4291 | } | |||
4292 | ||||
4293 | const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo(); | |||
4294 | const BasicBlock *LLVM_BB = BB->getBasicBlock(); | |||
4295 | DebugLoc DL = MI.getDebugLoc(); | |||
4296 | MachineFunction::iterator I = ++BB->getIterator(); | |||
4297 | ||||
4298 | MachineBasicBlock *HeadMBB = BB; | |||
4299 | MachineFunction *F = BB->getParent(); | |||
4300 | MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB); | |||
4301 | MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB); | |||
4302 | ||||
4303 | F->insert(I, IfFalseMBB); | |||
4304 | F->insert(I, TailMBB); | |||
4305 | ||||
4306 | // Transfer debug instructions associated with the selects to TailMBB. | |||
4307 | for (MachineInstr *DebugInstr : SelectDebugValues) { | |||
4308 | TailMBB->push_back(DebugInstr->removeFromParent()); | |||
4309 | } | |||
4310 | ||||
4311 | // Move all instructions after the sequence to TailMBB. | |||
4312 | TailMBB->splice(TailMBB->end(), HeadMBB, | |||
4313 | std::next(LastSelectPseudo->getIterator()), HeadMBB->end()); | |||
4314 | // Update machine-CFG edges by transferring all successors of the current | |||
4315 | // block to the new block which will contain the Phi nodes for the selects. | |||
4316 | TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB); | |||
4317 | // Set the successors for HeadMBB. | |||
4318 | HeadMBB->addSuccessor(IfFalseMBB); | |||
4319 | HeadMBB->addSuccessor(TailMBB); | |||
4320 | ||||
4321 | // Insert appropriate branch. | |||
4322 | unsigned Opcode = getBranchOpcodeForIntCondCode(CC); | |||
4323 | ||||
4324 | BuildMI(HeadMBB, DL, TII.get(Opcode)) | |||
4325 | .addReg(LHS) | |||
4326 | .addReg(RHS) | |||
4327 | .addMBB(TailMBB); | |||
4328 | ||||
4329 | // IfFalseMBB just falls through to TailMBB. | |||
4330 | IfFalseMBB->addSuccessor(TailMBB); | |||
4331 | ||||
4332 | // Create PHIs for all of the select pseudo-instructions. | |||
4333 | auto SelectMBBI = MI.getIterator(); | |||
4334 | auto SelectEnd = std::next(LastSelectPseudo->getIterator()); | |||
4335 | auto InsertionPoint = TailMBB->begin(); | |||
4336 | while (SelectMBBI != SelectEnd) { | |||
4337 | auto Next = std::next(SelectMBBI); | |||
4338 | if (isSelectPseudo(*SelectMBBI)) { | |||
4339 | // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ] | |||
4340 | BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(), | |||
4341 | TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg()) | |||
4342 | .addReg(SelectMBBI->getOperand(4).getReg()) | |||
4343 | .addMBB(HeadMBB) | |||
4344 | .addReg(SelectMBBI->getOperand(5).getReg()) | |||
4345 | .addMBB(IfFalseMBB); | |||
4346 | SelectMBBI->eraseFromParent(); | |||
4347 | } | |||
4348 | SelectMBBI = Next; | |||
4349 | } | |||
4350 | ||||
4351 | F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs); | |||
4352 | return TailMBB; | |||
4353 | } | |||
4354 | ||||
4355 | static MachineBasicBlock *addVSetVL(MachineInstr &MI, MachineBasicBlock *BB, | |||
4356 | int VLIndex, unsigned SEWIndex, | |||
4357 | RISCVVLMUL VLMul, bool ForceTailAgnostic) { | |||
4358 | MachineFunction &MF = *BB->getParent(); | |||
4359 | DebugLoc DL = MI.getDebugLoc(); | |||
4360 | const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); | |||
4361 | ||||
4362 | unsigned SEW = MI.getOperand(SEWIndex).getImm(); | |||
4363 | assert(RISCVVType::isValidSEW(SEW) && "Unexpected SEW"); | |||
4364 | RISCVVSEW ElementWidth = static_cast<RISCVVSEW>(Log2_32(SEW / 8)); | |||
4365 | ||||
4366 | MachineRegisterInfo &MRI = MF.getRegInfo(); | |||
4367 | ||||
4368 | auto BuildVSETVLI = [&]() { | |||
4369 | if (VLIndex >= 0) { | |||
4370 | Register DestReg = MRI.createVirtualRegister(&RISCV::GPRRegClass); | |||
4371 | Register VLReg = MI.getOperand(VLIndex).getReg(); | |||
4372 | ||||
4373 | // VL might be a compile time constant, but isel would have to put it | |||
4374 | // in a register. See if VL comes from an ADDI X0, imm. | |||
4375 | if (VLReg.isVirtual()) { | |||
4376 | MachineInstr *Def = MRI.getVRegDef(VLReg); | |||
4377 | if (Def && Def->getOpcode() == RISCV::ADDI && | |||
4378 | Def->getOperand(1).getReg() == RISCV::X0 && | |||
4379 | Def->getOperand(2).isImm()) { | |||
4380 | uint64_t Imm = Def->getOperand(2).getImm(); | |||
4381 | // VSETIVLI allows a 5-bit zero extended immediate. | |||
4382 | if (isUInt<5>(Imm)) | |||
4383 | return BuildMI(*BB, MI, DL, TII.get(RISCV::PseudoVSETIVLI)) | |||
4384 | .addReg(DestReg, RegState::Define | RegState::Dead) | |||
4385 | .addImm(Imm); | |||
4386 | } | |||
4387 | } | |||
4388 | ||||
4389 | return BuildMI(*BB, MI, DL, TII.get(RISCV::PseudoVSETVLI)) | |||
4390 | .addReg(DestReg, RegState::Define | RegState::Dead) | |||
4391 | .addReg(VLReg); | |||
4392 | } | |||
4393 | ||||
4394 | // With no VL operand in the pseudo, do not modify VL (rd = X0, rs1 = X0). | |||
4395 | return BuildMI(*BB, MI, DL, TII.get(RISCV::PseudoVSETVLI)) | |||
4396 | .addReg(RISCV::X0, RegState::Define | RegState::Dead) | |||
4397 | .addReg(RISCV::X0, RegState::Kill); | |||
4398 | }; | |||
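// Once the vtype immediate is appended below, the emitted instruction looks
// roughly like one of the following (textual forms are illustrative only):
//   PseudoVSETIVLI dead %tmp, 4, <vtype>           ; VL is a small constant
//   PseudoVSETVLI  dead %tmp, %vl, <vtype>         ; VL comes from a register
//   PseudoVSETVLI  dead $x0, killed $x0, <vtype>   ; no VL operand, VL kept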
4399 | ||||
4400 | MachineInstrBuilder MIB = BuildVSETVLI(); | |||
4401 | ||||
4402 | // Default to tail agnostic unless the destination is tied to a source. In | |||
4403 | // that case the user would have some control over the tail values. The tail | |||
4404 | // policy is also ignored on instructions that only update element 0 like | |||
4405 | // vmv.s.x or reductions, so use agnostic there to match the common case. | |||
4406 | // FIXME: This is conservatively correct, but we might want to detect that | |||
4407 | // the input is undefined. | |||
4408 | bool TailAgnostic = true; | |||
4409 | unsigned UseOpIdx; | |||
4410 | if (!ForceTailAgnostic && MI.isRegTiedToUseOperand(0, &UseOpIdx)) { | |||
4411 | TailAgnostic = false; | |||
4412 | // If the tied operand is an IMPLICIT_DEF we can keep TailAgnostic. | |||
4413 | const MachineOperand &UseMO = MI.getOperand(UseOpIdx); | |||
4414 | MachineInstr *UseMI = MRI.getVRegDef(UseMO.getReg()); | |||
4415 | if (UseMI && UseMI->isImplicitDef()) | |||
4416 | TailAgnostic = true; | |||
4417 | } | |||
4418 | ||||
4419 | // For simplicity we reuse the vtype representation here. | |||
4420 | MIB.addImm(RISCVVType::encodeVTYPE(VLMul, ElementWidth, | |||
4421 | /*TailAgnostic*/ TailAgnostic, | |||
4422 | /*MaskAgnostic*/ false)); | |||
4423 | ||||
4424 | // Remove (now) redundant operands from the pseudo. | |||
4425 | MI.getOperand(SEWIndex).setImm(-1); | |||
4426 | if (VLIndex >= 0) { | |||
4427 | MI.getOperand(VLIndex).setReg(RISCV::NoRegister); | |||
4428 | MI.getOperand(VLIndex).setIsKill(false); | |||
4429 | } | |||
4430 | ||||
4431 | return BB; | |||
4432 | } | |||
4433 | ||||
4434 | MachineBasicBlock * | |||
4435 | RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, | |||
4436 | MachineBasicBlock *BB) const { | |||
4437 | uint64_t TSFlags = MI.getDesc().TSFlags; | |||
4438 | ||||
4439 | if (TSFlags & RISCVII::HasSEWOpMask) { | |||
4440 | unsigned NumOperands = MI.getNumExplicitOperands(); | |||
4441 | int VLIndex = (TSFlags & RISCVII::HasVLOpMask) ? NumOperands - 2 : -1; | |||
4442 | unsigned SEWIndex = NumOperands - 1; | |||
4443 | bool ForceTailAgnostic = TSFlags & RISCVII::ForceTailAgnosticMask; | |||
4444 | ||||
4445 | RISCVVLMUL VLMul = static_cast<RISCVVLMUL>((TSFlags & RISCVII::VLMulMask) >> | |||
4446 | RISCVII::VLMulShift); | |||
4447 | return addVSetVL(MI, BB, VLIndex, SEWIndex, VLMul, ForceTailAgnostic); | |||
4448 | } | |||
4449 | ||||
4450 | switch (MI.getOpcode()) { | |||
4451 | default: | |||
4452 | llvm_unreachable("Unexpected instr type to insert")::llvm::llvm_unreachable_internal("Unexpected instr type to insert" , "/build/llvm-toolchain-snapshot-13~++20210308111132+66e3a4abe99c/llvm/lib/Target/RISCV/RISCVISelLowering.cpp" , 4452); | |||
4453 | case RISCV::ReadCycleWide: | |||
4454 | assert(!Subtarget.is64Bit() && | |||
4455 | "ReadCycleWide is only to be used on riscv32"); | |||
4456 | return emitReadCycleWidePseudo(MI, BB); | |||
4457 | case RISCV::Select_GPR_Using_CC_GPR: | |||
4458 | case RISCV::Select_FPR16_Using_CC_GPR: | |||
4459 | case RISCV::Select_FPR32_Using_CC_GPR: | |||
4460 | case RISCV::Select_FPR64_Using_CC_GPR: | |||
4461 | return emitSelectPseudo(MI, BB); | |||
4462 | case RISCV::BuildPairF64Pseudo: | |||
4463 | return emitBuildPairF64Pseudo(MI, BB); | |||
4464 | case RISCV::SplitF64Pseudo: | |||
4465 | return emitSplitF64Pseudo(MI, BB); | |||
4466 | } | |||
4467 | } | |||
4468 | ||||
4469 | // Calling Convention Implementation. | |||
4470 | // The expectations for frontend ABI lowering vary from target to target. | |||
4471 | // Ideally, an LLVM frontend would be able to avoid worrying about many ABI | |||
4472 | // details, but this is a longer term goal. For now, we simply try to keep the | |||
4473 | // role of the frontend as simple and well-defined as possible. The rules can | |||
4474 | // be summarised as: | |||
4475 | // * Never split up large scalar arguments. We handle them here. | |||
4476 | // * If a hardfloat calling convention is being used, and the struct may be | |||
4477 | // passed in a pair of registers (fp+fp, int+fp), and both registers are | |||
4478 | // available, then pass as two separate arguments. If either the GPRs or FPRs | |||
4479 | // are exhausted, then pass according to the rule below. | |||
4480 | // * If a struct could never be passed in registers or directly in a stack | |||
4481 | // slot (as it is larger than 2*XLEN and the floating point rules don't | |||
4482 | // apply), then pass it using a pointer with the byval attribute. | |||
4483 | // * If a struct is no larger than 2*XLEN, then coerce to either a two-element | |||
4484 | // word-sized array or a 2*XLEN scalar (depending on alignment). | |||
4485 | // * The frontend can determine whether a struct is returned by reference or | |||
4486 | // not based on its size and fields. If it will be returned by reference, the | |||
4487 | // frontend must modify the prototype so a pointer with the sret annotation is | |||
4488 | // passed as the first argument. This is not necessary for large scalar | |||
4489 | // returns. | |||
4490 | // * Struct return values and varargs should be coerced to structs containing | |||
4491 | // register-size fields in the same situations they would be for fixed | |||
4492 | // arguments. | |||
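// A hedged illustration of the frontend side of these rules on RV32/ILP32
// (struct and function names are hypothetical):
//   struct Small { int a; int b; };   // 8 bytes == 2*XLEN, coerced:
//     define void @takes_small([2 x i32] %s.coerce)
//   struct Big { int a[6]; };         // 24 bytes > 2*XLEN, passed byval:
//     define void @takes_big(%struct.Big* byval(%struct.Big) align 4 %b)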
4493 | ||||
4494 | static const MCPhysReg ArgGPRs[] = { | |||
4495 | RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, | |||
4496 | RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17 | |||
4497 | }; | |||
4498 | static const MCPhysReg ArgFPR16s[] = { | |||
4499 | RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, | |||
4500 | RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H | |||
4501 | }; | |||
4502 | static const MCPhysReg ArgFPR32s[] = { | |||
4503 | RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, | |||
4504 | RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F | |||
4505 | }; | |||
4506 | static const MCPhysReg ArgFPR64s[] = { | |||
4507 | RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, | |||
4508 | RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D | |||
4509 | }; | |||
4510 | // This is an interim calling convention and it may be changed in the future. | |||
4511 | static const MCPhysReg ArgVRs[] = { | |||
4512 | RISCV::V8, RISCV::V9, RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13, | |||
4513 | RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19, | |||
4514 | RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23}; | |||
4515 | static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2, RISCV::V10M2, RISCV::V12M2, | |||
4516 | RISCV::V14M2, RISCV::V16M2, RISCV::V18M2, | |||
4517 | RISCV::V20M2, RISCV::V22M2}; | |||
4518 | static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4, | |||
4519 | RISCV::V20M4}; | |||
4520 | static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8}; | |||
4521 | ||||
4522 | // Pass a 2*XLEN argument that has been split into two XLEN values through | |||
4523 | // registers or the stack as necessary. | |||
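// For example (a sketch of the logic below, register choice illustrative): an
// i64 argument on RV32 whose first half takes the last free GPR, say a7, has
// its second half placed on the stack without extra alignment; if no GPR is
// free at all, both halves go on the stack using the argument's original
// alignment.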
4524 | static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1, | |||
4525 | ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2, | |||
4526 | MVT ValVT2, MVT LocVT2, | |||
4527 | ISD::ArgFlagsTy ArgFlags2) { | |||
4528 | unsigned XLenInBytes = XLen / 8; | |||
4529 | if (Register Reg = State.AllocateReg(ArgGPRs)) { | |||
4530 | // At least one half can be passed via register. | |||
4531 | State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg, | |||
4532 | VA1.getLocVT(), CCValAssign::Full)); | |||
4533 | } else { | |||
4534 | // Both halves must be passed on the stack, with proper alignment. | |||
4535 | Align StackAlign = | |||
4536 | std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign()); | |||
4537 | State.addLoc( | |||
4538 | CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(), | |||
4539 | State.AllocateStack(XLenInBytes, StackAlign), | |||
4540 | VA1.getLocVT(), CCValAssign::Full)); | |||
4541 | State.addLoc(CCValAssign::getMem( | |||
4542 | ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)), | |||
4543 | LocVT2, CCValAssign::Full)); | |||
4544 | return false; | |||
4545 | } | |||
4546 | ||||
4547 | if (Register Reg = State.AllocateReg(ArgGPRs)) { | |||
4548 | // The second half can also be passed via register. | |||
4549 | State.addLoc( | |||
4550 | CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full)); | |||
4551 | } else { | |||
4552 | // The second half is passed via the stack, without additional alignment. | |||
4553 | State.addLoc(CCValAssign::getMem( | |||
4554 | ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)), | |||
4555 | LocVT2, CCValAssign::Full)); | |||
4556 | } | |||
4557 | ||||
4558 | return false; | |||
4559 | } | |||
4560 | ||||
4561 | // Implements the RISC-V calling convention. Returns true upon failure. | |||
4562 | static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo, | |||
4563 | MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, | |||
4564 | ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed, | |||
4565 | bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI, | |||
4566 | Optional<unsigned> FirstMaskArgument) { | |||
4567 | unsigned XLen = DL.getLargestLegalIntTypeSizeInBits(); | |||
4568 | assert(XLen == 32 || XLen == 64); | |||
4569 | MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64; | |||
4570 | ||||
4571 | // Any return value split into more than two values can't be returned | |||
4572 | // directly. | |||
4573 | if (IsRet && ValNo > 1) | |||
4574 | return true; | |||
4575 | ||||
4576 | // UseGPRForF16_F32 if targeting one of the soft-float ABIs, if passing a | |||
4577 | // variadic argument, or if no F16/F32 argument registers are available. | |||
4578 | bool UseGPRForF16_F32 = true; | |||
4579 | // UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if passing a | |||
4580 | // variadic argument, or if no F64 argument registers are available. | |||
4581 | bool UseGPRForF64 = true; | |||
4582 | ||||
4583 | switch (ABI) { | |||
4584 | default: | |||
4585 | llvm_unreachable("Unexpected ABI")::llvm::llvm_unreachable_internal("Unexpected ABI", "/build/llvm-toolchain-snapshot-13~++20210308111132+66e3a4abe99c/llvm/lib/Target/RISCV/RISCVISelLowering.cpp" , 4585); | |||
4586 | case RISCVABI::ABI_ILP32: | |||
4587 | case RISCVABI::ABI_LP64: | |||
4588 | break; | |||
4589 | case RISCVABI::ABI_ILP32F: | |||
4590 | case RISCVABI::ABI_LP64F: | |||
4591 | UseGPRForF16_F32 = !IsFixed; | |||
4592 | break; | |||
4593 | case RISCVABI::ABI_ILP32D: | |||
4594 | case RISCVABI::ABI_LP64D: | |||
4595 | UseGPRForF16_F32 = !IsFixed; | |||
4596 | UseGPRForF64 = !IsFixed; | |||
4597 | break; | |||
4598 | } | |||
4599 | ||||
4600 | // FPR16, FPR32, and FPR64 alias each other. | |||
4601 | if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) { | |||
4602 | UseGPRForF16_F32 = true; | |||
4603 | UseGPRForF64 = true; | |||
4604 | } | |||
4605 | ||||
4606 | // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and | |||
4607 | // similar local variables rather than directly checking against the target | |||
4608 | // ABI. | |||
4609 | ||||
4610 | if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) { | |||
4611 | LocVT = XLenVT; | |||
4612 | LocInfo = CCValAssign::BCvt; | |||
4613 | } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) { | |||
4614 | LocVT = MVT::i64; | |||
4615 | LocInfo = CCValAssign::BCvt; | |||
4616 | } | |||
4617 | ||||
4618 | // If this is a variadic argument, the RISC-V calling convention requires | |||
4619 | // that it is assigned an 'even' or 'aligned' register if it has 8-byte | |||
4620 | // alignment (RV32) or 16-byte alignment (RV64). An aligned register should | |||
4621 | // be used regardless of whether the original argument was split during | |||
4622 | // legalisation or not. The argument will not be passed by registers if the | |||
4623 | // original type is larger than 2*XLEN, so the register alignment rule does | |||
4624 | // not apply. | |||
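// For example (illustrative): a variadic double on RV32 that would otherwise
// start in an odd register such as a1 skips a1 and is passed in the aligned
// pair a2/a3 instead.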
4625 | unsigned TwoXLenInBytes = (2 * XLen) / 8; | |||
4626 | if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes && | |||
4627 | DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) { | |||
4628 | unsigned RegIdx = State.getFirstUnallocated(ArgGPRs); | |||
4629 | // Skip 'odd' register if necessary. | |||
4630 | if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1) | |||
4631 | State.AllocateReg(ArgGPRs); | |||
4632 | } | |||
4633 | ||||
4634 | SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs(); | |||
4635 | SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags = | |||
4636 | State.getPendingArgFlags(); | |||
4637 | ||||
4638 | assert(PendingLocs.size() == PendingArgFlags.size() && | |||
4639 | "PendingLocs and PendingArgFlags out of sync"); | |||
4640 | ||||
4641 | // Handle passing f64 on RV32D with a soft float ABI or when floating point | |||
4642 | // registers are exhausted. | |||
4643 | if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) { | |||
4644 | assert(!ArgFlags.isSplit() && PendingLocs.empty() && | |||
4645 | "Can't lower f64 if it is split"); | |||
4646 | // Depending on available argument GPRS, f64 may be passed in a pair of | |||
4647 | // GPRs, split between a GPR and the stack, or passed completely on the | |||
4648 | // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these | |||
4649 | // cases. | |||
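// Sketch of the three possible outcomes (register names illustrative):
//   two GPRs free (a4/a5) -> both halves in GPRs
//   only a7 free          -> low half in a7, high half on the stack
//   no GPRs free          -> the whole f64 in an 8-byte stack slot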
4650 | Register Reg = State.AllocateReg(ArgGPRs); | |||
4651 | LocVT = MVT::i32; | |||
4652 | if (!Reg) { | |||
4653 | unsigned StackOffset = State.AllocateStack(8, Align(8)); | |||
4654 | State.addLoc( | |||
4655 | CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo)); | |||
4656 | return false; | |||
4657 | } | |||
4658 | if (!State.AllocateReg(ArgGPRs)) | |||
4659 | State.AllocateStack(4, Align(4)); | |||
4660 | State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); | |||
4661 | return false; | |||
4662 | } | |||
4663 | ||||
4664 | // Split arguments might be passed indirectly, so keep track of the pending | |||
4665 | // values. | |||
4666 | if (ArgFlags.isSplit() || !PendingLocs.empty()) { | |||
4667 | LocVT = XLenVT; | |||
4668 | LocInfo = CCValAssign::Indirect; | |||
4669 | PendingLocs.push_back( | |||
4670 | CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo)); | |||
4671 | PendingArgFlags.push_back(ArgFlags); | |||
4672 | if (!ArgFlags.isSplitEnd()) { | |||
4673 | return false; | |||
4674 | } | |||
4675 | } | |||
4676 | ||||
4677 | // If the split argument only had two elements, it should be passed directly | |||
4678 | // in registers or on the stack. | |||
4679 | if (ArgFlags.isSplitEnd() && PendingLocs.size() <= 2) { | |||
4680 | assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()"); | |||
4681 | // Apply the normal calling convention rules to the first half of the | |||
4682 | // split argument. | |||
4683 | CCValAssign VA = PendingLocs[0]; | |||
4684 | ISD::ArgFlagsTy AF = PendingArgFlags[0]; | |||
4685 | PendingLocs.clear(); | |||
4686 | PendingArgFlags.clear(); | |||
4687 | return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT, | |||
4688 | ArgFlags); | |||
4689 | } | |||
4690 | ||||
4691 | // Allocate to a register if possible, or else a stack slot. | |||
4692 | Register Reg; | |||
4693 | if (ValVT == MVT::f16 && !UseGPRForF16_F32) | |||
4694 | Reg = State.AllocateReg(ArgFPR16s); | |||
4695 | else if (ValVT == MVT::f32 && !UseGPRForF16_F32) | |||
4696 | Reg = State.AllocateReg(ArgFPR32s); | |||
4697 | else if (ValVT == MVT::f64 && !UseGPRForF64) | |||
4698 | Reg = State.AllocateReg(ArgFPR64s); | |||
4699 | else if (ValVT.isScalableVector()) { | |||
4700 | const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT); | |||
4701 | if (RC == &RISCV::VRRegClass) { | |||
4702 | // Assign the first mask argument to V0. | |||
4703 | // This is an interim calling convention and it may be changed in the | |||
4704 | // future. | |||
4705 | if (FirstMaskArgument.hasValue() && | |||
4706 | ValNo == FirstMaskArgument.getValue()) { | |||
4707 | Reg = State.AllocateReg(RISCV::V0); | |||
4708 | } else { | |||
4709 | Reg = State.AllocateReg(ArgVRs); | |||
4710 | } | |||
4711 | } else if (RC == &RISCV::VRM2RegClass) { | |||
4712 | Reg = State.AllocateReg(ArgVRM2s); | |||
4713 | } else if (RC == &RISCV::VRM4RegClass) { | |||
4714 | Reg = State.AllocateReg(ArgVRM4s); | |||
4715 | } else if (RC == &RISCV::VRM8RegClass) { | |||
4716 | Reg = State.AllocateReg(ArgVRM8s); | |||
4717 | } else { | |||
4718 | llvm_unreachable("Unhandled class register for ValueType")::llvm::llvm_unreachable_internal("Unhandled class register for ValueType" , "/build/llvm-toolchain-snapshot-13~++20210308111132+66e3a4abe99c/llvm/lib/Target/RISCV/RISCVISelLowering.cpp" , 4718); | |||
4719 | } | |||
4720 | if (!Reg) { | |||
4721 | LocInfo = CCValAssign::Indirect; | |||
4722 | // Try using a GPR to pass the address | |||
4723 | Reg = State.AllocateReg(ArgGPRs); | |||
4724 | LocVT = XLenVT; | |||
4725 | } | |||
4726 | } else | |||
4727 | Reg = State.AllocateReg(ArgGPRs); | |||
4728 | unsigned StackOffset = | |||
4729 | Reg ? 0 : State.AllocateStack(XLen / 8, Align(XLen / 8)); | |||
4730 | ||||
4731 | // If we reach this point and PendingLocs is non-empty, we must be at the | |||
4732 | // end of a split argument that must be passed indirectly. | |||
4733 | if (!PendingLocs.empty()) { | |||
4734 | assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()"); | |||
4735 | assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()"); | |||
4736 | ||||
4737 | for (auto &It : PendingLocs) { | |||
4738 | if (Reg) | |||
4739 | It.convertToReg(Reg); | |||
4740 | else | |||
4741 | It.convertToMem(StackOffset); | |||
4742 | State.addLoc(It); | |||
4743 | } | |||
4744 | PendingLocs.clear(); | |||
4745 | PendingArgFlags.clear(); | |||
4746 | return false; | |||
4747 | } | |||
4748 | ||||
4749 | assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT || | |||
4750 | (TLI.getSubtarget().hasStdExtV() && ValVT.isScalableVector())) && | |||
4751 | "Expected an XLenVT or scalable vector types at this stage"); | |||
4752 | ||||
4753 | if (Reg) { | |||
4754 | State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); | |||
4755 | return false; | |||
4756 | } | |||
4757 | ||||
4758 | // When a floating-point value is passed on the stack, no bit-conversion is | |||
4759 | // needed. | |||
4760 | if (ValVT.isFloatingPoint()) { | |||
4761 | LocVT = ValVT; | |||
4762 | LocInfo = CCValAssign::Full; | |||
4763 | } | |||
4764 | State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo)); | |||
4765 | return false; | |||
4766 | } | |||
4767 | ||||
4768 | template <typename ArgTy> | |||
4769 | static Optional<unsigned> preAssignMask(const ArgTy &Args) { | |||
4770 | for (const auto &ArgIdx : enumerate(Args)) { | |||
4771 | MVT ArgVT = ArgIdx.value().VT; | |||
4772 | if (ArgVT.isScalableVector() && | |||
4773 | ArgVT.getVectorElementType().SimpleTy == MVT::i1) | |||
4774 | return ArgIdx.index(); | |||
4775 | } | |||
4776 | return None; | |||
4777 | } | |||
4778 | ||||
4779 | void RISCVTargetLowering::analyzeInputArgs( | |||
4780 | MachineFunction &MF, CCState &CCInfo, | |||
4781 | const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet) const { | |||
4782 | unsigned NumArgs = Ins.size(); | |||
4783 | FunctionType *FType = MF.getFunction().getFunctionType(); | |||
4784 | ||||
4785 | Optional<unsigned> FirstMaskArgument; | |||
4786 | if (Subtarget.hasStdExtV()) | |||
4787 | FirstMaskArgument = preAssignMask(Ins); | |||
4788 | ||||
4789 | for (unsigned i = 0; i != NumArgs; ++i) { | |||
4790 | MVT ArgVT = Ins[i].VT; | |||
4791 | ISD::ArgFlagsTy ArgFlags = Ins[i].Flags; | |||
4792 | ||||
4793 | Type *ArgTy = nullptr; | |||
4794 | if (IsRet) | |||
4795 | ArgTy = FType->getReturnType(); | |||
4796 | else if (Ins[i].isOrigArg()) | |||
4797 | ArgTy = FType->getParamType(Ins[i].getOrigArgIndex()); | |||
4798 | ||||
4799 | RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI(); | |||
4800 | if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full, | |||
4801 | ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this, | |||
4802 | FirstMaskArgument)) { | |||
4803 | LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("riscv-lower")) { dbgs() << "InputArg #" << i << " has unhandled type " << EVT(ArgVT).getEVTString() << '\n'; } } while (false) | |||
4804 | << EVT(ArgVT).getEVTString() << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("riscv-lower")) { dbgs() << "InputArg #" << i << " has unhandled type " << EVT(ArgVT).getEVTString() << '\n'; } } while (false); | |||
4805 | llvm_unreachable(nullptr)::llvm::llvm_unreachable_internal(nullptr, "/build/llvm-toolchain-snapshot-13~++20210308111132+66e3a4abe99c/llvm/lib/Target/RISCV/RISCVISelLowering.cpp" , 4805); | |||
4806 | } | |||
4807 | } | |||
4808 | } | |||
4809 | ||||
4810 | void RISCVTargetLowering::analyzeOutputArgs( | |||
4811 | MachineFunction &MF, CCState &CCInfo, | |||
4812 | const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet, | |||
4813 | CallLoweringInfo *CLI) const { | |||
4814 | unsigned NumArgs = Outs.size(); | |||
4815 | ||||
4816 | Optional<unsigned> FirstMaskArgument; | |||
4817 | if (Subtarget.hasStdExtV()) | |||
4818 | FirstMaskArgument = preAssignMask(Outs); | |||
4819 | ||||
4820 | for (unsigned i = 0; i != NumArgs; i++) { | |||
4821 | MVT ArgVT = Outs[i].VT; | |||
4822 | ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; | |||
4823 | Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr; | |||
4824 | ||||
4825 | RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI(); | |||
4826 | if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full, | |||
4827 | ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this, | |||
4828 | FirstMaskArgument)) { | |||
4829 | LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("riscv-lower")) { dbgs() << "OutputArg #" << i << " has unhandled type " << EVT(ArgVT).getEVTString() << "\n"; } } while (false) | |||
4830 | << EVT(ArgVT).getEVTString() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("riscv-lower")) { dbgs() << "OutputArg #" << i << " has unhandled type " << EVT(ArgVT).getEVTString() << "\n"; } } while (false); | |||
4831 | llvm_unreachable(nullptr)::llvm::llvm_unreachable_internal(nullptr, "/build/llvm-toolchain-snapshot-13~++20210308111132+66e3a4abe99c/llvm/lib/Target/RISCV/RISCVISelLowering.cpp" , 4831); | |||
4832 | } | |||
4833 | } | |||
4834 | } | |||
4835 | ||||
4836 | // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect | |||
4837 | // values. | |||
4838 | static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val, | |||
4839 | const CCValAssign &VA, const SDLoc &DL) { | |||
4840 | switch (VA.getLocInfo()) { | |||
4841 | default: | |||
4842 | llvm_unreachable("Unexpected CCValAssign::LocInfo"); | |||
4843 | case CCValAssign::Full: | |||
4844 | break; | |||
4845 | case CCValAssign::BCvt: | |||
4846 | if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16) | |||
4847 | Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val); | |||
4848 | else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) | |||
4849 | Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val); | |||
4850 | else | |||
4851 | Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val); | |||
4852 | break; | |||
4853 | } | |||
4854 | return Val; | |||
4855 | } | |||
4856 | ||||
4857 | // The caller is responsible for loading the full value if the argument is | |||
4858 | // passed with CCValAssign::Indirect. | |||
4859 | static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain, | |||
4860 | const CCValAssign &VA, const SDLoc &DL, | |||
4861 | const RISCVTargetLowering &TLI) { | |||
4862 | MachineFunction &MF = DAG.getMachineFunction(); | |||
4863 | MachineRegisterInfo &RegInfo = MF.getRegInfo(); | |||
4864 | EVT LocVT = VA.getLocVT(); | |||
4865 | SDValue Val; | |||
4866 | const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT()); | |||
4867 | Register VReg = RegInfo.createVirtualRegister(RC); | |||
4868 | RegInfo.addLiveIn(VA.getLocReg(), VReg); | |||
4869 | Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT); | |||
4870 | ||||
4871 | if (VA.getLocInfo() == CCValAssign::Indirect) | |||
4872 | return Val; | |||
4873 | ||||
4874 | return convertLocVTToValVT(DAG, Val, VA, DL); | |||
4875 | } | |||
4876 | ||||
4877 | static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val, | |||
4878 | const CCValAssign &VA, const SDLoc &DL) { | |||
4879 | EVT LocVT = VA.getLocVT(); | |||
4880 | ||||
4881 | switch (VA.getLocInfo()) { | |||
4882 | default: | |||
4883 | llvm_unreachable("Unexpected CCValAssign::LocInfo"); | |||
4884 | case CCValAssign::Full: | |||
4885 | break; | |||
4886 | case CCValAssign::BCvt: | |||
4887 | if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16) | |||
4888 | Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val); | |||
4889 | else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) | |||
4890 | Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val); | |||
4891 | else | |||
4892 | Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val); | |||
4893 | break; | |||
4894 | } | |||
4895 | return Val; | |||
4896 | } | |||
4897 | ||||
4898 | // The caller is responsible for loading the full value if the argument is | |||
4899 | // passed with CCValAssign::Indirect. | |||
4900 | static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain, | |||
4901 | const CCValAssign &VA, const SDLoc &DL) { | |||
4902 | MachineFunction &MF = DAG.getMachineFunction(); | |||
4903 | MachineFrameInfo &MFI = MF.getFrameInfo(); | |||
4904 | EVT LocVT = VA.getLocVT(); | |||
4905 | EVT ValVT = VA.getValVT(); | |||
4906 | EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0)); | |||
4907 | int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8, | |||
4908 | VA.getLocMemOffset(), /*Immutable=*/true); | |||
4909 | SDValue FIN = DAG.getFrameIndex(FI, PtrVT); | |||
4910 | SDValue Val; | |||
4911 | ||||
4912 | ISD::LoadExtType ExtType; | |||
4913 | switch (VA.getLocInfo()) { | |||
4914 | default: | |||
4915 | llvm_unreachable("Unexpected CCValAssign::LocInfo"); | |||
4916 | case CCValAssign::Full: | |||
4917 | case CCValAssign::Indirect: | |||
4918 | case CCValAssign::BCvt: | |||
4919 | ExtType = ISD::NON_EXTLOAD; | |||
4920 | break; | |||
4921 | } | |||
4922 | Val = DAG.getExtLoad( | |||
4923 | ExtType, DL, LocVT, Chain, FIN, | |||
4924 | MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT); | |||
4925 | return Val; | |||
4926 | } | |||
4927 | ||||
4928 | static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain, | |||
4929 | const CCValAssign &VA, const SDLoc &DL) { | |||
4930 | assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 && | |||
4931 | "Unexpected VA"); | |||
4932 | MachineFunction &MF = DAG.getMachineFunction(); | |||
4933 | MachineFrameInfo &MFI = MF.getFrameInfo(); | |||
4934 | MachineRegisterInfo &RegInfo = MF.getRegInfo(); | |||
4935 | ||||
4936 | if (VA.isMemLoc()) { | |||
4937 | // f64 is passed on the stack. | |||
4938 | int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*Immutable=*/true); | |||
4939 | SDValue FIN = DAG.getFrameIndex(FI, MVT::i32); | |||
4940 | return DAG.getLoad(MVT::f64, DL, Chain, FIN, | |||
4941 | MachinePointerInfo::getFixedStack(MF, FI)); | |||
4942 | } | |||
4943 | ||||
4944 | assert(VA.isRegLoc() && "Expected register VA assignment"); | |||
4945 | ||||
4946 | Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass); | |||
4947 | RegInfo.addLiveIn(VA.getLocReg(), LoVReg); | |||
4948 | SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32); | |||
4949 | SDValue Hi; | |||
4950 | if (VA.getLocReg() == RISCV::X17) { | |||
4951 | // Second half of f64 is passed on the stack. | |||
4952 | int FI = MFI.CreateFixedObject(4, 0, /*Immutable=*/true); | |||
4953 | SDValue FIN = DAG.getFrameIndex(FI, MVT::i32); | |||
4954 | Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN, | |||
4955 | MachinePointerInfo::getFixedStack(MF, FI)); | |||
4956 | } else { | |||
4957 | // Second half of f64 is passed in another GPR. | |||
4958 | Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass); | |||
4959 | RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg); | |||
4960 | Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32); | |||
4961 | } | |||
4962 | return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi); | |||
4963 | } | |||
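// Worked example (editorial, assuming the RV32 ilp32 soft-float f64 lowering
// above): an f64 argument whose low half was assigned to a2 has its high
// half in a3, so the else branch copies both GPRs and BuildPairF64
// reassembles the value; if the low half landed in a7 (X17) there is no
// paired GPR left, so the high half is reloaded from the first stack slot
// instead, matching the VA.getLocReg() == RISCV::X17 case.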
4964 | ||||
4965 | // FastCC gives less than a 1% performance improvement on some particular | |||
4966 | // benchmarks, but it may theoretically benefit other cases. | |||
4967 | static bool CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT, | |||
4968 | CCValAssign::LocInfo LocInfo, | |||
4969 | ISD::ArgFlagsTy ArgFlags, CCState &State) { | |||
4970 | ||||
4971 | if (LocVT == MVT::i32 || LocVT == MVT::i64) { | |||
4972 | // X5 and X6 might be used for save-restore libcall. | |||
4973 | static const MCPhysReg GPRList[] = { | |||
4974 | RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14, | |||
4975 | RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7, RISCV::X28, | |||
4976 | RISCV::X29, RISCV::X30, RISCV::X31}; | |||
4977 | if (unsigned Reg = State.AllocateReg(GPRList)) { | |||
4978 | State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); | |||
4979 | return false; | |||
4980 | } | |||
4981 | } | |||
4982 | ||||
4983 | if (LocVT == MVT::f16) { | |||
4984 | static const MCPhysReg FPR16List[] = { | |||
4985 | RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H, | |||
4986 | RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H, RISCV::F1_H, | |||
4987 | RISCV::F2_H, RISCV::F3_H, RISCV::F4_H, RISCV::F5_H, RISCV::F6_H, | |||
4988 | RISCV::F7_H, RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H}; | |||
4989 | if (unsigned Reg = State.AllocateReg(FPR16List)) { | |||
4990 | State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); | |||
4991 | return false; | |||
4992 | } | |||
4993 | } | |||
4994 | ||||
4995 | if (LocVT == MVT::f32) { | |||
4996 | static const MCPhysReg FPR32List[] = { | |||
4997 | RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F, | |||
4998 | RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F, RISCV::F1_F, | |||
4999 | RISCV::F2_F, RISCV::F3_F, RISCV::F4_F, RISCV::F5_F, RISCV::F6_F, | |||
5000 | RISCV::F7_F, RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F}; | |||
5001 | if (unsigned Reg = State.AllocateReg(FPR32List)) { | |||
5002 | State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); | |||
5003 | return false; | |||
5004 | } | |||
5005 | } | |||
5006 | ||||
5007 | if (LocVT == MVT::f64) { | |||
5008 | static const MCPhysReg FPR64List[] = { | |||
5009 | RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D, | |||
5010 | RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D, RISCV::F1_D, | |||
5011 | RISCV::F2_D, RISCV::F3_D, RISCV::F4_D, RISCV::F5_D, RISCV::F6_D, | |||
5012 | RISCV::F7_D, RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D}; | |||
5013 | if (unsigned Reg = State.AllocateReg(FPR64List)) { | |||
5014 | State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); | |||
5015 | return false; | |||
5016 | } | |||
5017 | } | |||
5018 | ||||
5019 | if (LocVT == MVT::i32 || LocVT == MVT::f32) { | |||
5020 | unsigned Offset4 = State.AllocateStack(4, Align(4)); | |||
5021 | State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo)); | |||
5022 | return false; | |||
5023 | } | |||
5024 | ||||
5025 | if (LocVT == MVT::i64 || LocVT == MVT::f64) { | |||
5026 | unsigned Offset5 = State.AllocateStack(8, Align(8)); | |||
5027 | State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo)); | |||
5028 | return false; | |||
5029 | } | |||
5030 | ||||
5031 | return true; // CC didn't match. | |||
5032 | } | |||
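// Usage sketch (editorial; it mirrors the calls made further down in
// LowerFormalArguments and LowerCall): a CCAssignFn such as CC_RISCV_FastCC
// is handed to CCState, which invokes it once per value and records the
// resulting locations, e.g.:
//   SmallVector<CCValAssign, 16> Locs;
//   CCState Info(CallingConv::Fast, /*IsVarArg=*/false, MF, Locs,
//                *DAG.getContext());
//   Info.AnalyzeFormalArguments(Ins, CC_RISCV_FastCC);
// Returning true from the assign function means "CC didn't match", which the
// framework treats as an unhandled argument.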
5033 | ||||
5034 | static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT, | |||
5035 | CCValAssign::LocInfo LocInfo, | |||
5036 | ISD::ArgFlagsTy ArgFlags, CCState &State) { | |||
5037 | ||||
5038 | if (LocVT == MVT::i32 || LocVT == MVT::i64) { | |||
5039 | // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim | |||
5040 | // s1 s2 s3 s4 s5 s6 s7 s8 s9 s10 s11 | |||
5041 | static const MCPhysReg GPRList[] = { | |||
5042 | RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22, | |||
5043 | RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27}; | |||
5044 | if (unsigned Reg = State.AllocateReg(GPRList)) { | |||
5045 | State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); | |||
5046 | return false; | |||
5047 | } | |||
5048 | } | |||
5049 | ||||
5050 | if (LocVT == MVT::f32) { | |||
5051 | // Pass in STG registers: F1, ..., F6 | |||
5052 | // fs0 ... fs5 | |||
5053 | static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F, | |||
5054 | RISCV::F18_F, RISCV::F19_F, | |||
5055 | RISCV::F20_F, RISCV::F21_F}; | |||
5056 | if (unsigned Reg = State.AllocateReg(FPR32List)) { | |||
5057 | State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); | |||
5058 | return false; | |||
5059 | } | |||
5060 | } | |||
5061 | ||||
5062 | if (LocVT == MVT::f64) { | |||
5063 | // Pass in STG registers: D1, ..., D6 | |||
5064 | // fs6 ... fs11 | |||
5065 | static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D, | |||
5066 | RISCV::F24_D, RISCV::F25_D, | |||
5067 | RISCV::F26_D, RISCV::F27_D}; | |||
5068 | if (unsigned Reg = State.AllocateReg(FPR64List)) { | |||
5069 | State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); | |||
5070 | return false; | |||
5071 | } | |||
5072 | } | |||
5073 | ||||
5074 | report_fatal_error("No registers left in GHC calling convention"); | |||
5075 | return true; | |||
5076 | } | |||
5077 | ||||
5078 | // Transform physical registers into virtual registers. | |||
5079 | SDValue RISCVTargetLowering::LowerFormalArguments( | |||
5080 | SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, | |||
5081 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL, | |||
5082 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { | |||
5083 | ||||
5084 | MachineFunction &MF = DAG.getMachineFunction(); | |||
5085 | ||||
5086 | switch (CallConv) { | |||
5087 | default: | |||
5088 | report_fatal_error("Unsupported calling convention"); | |||
5089 | case CallingConv::C: | |||
5090 | case CallingConv::Fast: | |||
5091 | break; | |||
5092 | case CallingConv::GHC: | |||
5093 | if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] || | |||
5094 | !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD]) | |||
5095 | report_fatal_error( | |||
5096 | "GHC calling convention requires the F and D instruction set extensions"); | |||
5097 | } | |||
5098 | ||||
5099 | const Function &Func = MF.getFunction(); | |||
5100 | if (Func.hasFnAttribute("interrupt")) { | |||
5101 | if (!Func.arg_empty()) | |||
5102 | report_fatal_error( | |||
5103 | "Functions with the interrupt attribute cannot have arguments!"); | |||
5104 | ||||
5105 | StringRef Kind = | |||
5106 | MF.getFunction().getFnAttribute("interrupt").getValueAsString(); | |||
5107 | ||||
5108 | if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine")) | |||
5109 | report_fatal_error( | |||
5110 | "Function interrupt attribute argument not supported!"); | |||
5111 | } | |||
5112 | ||||
5113 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); | |||
5114 | MVT XLenVT = Subtarget.getXLenVT(); | |||
5115 | unsigned XLenInBytes = Subtarget.getXLen() / 8; | |||
5116 | // Used with varargs to accumulate store chains. | |||
5117 | std::vector<SDValue> OutChains; | |||
5118 | ||||
5119 | // Assign locations to all of the incoming arguments. | |||
5120 | SmallVector<CCValAssign, 16> ArgLocs; | |||
5121 | CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext()); | |||
5122 | ||||
5123 | if (CallConv == CallingConv::Fast) | |||
5124 | CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_FastCC); | |||
5125 | else if (CallConv == CallingConv::GHC) | |||
5126 | CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC); | |||
5127 | else | |||
5128 | analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false); | |||
5129 | ||||
5130 | for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { | |||
5131 | CCValAssign &VA = ArgLocs[i]; | |||
5132 | SDValue ArgValue; | |||
5133 | // Passing f64 on RV32D with a soft float ABI must be handled as a special | |||
5134 | // case. | |||
5135 | if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) | |||
5136 | ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL); | |||
5137 | else if (VA.isRegLoc()) | |||
5138 | ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this); | |||
5139 | else | |||
5140 | ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL); | |||
5141 | ||||
5142 | if (VA.getLocInfo() == CCValAssign::Indirect) { | |||
5143 | // If the original argument was split and passed by reference (e.g. i128 | |||
5144 | // on RV32), we need to load all parts of it here (using the same | |||
5145 | // address). | |||
5146 | InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue, | |||
5147 | MachinePointerInfo())); | |||
5148 | unsigned ArgIndex = Ins[i].OrigArgIndex; | |||
5149 | assert(Ins[i].PartOffset == 0); | |||
5150 | while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) { | |||
5151 | CCValAssign &PartVA = ArgLocs[i + 1]; | |||
5152 | unsigned PartOffset = Ins[i + 1].PartOffset; | |||
5153 | SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue, | |||
5154 | DAG.getIntPtrConstant(PartOffset, DL)); | |||
5155 | InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address, | |||
5156 | MachinePointerInfo())); | |||
5157 | ++i; | |||
5158 | } | |||
5159 | continue; | |||
5160 | } | |||
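// Illustrative example (editorial): an i128 argument on RV32 is wider than
// 2*XLEN, so CC_RISCV marks it CCValAssign::Indirect and only its address
// arrives in a register. The loop above then issues one load per part that
// shares the same OrigArgIndex, e.g. at offsets 0, 4, 8 and 12 from
// ArgValue, so InVals still receives one value per entry in Ins.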
5161 | InVals.push_back(ArgValue); | |||
5162 | } | |||
5163 | ||||
5164 | if (IsVarArg) { | |||
5165 | ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs); | |||
5166 | unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs); | |||
5167 | const TargetRegisterClass *RC = &RISCV::GPRRegClass; | |||
5168 | MachineFrameInfo &MFI = MF.getFrameInfo(); | |||
5169 | MachineRegisterInfo &RegInfo = MF.getRegInfo(); | |||
5170 | RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>(); | |||
5171 | ||||
5172 | // Offset of the first variable argument from stack pointer, and size of | |||
5173 | // the vararg save area. For now, the varargs save area is either zero or | |||
5174 | // large enough to hold a0-a7. | |||
5175 | int VaArgOffset, VarArgsSaveSize; | |||
5176 | ||||
5177 | // If all registers are allocated, then all varargs must be passed on the | |||
5178 | // stack and we don't need to save any argregs. | |||
5179 | if (ArgRegs.size() == Idx) { | |||
5180 | VaArgOffset = CCInfo.getNextStackOffset(); | |||
5181 | VarArgsSaveSize = 0; | |||
5182 | } else { | |||
5183 | VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx); | |||
5184 | VaArgOffset = -VarArgsSaveSize; | |||
5185 | } | |||
5186 | ||||
5187 | // Record the frame index of the first variable argument, | |||
5188 | // which is a value needed by VASTART. | |||
5189 | int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true); | |||
5190 | RVFI->setVarArgsFrameIndex(FI); | |||
5191 | ||||
5192 | // If saving an odd number of registers, create an extra stack slot to | |||
5193 | // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures | |||
5194 | // offsets to even-numbered registers remain 2*XLEN-aligned. | |||
5195 | if (Idx % 2) { | |||
5196 | MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true); | |||
5197 | VarArgsSaveSize += XLenInBytes; | |||
5198 | } | |||
5199 | ||||
5200 | // Copy the integer registers that may have been used for passing varargs | |||
5201 | // to the vararg save area. | |||
5202 | for (unsigned I = Idx; I < ArgRegs.size(); | |||
5203 | ++I, VaArgOffset += XLenInBytes) { | |||
5204 | const Register Reg = RegInfo.createVirtualRegister(RC); | |||
5205 | RegInfo.addLiveIn(ArgRegs[I], Reg); | |||
5206 | SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT); | |||
5207 | FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true); | |||
5208 | SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout())); | |||
5209 | SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff, | |||
5210 | MachinePointerInfo::getFixedStack(MF, FI)); | |||
5211 | cast<StoreSDNode>(Store.getNode()) | |||
5212 | ->getMemOperand() | |||
5213 | ->setValue((Value *)nullptr); | |||
5214 | OutChains.push_back(Store); | |||
5215 | } | |||
5216 | RVFI->setVarArgsSaveSize(VarArgsSaveSize); | |||
5217 | } | |||
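// Worked example (editorial, RV32): with three fixed arguments Idx == 3, so
// a3-a7 must be saved: VarArgsSaveSize = 4 * 5 = 20 and VaArgOffset = -20.
// Since Idx is odd, the extra padding slot at offset -24 grows
// VarArgsSaveSize to 24, keeping the save area 2*XLEN-aligned as described
// in the comments above.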
5218 | ||||
5219 | // All stores are grouped into one node so that the sizes of Ins and InVals | |||
5220 | // can be matched. This only happens for vararg functions. | |||
5221 | if (!OutChains.empty()) { | |||
5222 | OutChains.push_back(Chain); | |||
5223 | Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains); | |||
5224 | } | |||
5225 | ||||
5226 | return Chain; | |||
5227 | } | |||
5228 | ||||
5229 | /// isEligibleForTailCallOptimization - Check whether the call is eligible | |||
5230 | /// for tail call optimization. | |||
5231 | /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization. | |||
5232 | bool RISCVTargetLowering::isEligibleForTailCallOptimization( | |||
5233 | CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF, | |||
5234 | const SmallVector<CCValAssign, 16> &ArgLocs) const { | |||
5235 | ||||
5236 | auto &Callee = CLI.Callee; | |||
5237 | auto CalleeCC = CLI.CallConv; | |||
5238 | auto &Outs = CLI.Outs; | |||
5239 | auto &Caller = MF.getFunction(); | |||
5240 | auto CallerCC = Caller.getCallingConv(); | |||
5241 | ||||
5242 | // Exception-handling functions need a special set of instructions to | |||
5243 | // indicate a return to the hardware. Tail-calling another function would | |||
5244 | // probably break this. | |||
5245 | // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This | |||
5246 | // should be expanded as new function attributes are introduced. | |||
5247 | if (Caller.hasFnAttribute("interrupt")) | |||
5248 | return false; | |||
5249 | ||||
5250 | // Do not tail call opt if the stack is used to pass parameters. | |||
5251 | if (CCInfo.getNextStackOffset() != 0) | |||
5252 | return false; | |||
5253 | ||||
5254 | // Do not tail call opt if any parameters need to be passed indirectly. | |||
5255 | // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are | |||
5256 | // passed indirectly. The address of the value is then passed in a register, | |||
5257 | // or, if no register is free, placed on the stack. Passing indirectly often | |||
5258 | // requires allocating stack space to hold the value, so in this case the | |||
5259 | // CCInfo.getNextStackOffset() != 0 check alone is not enough and we also | |||
5260 | // need to check whether any of the CCValAssigns in ArgLocs use | |||
5261 | // CCValAssign::Indirect. | |||
5262 | for (auto &VA : ArgLocs) | |||
5263 | if (VA.getLocInfo() == CCValAssign::Indirect) | |||
5264 | return false; | |||
5265 | ||||
5266 | // Do not tail call opt if either caller or callee uses struct return | |||
5267 | // semantics. | |||
5268 | auto IsCallerStructRet = Caller.hasStructRetAttr(); | |||
5269 | auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet(); | |||
5270 | if (IsCallerStructRet || IsCalleeStructRet) | |||
5271 | return false; | |||
5272 | ||||
5273 | // Externally-defined functions with weak linkage should not be | |||
5274 | // tail-called. The behaviour of branch instructions in this situation (as | |||
5275 | // used for tail calls) is implementation-defined, so we cannot rely on the | |||
5276 | // linker replacing the tail call with a return. | |||
5277 | if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { | |||
5278 | const GlobalValue *GV = G->getGlobal(); | |||
5279 | if (GV->hasExternalWeakLinkage()) | |||
5280 | return false; | |||
5281 | } | |||
5282 | ||||
5283 | // The callee has to preserve all registers the caller needs to preserve. | |||
5284 | const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo(); | |||
5285 | const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC); | |||
5286 | if (CalleeCC != CallerCC) { | |||
5287 | const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC); | |||
5288 | if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved)) | |||
5289 | return false; | |||
5290 | } | |||
5291 | ||||
5292 | // Byval parameters hand the function a pointer directly into the stack area | |||
5293 | // we want to reuse during a tail call. Working around this *is* possible | |||
5294 | // but less efficient and uglier in LowerCall. | |||
5295 | for (auto &Arg : Outs) | |||
5296 | if (Arg.Flags.isByVal()) | |||
5297 | return false; | |||
5298 | ||||
5299 | return true; | |||
5300 | } | |||
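// Illustrative example (editorial): on RV32 the IR below is not lowered as a
// tail call, because the i128 argument is assigned CCValAssign::Indirect and
// the indirect check above rejects it even though no bytes land in the
// outgoing argument area:
//   declare void @callee(i128)
//   define void @caller(i128 %x) {
//     tail call void @callee(i128 %x)
//     ret void
//   }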
5301 | ||||
5302 | // Lower a call to a callseq_start + CALL + callseq_end chain, and add input | |||
5303 | // and output parameter nodes. | |||
5304 | SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI, | |||
5305 | SmallVectorImpl<SDValue> &InVals) const { | |||
5306 | SelectionDAG &DAG = CLI.DAG; | |||
5307 | SDLoc &DL = CLI.DL; | |||
5308 | SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; | |||
5309 | SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; | |||
5310 | SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; | |||
5311 | SDValue Chain = CLI.Chain; | |||
5312 | SDValue Callee = CLI.Callee; | |||
5313 | bool &IsTailCall = CLI.IsTailCall; | |||
5314 | CallingConv::ID CallConv = CLI.CallConv; | |||
5315 | bool IsVarArg = CLI.IsVarArg; | |||
5316 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); | |||
5317 | MVT XLenVT = Subtarget.getXLenVT(); | |||
5318 | ||||
5319 | MachineFunction &MF = DAG.getMachineFunction(); | |||
5320 | ||||
5321 | // Analyze the operands of the call, assigning locations to each operand. | |||
5322 | SmallVector<CCValAssign, 16> ArgLocs; | |||
5323 | CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext()); | |||
5324 | ||||
5325 | if (CallConv == CallingConv::Fast) | |||
5326 | ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_FastCC); | |||
5327 | else if (CallConv == CallingConv::GHC) | |||
5328 | ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC); | |||
5329 | else | |||
5330 | analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI); | |||
5331 | ||||
5332 | // Check if it's really possible to do a tail call. | |||
5333 | if (IsTailCall) | |||
5334 | IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs); | |||
5335 | ||||
5336 | if (IsTailCall) | |||
5337 | ++NumTailCalls; | |||
5338 | else if (CLI.CB && CLI.CB->isMustTailCall()) | |||
5339 | report_fatal_error("failed to perform tail call elimination on a call " | |||
5340 | "site marked musttail"); | |||
5341 | ||||
5342 | // Get a count of how many bytes are to be pushed on the stack. | |||
5343 | unsigned NumBytes = ArgCCInfo.getNextStackOffset(); | |||
5344 | ||||
5345 | // Create local copies for byval args | |||
5346 | SmallVector<SDValue, 8> ByValArgs; | |||
5347 | for (unsigned i = 0, e = Outs.size(); i != e; ++i) { | |||
5348 | ISD::ArgFlagsTy Flags = Outs[i].Flags; | |||
5349 | if (!Flags.isByVal()) | |||
5350 | continue; | |||
5351 | ||||
5352 | SDValue Arg = OutVals[i]; | |||
5353 | unsigned Size = Flags.getByValSize(); | |||
5354 | Align Alignment = Flags.getNonZeroByValAlign(); | |||
5355 | ||||
5356 | int FI = | |||
5357 | MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false); | |||
5358 | SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout())); | |||
5359 | SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT); | |||
5360 | ||||
5361 | Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment, | |||
5362 | /*IsVolatile=*/false, | |||
5363 | /*AlwaysInline=*/false, IsTailCall, | |||
5364 | MachinePointerInfo(), MachinePointerInfo()); | |||
5365 | ByValArgs.push_back(FIPtr); | |||
5366 | } | |||
5367 | ||||
5368 | if (!IsTailCall) | |||
5369 | Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL); | |||
5370 | ||||
5371 | // Copy argument values to their designated locations. | |||
5372 | SmallVector<std::pair<Register, SDValue>, 8> RegsToPass; | |||
5373 | SmallVector<SDValue, 8> MemOpChains; | |||
5374 | SDValue StackPtr; | |||
5375 | for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) { | |||
5376 | CCValAssign &VA = ArgLocs[i]; | |||
5377 | SDValue ArgValue = OutVals[i]; | |||
5378 | ISD::ArgFlagsTy Flags = Outs[i].Flags; | |||
5379 | ||||
5380 | // Handle passing f64 on RV32D with a soft float ABI as a special case. | |||
5381 | bool IsF64OnRV32DSoftABI = | |||
5382 | VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64; | |||
5383 | if (IsF64OnRV32DSoftABI && VA.isRegLoc()) { | |||
5384 | SDValue SplitF64 = DAG.getNode( | |||
5385 | RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue); | |||
5386 | SDValue Lo = SplitF64.getValue(0); | |||
5387 | SDValue Hi = SplitF64.getValue(1); | |||
5388 | ||||
5389 | Register RegLo = VA.getLocReg(); | |||
5390 | RegsToPass.push_back(std::make_pair(RegLo, Lo)); | |||
5391 | ||||
5392 | if (RegLo == RISCV::X17) { | |||
5393 | // Second half of f64 is passed on the stack. | |||
5394 | // Work out the address of the stack slot. | |||
5395 | if (!StackPtr.getNode()) | |||
5396 | StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT); | |||
5397 | // Emit the store. | |||
5398 | MemOpChains.push_back( | |||
5399 | DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo())); | |||
5400 | } else { | |||
5401 | // Second half of f64 is passed in another GPR. | |||
5402 | assert(RegLo < RISCV::X31 && "Invalid register pair"); | |||
5403 | Register RegHigh = RegLo + 1; | |||
5404 | RegsToPass.push_back(std::make_pair(RegHigh, Hi)); | |||
5405 | } | |||
5406 | continue; | |||
5407 | } | |||
5408 | ||||
5409 | // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way | |||
5410 | // as any other MemLoc. | |||
5411 | ||||
5412 | // Promote the value if needed. | |||
5413 | // For now, only handle fully promoted and indirect arguments. | |||
5414 | if (VA.getLocInfo() == CCValAssign::Indirect) { | |||
5415 | // Store the argument in a stack slot and pass its address. | |||
5416 | SDValue SpillSlot = DAG.CreateStackTemporary(Outs[i].ArgVT); | |||
5417 | int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex(); | |||
5418 | MemOpChains.push_back( | |||
5419 | DAG.getStore(Chain, DL, ArgValue, SpillSlot, | |||
5420 | MachinePointerInfo::getFixedStack(MF, FI))); | |||
5421 | // If the original argument was split (e.g. i128), we need | |||
5422 | // to store all parts of it here (and pass just one address). | |||
5423 | unsigned ArgIndex = Outs[i].OrigArgIndex; | |||
5424 | assert(Outs[i].PartOffset == 0); | |||
5425 | while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) { | |||
5426 | SDValue PartValue = OutVals[i + 1]; | |||
5427 | unsigned PartOffset = Outs[i + 1].PartOffset; | |||
5428 | SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot, | |||
5429 | DAG.getIntPtrConstant(PartOffset, DL)); | |||
5430 | MemOpChains.push_back( | |||
5431 | DAG.getStore(Chain, DL, PartValue, Address, | |||
5432 | MachinePointerInfo::getFixedStack(MF, FI))); | |||
5433 | ++i; | |||
5434 | } | |||
5435 | ArgValue = SpillSlot; | |||
5436 | } else { | |||
5437 | ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL); | |||
5438 | } | |||
5439 | ||||
5440 | // Use local copy if it is a byval arg. | |||
5441 | if (Flags.isByVal()) | |||
5442 | ArgValue = ByValArgs[j++]; | |||
5443 | ||||
5444 | if (VA.isRegLoc()) { | |||
5445 | // Queue up the argument copies and emit them at the end. | |||
5446 | RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue)); | |||
5447 | } else { | |||
5448 | assert(VA.isMemLoc() && "Argument not register or memory"); | |||
5449 | assert(!IsTailCall && "Tail call not allowed if stack is used " | |||
5450 | "for passing parameters"); | |||
5451 | ||||
5452 | // Work out the address of the stack slot. | |||
5453 | if (!StackPtr.getNode()) | |||
5454 | StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT); | |||
5455 | SDValue Address = | |||
5456 | DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, | |||
5457 | DAG.getIntPtrConstant(VA.getLocMemOffset(), DL)); | |||
5458 | ||||
5459 | // Emit the store. | |||
5460 | MemOpChains.push_back( | |||
5461 | DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo())); | |||
5462 | } | |||
5463 | } | |||
5464 | ||||
5465 | // Join the stores, which are independent of one another. | |||
5466 | if (!MemOpChains.empty()) | |||
5467 | Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains); | |||
5468 | ||||
5469 | SDValue Glue; | |||
5470 | ||||
5471 | // Build a sequence of copy-to-reg nodes, chained and glued together. | |||
5472 | for (auto &Reg : RegsToPass) { | |||
5473 | Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue); | |||
5474 | Glue = Chain.getValue(1); | |||
5475 | } | |||
5476 | ||||
5477 | // Validate that none of the argument registers have been marked as | |||
5478 | // reserved; if any has, report an error. Do the same for the return address | |||
5479 | // register if this is not a tail call. | |||
5480 | validateCCReservedRegs(RegsToPass, MF); | |||
5481 | if (!IsTailCall && | |||
5482 | MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1)) | |||
5483 | MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{ | |||
5484 | MF.getFunction(), | |||
5485 | "Return address register required, but has been reserved."}); | |||
5486 | ||||
5487 | // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a | |||
5488 | // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't | |||
5489 | // split it, and so the direct call can be matched by PseudoCALL. | |||
5490 | if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) { | |||
5491 | const GlobalValue *GV = S->getGlobal(); | |||
5492 | ||||
5493 | unsigned OpFlags = RISCVII::MO_CALL; | |||
5494 | if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV)) | |||
5495 | OpFlags = RISCVII::MO_PLT; | |||
5496 | ||||
5497 | Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags); | |||
5498 | } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { | |||
5499 | unsigned OpFlags = RISCVII::MO_CALL; | |||
5500 | ||||
5501 | if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(), | |||
5502 | nullptr)) | |||
5503 | OpFlags = RISCVII::MO_PLT; | |||
5504 | ||||
5505 | Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags); | |||
5506 | } | |||
5507 | ||||
5508 | // The first call operand is the chain and the second is the target address. | |||
5509 | SmallVector<SDValue, 8> Ops; | |||
5510 | Ops.push_back(Chain); | |||
5511 | Ops.push_back(Callee); | |||
5512 | ||||
5513 | // Add argument registers to the end of the list so that they are | |||
5514 | // known live into the call. | |||
5515 | for (auto &Reg : RegsToPass) | |||
5516 | Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType())); | |||
5517 | ||||
5518 | if (!IsTailCall) { | |||
5519 | // Add a register mask operand representing the call-preserved registers. | |||
5520 | const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); | |||
5521 | const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv); | |||
5522 | assert(Mask && "Missing call preserved mask for calling convention"); | |||
5523 | Ops.push_back(DAG.getRegisterMask(Mask)); | |||
5524 | } | |||
5525 | ||||
5526 | // Glue the call to the argument copies, if any. | |||
5527 | if (Glue.getNode()) | |||
5528 | Ops.push_back(Glue); | |||
5529 | ||||
5530 | // Emit the call. | |||
5531 | SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); | |||
5532 | ||||
5533 | if (IsTailCall) { | |||
5534 | MF.getFrameInfo().setHasTailCall(); | |||
5535 | return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops); | |||
5536 | } | |||
5537 | ||||
5538 | Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops); | |||
5539 | DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge); | |||
5540 | Glue = Chain.getValue(1); | |||
5541 | ||||
5542 | // Mark the end of the call, which is glued to the call itself. | |||
5543 | Chain = DAG.getCALLSEQ_END(Chain, | |||
5544 | DAG.getConstant(NumBytes, DL, PtrVT, true), | |||
5545 | DAG.getConstant(0, DL, PtrVT, true), | |||
5546 | Glue, DL); | |||
5547 | Glue = Chain.getValue(1); | |||
5548 | ||||
5549 | // Assign locations to each value returned by this call. | |||
5550 | SmallVector<CCValAssign, 16> RVLocs; | |||
5551 | CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext()); | |||
5552 | analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true); | |||
5553 | ||||
5554 | // Copy all of the result registers out of their specified physreg. | |||
5555 | for (auto &VA : RVLocs) { | |||
5556 | // Copy the value out | |||
5557 | SDValue RetValue = | |||
5558 | DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue); | |||
5559 | // Glue the RetValue to the end of the call sequence | |||
5560 | Chain = RetValue.getValue(1); | |||
5561 | Glue = RetValue.getValue(2); | |||
5562 | ||||
5563 | if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) { | |||
5564 | assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment"); | |||
5565 | SDValue RetValue2 = | |||
5566 | DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue); | |||
5567 | Chain = RetValue2.getValue(1); | |||
5568 | Glue = RetValue2.getValue(2); | |||
5569 | RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue, | |||
5570 | RetValue2); | |||
5571 | } | |||
5572 | ||||
5573 | RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL); | |||
5574 | ||||
5575 | InVals.push_back(RetValue); | |||
5576 | } | |||
5577 | ||||
5578 | return Chain; | |||
5579 | } | |||
5580 | ||||
5581 | bool RISCVTargetLowering::CanLowerReturn( | |||
5582 | CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg, | |||
5583 | const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const { | |||
5584 | SmallVector<CCValAssign, 16> RVLocs; | |||
5585 | CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context); | |||
5586 | ||||
5587 | Optional<unsigned> FirstMaskArgument; | |||
5588 | if (Subtarget.hasStdExtV()) | |||
5589 | FirstMaskArgument = preAssignMask(Outs); | |||
5590 | ||||
5591 | for (unsigned i = 0, e = Outs.size(); i != e; ++i) { | |||
5592 | MVT VT = Outs[i].VT; | |||
5593 | ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; | |||
5594 | RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI(); | |||
5595 | if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full, | |||
5596 | ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr, | |||
5597 | *this, FirstMaskArgument)) | |||
5598 | return false; | |||
5599 | } | |||
5600 | return true; | |||
5601 | } | |||
5602 | ||||
5603 | SDValue | |||
5604 | RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, | |||
5605 | bool IsVarArg, | |||
5606 | const SmallVectorImpl<ISD::OutputArg> &Outs, | |||
5607 | const SmallVectorImpl<SDValue> &OutVals, | |||
5608 | const SDLoc &DL, SelectionDAG &DAG) const { | |||
5609 | const MachineFunction &MF = DAG.getMachineFunction(); | |||
5610 | const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>(); | |||
5611 | ||||
5612 | // Stores the assignment of the return value to a location. | |||
5613 | SmallVector<CCValAssign, 16> RVLocs; | |||
5614 | ||||
5615 | // Info about the registers and stack slot. | |||
5616 | CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs, | |||
5617 | *DAG.getContext()); | |||
5618 | ||||
5619 | analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true, | |||
5620 | nullptr); | |||
5621 | ||||
5622 | if (CallConv == CallingConv::GHC && !RVLocs.empty()) | |||
5623 | report_fatal_error("GHC functions return void only"); | |||
5624 | ||||
5625 | SDValue Glue; | |||
5626 | SmallVector<SDValue, 4> RetOps(1, Chain); | |||
5627 | ||||
5628 | // Copy the result values into the output registers. | |||
5629 | for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) { | |||
5630 | SDValue Val = OutVals[i]; | |||
5631 | CCValAssign &VA = RVLocs[i]; | |||
5632 | assert(VA.isRegLoc() && "Can only return in registers!"); | |||
5633 | ||||
5634 | if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) { | |||
5635 | // Handle returning f64 on RV32D with a soft float ABI. | |||
5636 | assert(VA.isRegLoc() && "Expected return via registers"); | |||
5637 | SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL, | |||
5638 | DAG.getVTList(MVT::i32, MVT::i32), Val); | |||
5639 | SDValue Lo = SplitF64.getValue(0); | |||
5640 | SDValue Hi = SplitF64.getValue(1); | |||
5641 | Register RegLo = VA.getLocReg(); | |||
5642 | assert(RegLo < RISCV::X31 && "Invalid register pair"); | |||
5643 | Register RegHi = RegLo + 1; | |||
5644 | ||||
5645 | if (STI.isRegisterReservedByUser(RegLo) || | |||
5646 | STI.isRegisterReservedByUser(RegHi)) | |||
5647 | MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{ | |||
5648 | MF.getFunction(), | |||
5649 | "Return value register required, but has been reserved."}); | |||
5650 | ||||
5651 | Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue); | |||
5652 | Glue = Chain.getValue(1); | |||
5653 | RetOps.push_back(DAG.getRegister(RegLo, MVT::i32)); | |||
5654 | Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue); | |||
5655 | Glue = Chain.getValue(1); | |||
5656 | RetOps.push_back(DAG.getRegister(RegHi, MVT::i32)); | |||
5657 | } else { | |||
5658 | // Handle a 'normal' return. | |||
5659 | Val = convertValVTToLocVT(DAG, Val, VA, DL); | |||
5660 | Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue); | |||
5661 | ||||
5662 | if (STI.isRegisterReservedByUser(VA.getLocReg())) | |||
5663 | MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{ | |||
5664 | MF.getFunction(), | |||
5665 | "Return value register required, but has been reserved."}); | |||
5666 | ||||
5667 | // Guarantee that all emitted copies are stuck together. | |||
5668 | Glue = Chain.getValue(1); | |||
5669 | RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); | |||
5670 | } | |||
5671 | } | |||
5672 | ||||
5673 | RetOps[0] = Chain; // Update chain. | |||
5674 | ||||
5675 | // Add the glue node if we have it. | |||
5676 | if (Glue.getNode()) { | |||
5677 | RetOps.push_back(Glue); | |||
5678 | } | |||
5679 | ||||
5680 | // Interrupt service routines use different return instructions. | |||
5681 | const Function &Func = DAG.getMachineFunction().getFunction(); | |||
5682 | if (Func.hasFnAttribute("interrupt")) { | |||
5683 | if (!Func.getReturnType()->isVoidTy()) | |||
5684 | report_fatal_error( | |||
5685 | "Functions with the interrupt attribute must have void return type!"); | |||
5686 | ||||
5687 | MachineFunction &MF = DAG.getMachineFunction(); | |||
5688 | StringRef Kind = | |||
5689 | MF.getFunction().getFnAttribute("interrupt").getValueAsString(); | |||
5690 | ||||
5691 | unsigned RetOpc; | |||
5692 | if (Kind == "user") | |||
5693 | RetOpc = RISCVISD::URET_FLAG; | |||
5694 | else if (Kind == "supervisor") | |||
5695 | RetOpc = RISCVISD::SRET_FLAG; | |||
5696 | else | |||
5697 | RetOpc = RISCVISD::MRET_FLAG; | |||
5698 | ||||
5699 | return DAG.getNode(RetOpc, DL, MVT::Other, RetOps); | |||
5700 | } | |||
5701 | ||||
5702 | return DAG.getNode(RISCVISD::RET_FLAG, DL, MVT::Other, RetOps); | |||
5703 | } | |||
5704 | ||||
5705 | void RISCVTargetLowering::validateCCReservedRegs( | |||
5706 | const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs, | |||
5707 | MachineFunction &MF) const { | |||
5708 | const Function &F = MF.getFunction(); | |||
5709 | const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>(); | |||
5710 | ||||
5711 | if (llvm::any_of(Regs, [&STI](auto Reg) { | |||
5712 | return STI.isRegisterReservedByUser(Reg.first); | |||
5713 | })) | |||
5714 | F.getContext().diagnose(DiagnosticInfoUnsupported{ | |||
5715 | F, "Argument register required, but has been reserved."}); | |||
5716 | } | |||
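// Illustrative example (editorial, assumed toolchain flag): compiling with
// -ffixed-x10 marks a0 as reserved by the user, so a call that would pass
// its first integer argument in a0 trips the any_of check above and emits
// the "Argument register required, but has been reserved." diagnostic.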
5717 | ||||
5718 | bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { | |||
5719 | return CI->isTailCall(); | |||
5720 | } | |||
5721 | ||||
5722 | const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const { | |||
5723 | #define NODE_NAME_CASE(NODE) \ | |||
5724 | case RISCVISD::NODE: \ | |||
5725 | return "RISCVISD::" #NODE; | |||
5726 | // clang-format off | |||
5727 | switch ((RISCVISD::NodeType)Opcode) { | |||
5728 | case RISCVISD::FIRST_NUMBER: | |||
5729 | break; | |||
5730 | NODE_NAME_CASE(RET_FLAG) | |||
5731 | NODE_NAME_CASE(URET_FLAG) | |||
5732 | NODE_NAME_CASE(SRET_FLAG) | |||
5733 | NODE_NAME_CASE(MRET_FLAG) | |||
5734 | NODE_NAME_CASE(CALL) | |||
5735 | NODE_NAME_CASE(SELECT_CC) | |||
5736 | NODE_NAME_CASE(BuildPairF64) | |||
5737 | NODE_NAME_CASE(SplitF64) | |||
5738 | NODE_NAME_CASE(TAIL) | |||
5739 | NODE_NAME_CASE(SLLW) | |||
5740 | NODE_NAME_CASE(SRAW) | |||
5741 | NODE_NAME_CASE(SRLW) | |||
5742 | NODE_NAME_CASE(DIVW) | |||
5743 | NODE_NAME_CASE(DIVUW) | |||
5744 | NODE_NAME_CASE(REMUW) | |||
5745 | NODE_NAME_CASE(ROLW) | |||
5746 | NODE_NAME_CASE(RORW) | |||
5747 | NODE_NAME_CASE(FSLW) | |||
5748 | NODE_NAME_CASE(FSRW) | |||
5749 | NODE_NAME_CASE(FSL) | |||
5750 | NODE_NAME_CASE(FSR) | |||
5751 | NODE_NAME_CASE(FMV_H_X) | |||
5752 | NODE_NAME_CASE(FMV_X_ANYEXTH) | |||
5753 | NODE_NAME_CASE(FMV_W_X_RV64) | |||
5754 | NODE_NAME_CASE(FMV_X_ANYEXTW_RV64) | |||
5755 | NODE_NAME_CASE(READ_CYCLE_WIDE) | |||
5756 | NODE_NAME_CASE(GREVI) | |||
5757 | NODE_NAME_CASE(GREVIW) | |||
5758 | NODE_NAME_CASE(GORCI) | |||
5759 | NODE_NAME_CASE(GORCIW) | |||
5760 | NODE_NAME_CASE(SHFLI) | |||
5761 | NODE_NAME_CASE(VMV_V_X_VL) | |||
5762 | NODE_NAME_CASE(VFMV_V_F_VL) | |||
5763 | NODE_NAME_CASE(VMV_X_S) | |||
5764 | NODE_NAME_CASE(VMV_S_XF_VL) | |||
5765 | NODE_NAME_CASE(SPLAT_VECTOR_I64) | |||
5766 | NODE_NAME_CASE(READ_VLENB) | |||
5767 | NODE_NAME_CASE(TRUNCATE_VECTOR_VL) | |||
5768 | NODE_NAME_CASE(VLEFF) | |||
5769 | NODE_NAME_CASE(VLEFF_MASK) | |||
5770 | NODE_NAME_CASE(VSLIDEUP_VL) | |||
5771 | NODE_NAME_CASE(VSLIDEDOWN_VL) | |||
5772 | NODE_NAME_CASE(VID_VL) | |||
5773 | NODE_NAME_CASE(VFNCVT_ROD_VL) | |||
5774 | NODE_NAME_CASE(VECREDUCE_ADD) | |||
5775 | NODE_NAME_CASE(VECREDUCE_UMAX) | |||
5776 | NODE_NAME_CASE(VECREDUCE_SMAX) | |||
5777 | NODE_NAME_CASE(VECREDUCE_UMIN) | |||
5778 | NODE_NAME_CASE(VECREDUCE_SMIN) | |||
5779 | NODE_NAME_CASE(VECREDUCE_AND) | |||
5780 | NODE_NAME_CASE(VECREDUCE_OR) | |||
5781 | NODE_NAME_CASE(VECREDUCE_XOR) | |||
5782 | NODE_NAME_CASE(VECREDUCE_FADD) | |||
5783 | NODE_NAME_CASE(VECREDUCE_SEQ_FADD) | |||
5784 | NODE_NAME_CASE(ADD_VL) | |||
5785 | NODE_NAME_CASE(AND_VL) | |||
5786 | NODE_NAME_CASE(MUL_VL) | |||
5787 | NODE_NAME_CASE(OR_VL) | |||
5788 | NODE_NAME_CASE(SDIV_VL) | |||
5789 | NODE_NAME_CASE(SHL_VL) | |||
5790 | NODE_NAME_CASE(SREM_VL) | |||
5791 | NODE_NAME_CASE(SRA_VL) | |||
5792 | NODE_NAME_CASE(SRL_VL) | |||
5793 | NODE_NAME_CASE(SUB_VL) | |||
5794 | NODE_NAME_CASE(UDIV_VL) | |||
5795 | NODE_NAME_CASE(UREM_VL) | |||
5796 | NODE_NAME_CASE(XOR_VL) | |||
5797 | NODE_NAME_CASE(FADD_VL) | |||
5798 | NODE_NAME_CASE(FSUB_VL) | |||
5799 | NODE_NAME_CASE(FMUL_VL) | |||
5800 | NODE_NAME_CASE(FDIV_VL) | |||
5801 | NODE_NAME_CASE(FNEG_VL) | |||
5802 | NODE_NAME_CASE(FABS_VL) | |||
5803 | NODE_NAME_CASE(FSQRT_VL) | |||
5804 | NODE_NAME_CASE(FMA_VL) | |||
5805 | NODE_NAME_CASE(SMIN_VL) | |||
5806 | NODE_NAME_CASE(SMAX_VL) | |||
5807 | NODE_NAME_CASE(UMIN_VL) | |||
5808 | NODE_NAME_CASE(UMAX_VL) | |||
5809 | NODE_NAME_CASE(MULHS_VL) | |||
5810 | NODE_NAME_CASE(MULHU_VL) | |||
5811 | NODE_NAME_CASE(FP_TO_SINT_VL) | |||
5812 | NODE_NAME_CASE(FP_TO_UINT_VL) | |||
5813 | NODE_NAME_CASE(SINT_TO_FP_VL) | |||
5814 | NODE_NAME_CASE(UINT_TO_FP_VL) | |||
5815 | NODE_NAME_CASE(FP_EXTEND_VL) | |||
5816 | NODE_NAME_CASE(FP_ROUND_VL) | |||
5817 | NODE_NAME_CASE(SETCC_VL) | |||
5818 | NODE_NAME_CASE(VSELECT_VL) | |||
5819 | NODE_NAME_CASE(VMAND_VL) | |||
5820 | NODE_NAME_CASE(VMOR_VL) | |||
5821 | NODE_NAME_CASE(VMXOR_VL) | |||
5822 | NODE_NAME_CASE(VMCLR_VL) | |||
5823 | NODE_NAME_CASE(VMSET_VL) | |||
5824 | NODE_NAME_CASE(VRGATHER_VX_VL) | |||
5825 | NODE_NAME_CASE(VSEXT_VL) | |||
5826 | NODE_NAME_CASE(VZEXT_VL) | |||
5827 | NODE_NAME_CASE(VLE_VL) | |||
5828 | NODE_NAME_CASE(VSE_VL) | |||
5829 | } | |||
5830 | // clang-format on | |||
5831 | return nullptr; | |||
5832 | #undef NODE_NAME_CASE | |||
5833 | } | |||
5834 | ||||
5835 | /// getConstraintType - Given a constraint letter, return the type of | |||
5836 | /// constraint it is for this target. | |||
5837 | RISCVTargetLowering::ConstraintType | |||
5838 | RISCVTargetLowering::getConstraintType(StringRef Constraint) const { | |||
5839 | if (Constraint.size() == 1) { | |||
5840 | switch (Constraint[0]) { | |||
5841 | default: | |||
5842 | break; | |||
5843 | case 'f': | |||
5844 | return C_RegisterClass; | |||
5845 | case 'I': | |||
5846 | case 'J': | |||
5847 | case 'K': | |||
5848 | return C_Immediate; | |||
5849 | case 'A': | |||
585 |