File: llvm/include/llvm/CodeGen/SelectionDAGNodes.h
Warning: line 1114, column 10: Called C++ object pointer is null
1 | //===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation --------===// | |||
2 | // | |||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
4 | // See https://llvm.org/LICENSE.txt for license information. | |||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
6 | // | |||
7 | //===----------------------------------------------------------------------===// | |||
8 | // | |||
9 | // This file defines the interfaces that RISCV uses to lower LLVM code into a | |||
10 | // selection DAG. | |||
11 | // | |||
12 | //===----------------------------------------------------------------------===// | |||
13 | ||||
14 | #include "RISCVISelLowering.h" | |||
15 | #include "MCTargetDesc/RISCVMatInt.h" | |||
16 | #include "RISCV.h" | |||
17 | #include "RISCVMachineFunctionInfo.h" | |||
18 | #include "RISCVRegisterInfo.h" | |||
19 | #include "RISCVSubtarget.h" | |||
20 | #include "RISCVTargetMachine.h" | |||
21 | #include "llvm/ADT/SmallSet.h" | |||
22 | #include "llvm/ADT/Statistic.h" | |||
23 | #include "llvm/CodeGen/MachineFrameInfo.h" | |||
24 | #include "llvm/CodeGen/MachineFunction.h" | |||
25 | #include "llvm/CodeGen/MachineInstrBuilder.h" | |||
26 | #include "llvm/CodeGen/MachineRegisterInfo.h" | |||
27 | #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" | |||
28 | #include "llvm/CodeGen/ValueTypes.h" | |||
29 | #include "llvm/IR/DiagnosticInfo.h" | |||
30 | #include "llvm/IR/DiagnosticPrinter.h" | |||
31 | #include "llvm/IR/IntrinsicsRISCV.h" | |||
32 | #include "llvm/IR/IRBuilder.h" | |||
33 | #include "llvm/Support/Debug.h" | |||
34 | #include "llvm/Support/ErrorHandling.h" | |||
35 | #include "llvm/Support/KnownBits.h" | |||
36 | #include "llvm/Support/MathExtras.h" | |||
37 | #include "llvm/Support/raw_ostream.h" | |||
38 | ||||
39 | using namespace llvm; | |||
40 | ||||
41 | #define DEBUG_TYPE "riscv-lower" | |||
42 | ||||
43 | STATISTIC(NumTailCalls, "Number of tail calls"); | |||
44 | ||||
45 | RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM, | |||
46 | const RISCVSubtarget &STI) | |||
47 | : TargetLowering(TM), Subtarget(STI) { | |||
48 | ||||
49 | if (Subtarget.isRV32E()) | |||
50 | report_fatal_error("Codegen not yet implemented for RV32E"); | |||
51 | ||||
52 | RISCVABI::ABI ABI = Subtarget.getTargetABI(); | |||
53 | assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI"); | |||
54 | ||||
55 | if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) && | |||
56 | !Subtarget.hasStdExtF()) { | |||
57 | errs() << "Hard-float 'f' ABI can't be used for a target that " | |||
58 | "doesn't support the F instruction set extension (ignoring " | |||
59 | "target-abi)\n"; | |||
60 | ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32; | |||
61 | } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) && | |||
62 | !Subtarget.hasStdExtD()) { | |||
63 | errs() << "Hard-float 'd' ABI can't be used for a target that " | |||
64 | "doesn't support the D instruction set extension (ignoring " | |||
65 | "target-abi)\n"; | |||
66 | ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32; | |||
67 | } | |||
68 | ||||
69 | switch (ABI) { | |||
70 | default: | |||
71 | report_fatal_error("Don't know how to lower this ABI"); | |||
72 | case RISCVABI::ABI_ILP32: | |||
73 | case RISCVABI::ABI_ILP32F: | |||
74 | case RISCVABI::ABI_ILP32D: | |||
75 | case RISCVABI::ABI_LP64: | |||
76 | case RISCVABI::ABI_LP64F: | |||
77 | case RISCVABI::ABI_LP64D: | |||
78 | break; | |||
79 | } | |||
80 | ||||
81 | MVT XLenVT = Subtarget.getXLenVT(); | |||
82 | ||||
83 | // Set up the register classes. | |||
84 | addRegisterClass(XLenVT, &RISCV::GPRRegClass); | |||
85 | ||||
86 | if (Subtarget.hasStdExtZfh()) | |||
87 | addRegisterClass(MVT::f16, &RISCV::FPR16RegClass); | |||
88 | if (Subtarget.hasStdExtF()) | |||
89 | addRegisterClass(MVT::f32, &RISCV::FPR32RegClass); | |||
90 | if (Subtarget.hasStdExtD()) | |||
91 | addRegisterClass(MVT::f64, &RISCV::FPR64RegClass); | |||
92 | ||||
93 | static const MVT::SimpleValueType BoolVecVTs[] = { | |||
94 | MVT::nxv1i1, MVT::nxv2i1, MVT::nxv4i1, MVT::nxv8i1, | |||
95 | MVT::nxv16i1, MVT::nxv32i1, MVT::nxv64i1}; | |||
96 | static const MVT::SimpleValueType IntVecVTs[] = { | |||
97 | MVT::nxv1i8, MVT::nxv2i8, MVT::nxv4i8, MVT::nxv8i8, MVT::nxv16i8, | |||
98 | MVT::nxv32i8, MVT::nxv64i8, MVT::nxv1i16, MVT::nxv2i16, MVT::nxv4i16, | |||
99 | MVT::nxv8i16, MVT::nxv16i16, MVT::nxv32i16, MVT::nxv1i32, MVT::nxv2i32, | |||
100 | MVT::nxv4i32, MVT::nxv8i32, MVT::nxv16i32, MVT::nxv1i64, MVT::nxv2i64, | |||
101 | MVT::nxv4i64, MVT::nxv8i64}; | |||
102 | static const MVT::SimpleValueType F16VecVTs[] = { | |||
103 | MVT::nxv1f16, MVT::nxv2f16, MVT::nxv4f16, | |||
104 | MVT::nxv8f16, MVT::nxv16f16, MVT::nxv32f16}; | |||
105 | static const MVT::SimpleValueType F32VecVTs[] = { | |||
106 | MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32}; | |||
107 | static const MVT::SimpleValueType F64VecVTs[] = { | |||
108 | MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64}; | |||
109 | ||||
110 | if (Subtarget.hasStdExtV()) { | |||
111 | auto addRegClassForRVV = [this](MVT VT) { | |||
112 | unsigned Size = VT.getSizeInBits().getKnownMinValue(); | |||
113 | assert(Size <= 512 && isPowerOf2_32(Size)); | |||
114 | const TargetRegisterClass *RC; | |||
115 | if (Size <= 64) | |||
116 | RC = &RISCV::VRRegClass; | |||
117 | else if (Size == 128) | |||
118 | RC = &RISCV::VRM2RegClass; | |||
119 | else if (Size == 256) | |||
120 | RC = &RISCV::VRM4RegClass; | |||
121 | else | |||
122 | RC = &RISCV::VRM8RegClass; | |||
123 | ||||
124 | addRegisterClass(VT, RC); | |||
125 | }; | |||
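// Editor's note: a minimal standalone sketch (plain C++, no LLVM headers;
// the returned names merely stand in for the register classes chosen above)
// showing how the known-minimum size selects an LMUL register group:
#include <cassert>
#include <cstdio>

static const char *regClassForMinSize(unsigned Size) {
  assert(Size != 0 && Size <= 512 && (Size & (Size - 1)) == 0);
  if (Size <= 64)
    return "VR";   // fits a single vector register (LMUL <= 1)
  if (Size == 128)
    return "VRM2"; // group of two registers (LMUL == 2)
  if (Size == 256)
    return "VRM4"; // group of four registers (LMUL == 4)
  return "VRM8";   // group of eight registers (LMUL == 8)
}

int main() {
  // nxv4i32 has a known-minimum size of 4 * 32 = 128 bits.
  std::printf("nxv4i32 -> %s\n", regClassForMinSize(128)); // VRM2
  return 0;
}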
126 | ||||
127 | for (MVT VT : BoolVecVTs) | |||
128 | addRegClassForRVV(VT); | |||
129 | for (MVT VT : IntVecVTs) | |||
130 | addRegClassForRVV(VT); | |||
131 | ||||
132 | if (Subtarget.hasStdExtZfh()) | |||
133 | for (MVT VT : F16VecVTs) | |||
134 | addRegClassForRVV(VT); | |||
135 | ||||
136 | if (Subtarget.hasStdExtF()) | |||
137 | for (MVT VT : F32VecVTs) | |||
138 | addRegClassForRVV(VT); | |||
139 | ||||
140 | if (Subtarget.hasStdExtD()) | |||
141 | for (MVT VT : F64VecVTs) | |||
142 | addRegClassForRVV(VT); | |||
143 | ||||
144 | if (Subtarget.useRVVForFixedLengthVectors()) { | |||
145 | auto addRegClassForFixedVectors = [this](MVT VT) { | |||
146 | MVT ContainerVT = getContainerForFixedLengthVector(VT); | |||
147 | unsigned RCID = getRegClassIDForVecVT(ContainerVT); | |||
148 | const RISCVRegisterInfo &TRI = *Subtarget.getRegisterInfo(); | |||
149 | addRegisterClass(VT, TRI.getRegClass(RCID)); | |||
150 | }; | |||
151 | for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) | |||
152 | if (useRVVForFixedLengthVectorVT(VT)) | |||
153 | addRegClassForFixedVectors(VT); | |||
154 | ||||
155 | for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) | |||
156 | if (useRVVForFixedLengthVectorVT(VT)) | |||
157 | addRegClassForFixedVectors(VT); | |||
158 | } | |||
159 | } | |||
160 | ||||
161 | // Compute derived properties from the register classes. | |||
162 | computeRegisterProperties(STI.getRegisterInfo()); | |||
163 | ||||
164 | setStackPointerRegisterToSaveRestore(RISCV::X2); | |||
165 | ||||
166 | for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}) | |||
167 | setLoadExtAction(N, XLenVT, MVT::i1, Promote); | |||
168 | ||||
169 | // TODO: add all necessary setOperationAction calls. | |||
170 | setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand); | |||
171 | ||||
172 | setOperationAction(ISD::BR_JT, MVT::Other, Expand); | |||
173 | setOperationAction(ISD::BR_CC, XLenVT, Expand); | |||
174 | setOperationAction(ISD::BRCOND, MVT::Other, Custom); | |||
175 | setOperationAction(ISD::SELECT_CC, XLenVT, Expand); | |||
176 | ||||
177 | setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); | |||
178 | setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); | |||
179 | ||||
180 | setOperationAction(ISD::VASTART, MVT::Other, Custom); | |||
181 | setOperationAction(ISD::VAARG, MVT::Other, Expand); | |||
182 | setOperationAction(ISD::VACOPY, MVT::Other, Expand); | |||
183 | setOperationAction(ISD::VAEND, MVT::Other, Expand); | |||
184 | ||||
185 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); | |||
186 | if (!Subtarget.hasStdExtZbb()) { | |||
187 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand); | |||
188 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand); | |||
189 | } | |||
190 | ||||
191 | if (Subtarget.is64Bit()) { | |||
192 | setOperationAction(ISD::ADD, MVT::i32, Custom); | |||
193 | setOperationAction(ISD::SUB, MVT::i32, Custom); | |||
194 | setOperationAction(ISD::SHL, MVT::i32, Custom); | |||
195 | setOperationAction(ISD::SRA, MVT::i32, Custom); | |||
196 | setOperationAction(ISD::SRL, MVT::i32, Custom); | |||
197 | ||||
198 | setOperationAction(ISD::UADDO, MVT::i32, Custom); | |||
199 | setOperationAction(ISD::USUBO, MVT::i32, Custom); | |||
200 | setOperationAction(ISD::UADDSAT, MVT::i32, Custom); | |||
201 | setOperationAction(ISD::USUBSAT, MVT::i32, Custom); | |||
202 | } | |||
203 | ||||
204 | if (!Subtarget.hasStdExtM()) { | |||
205 | setOperationAction(ISD::MUL, XLenVT, Expand); | |||
206 | setOperationAction(ISD::MULHS, XLenVT, Expand); | |||
207 | setOperationAction(ISD::MULHU, XLenVT, Expand); | |||
208 | setOperationAction(ISD::SDIV, XLenVT, Expand); | |||
209 | setOperationAction(ISD::UDIV, XLenVT, Expand); | |||
210 | setOperationAction(ISD::SREM, XLenVT, Expand); | |||
211 | setOperationAction(ISD::UREM, XLenVT, Expand); | |||
212 | } else { | |||
213 | if (Subtarget.is64Bit()) { | |||
214 | setOperationAction(ISD::MUL, MVT::i32, Custom); | |||
215 | setOperationAction(ISD::MUL, MVT::i128, Custom); | |||
216 | ||||
217 | setOperationAction(ISD::SDIV, MVT::i8, Custom); | |||
218 | setOperationAction(ISD::UDIV, MVT::i8, Custom); | |||
219 | setOperationAction(ISD::UREM, MVT::i8, Custom); | |||
220 | setOperationAction(ISD::SDIV, MVT::i16, Custom); | |||
221 | setOperationAction(ISD::UDIV, MVT::i16, Custom); | |||
222 | setOperationAction(ISD::UREM, MVT::i16, Custom); | |||
223 | setOperationAction(ISD::SDIV, MVT::i32, Custom); | |||
224 | setOperationAction(ISD::UDIV, MVT::i32, Custom); | |||
225 | setOperationAction(ISD::UREM, MVT::i32, Custom); | |||
226 | } else { | |||
227 | setOperationAction(ISD::MUL, MVT::i64, Custom); | |||
228 | } | |||
229 | } | |||
230 | ||||
231 | setOperationAction(ISD::SDIVREM, XLenVT, Expand); | |||
232 | setOperationAction(ISD::UDIVREM, XLenVT, Expand); | |||
233 | setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand); | |||
234 | setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand); | |||
235 | ||||
236 | setOperationAction(ISD::SHL_PARTS, XLenVT, Custom); | |||
237 | setOperationAction(ISD::SRL_PARTS, XLenVT, Custom); | |||
238 | setOperationAction(ISD::SRA_PARTS, XLenVT, Custom); | |||
239 | ||||
240 | if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) { | |||
241 | if (Subtarget.is64Bit()) { | |||
242 | setOperationAction(ISD::ROTL, MVT::i32, Custom); | |||
243 | setOperationAction(ISD::ROTR, MVT::i32, Custom); | |||
244 | } | |||
245 | } else { | |||
246 | setOperationAction(ISD::ROTL, XLenVT, Expand); | |||
247 | setOperationAction(ISD::ROTR, XLenVT, Expand); | |||
248 | } | |||
249 | ||||
250 | if (Subtarget.hasStdExtZbp()) { | |||
251 | // Custom lower bswap/bitreverse so we can convert them to GREVI to enable | |||
252 | // more combining. | |||
253 | setOperationAction(ISD::BITREVERSE, XLenVT, Custom); | |||
254 | setOperationAction(ISD::BSWAP, XLenVT, Custom); | |||
255 | setOperationAction(ISD::BITREVERSE, MVT::i8, Custom); | |||
256 | // BSWAP i8 doesn't exist. | |||
257 | setOperationAction(ISD::BITREVERSE, MVT::i16, Custom); | |||
258 | setOperationAction(ISD::BSWAP, MVT::i16, Custom); | |||
259 | ||||
260 | if (Subtarget.is64Bit()) { | |||
261 | setOperationAction(ISD::BITREVERSE, MVT::i32, Custom); | |||
262 | setOperationAction(ISD::BSWAP, MVT::i32, Custom); | |||
263 | } | |||
264 | } else { | |||
265 | // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll | |||
266 | // pattern match it directly in isel. | |||
267 | setOperationAction(ISD::BSWAP, XLenVT, | |||
268 | Subtarget.hasStdExtZbb() ? Legal : Expand); | |||
269 | } | |||
270 | ||||
271 | if (Subtarget.hasStdExtZbb()) { | |||
272 | setOperationAction(ISD::SMIN, XLenVT, Legal); | |||
273 | setOperationAction(ISD::SMAX, XLenVT, Legal); | |||
274 | setOperationAction(ISD::UMIN, XLenVT, Legal); | |||
275 | setOperationAction(ISD::UMAX, XLenVT, Legal); | |||
276 | ||||
277 | if (Subtarget.is64Bit()) { | |||
278 | setOperationAction(ISD::CTTZ, MVT::i32, Custom); | |||
279 | setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom); | |||
280 | setOperationAction(ISD::CTLZ, MVT::i32, Custom); | |||
281 | setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom); | |||
282 | } | |||
283 | } else { | |||
284 | setOperationAction(ISD::CTTZ, XLenVT, Expand); | |||
285 | setOperationAction(ISD::CTLZ, XLenVT, Expand); | |||
286 | setOperationAction(ISD::CTPOP, XLenVT, Expand); | |||
287 | } | |||
288 | ||||
289 | if (Subtarget.hasStdExtZbt()) { | |||
290 | setOperationAction(ISD::FSHL, XLenVT, Custom); | |||
291 | setOperationAction(ISD::FSHR, XLenVT, Custom); | |||
292 | setOperationAction(ISD::SELECT, XLenVT, Legal); | |||
293 | ||||
294 | if (Subtarget.is64Bit()) { | |||
295 | setOperationAction(ISD::FSHL, MVT::i32, Custom); | |||
296 | setOperationAction(ISD::FSHR, MVT::i32, Custom); | |||
297 | } | |||
298 | } else { | |||
299 | setOperationAction(ISD::SELECT, XLenVT, Custom); | |||
300 | } | |||
301 | ||||
302 | ISD::CondCode FPCCToExpand[] = { | |||
303 | ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT, | |||
304 | ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT, | |||
305 | ISD::SETGE, ISD::SETNE, ISD::SETO, ISD::SETUO}; | |||
306 | ||||
307 | ISD::NodeType FPOpToExpand[] = { | |||
308 | ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FP16_TO_FP, | |||
309 | ISD::FP_TO_FP16}; | |||
310 | ||||
311 | if (Subtarget.hasStdExtZfh()) | |||
312 | setOperationAction(ISD::BITCAST, MVT::i16, Custom); | |||
313 | ||||
314 | if (Subtarget.hasStdExtZfh()) { | |||
315 | setOperationAction(ISD::FMINNUM, MVT::f16, Legal); | |||
316 | setOperationAction(ISD::FMAXNUM, MVT::f16, Legal); | |||
317 | setOperationAction(ISD::LRINT, MVT::f16, Legal); | |||
318 | setOperationAction(ISD::LLRINT, MVT::f16, Legal); | |||
319 | setOperationAction(ISD::LROUND, MVT::f16, Legal); | |||
320 | setOperationAction(ISD::LLROUND, MVT::f16, Legal); | |||
321 | for (auto CC : FPCCToExpand) | |||
322 | setCondCodeAction(CC, MVT::f16, Expand); | |||
323 | setOperationAction(ISD::SELECT_CC, MVT::f16, Expand); | |||
324 | setOperationAction(ISD::SELECT, MVT::f16, Custom); | |||
325 | setOperationAction(ISD::BR_CC, MVT::f16, Expand); | |||
326 | for (auto Op : FPOpToExpand) | |||
327 | setOperationAction(Op, MVT::f16, Expand); | |||
328 | } | |||
329 | ||||
330 | if (Subtarget.hasStdExtF()) { | |||
331 | setOperationAction(ISD::FMINNUM, MVT::f32, Legal); | |||
332 | setOperationAction(ISD::FMAXNUM, MVT::f32, Legal); | |||
333 | setOperationAction(ISD::LRINT, MVT::f32, Legal); | |||
334 | setOperationAction(ISD::LLRINT, MVT::f32, Legal); | |||
335 | setOperationAction(ISD::LROUND, MVT::f32, Legal); | |||
336 | setOperationAction(ISD::LLROUND, MVT::f32, Legal); | |||
337 | for (auto CC : FPCCToExpand) | |||
338 | setCondCodeAction(CC, MVT::f32, Expand); | |||
339 | setOperationAction(ISD::SELECT_CC, MVT::f32, Expand); | |||
340 | setOperationAction(ISD::SELECT, MVT::f32, Custom); | |||
341 | setOperationAction(ISD::BR_CC, MVT::f32, Expand); | |||
342 | for (auto Op : FPOpToExpand) | |||
343 | setOperationAction(Op, MVT::f32, Expand); | |||
344 | setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand); | |||
345 | setTruncStoreAction(MVT::f32, MVT::f16, Expand); | |||
346 | } | |||
347 | ||||
348 | if (Subtarget.hasStdExtF() && Subtarget.is64Bit()) | |||
349 | setOperationAction(ISD::BITCAST, MVT::i32, Custom); | |||
350 | ||||
351 | if (Subtarget.hasStdExtD()) { | |||
352 | setOperationAction(ISD::FMINNUM, MVT::f64, Legal); | |||
353 | setOperationAction(ISD::FMAXNUM, MVT::f64, Legal); | |||
354 | setOperationAction(ISD::LRINT, MVT::f64, Legal); | |||
355 | setOperationAction(ISD::LLRINT, MVT::f64, Legal); | |||
356 | setOperationAction(ISD::LROUND, MVT::f64, Legal); | |||
357 | setOperationAction(ISD::LLROUND, MVT::f64, Legal); | |||
358 | for (auto CC : FPCCToExpand) | |||
359 | setCondCodeAction(CC, MVT::f64, Expand); | |||
360 | setOperationAction(ISD::SELECT_CC, MVT::f64, Expand); | |||
361 | setOperationAction(ISD::SELECT, MVT::f64, Custom); | |||
362 | setOperationAction(ISD::BR_CC, MVT::f64, Expand); | |||
363 | setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand); | |||
364 | setTruncStoreAction(MVT::f64, MVT::f32, Expand); | |||
365 | for (auto Op : FPOpToExpand) | |||
366 | setOperationAction(Op, MVT::f64, Expand); | |||
367 | setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand); | |||
368 | setTruncStoreAction(MVT::f64, MVT::f16, Expand); | |||
369 | } | |||
370 | ||||
371 | if (Subtarget.is64Bit()) { | |||
372 | setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom); | |||
373 | setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom); | |||
374 | setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom); | |||
375 | setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom); | |||
376 | } | |||
377 | ||||
378 | if (Subtarget.hasStdExtF()) { | |||
379 | setOperationAction(ISD::FP_TO_UINT_SAT, XLenVT, Custom); | |||
380 | setOperationAction(ISD::FP_TO_SINT_SAT, XLenVT, Custom); | |||
381 | ||||
382 | setOperationAction(ISD::FLT_ROUNDS_, XLenVT, Custom); | |||
383 | setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom); | |||
384 | } | |||
385 | ||||
386 | setOperationAction(ISD::GlobalAddress, XLenVT, Custom); | |||
387 | setOperationAction(ISD::BlockAddress, XLenVT, Custom); | |||
388 | setOperationAction(ISD::ConstantPool, XLenVT, Custom); | |||
389 | setOperationAction(ISD::JumpTable, XLenVT, Custom); | |||
390 | ||||
391 | setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom); | |||
392 | ||||
393 | // TODO: On M-mode only targets, the cycle[h] CSR may not be present. | |||
394 | // Unfortunately this can't be determined just from the ISA naming string. | |||
395 | setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, | |||
396 | Subtarget.is64Bit() ? Legal : Custom); | |||
397 | ||||
398 | setOperationAction(ISD::TRAP, MVT::Other, Legal); | |||
399 | setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal); | |||
400 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); | |||
401 | if (Subtarget.is64Bit()) | |||
402 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom); | |||
403 | ||||
404 | if (Subtarget.hasStdExtA()) { | |||
405 | setMaxAtomicSizeInBitsSupported(Subtarget.getXLen()); | |||
406 | setMinCmpXchgSizeInBits(32); | |||
407 | } else { | |||
408 | setMaxAtomicSizeInBitsSupported(0); | |||
409 | } | |||
410 | ||||
411 | setBooleanContents(ZeroOrOneBooleanContent); | |||
412 | ||||
413 | if (Subtarget.hasStdExtV()) { | |||
414 | setBooleanVectorContents(ZeroOrOneBooleanContent); | |||
415 | ||||
416 | setOperationAction(ISD::VSCALE, XLenVT, Custom); | |||
417 | ||||
418 | // RVV intrinsics may have illegal operands. | |||
419 | // We also need to custom legalize vmv.x.s. | |||
420 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom); | |||
421 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom); | |||
422 | setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom); | |||
423 | setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom); | |||
424 | if (Subtarget.is64Bit()) { | |||
425 | setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom); | |||
426 | } else { | |||
427 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom); | |||
428 | setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom); | |||
429 | } | |||
430 | ||||
431 | setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom); | |||
432 | ||||
433 | static unsigned IntegerVPOps[] = { | |||
434 | ISD::VP_ADD, ISD::VP_SUB, ISD::VP_MUL, ISD::VP_SDIV, ISD::VP_UDIV, | |||
435 | ISD::VP_SREM, ISD::VP_UREM, ISD::VP_AND, ISD::VP_OR, ISD::VP_XOR, | |||
436 | ISD::VP_ASHR, ISD::VP_LSHR, ISD::VP_SHL}; | |||
437 | ||||
438 | static unsigned FloatingPointVPOps[] = {ISD::VP_FADD, ISD::VP_FSUB, | |||
439 | ISD::VP_FMUL, ISD::VP_FDIV}; | |||
440 | ||||
441 | if (!Subtarget.is64Bit()) { | |||
442 | // We must custom-lower certain vXi64 operations on RV32 due to the vector | |||
443 | // element type being illegal. | |||
444 | setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::i64, Custom); | |||
445 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::i64, Custom); | |||
446 | ||||
447 | setOperationAction(ISD::VECREDUCE_ADD, MVT::i64, Custom); | |||
448 | setOperationAction(ISD::VECREDUCE_AND, MVT::i64, Custom); | |||
449 | setOperationAction(ISD::VECREDUCE_OR, MVT::i64, Custom); | |||
450 | setOperationAction(ISD::VECREDUCE_XOR, MVT::i64, Custom); | |||
451 | setOperationAction(ISD::VECREDUCE_SMAX, MVT::i64, Custom); | |||
452 | setOperationAction(ISD::VECREDUCE_SMIN, MVT::i64, Custom); | |||
453 | setOperationAction(ISD::VECREDUCE_UMAX, MVT::i64, Custom); | |||
454 | setOperationAction(ISD::VECREDUCE_UMIN, MVT::i64, Custom); | |||
455 | } | |||
456 | ||||
457 | for (MVT VT : BoolVecVTs) { | |||
458 | setOperationAction(ISD::SPLAT_VECTOR, VT, Custom); | |||
459 | ||||
460 | // Mask VTs are custom-expanded into a series of standard nodes | |||
461 | setOperationAction(ISD::TRUNCATE, VT, Custom); | |||
462 | setOperationAction(ISD::CONCAT_VECTORS, VT, Custom); | |||
463 | setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); | |||
464 | setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); | |||
465 | ||||
466 | setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); | |||
467 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); | |||
468 | ||||
469 | setOperationAction(ISD::SELECT, VT, Custom); | |||
470 | setOperationAction(ISD::SELECT_CC, VT, Expand); | |||
471 | setOperationAction(ISD::VSELECT, VT, Expand); | |||
472 | ||||
473 | setOperationAction(ISD::VECREDUCE_AND, VT, Custom); | |||
474 | setOperationAction(ISD::VECREDUCE_OR, VT, Custom); | |||
475 | setOperationAction(ISD::VECREDUCE_XOR, VT, Custom); | |||
476 | ||||
477 | // RVV has native int->float & float->int conversions where the | |||
478 | // element type sizes are within one power-of-two of each other. Any | |||
479 | // wider distances between type sizes have to be lowered as sequences | |||
480 | // which progressively narrow the gap in stages. | |||
481 | setOperationAction(ISD::SINT_TO_FP, VT, Custom); | |||
482 | setOperationAction(ISD::UINT_TO_FP, VT, Custom); | |||
483 | setOperationAction(ISD::FP_TO_SINT, VT, Custom); | |||
484 | setOperationAction(ISD::FP_TO_UINT, VT, Custom); | |||
485 | ||||
486 | // Expand all extending loads to types larger than this, and truncating | |||
487 | // stores from types larger than this. | |||
488 | for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) { | |||
489 | setTruncStoreAction(OtherVT, VT, Expand); | |||
490 | setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand); | |||
491 | setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand); | |||
492 | setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand); | |||
493 | } | |||
494 | } | |||
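// Editor's note: the power-of-two staging described in the comment above
// (and repeated for the integer vector types below) can be illustrated with
// a standalone sketch (plain C++, not LLVM code): each native RVV convert
// moves the element width by at most one power of two, so wider conversions
// become short chains of such steps.
#include <cstdio>

static unsigned powerOfTwoSteps(unsigned SrcBits, unsigned DstBits) {
  unsigned Steps = 0;
  while (SrcBits != DstBits) {
    SrcBits = SrcBits < DstBits ? SrcBits * 2 : SrcBits / 2;
    ++Steps;
  }
  return Steps;
}

int main() {
  // Widths 8 -> 64 (e.g. an i8 -> f64 conversion) are three power-of-two
  // steps apart (8 -> 16 -> 32 -> 64), so no single native convert covers
  // them and the lowering emits a staged sequence instead.
  std::printf("%u\n", powerOfTwoSteps(8, 64)); // prints 3
  return 0;
}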
495 | ||||
496 | for (MVT VT : IntVecVTs) { | |||
497 | setOperationAction(ISD::SPLAT_VECTOR, VT, Legal); | |||
498 | setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom); | |||
499 | ||||
500 | setOperationAction(ISD::SMIN, VT, Legal); | |||
501 | setOperationAction(ISD::SMAX, VT, Legal); | |||
502 | setOperationAction(ISD::UMIN, VT, Legal); | |||
503 | setOperationAction(ISD::UMAX, VT, Legal); | |||
504 | ||||
505 | setOperationAction(ISD::ROTL, VT, Expand); | |||
506 | setOperationAction(ISD::ROTR, VT, Expand); | |||
507 | ||||
508 | // Custom-lower extensions and truncations from/to mask types. | |||
509 | setOperationAction(ISD::ANY_EXTEND, VT, Custom); | |||
510 | setOperationAction(ISD::SIGN_EXTEND, VT, Custom); | |||
511 | setOperationAction(ISD::ZERO_EXTEND, VT, Custom); | |||
512 | ||||
513 | // RVV has native int->float & float->int conversions where the | |||
514 | // element type sizes are within one power-of-two of each other. Any | |||
515 | // wider distances between type sizes have to be lowered as sequences | |||
516 | // which progressively narrow the gap in stages. | |||
517 | setOperationAction(ISD::SINT_TO_FP, VT, Custom); | |||
518 | setOperationAction(ISD::UINT_TO_FP, VT, Custom); | |||
519 | setOperationAction(ISD::FP_TO_SINT, VT, Custom); | |||
520 | setOperationAction(ISD::FP_TO_UINT, VT, Custom); | |||
521 | ||||
522 | setOperationAction(ISD::SADDSAT, VT, Legal); | |||
523 | setOperationAction(ISD::UADDSAT, VT, Legal); | |||
524 | setOperationAction(ISD::SSUBSAT, VT, Legal); | |||
525 | setOperationAction(ISD::USUBSAT, VT, Legal); | |||
526 | ||||
527 | // Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL" | |||
528 | // nodes which truncate by one power of two at a time. | |||
529 | setOperationAction(ISD::TRUNCATE, VT, Custom); | |||
530 | ||||
531 | // Custom-lower insert/extract operations to simplify patterns. | |||
532 | setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); | |||
533 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); | |||
534 | ||||
535 | // Custom-lower reduction operations to set up the corresponding custom | |||
536 | // nodes' operands. | |||
537 | setOperationAction(ISD::VECREDUCE_ADD, VT, Custom); | |||
538 | setOperationAction(ISD::VECREDUCE_AND, VT, Custom); | |||
539 | setOperationAction(ISD::VECREDUCE_OR, VT, Custom); | |||
540 | setOperationAction(ISD::VECREDUCE_XOR, VT, Custom); | |||
541 | setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom); | |||
542 | setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom); | |||
543 | setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom); | |||
544 | setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom); | |||
545 | ||||
546 | for (unsigned VPOpc : IntegerVPOps) | |||
547 | setOperationAction(VPOpc, VT, Custom); | |||
548 | ||||
549 | setOperationAction(ISD::LOAD, VT, Custom); | |||
550 | setOperationAction(ISD::STORE, VT, Custom); | |||
551 | ||||
552 | setOperationAction(ISD::MLOAD, VT, Custom); | |||
553 | setOperationAction(ISD::MSTORE, VT, Custom); | |||
554 | setOperationAction(ISD::MGATHER, VT, Custom); | |||
555 | setOperationAction(ISD::MSCATTER, VT, Custom); | |||
556 | ||||
557 | setOperationAction(ISD::CONCAT_VECTORS, VT, Custom); | |||
558 | setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); | |||
559 | setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); | |||
560 | ||||
561 | setOperationAction(ISD::SELECT, VT, Custom); | |||
562 | setOperationAction(ISD::SELECT_CC, VT, Expand); | |||
563 | ||||
564 | setOperationAction(ISD::STEP_VECTOR, VT, Custom); | |||
565 | setOperationAction(ISD::VECTOR_REVERSE, VT, Custom); | |||
566 | ||||
567 | for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) { | |||
568 | setTruncStoreAction(VT, OtherVT, Expand); | |||
569 | setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand); | |||
570 | setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand); | |||
571 | setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand); | |||
572 | } | |||
573 | } | |||
574 | ||||
575 | // Expand various CCs to best match the RVV ISA, which natively supports UNE | |||
576 | // but no other unordered comparisons, and supports all ordered comparisons | |||
577 | // except ONE. Additionally, we expand GT,OGT,GE,OGE for optimization | |||
578 | // purposes; they are expanded to their swapped-operand CCs (LT,OLT,LE,OLE), | |||
579 | // and we pattern-match those back to the "original", swapping operands once | |||
580 | // more. This way we catch both operations and both "vf" and "fv" forms with | |||
581 | // fewer patterns. | |||
582 | ISD::CondCode VFPCCToExpand[] = { | |||
583 | ISD::SETO, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT, | |||
584 | ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO, | |||
585 | ISD::SETGT, ISD::SETOGT, ISD::SETGE, ISD::SETOGE, | |||
586 | }; | |||
587 | ||||
588 | // Sets common operation actions on RVV floating-point vector types. | |||
589 | const auto SetCommonVFPActions = [&](MVT VT) { | |||
590 | setOperationAction(ISD::SPLAT_VECTOR, VT, Legal); | |||
591 | // RVV has native FP_ROUND & FP_EXTEND conversions where the element type | |||
592 | // sizes are within one power-of-two of each other. Therefore conversions | |||
593 | // between vXf16 and vXf64 must be lowered as sequences which convert via | |||
594 | // vXf32. | |||
595 | setOperationAction(ISD::FP_ROUND, VT, Custom); | |||
596 | setOperationAction(ISD::FP_EXTEND, VT, Custom); | |||
597 | // Custom-lower insert/extract operations to simplify patterns. | |||
598 | setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); | |||
599 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); | |||
600 | // Expand various condition codes (explained above). | |||
601 | for (auto CC : VFPCCToExpand) | |||
602 | setCondCodeAction(CC, VT, Expand); | |||
603 | ||||
604 | setOperationAction(ISD::FMINNUM, VT, Legal); | |||
605 | setOperationAction(ISD::FMAXNUM, VT, Legal); | |||
606 | ||||
607 | setOperationAction(ISD::VECREDUCE_FADD, VT, Custom); | |||
608 | setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom); | |||
609 | setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom); | |||
610 | setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom); | |||
611 | setOperationAction(ISD::FCOPYSIGN, VT, Legal); | |||
612 | ||||
613 | setOperationAction(ISD::LOAD, VT, Custom); | |||
614 | setOperationAction(ISD::STORE, VT, Custom); | |||
615 | ||||
616 | setOperationAction(ISD::MLOAD, VT, Custom); | |||
617 | setOperationAction(ISD::MSTORE, VT, Custom); | |||
618 | setOperationAction(ISD::MGATHER, VT, Custom); | |||
619 | setOperationAction(ISD::MSCATTER, VT, Custom); | |||
620 | ||||
621 | setOperationAction(ISD::SELECT, VT, Custom); | |||
622 | setOperationAction(ISD::SELECT_CC, VT, Expand); | |||
623 | ||||
624 | setOperationAction(ISD::CONCAT_VECTORS, VT, Custom); | |||
625 | setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); | |||
626 | setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); | |||
627 | ||||
628 | setOperationAction(ISD::VECTOR_REVERSE, VT, Custom); | |||
629 | ||||
630 | for (unsigned VPOpc : FloatingPointVPOps) | |||
631 | setOperationAction(VPOpc, VT, Custom); | |||
632 | }; | |||
633 | ||||
634 | // Sets common extload/truncstore actions on RVV floating-point vector | |||
635 | // types. | |||
636 | const auto SetCommonVFPExtLoadTruncStoreActions = | |||
637 | [&](MVT VT, ArrayRef<MVT::SimpleValueType> SmallerVTs) { | |||
638 | for (auto SmallVT : SmallerVTs) { | |||
639 | setTruncStoreAction(VT, SmallVT, Expand); | |||
640 | setLoadExtAction(ISD::EXTLOAD, VT, SmallVT, Expand); | |||
641 | } | |||
642 | }; | |||
643 | ||||
644 | if (Subtarget.hasStdExtZfh()) | |||
645 | for (MVT VT : F16VecVTs) | |||
646 | SetCommonVFPActions(VT); | |||
647 | ||||
648 | for (MVT VT : F32VecVTs) { | |||
649 | if (Subtarget.hasStdExtF()) | |||
650 | SetCommonVFPActions(VT); | |||
651 | SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs); | |||
652 | } | |||
653 | ||||
654 | for (MVT VT : F64VecVTs) { | |||
655 | if (Subtarget.hasStdExtD()) | |||
656 | SetCommonVFPActions(VT); | |||
657 | SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs); | |||
658 | SetCommonVFPExtLoadTruncStoreActions(VT, F32VecVTs); | |||
659 | } | |||
660 | ||||
661 | if (Subtarget.useRVVForFixedLengthVectors()) { | |||
662 | for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) { | |||
663 | if (!useRVVForFixedLengthVectorVT(VT)) | |||
664 | continue; | |||
665 | ||||
666 | // By default everything must be expanded. | |||
667 | for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) | |||
668 | setOperationAction(Op, VT, Expand); | |||
669 | for (MVT OtherVT : MVT::integer_fixedlen_vector_valuetypes()) { | |||
670 | setTruncStoreAction(VT, OtherVT, Expand); | |||
671 | setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand); | |||
672 | setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand); | |||
673 | setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand); | |||
674 | } | |||
675 | ||||
676 | // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed. | |||
677 | setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); | |||
678 | setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); | |||
679 | ||||
680 | setOperationAction(ISD::BUILD_VECTOR, VT, Custom); | |||
681 | setOperationAction(ISD::CONCAT_VECTORS, VT, Custom); | |||
682 | ||||
683 | setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); | |||
684 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); | |||
685 | ||||
686 | setOperationAction(ISD::LOAD, VT, Custom); | |||
687 | setOperationAction(ISD::STORE, VT, Custom); | |||
688 | ||||
689 | setOperationAction(ISD::SETCC, VT, Custom); | |||
690 | ||||
691 | setOperationAction(ISD::SELECT, VT, Custom); | |||
692 | ||||
693 | setOperationAction(ISD::TRUNCATE, VT, Custom); | |||
694 | ||||
695 | setOperationAction(ISD::BITCAST, VT, Custom); | |||
696 | ||||
697 | setOperationAction(ISD::VECREDUCE_AND, VT, Custom); | |||
698 | setOperationAction(ISD::VECREDUCE_OR, VT, Custom); | |||
699 | setOperationAction(ISD::VECREDUCE_XOR, VT, Custom); | |||
700 | ||||
701 | setOperationAction(ISD::SINT_TO_FP, VT, Custom); | |||
702 | setOperationAction(ISD::UINT_TO_FP, VT, Custom); | |||
703 | setOperationAction(ISD::FP_TO_SINT, VT, Custom); | |||
704 | setOperationAction(ISD::FP_TO_UINT, VT, Custom); | |||
705 | ||||
706 | // Operations below are different between masks and other vectors. | |||
707 | if (VT.getVectorElementType() == MVT::i1) { | |||
708 | setOperationAction(ISD::AND, VT, Custom); | |||
709 | setOperationAction(ISD::OR, VT, Custom); | |||
710 | setOperationAction(ISD::XOR, VT, Custom); | |||
711 | continue; | |||
712 | } | |||
713 | ||||
714 | // Use SPLAT_VECTOR to prevent type legalization from destroying the | |||
715 | // splats when type legalizing i64 scalar on RV32. | |||
716 | // FIXME: Use SPLAT_VECTOR for all types? DAGCombine probably needs | |||
717 | // improvements first. | |||
718 | if (!Subtarget.is64Bit() && VT.getVectorElementType() == MVT::i64) { | |||
719 | setOperationAction(ISD::SPLAT_VECTOR, VT, Custom); | |||
720 | setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom); | |||
721 | } | |||
722 | ||||
723 | setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); | |||
724 | setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); | |||
725 | ||||
726 | setOperationAction(ISD::MLOAD, VT, Custom); | |||
727 | setOperationAction(ISD::MSTORE, VT, Custom); | |||
728 | setOperationAction(ISD::MGATHER, VT, Custom); | |||
729 | setOperationAction(ISD::MSCATTER, VT, Custom); | |||
730 | setOperationAction(ISD::ADD, VT, Custom); | |||
731 | setOperationAction(ISD::MUL, VT, Custom); | |||
732 | setOperationAction(ISD::SUB, VT, Custom); | |||
733 | setOperationAction(ISD::AND, VT, Custom); | |||
734 | setOperationAction(ISD::OR, VT, Custom); | |||
735 | setOperationAction(ISD::XOR, VT, Custom); | |||
736 | setOperationAction(ISD::SDIV, VT, Custom); | |||
737 | setOperationAction(ISD::SREM, VT, Custom); | |||
738 | setOperationAction(ISD::UDIV, VT, Custom); | |||
739 | setOperationAction(ISD::UREM, VT, Custom); | |||
740 | setOperationAction(ISD::SHL, VT, Custom); | |||
741 | setOperationAction(ISD::SRA, VT, Custom); | |||
742 | setOperationAction(ISD::SRL, VT, Custom); | |||
743 | ||||
744 | setOperationAction(ISD::SMIN, VT, Custom); | |||
745 | setOperationAction(ISD::SMAX, VT, Custom); | |||
746 | setOperationAction(ISD::UMIN, VT, Custom); | |||
747 | setOperationAction(ISD::UMAX, VT, Custom); | |||
748 | setOperationAction(ISD::ABS, VT, Custom); | |||
749 | ||||
750 | setOperationAction(ISD::MULHS, VT, Custom); | |||
751 | setOperationAction(ISD::MULHU, VT, Custom); | |||
752 | ||||
753 | setOperationAction(ISD::SADDSAT, VT, Custom); | |||
754 | setOperationAction(ISD::UADDSAT, VT, Custom); | |||
755 | setOperationAction(ISD::SSUBSAT, VT, Custom); | |||
756 | setOperationAction(ISD::USUBSAT, VT, Custom); | |||
757 | ||||
758 | setOperationAction(ISD::VSELECT, VT, Custom); | |||
759 | setOperationAction(ISD::SELECT_CC, VT, Expand); | |||
760 | ||||
761 | setOperationAction(ISD::ANY_EXTEND, VT, Custom); | |||
762 | setOperationAction(ISD::SIGN_EXTEND, VT, Custom); | |||
763 | setOperationAction(ISD::ZERO_EXTEND, VT, Custom); | |||
764 | ||||
765 | // Custom-lower reduction operations to set up the corresponding custom | |||
766 | // nodes' operands. | |||
767 | setOperationAction(ISD::VECREDUCE_ADD, VT, Custom); | |||
768 | setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom); | |||
769 | setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom); | |||
770 | setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom); | |||
771 | setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom); | |||
772 | ||||
773 | for (unsigned VPOpc : IntegerVPOps) | |||
774 | setOperationAction(VPOpc, VT, Custom); | |||
775 | } | |||
776 | ||||
777 | for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) { | |||
778 | if (!useRVVForFixedLengthVectorVT(VT)) | |||
779 | continue; | |||
780 | ||||
781 | // By default everything must be expanded. | |||
782 | for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) | |||
783 | setOperationAction(Op, VT, Expand); | |||
784 | for (MVT OtherVT : MVT::fp_fixedlen_vector_valuetypes()) { | |||
785 | setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand); | |||
786 | setTruncStoreAction(VT, OtherVT, Expand); | |||
787 | } | |||
788 | ||||
789 | // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed. | |||
790 | setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); | |||
791 | setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); | |||
792 | ||||
793 | setOperationAction(ISD::BUILD_VECTOR, VT, Custom); | |||
794 | setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); | |||
795 | setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); | |||
796 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); | |||
797 | ||||
798 | setOperationAction(ISD::LOAD, VT, Custom); | |||
799 | setOperationAction(ISD::STORE, VT, Custom); | |||
800 | setOperationAction(ISD::MLOAD, VT, Custom); | |||
801 | setOperationAction(ISD::MSTORE, VT, Custom); | |||
802 | setOperationAction(ISD::MGATHER, VT, Custom); | |||
803 | setOperationAction(ISD::MSCATTER, VT, Custom); | |||
804 | setOperationAction(ISD::FADD, VT, Custom); | |||
805 | setOperationAction(ISD::FSUB, VT, Custom); | |||
806 | setOperationAction(ISD::FMUL, VT, Custom); | |||
807 | setOperationAction(ISD::FDIV, VT, Custom); | |||
808 | setOperationAction(ISD::FNEG, VT, Custom); | |||
809 | setOperationAction(ISD::FABS, VT, Custom); | |||
810 | setOperationAction(ISD::FCOPYSIGN, VT, Custom); | |||
811 | setOperationAction(ISD::FSQRT, VT, Custom); | |||
812 | setOperationAction(ISD::FMA, VT, Custom); | |||
813 | setOperationAction(ISD::FMINNUM, VT, Custom); | |||
814 | setOperationAction(ISD::FMAXNUM, VT, Custom); | |||
815 | ||||
816 | setOperationAction(ISD::FP_ROUND, VT, Custom); | |||
817 | setOperationAction(ISD::FP_EXTEND, VT, Custom); | |||
818 | ||||
819 | for (auto CC : VFPCCToExpand) | |||
820 | setCondCodeAction(CC, VT, Expand); | |||
821 | ||||
822 | setOperationAction(ISD::VSELECT, VT, Custom); | |||
823 | setOperationAction(ISD::SELECT, VT, Custom); | |||
824 | setOperationAction(ISD::SELECT_CC, VT, Expand); | |||
825 | ||||
826 | setOperationAction(ISD::BITCAST, VT, Custom); | |||
827 | ||||
828 | setOperationAction(ISD::VECREDUCE_FADD, VT, Custom); | |||
829 | setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom); | |||
830 | setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom); | |||
831 | setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom); | |||
832 | ||||
833 | for (unsigned VPOpc : FloatingPointVPOps) | |||
834 | setOperationAction(VPOpc, VT, Custom); | |||
835 | } | |||
836 | ||||
837 | // Custom-legalize bitcasts from fixed-length vectors to scalar types. | |||
838 | setOperationAction(ISD::BITCAST, MVT::i8, Custom); | |||
839 | setOperationAction(ISD::BITCAST, MVT::i16, Custom); | |||
840 | setOperationAction(ISD::BITCAST, MVT::i32, Custom); | |||
841 | setOperationAction(ISD::BITCAST, MVT::i64, Custom); | |||
842 | setOperationAction(ISD::BITCAST, MVT::f16, Custom); | |||
843 | setOperationAction(ISD::BITCAST, MVT::f32, Custom); | |||
844 | setOperationAction(ISD::BITCAST, MVT::f64, Custom); | |||
845 | } | |||
846 | } | |||
847 | ||||
848 | // Function alignments. | |||
849 | const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4); | |||
850 | setMinFunctionAlignment(FunctionAlignment); | |||
851 | setPrefFunctionAlignment(FunctionAlignment); | |||
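// Editor's note (illustrative rationale, matching the ternary above): with
// the C extension, instructions can be 16 bits wide, so functions only need
// 2-byte alignment; without it every instruction is 32 bits and 4-byte
// alignment is required.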
852 | ||||
853 | setMinimumJumpTableEntries(5); | |||
854 | ||||
855 | // Jumps are expensive, compared to logic | |||
856 | setJumpIsExpensive(); | |||
857 | ||||
858 | // We can use any register for comparisons | |||
859 | setHasMultipleConditionRegisters(); | |||
860 | ||||
861 | setTargetDAGCombine(ISD::ADD); | |||
862 | setTargetDAGCombine(ISD::SUB); | |||
863 | setTargetDAGCombine(ISD::AND); | |||
864 | setTargetDAGCombine(ISD::OR); | |||
865 | setTargetDAGCombine(ISD::XOR); | |||
866 | setTargetDAGCombine(ISD::ANY_EXTEND); | |||
867 | setTargetDAGCombine(ISD::ZERO_EXTEND); | |||
868 | if (Subtarget.hasStdExtV()) { | |||
869 | setTargetDAGCombine(ISD::FCOPYSIGN); | |||
870 | setTargetDAGCombine(ISD::MGATHER); | |||
871 | setTargetDAGCombine(ISD::MSCATTER); | |||
872 | setTargetDAGCombine(ISD::SRA); | |||
873 | setTargetDAGCombine(ISD::SRL); | |||
874 | setTargetDAGCombine(ISD::SHL); | |||
875 | } | |||
876 | } | |||
877 | ||||
878 | EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL, | |||
879 | LLVMContext &Context, | |||
880 | EVT VT) const { | |||
881 | if (!VT.isVector()) | |||
882 | return getPointerTy(DL); | |||
883 | if (Subtarget.hasStdExtV() && | |||
884 | (VT.isScalableVector() || Subtarget.useRVVForFixedLengthVectors())) | |||
885 | return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount()); | |||
886 | return VT.changeVectorElementTypeToInteger(); | |||
887 | } | |||
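// Editor's note: illustrative outcomes of the above: a scalar setcc yields
// the pointer-width integer (i32 on RV32, i64 on RV64); with RVV available,
// a setcc on e.g. nxv4f32 yields the matching mask type nxv4i1; otherwise
// the vector element type is simply replaced by an integer of the same
// width.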
888 | ||||
889 | MVT RISCVTargetLowering::getVPExplicitVectorLengthTy() const { | |||
890 | return Subtarget.getXLenVT(); | |||
891 | } | |||
892 | ||||
893 | bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, | |||
894 | const CallInst &I, | |||
895 | MachineFunction &MF, | |||
896 | unsigned Intrinsic) const { | |||
897 | switch (Intrinsic) { | |||
898 | default: | |||
899 | return false; | |||
900 | case Intrinsic::riscv_masked_atomicrmw_xchg_i32: | |||
901 | case Intrinsic::riscv_masked_atomicrmw_add_i32: | |||
902 | case Intrinsic::riscv_masked_atomicrmw_sub_i32: | |||
903 | case Intrinsic::riscv_masked_atomicrmw_nand_i32: | |||
904 | case Intrinsic::riscv_masked_atomicrmw_max_i32: | |||
905 | case Intrinsic::riscv_masked_atomicrmw_min_i32: | |||
906 | case Intrinsic::riscv_masked_atomicrmw_umax_i32: | |||
907 | case Intrinsic::riscv_masked_atomicrmw_umin_i32: | |||
908 | case Intrinsic::riscv_masked_cmpxchg_i32: { | |||
909 | PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType()); | |||
910 | Info.opc = ISD::INTRINSIC_W_CHAIN; | |||
911 | Info.memVT = MVT::getVT(PtrTy->getElementType()); | |||
912 | Info.ptrVal = I.getArgOperand(0); | |||
913 | Info.offset = 0; | |||
914 | Info.align = Align(4); | |||
915 | Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore | | |||
916 | MachineMemOperand::MOVolatile; | |||
917 | return true; | |||
918 | } | |||
919 | } | |||
920 | } | |||
921 | ||||
922 | bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL, | |||
923 | const AddrMode &AM, Type *Ty, | |||
924 | unsigned AS, | |||
925 | Instruction *I) const { | |||
926 | // No global is ever allowed as a base. | |||
927 | if (AM.BaseGV) | |||
928 | return false; | |||
929 | ||||
930 | // Require a 12-bit signed offset. | |||
931 | if (!isInt<12>(AM.BaseOffs)) | |||
932 | return false; | |||
933 | ||||
934 | switch (AM.Scale) { | |||
935 | case 0: // "r+i" or just "i", depending on HasBaseReg. | |||
936 | break; | |||
937 | case 1: | |||
938 | if (!AM.HasBaseReg) // allow "r+i". | |||
939 | break; | |||
940 | return false; // disallow "r+r" or "r+r+i". | |||
941 | default: | |||
942 | return false; | |||
943 | } | |||
944 | ||||
945 | return true; | |||
946 | } | |||
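// Editor's note: a minimal standalone mirror (plain C++; illustrative, not
// the LLVM hook itself) of the rules above. RISC-V load/store offsets are
// 12-bit signed immediates, i.e. the range [-2048, 2047]:
#include <cstdint>
#include <cstdio>

static bool isLegalRISCVAddrMode(bool HasGlobalBase, int64_t Offs,
                                 int64_t Scale, bool HasBaseReg) {
  if (HasGlobalBase)
    return false;                // no global is ever allowed as a base
  if (Offs < -2048 || Offs > 2047)
    return false;                // offset must fit in 12 signed bits
  if (Scale == 0)
    return true;                 // "r+i" or plain "i"
  if (Scale == 1 && !HasBaseReg)
    return true;                 // the scale-1 index acts as the base: "r+i"
  return false;                  // "r+r", "r+r+i", or a scaled index
}

int main() {
  std::printf("%d\n", isLegalRISCVAddrMode(false, 2047, 0, true)); // 1
  std::printf("%d\n", isLegalRISCVAddrMode(false, 2048, 0, true)); // 0
  std::printf("%d\n", isLegalRISCVAddrMode(false, 0, 1, true));    // 0 (r+r)
  return 0;
}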
947 | ||||
948 | bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const { | |||
949 | return isInt<12>(Imm); | |||
950 | } | |||
951 | ||||
952 | bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const { | |||
953 | return isInt<12>(Imm); | |||
954 | } | |||
955 | ||||
956 | // On RV32, 64-bit integers are split into their high and low parts and held | |||
957 | // in two different registers, so the trunc is free since the low register can | |||
958 | // just be used. | |||
959 | bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const { | |||
960 | if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy()) | |||
961 | return false; | |||
962 | unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); | |||
963 | unsigned DestBits = DstTy->getPrimitiveSizeInBits(); | |||
964 | return (SrcBits == 64 && DestBits == 32); | |||
965 | } | |||
966 | ||||
967 | bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const { | |||
968 | if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() || | |||
969 | !SrcVT.isInteger() || !DstVT.isInteger()) | |||
970 | return false; | |||
971 | unsigned SrcBits = SrcVT.getSizeInBits(); | |||
972 | unsigned DestBits = DstVT.getSizeInBits(); | |||
973 | return (SrcBits == 64 && DestBits == 32); | |||
974 | } | |||
975 | ||||
976 | bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const { | |||
977 | // Zexts are free if they can be combined with a load. | |||
978 | if (auto *LD = dyn_cast<LoadSDNode>(Val)) { | |||
| ||||
979 | EVT MemVT = LD->getMemoryVT(); | |||
980 | if ((MemVT == MVT::i8 || MemVT == MVT::i16 || | |||
981 | (Subtarget.is64Bit() && MemVT == MVT::i32)) && | |||
982 | (LD->getExtensionType() == ISD::NON_EXTLOAD || | |||
983 | LD->getExtensionType() == ISD::ZEXTLOAD)) | |||
984 | return true; | |||
985 | } | |||
986 | ||||
987 | return TargetLowering::isZExtFree(Val, VT2); | |||
988 | } | |||
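// Editor's note: for illustration, a pattern such as (zext (load i8, p)) is
// selected as a single LBU on RISC-V (LHU for i16, and LWU for i32 on RV64),
// so the zero-extension folds into the load and is reported as free here.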
989 | ||||
990 | bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const { | |||
991 | return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64; | |||
992 | } | |||
993 | ||||
994 | bool RISCVTargetLowering::isCheapToSpeculateCttz() const { | |||
995 | return Subtarget.hasStdExtZbb(); | |||
996 | } | |||
997 | ||||
998 | bool RISCVTargetLowering::isCheapToSpeculateCtlz() const { | |||
999 | return Subtarget.hasStdExtZbb(); | |||
1000 | } | |||
1001 | ||||
1002 | bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT, | |||
1003 | bool ForCodeSize) const { | |||
1004 | if (VT == MVT::f16 && !Subtarget.hasStdExtZfh()) | |||
1005 | return false; | |||
1006 | if (VT == MVT::f32 && !Subtarget.hasStdExtF()) | |||
1007 | return false; | |||
1008 | if (VT == MVT::f64 && !Subtarget.hasStdExtD()) | |||
1009 | return false; | |||
1010 | if (Imm.isNegZero()) | |||
1011 | return false; | |||
1012 | return Imm.isZero(); | |||
1013 | } | |||
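// Editor's note: the net effect of the above is that only +0.0 is a legal FP
// immediate (it can be materialised by moving x0 into an FP register);
// -0.0 and every other constant must be loaded or constructed instead.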
1014 | ||||
1015 | bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const { | |||
1016 | return (VT == MVT::f16 && Subtarget.hasStdExtZfh()) || | |||
1017 | (VT == MVT::f32 && Subtarget.hasStdExtF()) || | |||
1018 | (VT == MVT::f64 && Subtarget.hasStdExtD()); | |||
1019 | } | |||
1020 | ||||
1021 | MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context, | |||
1022 | CallingConv::ID CC, | |||
1023 | EVT VT) const { | |||
1024 | // Use f32 to pass f16 if it is legal and Zfh is not enabled. We might still | |||
1025 | // end up using a GPR but that will be decided based on ABI. | |||
1026 | if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh()) | |||
1027 | return MVT::f32; | |||
1028 | ||||
1029 | return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT); | |||
1030 | } | |||
1031 | ||||
1032 | unsigned RISCVTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context, | |||
1033 | CallingConv::ID CC, | |||
1034 | EVT VT) const { | |||
1035 | // Use f32 to pass f16 if it is legal and Zfh is not enabled. We might still | |||
1036 | // end up using a GPR but that will be decided based on ABI. | |||
1037 | if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh()) | |||
1038 | return 1; | |||
1039 | ||||
1040 | return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT); | |||
1041 | } | |||
1042 | ||||
1043 | // Changes the condition code and swaps operands if necessary, so the SetCC | |||
1044 | // operation matches one of the comparisons supported directly by branches | |||
1045 | // in the RISC-V ISA. May adjust compares to favor compare with 0 over compare | |||
1046 | // with 1/-1. | |||
1047 | static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS, | |||
1048 | ISD::CondCode &CC, SelectionDAG &DAG) { | |||
1049 | // Convert X > -1 to X >= 0. | |||
1050 | if (CC == ISD::SETGT && isAllOnesConstant(RHS)) { | |||
1051 | RHS = DAG.getConstant(0, DL, RHS.getValueType()); | |||
1052 | CC = ISD::SETGE; | |||
1053 | return; | |||
1054 | } | |||
1055 | // Convert X < 1 to 0 >= X. | |||
1056 | if (CC == ISD::SETLT && isOneConstant(RHS)) { | |||
1057 | RHS = LHS; | |||
1058 | LHS = DAG.getConstant(0, DL, RHS.getValueType()); | |||
1059 | CC = ISD::SETGE; | |||
1060 | return; | |||
1061 | } | |||
1062 | ||||
1063 | switch (CC) { | |||
1064 | default: | |||
1065 | break; | |||
1066 | case ISD::SETGT: | |||
1067 | case ISD::SETLE: | |||
1068 | case ISD::SETUGT: | |||
1069 | case ISD::SETULE: | |||
1070 | CC = ISD::getSetCCSwappedOperands(CC); | |||
1071 | std::swap(LHS, RHS); | |||
1072 | break; | |||
1073 | } | |||
1074 | } | |||
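// Editor's note: a standalone mirror (plain C++; names are illustrative, not
// LLVM's) of the canonicalisation above. RISC-V branches directly encode
// eq/ne/lt/ge and their unsigned forms; gt/le/ugt/ule are reached by swapping
// operands, and two immediate cases fold to a compare against zero.
#include <cstdio>
#include <string>

struct Cmp { std::string LHS, RHS, CC; };

static Cmp canonicalise(Cmp C) {
  if (C.CC == "gt" && C.RHS == "-1")
    return {C.LHS, "0", "ge"};                  // X > -1  ->  X >= 0
  if (C.CC == "lt" && C.RHS == "1")
    return {"0", C.LHS, "ge"};                  // X < 1   ->  0 >= X
  if (C.CC == "gt" || C.CC == "le" || C.CC == "ugt" || C.CC == "ule") {
    std::string Swapped = C.CC == "gt"  ? "lt"
                        : C.CC == "le"  ? "ge"
                        : C.CC == "ugt" ? "ult" : "uge";
    return {C.RHS, C.LHS, Swapped};             // swap operands and condition
  }
  return C;                                     // already branch-friendly
}

int main() {
  Cmp C = canonicalise({"a", "b", "ugt"});
  std::printf("%s %s %s\n", C.LHS.c_str(), C.CC.c_str(), C.RHS.c_str());
  // prints: b ult a
  return 0;
}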
1075 | ||||
1076 | RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) { | |||
1077 | assert(VT.isScalableVector() && "Expecting a scalable vector type"); | |||
1078 | unsigned KnownSize = VT.getSizeInBits().getKnownMinValue(); | |||
1079 | if (VT.getVectorElementType() == MVT::i1) | |||
1080 | KnownSize *= 8; | |||
1081 | ||||
1082 | switch (KnownSize) { | |||
1083 | default: | |||
1084 | llvm_unreachable("Invalid LMUL."); | |||
1085 | case 8: | |||
1086 | return RISCVII::VLMUL::LMUL_F8; | |||
1087 | case 16: | |||
1088 | return RISCVII::VLMUL::LMUL_F4; | |||
1089 | case 32: | |||
1090 | return RISCVII::VLMUL::LMUL_F2; | |||
1091 | case 64: | |||
1092 | return RISCVII::VLMUL::LMUL_1; | |||
1093 | case 128: | |||
1094 | return RISCVII::VLMUL::LMUL_2; | |||
1095 | case 256: | |||
1096 | return RISCVII::VLMUL::LMUL_4; | |||
1097 | case 512: | |||
1098 | return RISCVII::VLMUL::LMUL_8; | |||
1099 | } | |||
1100 | } | |||
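// Editor's note: worked examples for the mapping above (illustrative,
// assuming the 64-bit known-minimum block size used by the switch):
//   nxv1i8  ->   8-bit known-min size -> LMUL_F8 (1/8 of a register)
//   nxv1i64 ->  64 bits               -> LMUL_1
//   nxv4i32 -> 128 bits               -> LMUL_2
//   nxv8i64 -> 512 bits               -> LMUL_8
//   nxv8i1  ->   8 bits, scaled by 8  -> LMUL_1 (the i1 scaling places a
//               mask as if it were the corresponding byte-element vector)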
1101 | ||||
1102 | unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVII::VLMUL LMul) { | |||
1103 | switch (LMul) { | |||
1104 | default: | |||
1105 | llvm_unreachable("Invalid LMUL."); | |||
1106 | case RISCVII::VLMUL::LMUL_F8: | |||
1107 | case RISCVII::VLMUL::LMUL_F4: | |||
1108 | case RISCVII::VLMUL::LMUL_F2: | |||
1109 | case RISCVII::VLMUL::LMUL_1: | |||
1110 | return RISCV::VRRegClassID; | |||
1111 | case RISCVII::VLMUL::LMUL_2: | |||
1112 | return RISCV::VRM2RegClassID; | |||
1113 | case RISCVII::VLMUL::LMUL_4: | |||
1114 | return RISCV::VRM4RegClassID; | |||
1115 | case RISCVII::VLMUL::LMUL_8: | |||
1116 | return RISCV::VRM8RegClassID; | |||
1117 | } | |||
1118 | } | |||
1119 | ||||
1120 | unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) { | |||
1121 | RISCVII::VLMUL LMUL = getLMUL(VT); | |||
1122 | if (LMUL == RISCVII::VLMUL::LMUL_F8 || | |||
1123 | LMUL == RISCVII::VLMUL::LMUL_F4 || | |||
1124 | LMUL == RISCVII::VLMUL::LMUL_F2 || | |||
1125 | LMUL == RISCVII::VLMUL::LMUL_1) { | |||
1126 | static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7, | |||
1127 | "Unexpected subreg numbering"); | |||
1128 | return RISCV::sub_vrm1_0 + Index; | |||
1129 | } | |||
1130 | if (LMUL == RISCVII::VLMUL::LMUL_2) { | |||
1131 | static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3, | |||
1132 | "Unexpected subreg numbering"); | |||
1133 | return RISCV::sub_vrm2_0 + Index; | |||
1134 | } | |||
1135 | if (LMUL == RISCVII::VLMUL::LMUL_4) { | |||
1136 | static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1, | |||
1137 | "Unexpected subreg numbering"); | |||
1138 | return RISCV::sub_vrm4_0 + Index; | |||
1139 | } | |||
1140 | llvm_unreachable("Invalid vector type."); | |||
1141 | } | |||
1142 | ||||
1143 | unsigned RISCVTargetLowering::getRegClassIDForVecVT(MVT VT) { | |||
1144 | if (VT.getVectorElementType() == MVT::i1) | |||
1145 | return RISCV::VRRegClassID; | |||
1146 | return getRegClassIDForLMUL(getLMUL(VT)); | |||
1147 | } | |||
1148 | ||||
1149 | // Attempt to decompose a subvector insert/extract between VecVT and | |||
1150 | // SubVecVT via subregister indices. Returns the subregister index that | |||
1151 | // can perform the subvector insert/extract with the given element index, as | |||
1152 | // well as the index corresponding to any leftover subvectors that must be | |||
1153 | // further inserted/extracted within the register class for SubVecVT. | |||
1154 | std::pair<unsigned, unsigned> | |||
1155 | RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs( | |||
1156 | MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx, | |||
1157 | const RISCVRegisterInfo *TRI) { | |||
1158 | static_assert((RISCV::VRM8RegClassID > RISCV::VRM4RegClassID && | |||
1159 | RISCV::VRM4RegClassID > RISCV::VRM2RegClassID && | |||
1160 | RISCV::VRM2RegClassID > RISCV::VRRegClassID), | |||
1161 | "Register classes not ordered"); | |||
1162 | unsigned VecRegClassID = getRegClassIDForVecVT(VecVT); | |||
1163 | unsigned SubRegClassID = getRegClassIDForVecVT(SubVecVT); | |||
1164 | // Try to compose a subregister index that takes us from the incoming | |||
1165 | // LMUL>1 register class down to the outgoing one. At each step we half | |||
1166 | // the LMUL: | |||
1167 | // nxv16i32@12 -> nxv2i32: sub_vrm4_1_then_sub_vrm2_1_then_sub_vrm1_0 | |||
1168 | // Note that this is not guaranteed to find a subregister index, such as | |||
1169 | // when we are extracting from one VR type to another. | |||
1170 | unsigned SubRegIdx = RISCV::NoSubRegister; | |||
1171 | for (const unsigned RCID : | |||
1172 | {RISCV::VRM4RegClassID, RISCV::VRM2RegClassID, RISCV::VRRegClassID}) | |||
1173 | if (VecRegClassID > RCID && SubRegClassID <= RCID) { | |||
1174 | VecVT = VecVT.getHalfNumVectorElementsVT(); | |||
1175 | bool IsHi = | |||
1176 | InsertExtractIdx >= VecVT.getVectorElementCount().getKnownMinValue(); | |||
1177 | SubRegIdx = TRI->composeSubRegIndices(SubRegIdx, | |||
1178 | getSubregIndexByMVT(VecVT, IsHi)); | |||
1179 | if (IsHi) | |||
1180 | InsertExtractIdx -= VecVT.getVectorElementCount().getKnownMinValue(); | |||
1181 | } | |||
1182 | return {SubRegIdx, InsertExtractIdx}; | |||
1183 | } | |||
1184 | ||||
1185 | // Permit combining of mask vectors as BUILD_VECTOR never expands to scalar | |||
1186 | // stores for those types. | |||
1187 | bool RISCVTargetLowering::mergeStoresAfterLegalization(EVT VT) const { | |||
1188 | return !Subtarget.useRVVForFixedLengthVectors() || | |||
1189 | (VT.isFixedLengthVector() && VT.getVectorElementType() == MVT::i1); | |||
1190 | } | |||
1191 | ||||
1192 | static bool useRVVForFixedLengthVectorVT(MVT VT, | |||
1193 | const RISCVSubtarget &Subtarget) { | |||
1194 | assert(VT.isFixedLengthVector() && "Expected a fixed length vector type!"); | |||
1195 | if (!Subtarget.useRVVForFixedLengthVectors()) | |||
1196 | return false; | |||
1197 | ||||
1198 | // We only support a set of vector types with a consistent maximum fixed size | |||
1199 | // across all supported vector element types to avoid legalization issues. | |||
1200 | // Therefore -- since the largest is v1024i8/v512i16/etc -- the largest | |||
1201 | // fixed-length vector type we support is 1024 bytes. | |||
1202 | if (VT.getFixedSizeInBits() > 1024 * 8) | |||
1203 | return false; | |||
1204 | ||||
1205 | unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits(); | |||
1206 | ||||
1207 | MVT EltVT = VT.getVectorElementType(); | |||
1208 | ||||
1209 | // Don't use RVV for vectors we cannot scalarize if required. | |||
1210 | switch (EltVT.SimpleTy) { | |||
1211 | // i1 is supported but has different rules. | |||
1212 | default: | |||
1213 | return false; | |||
1214 | case MVT::i1: | |||
1215 | // Masks can only use a single register. | |||
1216 | if (VT.getVectorNumElements() > MinVLen) | |||
1217 | return false; | |||
1218 | MinVLen /= 8; | |||
1219 | break; | |||
1220 | case MVT::i8: | |||
1221 | case MVT::i16: | |||
1222 | case MVT::i32: | |||
1223 | case MVT::i64: | |||
1224 | break; | |||
1225 | case MVT::f16: | |||
1226 | if (!Subtarget.hasStdExtZfh()) | |||
1227 | return false; | |||
1228 | break; | |||
1229 | case MVT::f32: | |||
1230 | if (!Subtarget.hasStdExtF()) | |||
1231 | return false; | |||
1232 | break; | |||
1233 | case MVT::f64: | |||
1234 | if (!Subtarget.hasStdExtD()) | |||
1235 | return false; | |||
1236 | break; | |||
1237 | } | |||
1238 | ||||
1239 | // Reject elements larger than ELEN. | |||
1240 | if (EltVT.getSizeInBits() > Subtarget.getMaxELENForFixedLengthVectors()) | |||
1241 | return false; | |||
1242 | ||||
1243 | unsigned LMul = divideCeil(VT.getSizeInBits(), MinVLen); | |||
1244 | // Don't use RVV for types that don't fit. | |||
1245 | if (LMul > Subtarget.getMaxLMULForFixedLengthVectors()) | |||
1246 | return false; | |||
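// As an illustration, assuming a 128-bit MinVLen: v8i64 occupies 512 bits,
// giving LMul = 4, which is accepted; v32i64 occupies 2048 bits, giving
// LMul = 16, which exceeds even the largest configurable maximum of 8.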
1247 | ||||
1248 | // TODO: Perhaps an artificial restriction, but worth having whilst getting | |||
1249 | // the base fixed length RVV support in place. | |||
1250 | if (!VT.isPow2VectorType()) | |||
1251 | return false; | |||
1252 | ||||
1253 | return true; | |||
1254 | } | |||
1255 | ||||
1256 | bool RISCVTargetLowering::useRVVForFixedLengthVectorVT(MVT VT) const { | |||
1257 | return ::useRVVForFixedLengthVectorVT(VT, Subtarget); | |||
1258 | } | |||
1259 | ||||
1260 | // Return the largest legal scalable vector type that matches VT's element type. | |||
1261 | static MVT getContainerForFixedLengthVector(const TargetLowering &TLI, MVT VT, | |||
1262 | const RISCVSubtarget &Subtarget) { | |||
1263 | // This may be called before legal types are set up. | |||
1264 | assert(((VT.isFixedLengthVector() && TLI.isTypeLegal(VT)) || | |||
1265 | useRVVForFixedLengthVectorVT(VT, Subtarget)) && | |||
1266 | "Expected legal fixed length vector!"); | |||
1267 | ||||
1268 | unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits(); | |||
1269 | unsigned MaxELen = Subtarget.getMaxELENForFixedLengthVectors(); | |||
1270 | ||||
1271 | MVT EltVT = VT.getVectorElementType(); | |||
1272 | switch (EltVT.SimpleTy) { | |||
1273 | default: | |||
1274 | llvm_unreachable("unexpected element type for RVV container"); | |||
1275 | case MVT::i1: | |||
1276 | case MVT::i8: | |||
1277 | case MVT::i16: | |||
1278 | case MVT::i32: | |||
1279 | case MVT::i64: | |||
1280 | case MVT::f16: | |||
1281 | case MVT::f32: | |||
1282 | case MVT::f64: { | |||
1283 | // We prefer to use LMUL=1 for VLEN sized types. Use fractional lmuls for | |||
1284 | // narrower types. The smallest fractional LMUL we support is 8/ELEN. Within | |||
1285 | // each fractional LMUL we support SEW between 8 and LMUL*ELEN. | |||
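// As an illustration, assuming MinVLen=128 and MaxELen=64: v4i32 yields
// (4 * 64) / 128 = 2 elements, i.e. nxv2i32 (LMUL=1 at VLEN=128), while
// v2i8 yields nxv1i8, a fractional LMUL.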
1286 | unsigned NumElts = | |||
1287 | (VT.getVectorNumElements() * RISCV::RVVBitsPerBlock) / MinVLen; | |||
1288 | NumElts = std::max(NumElts, RISCV::RVVBitsPerBlock / MaxELen); | |||
1289 | assert(isPowerOf2_32(NumElts) && "Expected power of 2 NumElts"); | |||
1290 | return MVT::getScalableVectorVT(EltVT, NumElts); | |||
1291 | } | |||
1292 | } | |||
1293 | } | |||
1294 | ||||
1295 | static MVT getContainerForFixedLengthVector(SelectionDAG &DAG, MVT VT, | |||
1296 | const RISCVSubtarget &Subtarget) { | |||
1297 | return getContainerForFixedLengthVector(DAG.getTargetLoweringInfo(), VT, | |||
1298 | Subtarget); | |||
1299 | } | |||
1300 | ||||
1301 | MVT RISCVTargetLowering::getContainerForFixedLengthVector(MVT VT) const { | |||
1302 | return ::getContainerForFixedLengthVector(*this, VT, getSubtarget()); | |||
1303 | } | |||
1304 | ||||
1305 | // Grow V to consume an entire RVV register. | |||
1306 | static SDValue convertToScalableVector(EVT VT, SDValue V, SelectionDAG &DAG, | |||
1307 | const RISCVSubtarget &Subtarget) { | |||
1308 | assert(VT.isScalableVector() && | |||
1309 | "Expected to convert into a scalable vector!"); | |||
1310 | assert(V.getValueType().isFixedLengthVector() && | |||
1311 | "Expected a fixed length vector operand!"); | |||
1312 | SDLoc DL(V); | |||
1313 | SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT()); | |||
1314 | return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero); | |||
1315 | } | |||
1316 | ||||
1317 | // Shrink V so it's just big enough to maintain a VT's worth of data. | |||
1318 | static SDValue convertFromScalableVector(EVT VT, SDValue V, SelectionDAG &DAG, | |||
1319 | const RISCVSubtarget &Subtarget) { | |||
1320 | assert(VT.isFixedLengthVector() && | |||
1321 | "Expected to convert into a fixed length vector!"); | |||
1322 | assert(V.getValueType().isScalableVector() && | |||
1323 | "Expected a scalable vector operand!"); | |||
1324 | SDLoc DL(V); | |||
1325 | SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT()); | |||
1326 | return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero); | |||
1327 | } | |||
1328 | ||||
1329 | // Gets the two common "VL" operands: an all-ones mask and the vector length. | |||
1330 | // VecVT is a vector type, either fixed-length or scalable, and ContainerVT is | |||
1331 | // the vector type that it is contained in. | |||
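// For scalable types the VL operand is the x0 register, which RVV treats
// as a request for VLMAX.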
1332 | static std::pair<SDValue, SDValue> | |||
1333 | getDefaultVLOps(MVT VecVT, MVT ContainerVT, SDLoc DL, SelectionDAG &DAG, | |||
1334 | const RISCVSubtarget &Subtarget) { | |||
1335 | assert(ContainerVT.isScalableVector() && "Expecting scalable container type"); | |||
1336 | MVT XLenVT = Subtarget.getXLenVT(); | |||
1337 | SDValue VL = VecVT.isFixedLengthVector() | |||
1338 | ? DAG.getConstant(VecVT.getVectorNumElements(), DL, XLenVT) | |||
1339 | : DAG.getRegister(RISCV::X0, XLenVT); | |||
1340 | MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount()); | |||
1341 | SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL); | |||
1342 | return {Mask, VL}; | |||
1343 | } | |||
1344 | ||||
1345 | // As above but assuming the given type is a scalable vector type. | |||
1346 | static std::pair<SDValue, SDValue> | |||
1347 | getDefaultScalableVLOps(MVT VecVT, SDLoc DL, SelectionDAG &DAG, | |||
1348 | const RISCVSubtarget &Subtarget) { | |||
1349 | assert(VecVT.isScalableVector() && "Expecting a scalable vector"); | |||
1350 | return getDefaultVLOps(VecVT, VecVT, DL, DAG, Subtarget); | |||
1351 | } | |||
1352 | ||||
1353 | // The state of RVV BUILD_VECTOR and VECTOR_SHUFFLE lowering is that very | |||
1354 | // little of either is (currently) supported. This can get us into an infinite loop | |||
1355 | // where we try to lower a BUILD_VECTOR as a VECTOR_SHUFFLE as a BUILD_VECTOR | |||
1356 | // as a ..., etc. | |||
1357 | // Until either (or both) of these can reliably lower any node, reporting that | |||
1358 | // we don't want to expand BUILD_VECTORs via VECTOR_SHUFFLEs at least breaks | |||
1359 | // the infinite loop. Note that this lowers BUILD_VECTOR through the stack, | |||
1360 | // which is not desirable. | |||
1361 | bool RISCVTargetLowering::shouldExpandBuildVectorWithShuffles( | |||
1362 | EVT VT, unsigned DefinedValues) const { | |||
1363 | return false; | |||
1364 | } | |||
1365 | ||||
1366 | bool RISCVTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const { | |||
1367 | // Only splats are currently supported. | |||
1368 | if (ShuffleVectorSDNode::isSplatMask(M.data(), VT)) | |||
1369 | return true; | |||
1370 | ||||
1371 | return false; | |||
1372 | } | |||
1373 | ||||
1374 | static SDValue lowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG) { | |||
1375 | // RISCV FP-to-int conversions saturate to the destination register size, but | |||
1376 | // don't produce 0 for nan. We can use a conversion instruction and fix the | |||
1377 | // nan case with a compare and a select. | |||
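// For example, an f32 -> i32 @llvm.fptosi.sat becomes FCVT_X_RTZ followed
// by a select on (Src uno Src), which substitutes 0 whenever Src is NaN.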
1378 | SDValue Src = Op.getOperand(0); | |||
1379 | ||||
1380 | EVT DstVT = Op.getValueType(); | |||
1381 | EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); | |||
1382 | ||||
1383 | bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT_SAT; | |||
1384 | unsigned Opc; | |||
1385 | if (SatVT == DstVT) | |||
1386 | Opc = IsSigned ? RISCVISD::FCVT_X_RTZ : RISCVISD::FCVT_XU_RTZ; | |||
1387 | else if (DstVT == MVT::i64 && SatVT == MVT::i32) | |||
1388 | Opc = IsSigned ? RISCVISD::FCVT_W_RTZ_RV64 : RISCVISD::FCVT_WU_RTZ_RV64; | |||
1389 | else | |||
1390 | return SDValue(); | |||
1391 | // FIXME: Support other SatVTs by clamping before or after the conversion. | |||
1392 | ||||
1393 | SDLoc DL(Op); | |||
1394 | SDValue FpToInt = DAG.getNode(Opc, DL, DstVT, Src); | |||
1395 | ||||
1396 | SDValue ZeroInt = DAG.getConstant(0, DL, DstVT); | |||
1397 | return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO); | |||
1398 | } | |||
1399 | ||||
1400 | static SDValue lowerSPLAT_VECTOR(SDValue Op, SelectionDAG &DAG, | |||
1401 | const RISCVSubtarget &Subtarget) { | |||
1402 | MVT VT = Op.getSimpleValueType(); | |||
1403 | assert(VT.isFixedLengthVector() && "Unexpected vector!"); | |||
1404 | ||||
1405 | MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget); | |||
1406 | ||||
1407 | SDLoc DL(Op); | |||
1408 | SDValue Mask, VL; | |||
1409 | std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); | |||
1410 | ||||
1411 | unsigned Opc = | |||
1412 | VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL; | |||
1413 | SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, Op.getOperand(0), VL); | |||
1414 | return convertFromScalableVector(VT, Splat, DAG, Subtarget); | |||
1415 | } | |||
1416 | ||||
1417 | struct VIDSequence { | |||
1418 | int64_t StepNumerator; | |||
1419 | unsigned StepDenominator; | |||
1420 | int64_t Addend; | |||
1421 | }; | |||
1422 | ||||
1423 | // Try to match an arithmetic-sequence BUILD_VECTOR [X,X+S,X+2*S,...,X+(N-1)*S] | |||
1424 | // to the (non-zero) step S and start value X. This can then be lowered as the | |||
1425 | // RVV sequence (VID * S) + X, for example. | |||
1426 | // The step S is represented as an integer numerator divided by a positive | |||
1427 | // denominator. Note that the implementation currently only identifies | |||
1428 | // sequences in which either the numerator is +/- 1 or the denominator is 1. It | |||
1429 | // cannot detect 2/3, for example. | |||
1430 | // Note that this method will also match potentially unappealing index | |||
1431 | // sequences, like <i32 0, i32 50939494>; however, it is left to the caller to | |||
1432 | // determine whether this is worth generating code for. | |||
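// For example, <0,2,4,6> matches with S = 2/1 and X = 0, and <1,1,2,2>
// matches with S = 1/2 and X = 1, i.e. (VID >> 1) + 1.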
1433 | static Optional<VIDSequence> isSimpleVIDSequence(SDValue Op) { | |||
1434 | unsigned NumElts = Op.getNumOperands(); | |||
1435 | assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unexpected BUILD_VECTOR"); | |||
1436 | if (!Op.getValueType().isInteger()) | |||
1437 | return None; | |||
1438 | ||||
1439 | Optional<unsigned> SeqStepDenom; | |||
1440 | Optional<int64_t> SeqStepNum, SeqAddend; | |||
1441 | Optional<std::pair<uint64_t, unsigned>> PrevElt; | |||
1442 | unsigned EltSizeInBits = Op.getValueType().getScalarSizeInBits(); | |||
1443 | for (unsigned Idx = 0; Idx < NumElts; Idx++) { | |||
1444 | // Assume undef elements match the sequence; we just have to be careful | |||
1445 | // when interpolating across them. | |||
1446 | if (Op.getOperand(Idx).isUndef()) | |||
1447 | continue; | |||
1448 | // The BUILD_VECTOR must be all constants. | |||
1449 | if (!isa<ConstantSDNode>(Op.getOperand(Idx))) | |||
1450 | return None; | |||
1451 | ||||
1452 | uint64_t Val = Op.getConstantOperandVal(Idx) & | |||
1453 | maskTrailingOnes<uint64_t>(EltSizeInBits); | |||
1454 | ||||
1455 | if (PrevElt) { | |||
1456 | // Calculate the step since the last non-undef element, and ensure | |||
1457 | // it's consistent across the entire sequence. | |||
1458 | unsigned IdxDiff = Idx - PrevElt->second; | |||
1459 | int64_t ValDiff = SignExtend64(Val - PrevElt->first, EltSizeInBits); | |||
1460 | ||||
1461 | // A value difference of zero means that we're somewhere in the middle | |||
1462 | // of a fractional step, e.g. <0,0,0*,0,1,1,1,1>. Wait until we notice a | |||
1463 | // step change before evaluating the sequence. | |||
1464 | if (ValDiff != 0) { | |||
1465 | int64_t Remainder = ValDiff % IdxDiff; | |||
1466 | // Normalize the step if it's greater than 1. | |||
1467 | if (Remainder != ValDiff) { | |||
1468 | // The difference must cleanly divide the element span. | |||
1469 | if (Remainder != 0) | |||
1470 | return None; | |||
1471 | ValDiff /= IdxDiff; | |||
1472 | IdxDiff = 1; | |||
1473 | } | |||
1474 | ||||
1475 | if (!SeqStepNum) | |||
1476 | SeqStepNum = ValDiff; | |||
1477 | else if (ValDiff != SeqStepNum) | |||
1478 | return None; | |||
1479 | ||||
1480 | if (!SeqStepDenom) | |||
1481 | SeqStepDenom = IdxDiff; | |||
1482 | else if (IdxDiff != *SeqStepDenom) | |||
1483 | return None; | |||
1484 | } | |||
1485 | } | |||
1486 | ||||
1487 | // Record and/or check any addend. | |||
1488 | if (SeqStepNum && SeqStepDenom) { | |||
1489 | uint64_t ExpectedVal = | |||
1490 | (int64_t)(Idx * (uint64_t)*SeqStepNum) / *SeqStepDenom; | |||
1491 | int64_t Addend = SignExtend64(Val - ExpectedVal, EltSizeInBits); | |||
1492 | if (!SeqAddend) | |||
1493 | SeqAddend = Addend; | |||
1494 | else if (SeqAddend != Addend) | |||
1495 | return None; | |||
1496 | } | |||
1497 | ||||
1498 | // Record this non-undef element for later. | |||
1499 | if (!PrevElt || PrevElt->first != Val) | |||
1500 | PrevElt = std::make_pair(Val, Idx); | |||
1501 | } | |||
1502 | // We need to have logged both a step and an addend for this to count as | |||
1503 | // a legal index sequence. | |||
1504 | if (!SeqStepNum || !SeqStepDenom || !SeqAddend) | |||
1505 | return None; | |||
1506 | ||||
1507 | return VIDSequence{*SeqStepNum, *SeqStepDenom, *SeqAddend}; | |||
1508 | } | |||
1509 | ||||
1510 | static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, | |||
1511 | const RISCVSubtarget &Subtarget) { | |||
1512 | MVT VT = Op.getSimpleValueType(); | |||
1513 | assert(VT.isFixedLengthVector() && "Unexpected vector!"); | |||
1514 | ||||
1515 | MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget); | |||
1516 | ||||
1517 | SDLoc DL(Op); | |||
1518 | SDValue Mask, VL; | |||
1519 | std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); | |||
1520 | ||||
1521 | MVT XLenVT = Subtarget.getXLenVT(); | |||
1522 | unsigned NumElts = Op.getNumOperands(); | |||
1523 | ||||
1524 | if (VT.getVectorElementType() == MVT::i1) { | |||
1525 | if (ISD::isBuildVectorAllZeros(Op.getNode())) { | |||
1526 | SDValue VMClr = DAG.getNode(RISCVISD::VMCLR_VL, DL, ContainerVT, VL); | |||
1527 | return convertFromScalableVector(VT, VMClr, DAG, Subtarget); | |||
1528 | } | |||
1529 | ||||
1530 | if (ISD::isBuildVectorAllOnes(Op.getNode())) { | |||
1531 | SDValue VMSet = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL); | |||
1532 | return convertFromScalableVector(VT, VMSet, DAG, Subtarget); | |||
1533 | } | |||
1534 | ||||
1535 | // Lower constant mask BUILD_VECTORs via an integer vector type, in | |||
1536 | // scalar integer chunks whose bit-width depends on the number of mask | |||
1537 | // bits and XLEN. | |||
1538 | // First, determine the most appropriate scalar integer type to use. This | |||
1539 | // is at most XLenVT, but may be shrunk to a smaller vector element type | |||
1540 | // according to the size of the final vector - use i8 chunks rather than | |||
1541 | // XLenVT if we're producing a v8i1. This results in more consistent | |||
1542 | // codegen across RV32 and RV64. | |||
1543 | unsigned NumViaIntegerBits = | |||
1544 | std::min(std::max(NumElts, 8u), Subtarget.getXLen()); | |||
1545 | if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) { | |||
1546 | // If we have to use more than one INSERT_VECTOR_ELT then this | |||
1547 | // optimization is likely to increase code size; avoid performing it in | |||
1548 | // such a case. We can use a load from a constant pool in this case. | |||
1549 | if (DAG.shouldOptForSize() && NumElts > NumViaIntegerBits) | |||
1550 | return SDValue(); | |||
1551 | // Now we can create our integer vector type. Note that it may be larger | |||
1552 | // than the resulting mask type: v4i1 would use v1i8 as its integer type. | |||
1553 | MVT IntegerViaVecVT = | |||
1554 | MVT::getVectorVT(MVT::getIntegerVT(NumViaIntegerBits), | |||
1555 | divideCeil(NumElts, NumViaIntegerBits)); | |||
1556 | ||||
1557 | uint64_t Bits = 0; | |||
1558 | unsigned BitPos = 0, IntegerEltIdx = 0; | |||
1559 | SDValue Vec = DAG.getUNDEF(IntegerViaVecVT); | |||
1560 | ||||
1561 | for (unsigned I = 0; I < NumElts; I++, BitPos++) { | |||
1562 | // Once we accumulate enough bits to fill our scalar type, insert into | |||
1563 | // our vector and clear our accumulated data. | |||
1564 | if (I != 0 && I % NumViaIntegerBits == 0) { | |||
1565 | if (NumViaIntegerBits <= 32) | |||
1566 | Bits = SignExtend64(Bits, 32); | |||
1567 | SDValue Elt = DAG.getConstant(Bits, DL, XLenVT); | |||
1568 | Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec, | |||
1569 | Elt, DAG.getConstant(IntegerEltIdx, DL, XLenVT)); | |||
1570 | Bits = 0; | |||
1571 | BitPos = 0; | |||
1572 | IntegerEltIdx++; | |||
1573 | } | |||
1574 | SDValue V = Op.getOperand(I); | |||
1575 | bool BitValue = !V.isUndef() && cast<ConstantSDNode>(V)->getZExtValue(); | |||
1576 | Bits |= ((uint64_t)BitValue << BitPos); | |||
1577 | } | |||
1578 | ||||
1579 | // Insert the (remaining) scalar value into position in our integer | |||
1580 | // vector type. | |||
1581 | if (NumViaIntegerBits <= 32) | |||
1582 | Bits = SignExtend64(Bits, 32); | |||
1583 | SDValue Elt = DAG.getConstant(Bits, DL, XLenVT); | |||
1584 | Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec, Elt, | |||
1585 | DAG.getConstant(IntegerEltIdx, DL, XLenVT)); | |||
1586 | ||||
1587 | if (NumElts < NumViaIntegerBits) { | |||
1588 | // If we're producing a smaller vector than our minimum legal integer | |||
1589 | // type, bitcast to the equivalent (known-legal) mask type, and extract | |||
1590 | // our final mask. | |||
1591 | assert(IntegerViaVecVT == MVT::v1i8 && "Unexpected mask vector type"); | |||
1592 | Vec = DAG.getBitcast(MVT::v8i1, Vec); | |||
1593 | Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Vec, | |||
1594 | DAG.getConstant(0, DL, XLenVT)); | |||
1595 | } else { | |||
1596 | // Else we must have produced an integer type with the same size as the | |||
1597 | // mask type; bitcast for the final result. | |||
1598 | assert(VT.getSizeInBits() == IntegerViaVecVT.getSizeInBits()); | |||
1599 | Vec = DAG.getBitcast(VT, Vec); | |||
1600 | } | |||
1601 | ||||
1602 | return Vec; | |||
1603 | } | |||
1604 | ||||
1605 | // A BUILD_VECTOR can be lowered as a SETCC. For each fixed-length mask | |||
1606 | // vector type, we have a legal equivalently-sized i8 type, so we can use | |||
1607 | // that. | |||
1608 | MVT WideVecVT = VT.changeVectorElementType(MVT::i8); | |||
1609 | SDValue VecZero = DAG.getConstant(0, DL, WideVecVT); | |||
1610 | ||||
1611 | SDValue WideVec; | |||
1612 | if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) { | |||
1613 | // For a splat, perform a scalar truncate before creating the wider | |||
1614 | // vector. | |||
1615 | assert(Splat.getValueType() == XLenVT && | |||
1616 | "Unexpected type for i1 splat value"); | |||
1617 | Splat = DAG.getNode(ISD::AND, DL, XLenVT, Splat, | |||
1618 | DAG.getConstant(1, DL, XLenVT)); | |||
1619 | WideVec = DAG.getSplatBuildVector(WideVecVT, DL, Splat); | |||
1620 | } else { | |||
1621 | SmallVector<SDValue, 8> Ops(Op->op_values()); | |||
1622 | WideVec = DAG.getBuildVector(WideVecVT, DL, Ops); | |||
1623 | SDValue VecOne = DAG.getConstant(1, DL, WideVecVT); | |||
1624 | WideVec = DAG.getNode(ISD::AND, DL, WideVecVT, WideVec, VecOne); | |||
1625 | } | |||
1626 | ||||
1627 | return DAG.getSetCC(DL, VT, WideVec, VecZero, ISD::SETNE); | |||
1628 | } | |||
1629 | ||||
1630 | if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) { | |||
1631 | unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL | |||
1632 | : RISCVISD::VMV_V_X_VL; | |||
1633 | Splat = DAG.getNode(Opc, DL, ContainerVT, Splat, VL); | |||
1634 | return convertFromScalableVector(VT, Splat, DAG, Subtarget); | |||
1635 | } | |||
1636 | ||||
1637 | // Try and match index sequences, which we can lower to the vid instruction | |||
1638 | // with optional modifications. An all-undef vector is matched by | |||
1639 | // getSplatValue, above. | |||
1640 | if (auto SimpleVID = isSimpleVIDSequence(Op)) { | |||
1641 | int64_t StepNumerator = SimpleVID->StepNumerator; | |||
1642 | unsigned StepDenominator = SimpleVID->StepDenominator; | |||
1643 | int64_t Addend = SimpleVID->Addend; | |||
1644 | // Only emit VIDs with suitably-small steps/addends. We use imm5 as the | |||
1645 | // threshold since it's the immediate value many RVV instructions accept. | |||
1646 | if (isInt<5>(StepNumerator) && isPowerOf2_32(StepDenominator) && | |||
1647 | isInt<5>(Addend)) { | |||
1648 | SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, ContainerVT, Mask, VL); | |||
1649 | // Convert right out of the scalable type so we can use standard ISD | |||
1650 | // nodes for the rest of the computation. If we used scalable types with | |||
1651 | // these, we'd lose the fixed-length vector info and generate worse | |||
1652 | // vsetvli code. | |||
1653 | VID = convertFromScalableVector(VT, VID, DAG, Subtarget); | |||
1654 | assert(StepNumerator != 0 && "Invalid step"); | |||
1655 | bool Negate = false; | |||
1656 | if (StepNumerator != 1) { | |||
1657 | int64_t SplatStepVal = StepNumerator; | |||
1658 | unsigned Opcode = ISD::MUL; | |||
1659 | if (isPowerOf2_64(std::abs(StepNumerator))) { | |||
1660 | Negate = StepNumerator < 0; | |||
1661 | Opcode = ISD::SHL; | |||
1662 | SplatStepVal = Log2_64(std::abs(StepNumerator)); | |||
1663 | } | |||
1664 | SDValue SplatStep = DAG.getSplatVector( | |||
1665 | VT, DL, DAG.getConstant(SplatStepVal, DL, XLenVT)); | |||
1666 | VID = DAG.getNode(Opcode, DL, VT, VID, SplatStep); | |||
1667 | } | |||
1668 | if (StepDenominator != 1) { | |||
1669 | SDValue SplatStep = DAG.getSplatVector( | |||
1670 | VT, DL, DAG.getConstant(Log2_64(StepDenominator), DL, XLenVT)); | |||
1671 | VID = DAG.getNode(ISD::SRL, DL, VT, VID, SplatStep); | |||
1672 | } | |||
1673 | if (Addend != 0 || Negate) { | |||
1674 | SDValue SplatAddend = | |||
1675 | DAG.getSplatVector(VT, DL, DAG.getConstant(Addend, DL, XLenVT)); | |||
1676 | VID = DAG.getNode(Negate ? ISD::SUB : ISD::ADD, DL, VT, SplatAddend, VID); | |||
1677 | } | |||
1678 | return VID; | |||
1679 | } | |||
1680 | } | |||
1681 | ||||
1682 | // Attempt to detect "hidden" splats, which only reveal themselves as splats | |||
1683 | // when re-interpreted as a vector with a larger element type. For example, | |||
1684 | // v4i16 = build_vector i16 0, i16 1, i16 0, i16 1 | |||
1685 | // could be instead splat as | |||
1686 | // v2i32 = build_vector i32 0x00010000, i32 0x00010000 | |||
1687 | // TODO: This optimization could also work on non-constant splats, but it | |||
1688 | // would require bit-manipulation instructions to construct the splat value. | |||
1689 | SmallVector<SDValue> Sequence; | |||
1690 | unsigned EltBitSize = VT.getScalarSizeInBits(); | |||
1691 | const auto *BV = cast<BuildVectorSDNode>(Op); | |||
1692 | if (VT.isInteger() && EltBitSize < 64 && | |||
1693 | ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) && | |||
1694 | BV->getRepeatedSequence(Sequence) && | |||
1695 | (Sequence.size() * EltBitSize) <= 64) { | |||
1696 | unsigned SeqLen = Sequence.size(); | |||
1697 | MVT ViaIntVT = MVT::getIntegerVT(EltBitSize * SeqLen); | |||
1698 | MVT ViaVecVT = MVT::getVectorVT(ViaIntVT, NumElts / SeqLen); | |||
1699 | assert((ViaIntVT == MVT::i16 || ViaIntVT == MVT::i32 || | |||
1700 | ViaIntVT == MVT::i64) && | |||
1701 | "Unexpected sequence type"); | |||
1702 | ||||
1703 | unsigned EltIdx = 0; | |||
1704 | uint64_t EltMask = maskTrailingOnes<uint64_t>(EltBitSize); | |||
1705 | uint64_t SplatValue = 0; | |||
1706 | // Construct the amalgamated value which can be splatted as this larger | |||
1707 | // vector type. | |||
1708 | for (const auto &SeqV : Sequence) { | |||
1709 | if (!SeqV.isUndef()) | |||
1710 | SplatValue |= ((cast<ConstantSDNode>(SeqV)->getZExtValue() & EltMask) | |||
1711 | << (EltIdx * EltBitSize)); | |||
1712 | EltIdx++; | |||
1713 | } | |||
1714 | ||||
1715 | // On RV64, sign-extend from 32 to 64 bits where possible in order to | |||
1716 | // achieve better constant materialization. | |||
1717 | if (Subtarget.is64Bit() && ViaIntVT == MVT::i32) | |||
1718 | SplatValue = SignExtend64(SplatValue, 32); | |||
1719 | ||||
1720 | // Since we can't introduce illegal i64 types at this stage, we can only | |||
1721 | // perform an i64 splat on RV32 if it is its own sign-extended value. That | |||
1722 | // way we can use RVV instructions to splat. | |||
1723 | assert((ViaIntVT.bitsLE(XLenVT) || | |||
1724 | (!Subtarget.is64Bit() && ViaIntVT == MVT::i64)) && | |||
1725 | "Unexpected bitcast sequence"); | |||
1726 | if (ViaIntVT.bitsLE(XLenVT) || isInt<32>(SplatValue)) { | |||
1727 | SDValue ViaVL = | |||
1728 | DAG.getConstant(ViaVecVT.getVectorNumElements(), DL, XLenVT); | |||
1729 | MVT ViaContainerVT = | |||
1730 | getContainerForFixedLengthVector(DAG, ViaVecVT, Subtarget); | |||
1731 | SDValue Splat = | |||
1732 | DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ViaContainerVT, | |||
1733 | DAG.getConstant(SplatValue, DL, XLenVT), ViaVL); | |||
1734 | Splat = convertFromScalableVector(ViaVecVT, Splat, DAG, Subtarget); | |||
1735 | return DAG.getBitcast(VT, Splat); | |||
1736 | } | |||
1737 | } | |||
1738 | ||||
1739 | // Try and optimize BUILD_VECTORs with "dominant values" - these are values | |||
1740 | // which constitute a large proportion of the elements. In such cases we can | |||
1741 | // splat a vector with the dominant element and make up the shortfall with | |||
1742 | // INSERT_VECTOR_ELTs. | |||
1743 | // Note that this includes vectors of 2 elements by association. The | |||
1744 | // upper-most element is the "dominant" one, allowing us to use a splat to | |||
1745 | // "insert" the upper element, and an insert of the lower element at position | |||
1746 | // 0, which improves codegen. | |||
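// For example, <2, 2, 2, 3> is lowered as a splat of 2 followed by a
// single INSERT_VECTOR_ELT of 3 at index 3.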
1747 | SDValue DominantValue; | |||
1748 | unsigned MostCommonCount = 0; | |||
1749 | DenseMap<SDValue, unsigned> ValueCounts; | |||
1750 | unsigned NumUndefElts = | |||
1751 | count_if(Op->op_values(), [](const SDValue &V) { return V.isUndef(); }); | |||
1752 | ||||
1753 | // Track the number of scalar loads we know we'd be inserting, estimated as | |||
1754 | // any non-zero floating-point constant. Other kinds of element are either | |||
1755 | // already in registers or are materialized on demand. The threshold at which | |||
1756 | // a vector load is more desirable than several scalar materialization and | |||
1757 | // vector-insertion instructions is not known. | |||
1758 | unsigned NumScalarLoads = 0; | |||
1759 | ||||
1760 | for (SDValue V : Op->op_values()) { | |||
1761 | if (V.isUndef()) | |||
1762 | continue; | |||
1763 | ||||
1764 | ValueCounts.insert(std::make_pair(V, 0)); | |||
1765 | unsigned &Count = ValueCounts[V]; | |||
1766 | ||||
1767 | if (auto *CFP = dyn_cast<ConstantFPSDNode>(V)) | |||
1768 | NumScalarLoads += !CFP->isExactlyValue(+0.0); | |||
1769 | ||||
1770 | // Is this value dominant? In case of a tie, prefer the highest element as | |||
1771 | // it's cheaper to insert near the beginning of a vector than it is at the | |||
1772 | // end. | |||
1773 | if (++Count >= MostCommonCount) { | |||
1774 | DominantValue = V; | |||
1775 | MostCommonCount = Count; | |||
1776 | } | |||
1777 | } | |||
1778 | ||||
1779 | assert(DominantValue && "Not expecting an all-undef BUILD_VECTOR"); | |||
1780 | unsigned NumDefElts = NumElts - NumUndefElts; | |||
1781 | unsigned DominantValueCountThreshold = NumDefElts <= 2 ? 0 : NumDefElts - 2; | |||
1782 | ||||
1783 | // Don't perform this optimization when optimizing for size, since | |||
1784 | // materializing elements and inserting them tends to cause code bloat. | |||
1785 | if (!DAG.shouldOptForSize() && NumScalarLoads < NumElts && | |||
1786 | ((MostCommonCount > DominantValueCountThreshold) || | |||
1787 | (ValueCounts.size() <= Log2_32(NumDefElts)))) { | |||
1788 | // Start by splatting the most common element. | |||
1789 | SDValue Vec = DAG.getSplatBuildVector(VT, DL, DominantValue); | |||
1790 | ||||
1791 | DenseSet<SDValue> Processed{DominantValue}; | |||
1792 | MVT SelMaskTy = VT.changeVectorElementType(MVT::i1); | |||
1793 | for (const auto &OpIdx : enumerate(Op->ops())) { | |||
1794 | const SDValue &V = OpIdx.value(); | |||
1795 | if (V.isUndef() || !Processed.insert(V).second) | |||
1796 | continue; | |||
1797 | if (ValueCounts[V] == 1) { | |||
1798 | Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V, | |||
1799 | DAG.getConstant(OpIdx.index(), DL, XLenVT)); | |||
1800 | } else { | |||
1801 | // Blend in all instances of this value using a VSELECT, using a | |||
1802 | // mask where each bit signals whether that element is the one | |||
1803 | // we're after. | |||
1804 | SmallVector<SDValue> Ops; | |||
1805 | transform(Op->op_values(), std::back_inserter(Ops), [&](SDValue V1) { | |||
1806 | return DAG.getConstant(V == V1, DL, XLenVT); | |||
1807 | }); | |||
1808 | Vec = DAG.getNode(ISD::VSELECT, DL, VT, | |||
1809 | DAG.getBuildVector(SelMaskTy, DL, Ops), | |||
1810 | DAG.getSplatBuildVector(VT, DL, V), Vec); | |||
1811 | } | |||
1812 | } | |||
1813 | ||||
1814 | return Vec; | |||
1815 | } | |||
1816 | ||||
1817 | return SDValue(); | |||
1818 | } | |||
1819 | ||||
1820 | static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Lo, | |||
1821 | SDValue Hi, SDValue VL, SelectionDAG &DAG) { | |||
1822 | if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) { | |||
1823 | int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue(); | |||
1824 | int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue(); | |||
1825 | // If the Hi constant is all copies of Lo's sign bit (a sign-extended i32), | |||
1826 | // lower this as a custom node to try and match RVV vector/scalar instructions. | |||
1827 | if ((LoC >> 31) == HiC) | |||
1828 | return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Lo, VL); | |||
1829 | } | |||
1830 | ||||
1831 | // Fall back to a stack store and stride x0 vector load. | |||
1832 | return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VT, Lo, Hi, VL); | |||
1833 | } | |||
1834 | ||||
1835 | // Called by type legalization to handle splat of i64 on RV32. | |||
1836 | // FIXME: We can optimize this when the type has sign or zero bits in one | |||
1837 | // of the halves. | |||
1838 | static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Scalar, | |||
1839 | SDValue VL, SelectionDAG &DAG) { | |||
1840 | assert(Scalar.getValueType() == MVT::i64 && "Unexpected VT!"); | |||
1841 | SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar, | |||
1842 | DAG.getConstant(0, DL, MVT::i32)); | |||
1843 | SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar, | |||
1844 | DAG.getConstant(1, DL, MVT::i32)); | |||
1845 | return splatPartsI64WithVL(DL, VT, Lo, Hi, VL, DAG); | |||
1846 | } | |||
1847 | ||||
1848 | // This function lowers a splat of the scalar operand Scalar with the vector | |||
1849 | // length VL. It ensures the final sequence is type legal, which is useful when | |||
1850 | // lowering a splat after type legalization. | |||
1851 | static SDValue lowerScalarSplat(SDValue Scalar, SDValue VL, MVT VT, SDLoc DL, | |||
1852 | SelectionDAG &DAG, | |||
1853 | const RISCVSubtarget &Subtarget) { | |||
1854 | if (VT.isFloatingPoint()) | |||
1855 | return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, VT, Scalar, VL); | |||
1856 | ||||
1857 | MVT XLenVT = Subtarget.getXLenVT(); | |||
1858 | ||||
1859 | // Simplest case is that the operand needs to be promoted to XLenVT. | |||
1860 | if (Scalar.getValueType().bitsLE(XLenVT)) { | |||
1861 | // If the operand is a constant, sign extend to increase our chances | |||
1862 | // of being able to use a .vi instruction. ANY_EXTEND would become | |||
1863 | // a zero extend and the simm5 check in isel would fail. | |||
1864 | // FIXME: Should we ignore the upper bits in isel instead? | |||
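// For example, splatting the i8 constant -1 sign-extends to -1 in XLenVT,
// which can match a .vi form with simm5 -1; a zero extend would instead
// produce 255 and fail the simm5 check.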
1865 | unsigned ExtOpc = | |||
1866 | isa<ConstantSDNode>(Scalar) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND; | |||
1867 | Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar); | |||
1868 | return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Scalar, VL); | |||
1869 | } | |||
1870 | ||||
1871 | assert(XLenVT == MVT::i32 && Scalar.getValueType() == MVT::i64 && | |||
1872 | "Unexpected scalar for splat lowering!"); | |||
1873 | ||||
1874 | // Otherwise use the more complicated splatting algorithm. | |||
1875 | return splatSplitI64WithVL(DL, VT, Scalar, VL, DAG); | |||
1876 | } | |||
1877 | ||||
1878 | static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG, | |||
1879 | const RISCVSubtarget &Subtarget) { | |||
1880 | SDValue V1 = Op.getOperand(0); | |||
1881 | SDValue V2 = Op.getOperand(1); | |||
1882 | SDLoc DL(Op); | |||
1883 | MVT XLenVT = Subtarget.getXLenVT(); | |||
1884 | MVT VT = Op.getSimpleValueType(); | |||
1885 | unsigned NumElts = VT.getVectorNumElements(); | |||
1886 | ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode()); | |||
1887 | ||||
1888 | MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget); | |||
1889 | ||||
1890 | SDValue TrueMask, VL; | |||
1891 | std::tie(TrueMask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); | |||
1892 | ||||
1893 | if (SVN->isSplat()) { | |||
1894 | const int Lane = SVN->getSplatIndex(); | |||
1895 | if (Lane >= 0) { | |||
1896 | MVT SVT = VT.getVectorElementType(); | |||
1897 | ||||
1898 | // Turn splatted vector load into a strided load with an X0 stride. | |||
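// A stride of x0 (i.e. zero) re-reads the same memory location for every
// element, materializing the splat directly from the load.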
1899 | SDValue V = V1; | |||
1900 | // Peek through CONCAT_VECTORS as VectorCombine can concat a vector | |||
1901 | // with undef. | |||
1902 | // FIXME: Peek through INSERT_SUBVECTOR, EXTRACT_SUBVECTOR, bitcasts? | |||
1903 | int Offset = Lane; | |||
1904 | if (V.getOpcode() == ISD::CONCAT_VECTORS) { | |||
1905 | int OpElements = | |||
1906 | V.getOperand(0).getSimpleValueType().getVectorNumElements(); | |||
1907 | V = V.getOperand(Offset / OpElements); | |||
1908 | Offset %= OpElements; | |||
1909 | } | |||
1910 | ||||
1911 | // We need to ensure the load isn't atomic or volatile. | |||
1912 | if (ISD::isNormalLoad(V.getNode()) && cast<LoadSDNode>(V)->isSimple()) { | |||
1913 | auto *Ld = cast<LoadSDNode>(V); | |||
1914 | Offset *= SVT.getStoreSize(); | |||
1915 | SDValue NewAddr = DAG.getMemBasePlusOffset(Ld->getBasePtr(), | |||
1916 | TypeSize::Fixed(Offset), DL); | |||
1917 | ||||
1918 | // If this is SEW=64 on RV32, use a strided load with a stride of x0. | |||
1919 | if (SVT.isInteger() && SVT.bitsGT(XLenVT)) { | |||
1920 | SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other}); | |||
1921 | SDValue IntID = | |||
1922 | DAG.getTargetConstant(Intrinsic::riscv_vlse, DL, XLenVT); | |||
1923 | SDValue Ops[] = {Ld->getChain(), IntID, NewAddr, | |||
1924 | DAG.getRegister(RISCV::X0, XLenVT), VL}; | |||
1925 | SDValue NewLoad = DAG.getMemIntrinsicNode( | |||
1926 | ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, SVT, | |||
1927 | DAG.getMachineFunction().getMachineMemOperand( | |||
1928 | Ld->getMemOperand(), Offset, SVT.getStoreSize())); | |||
1929 | DAG.makeEquivalentMemoryOrdering(Ld, NewLoad); | |||
1930 | return convertFromScalableVector(VT, NewLoad, DAG, Subtarget); | |||
1931 | } | |||
1932 | ||||
1933 | // Otherwise use a scalar load and splat. This will give the best | |||
1934 | // opportunity to fold a splat into the operation. ISel can turn it into | |||
1935 | // the x0 strided load if we aren't able to fold away the select. | |||
1936 | if (SVT.isFloatingPoint()) | |||
1937 | V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr, | |||
1938 | Ld->getPointerInfo().getWithOffset(Offset), | |||
1939 | Ld->getOriginalAlign(), | |||
1940 | Ld->getMemOperand()->getFlags()); | |||
1941 | else | |||
1942 | V = DAG.getExtLoad(ISD::SEXTLOAD, DL, XLenVT, Ld->getChain(), NewAddr, | |||
1943 | Ld->getPointerInfo().getWithOffset(Offset), SVT, | |||
1944 | Ld->getOriginalAlign(), | |||
1945 | Ld->getMemOperand()->getFlags()); | |||
1946 | DAG.makeEquivalentMemoryOrdering(Ld, V); | |||
1947 | ||||
1948 | unsigned Opc = | |||
1949 | VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL; | |||
1950 | SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, V, VL); | |||
1951 | return convertFromScalableVector(VT, Splat, DAG, Subtarget); | |||
1952 | } | |||
1953 | ||||
1954 | V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget); | |||
1955 | assert(Lane < (int)NumElts && "Unexpected lane!"); | |||
1956 | SDValue Gather = | |||
1957 | DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, V1, | |||
1958 | DAG.getConstant(Lane, DL, XLenVT), TrueMask, VL); | |||
1959 | return convertFromScalableVector(VT, Gather, DAG, Subtarget); | |||
1960 | } | |||
1961 | } | |||
1962 | ||||
1963 | // Detect shuffles which can be re-expressed as vector selects; these are | |||
1964 | // shuffles in which each element in the destination is taken from an element | |||
1965 | // at the corresponding index in either source vector. | |||
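// For example, with v4iN sources the mask <0, 5, 2, 7> takes destination
// lane i from lane i of either V1 or V2, so it can be lowered as a VSELECT.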
1966 | bool IsSelect = all_of(enumerate(SVN->getMask()), [&](const auto &MaskIdx) { | |||
1967 | int MaskIndex = MaskIdx.value(); | |||
1968 | return MaskIndex < 0 || MaskIdx.index() == (unsigned)MaskIndex % NumElts; | |||
1969 | }); | |||
1970 | ||||
1971 | assert(!V1.isUndef() && "Unexpected shuffle canonicalization"); | |||
1972 | ||||
1973 | SmallVector<SDValue> MaskVals; | |||
1974 | // As a backup, shuffles can be lowered via a vrgather instruction, possibly | |||
1975 | // merged with a second vrgather. | |||
1976 | SmallVector<SDValue> GatherIndicesLHS, GatherIndicesRHS; | |||
1977 | ||||
1978 | // By default we preserve the original operand order, and use a mask to | |||
1979 | // select LHS as true and RHS as false. However, since RVV vector selects may | |||
1980 | // feature splats but only on the LHS, we may choose to invert our mask and | |||
1981 | // instead select between RHS and LHS. | |||
1982 | bool SwapOps = DAG.isSplatValue(V2) && !DAG.isSplatValue(V1); | |||
1983 | bool InvertMask = IsSelect == SwapOps; | |||
1984 | ||||
1985 | // Keep track of which non-undef indices are used by each LHS/RHS shuffle | |||
1986 | // half. | |||
1987 | DenseMap<int, unsigned> LHSIndexCounts, RHSIndexCounts; | |||
1988 | ||||
1989 | // Now construct the mask that will be used by the vselect or blended | |||
1990 | // vrgather operation. For vrgathers, construct the appropriate indices into | |||
1991 | // each vector. | |||
1992 | for (int MaskIndex : SVN->getMask()) { | |||
1993 | bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ InvertMask; | |||
1994 | MaskVals.push_back(DAG.getConstant(SelectMaskVal, DL, XLenVT)); | |||
1995 | if (!IsSelect) { | |||
1996 | bool IsLHSOrUndefIndex = MaskIndex < (int)NumElts; | |||
1997 | GatherIndicesLHS.push_back(IsLHSOrUndefIndex && MaskIndex >= 0 | |||
1998 | ? DAG.getConstant(MaskIndex, DL, XLenVT) | |||
1999 | : DAG.getUNDEF(XLenVT)); | |||
2000 | GatherIndicesRHS.push_back( | |||
2001 | IsLHSOrUndefIndex ? DAG.getUNDEF(XLenVT) | |||
2002 | : DAG.getConstant(MaskIndex - NumElts, DL, XLenVT)); | |||
2003 | if (IsLHSOrUndefIndex && MaskIndex >= 0) | |||
2004 | ++LHSIndexCounts[MaskIndex]; | |||
2005 | if (!IsLHSOrUndefIndex) | |||
2006 | ++RHSIndexCounts[MaskIndex - NumElts]; | |||
2007 | } | |||
2008 | } | |||
2009 | ||||
2010 | if (SwapOps) { | |||
2011 | std::swap(V1, V2); | |||
2012 | std::swap(GatherIndicesLHS, GatherIndicesRHS); | |||
2013 | } | |||
2014 | ||||
2015 | assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle"); | |||
2016 | MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts); | |||
2017 | SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, MaskVals); | |||
2018 | ||||
2019 | if (IsSelect) | |||
2020 | return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, V1, V2); | |||
2021 | ||||
2022 | if (VT.getScalarSizeInBits() == 8 && VT.getVectorNumElements() > 256) { | |||
2023 | // On such a large vector we're unable to use i8 as the index type. | |||
2024 | // FIXME: We could promote the index to i16 and use vrgatherei16, but that | |||
2025 | // may involve vector splitting if we're already at LMUL=8, or our | |||
2026 | // user-supplied maximum fixed-length LMUL. | |||
2027 | return SDValue(); | |||
2028 | } | |||
2029 | ||||
2030 | unsigned GatherVXOpc = RISCVISD::VRGATHER_VX_VL; | |||
2031 | unsigned GatherVVOpc = RISCVISD::VRGATHER_VV_VL; | |||
2032 | MVT IndexVT = VT.changeTypeToInteger(); | |||
2033 | // Since we can't introduce illegal index types at this stage, use i16 and | |||
2034 | // vrgatherei16 if the corresponding index type for plain vrgather is greater | |||
2035 | // than XLenVT. | |||
2036 | if (IndexVT.getScalarType().bitsGT(XLenVT)) { | |||
2037 | GatherVVOpc = RISCVISD::VRGATHEREI16_VV_VL; | |||
2038 | IndexVT = IndexVT.changeVectorElementType(MVT::i16); | |||
2039 | } | |||
2040 | ||||
2041 | MVT IndexContainerVT = | |||
2042 | ContainerVT.changeVectorElementType(IndexVT.getScalarType()); | |||
2043 | ||||
2044 | SDValue Gather; | |||
2045 | // TODO: This doesn't trigger for i64 vectors on RV32, since there we | |||
2046 | // encounter a bitcasted BUILD_VECTOR with low/high i32 values. | |||
2047 | if (SDValue SplatValue = DAG.getSplatValue(V1, /*LegalTypes*/ true)) { | |||
2048 | Gather = lowerScalarSplat(SplatValue, VL, ContainerVT, DL, DAG, Subtarget); | |||
2049 | } else { | |||
2050 | V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget); | |||
2051 | // If only one index is used, we can use a "splat" vrgather. | |||
2052 | // TODO: We can splat the most-common index and fix-up any stragglers, if | |||
2053 | // that's beneficial. | |||
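// For example, if every defined LHS entry of the mask reads element 2, then
// LHSIndexCounts has a single entry and the gather collapses to a vrgather.vx
// with scalar index 2.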
2054 | if (LHSIndexCounts.size() == 1) { | |||
2055 | int SplatIndex = LHSIndexCounts.begin()->getFirst(); | |||
2056 | Gather = | |||
2057 | DAG.getNode(GatherVXOpc, DL, ContainerVT, V1, | |||
2058 | DAG.getConstant(SplatIndex, DL, XLenVT), TrueMask, VL); | |||
2059 | } else { | |||
2060 | SDValue LHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesLHS); | |||
2061 | LHSIndices = | |||
2062 | convertToScalableVector(IndexContainerVT, LHSIndices, DAG, Subtarget); | |||
2063 | ||||
2064 | Gather = DAG.getNode(GatherVVOpc, DL, ContainerVT, V1, LHSIndices, | |||
2065 | TrueMask, VL); | |||
2066 | } | |||
2067 | } | |||
2068 | ||||
2069 | // If a second vector operand is used by this shuffle, blend it in with an | |||
2070 | // additional vrgather. | |||
2071 | if (!V2.isUndef()) { | |||
2072 | V2 = convertToScalableVector(ContainerVT, V2, DAG, Subtarget); | |||
2073 | // If only one index is used, we can use a "splat" vrgather. | |||
2074 | // TODO: We can splat the most-common index and fix-up any stragglers, if | |||
2075 | // that's beneficial. | |||
2076 | if (RHSIndexCounts.size() == 1) { | |||
2077 | int SplatIndex = RHSIndexCounts.begin()->getFirst(); | |||
2078 | V2 = DAG.getNode(GatherVXOpc, DL, ContainerVT, V2, | |||
2079 | DAG.getConstant(SplatIndex, DL, XLenVT), TrueMask, VL); | |||
2080 | } else { | |||
2081 | SDValue RHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesRHS); | |||
2082 | RHSIndices = | |||
2083 | convertToScalableVector(IndexContainerVT, RHSIndices, DAG, Subtarget); | |||
2084 | V2 = DAG.getNode(GatherVVOpc, DL, ContainerVT, V2, RHSIndices, TrueMask, | |||
2085 | VL); | |||
2086 | } | |||
2087 | ||||
2088 | MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1); | |||
2089 | SelectMask = | |||
2090 | convertToScalableVector(MaskContainerVT, SelectMask, DAG, Subtarget); | |||
2091 | ||||
2092 | Gather = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, SelectMask, V2, | |||
2093 | Gather, VL); | |||
2094 | } | |||
2095 | ||||
2096 | return convertFromScalableVector(VT, Gather, DAG, Subtarget); | |||
2097 | } | |||
2098 | ||||
2099 | static SDValue getRVVFPExtendOrRound(SDValue Op, MVT VT, MVT ContainerVT, | |||
2100 | SDLoc DL, SelectionDAG &DAG, | |||
2101 | const RISCVSubtarget &Subtarget) { | |||
2102 | if (VT.isScalableVector()) | |||
2103 | return DAG.getFPExtendOrRound(Op, DL, VT); | |||
2104 | assert(VT.isFixedLengthVector() && | |||
2105 | "Unexpected value type for RVV FP extend/round lowering"); | |||
2106 | SDValue Mask, VL; | |||
2107 | std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); | |||
2108 | unsigned RVVOpc = ContainerVT.bitsGT(Op.getSimpleValueType()) | |||
2109 | ? RISCVISD::FP_EXTEND_VL | |||
2110 | : RISCVISD::FP_ROUND_VL; | |||
2111 | return DAG.getNode(RVVOpc, DL, ContainerVT, Op, Mask, VL); | |||
2112 | } | |||
2113 | ||||
2114 | // While RVV has alignment restrictions, we should always be able to load as a | |||
2115 | // legal equivalently-sized byte-typed vector instead. This method is | |||
2116 | // responsible for re-expressing an ISD::LOAD via a correctly-aligned type. If | |||
2117 | // the load is already correctly-aligned, it returns SDValue(). | |||
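// For example, an under-aligned load of <4 x i32> is re-expressed as a load
// of <16 x i8>, which only needs byte alignment, followed by a bitcast back
// to <4 x i32>.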
2118 | SDValue RISCVTargetLowering::expandUnalignedRVVLoad(SDValue Op, | |||
2119 | SelectionDAG &DAG) const { | |||
2120 | auto *Load = cast<LoadSDNode>(Op); | |||
2121 | assert(Load && Load->getMemoryVT().isVector() && "Expected vector load"); | |||
2122 | ||||
2123 | if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(), | |||
2124 | Load->getMemoryVT(), | |||
2125 | *Load->getMemOperand())) | |||
2126 | return SDValue(); | |||
2127 | ||||
2128 | SDLoc DL(Op); | |||
2129 | MVT VT = Op.getSimpleValueType(); | |||
2130 | unsigned EltSizeBits = VT.getScalarSizeInBits(); | |||
2131 | assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) && | |||
2132 | "Unexpected unaligned RVV load type"); | |||
2133 | MVT NewVT = | |||
2134 | MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8)); | |||
2135 | assert(NewVT.isValid() && | |||
2136 | "Expecting equally-sized RVV vector types to be legal"); | |||
2137 | SDValue L = DAG.getLoad(NewVT, DL, Load->getChain(), Load->getBasePtr(), | |||
2138 | Load->getPointerInfo(), Load->getOriginalAlign(), | |||
2139 | Load->getMemOperand()->getFlags()); | |||
2140 | return DAG.getMergeValues({DAG.getBitcast(VT, L), L.getValue(1)}, DL); | |||
2141 | } | |||
2142 | ||||
2143 | // While RVV has alignment restrictions, we should always be able to store as a | |||
2144 | // legal equivalently-sized byte-typed vector instead. This method is | |||
2145 | // responsible for re-expressing an ISD::STORE via a correctly-aligned type. It | |||
2146 | // returns SDValue() if the store is already correctly aligned. | |||
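// This mirrors expandUnalignedRVVLoad above: e.g. an under-aligned store of
// <2 x i64> becomes a bitcast to <16 x i8> followed by a byte-vector store.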
2147 | SDValue RISCVTargetLowering::expandUnalignedRVVStore(SDValue Op, | |||
2148 | SelectionDAG &DAG) const { | |||
2149 | auto *Store = cast<StoreSDNode>(Op); | |||
2150 | assert(Store && Store->getValue().getValueType().isVector() && | |||
2151 | "Expected vector store"); | |||
2152 | ||||
2153 | if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(), | |||
2154 | Store->getMemoryVT(), | |||
2155 | *Store->getMemOperand())) | |||
2156 | return SDValue(); | |||
2157 | ||||
2158 | SDLoc DL(Op); | |||
2159 | SDValue StoredVal = Store->getValue(); | |||
2160 | MVT VT = StoredVal.getSimpleValueType(); | |||
2161 | unsigned EltSizeBits = VT.getScalarSizeInBits(); | |||
2162 | assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) && | |||
2163 | "Unexpected unaligned RVV store type"); | |||
2164 | MVT NewVT = | |||
2165 | MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8)); | |||
2166 | assert(NewVT.isValid() && | |||
2167 | "Expecting equally-sized RVV vector types to be legal"); | |||
2168 | StoredVal = DAG.getBitcast(NewVT, StoredVal); | |||
2169 | return DAG.getStore(Store->getChain(), DL, StoredVal, Store->getBasePtr(), | |||
2170 | Store->getPointerInfo(), Store->getOriginalAlign(), | |||
2171 | Store->getMemOperand()->getFlags()); | |||
2172 | } | |||
2173 | ||||
2174 | SDValue RISCVTargetLowering::LowerOperation(SDValue Op, | |||
2175 | SelectionDAG &DAG) const { | |||
2176 | switch (Op.getOpcode()) { | |||
2177 | default: | |||
2178 | report_fatal_error("unimplemented operand"); | |||
2179 | case ISD::GlobalAddress: | |||
2180 | return lowerGlobalAddress(Op, DAG); | |||
2181 | case ISD::BlockAddress: | |||
2182 | return lowerBlockAddress(Op, DAG); | |||
2183 | case ISD::ConstantPool: | |||
2184 | return lowerConstantPool(Op, DAG); | |||
2185 | case ISD::JumpTable: | |||
2186 | return lowerJumpTable(Op, DAG); | |||
2187 | case ISD::GlobalTLSAddress: | |||
2188 | return lowerGlobalTLSAddress(Op, DAG); | |||
2189 | case ISD::SELECT: | |||
2190 | return lowerSELECT(Op, DAG); | |||
2191 | case ISD::BRCOND: | |||
2192 | return lowerBRCOND(Op, DAG); | |||
2193 | case ISD::VASTART: | |||
2194 | return lowerVASTART(Op, DAG); | |||
2195 | case ISD::FRAMEADDR: | |||
2196 | return lowerFRAMEADDR(Op, DAG); | |||
2197 | case ISD::RETURNADDR: | |||
2198 | return lowerRETURNADDR(Op, DAG); | |||
2199 | case ISD::SHL_PARTS: | |||
2200 | return lowerShiftLeftParts(Op, DAG); | |||
2201 | case ISD::SRA_PARTS: | |||
2202 | return lowerShiftRightParts(Op, DAG, true); | |||
2203 | case ISD::SRL_PARTS: | |||
2204 | return lowerShiftRightParts(Op, DAG, false); | |||
2205 | case ISD::BITCAST: { | |||
2206 | SDLoc DL(Op); | |||
2207 | EVT VT = Op.getValueType(); | |||
2208 | SDValue Op0 = Op.getOperand(0); | |||
2209 | EVT Op0VT = Op0.getValueType(); | |||
2210 | MVT XLenVT = Subtarget.getXLenVT(); | |||
2211 | if (VT.isFixedLengthVector()) { | |||
2212 | // We can handle fixed length vector bitcasts with a simple replacement | |||
2213 | // in isel. | |||
2214 | if (Op0VT.isFixedLengthVector()) | |||
2215 | return Op; | |||
2216 | // When bitcasting from scalar to fixed-length vector, insert the scalar | |||
2217 | // into a one-element vector of the result type, and perform a vector | |||
2218 | // bitcast. | |||
2219 | if (!Op0VT.isVector()) { | |||
2220 | auto BVT = EVT::getVectorVT(*DAG.getContext(), Op0VT, 1); | |||
2221 | return DAG.getBitcast(VT, DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, BVT, | |||
2222 | DAG.getUNDEF(BVT), Op0, | |||
2223 | DAG.getConstant(0, DL, XLenVT))); | |||
2224 | } | |||
2225 | return SDValue(); | |||
2226 | } | |||
2227 | // Custom-legalize bitcasts from fixed-length vector types to scalar types | |||
2228 | // thus: bitcast the vector to a one-element vector type whose element type | |||
2229 | // is the same as the result type, and extract the first element. | |||
2230 | if (!VT.isVector() && Op0VT.isFixedLengthVector()) { | |||
2231 | LLVMContext &Context = *DAG.getContext(); | |||
2232 | SDValue BVec = DAG.getBitcast(EVT::getVectorVT(Context, VT, 1), Op0); | |||
2233 | return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec, | |||
2234 | DAG.getConstant(0, DL, XLenVT)); | |||
2235 | } | |||
2236 | if (VT == MVT::f16 && Op0VT == MVT::i16 && Subtarget.hasStdExtZfh()) { | |||
2237 | SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Op0); | |||
2238 | SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0); | |||
2239 | return FPConv; | |||
2240 | } | |||
2241 | if (VT == MVT::f32 && Op0VT == MVT::i32 && Subtarget.is64Bit() && | |||
2242 | Subtarget.hasStdExtF()) { | |||
2243 | SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0); | |||
2244 | SDValue FPConv = | |||
2245 | DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0); | |||
2246 | return FPConv; | |||
2247 | } | |||
2248 | return SDValue(); | |||
2249 | } | |||
2250 | case ISD::INTRINSIC_WO_CHAIN: | |||
2251 | return LowerINTRINSIC_WO_CHAIN(Op, DAG); | |||
2252 | case ISD::INTRINSIC_W_CHAIN: | |||
2253 | return LowerINTRINSIC_W_CHAIN(Op, DAG); | |||
2254 | case ISD::BSWAP: | |||
2255 | case ISD::BITREVERSE: { | |||
2256 | // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combining. | |||
2257 | assert(Subtarget.hasStdExtZbp() && "Unexpected custom legalisation"); | |||
2258 | MVT VT = Op.getSimpleValueType(); | |||
2259 | SDLoc DL(Op); | |||
2260 | // Start with the maximum immediate value which is the bitwidth - 1. | |||
2261 | unsigned Imm = VT.getSizeInBits() - 1; | |||
2262 | // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits. | |||
2263 | if (Op.getOpcode() == ISD::BSWAP) | |||
2264 | Imm &= ~0x7U; | |||
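// For a 32-bit type, for example, BITREVERSE uses GREV immediate 31 (a full
// bit reversal) while BSWAP uses 31 & ~7 = 24 (a byte swap): the low three
// immediate bits control reversal within each byte, which BSWAP must not do.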
2265 | return DAG.getNode(RISCVISD::GREV, DL, VT, Op.getOperand(0), | |||
2266 | DAG.getConstant(Imm, DL, VT)); | |||
2267 | } | |||
2268 | case ISD::FSHL: | |||
2269 | case ISD::FSHR: { | |||
2270 | MVT VT = Op.getSimpleValueType(); | |||
2271 | assert(VT == Subtarget.getXLenVT() && "Unexpected custom legalization"); | |||
2272 | SDLoc DL(Op); | |||
2273 | if (Op.getOperand(2).getOpcode() == ISD::Constant) | |||
2274 | return Op; | |||
2275 | // FSL/FSR take a log2(XLen)+1 bit shift amount but XLenVT FSHL/FSHR only | |||
2276 | // use log2(XLen) bits. Mask the shift amount accordingly. | |||
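// For example, on RV64 the AND below masks the amount with 63, so bit 6 (the
// extra bit FSL/FSR consume) is guaranteed to be zero and the funnel shift
// matches FSHL/FSHR's modulo-XLen semantics.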
2277 | unsigned ShAmtWidth = Subtarget.getXLen() - 1; | |||
2278 | SDValue ShAmt = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(2), | |||
2279 | DAG.getConstant(ShAmtWidth, DL, VT)); | |||
2280 | unsigned Opc = Op.getOpcode() == ISD::FSHL ? RISCVISD::FSL : RISCVISD::FSR; | |||
2281 | return DAG.getNode(Opc, DL, VT, Op.getOperand(0), Op.getOperand(1), ShAmt); | |||
2282 | } | |||
2283 | case ISD::TRUNCATE: { | |||
2284 | SDLoc DL(Op); | |||
2285 | MVT VT = Op.getSimpleValueType(); | |||
2286 | // Only custom-lower vector truncates | |||
2287 | if (!VT.isVector()) | |||
2288 | return Op; | |||
2289 | ||||
2290 | // Truncates to mask types are handled differently | |||
2291 | if (VT.getVectorElementType() == MVT::i1) | |||
2292 | return lowerVectorMaskTrunc(Op, DAG); | |||
2293 | ||||
2294 | // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary | |||
2295 | // truncates as a series of "RISCVISD::TRUNCATE_VECTOR_VL" nodes which | |||
2296 | // truncate by one power of two at a time. | |||
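// For example, truncating v4i64 to v4i8 emits three TRUNCATE_VECTOR_VL nodes:
// i64->i32, then i32->i16, then i16->i8.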
2297 | MVT DstEltVT = VT.getVectorElementType(); | |||
2298 | ||||
2299 | SDValue Src = Op.getOperand(0); | |||
2300 | MVT SrcVT = Src.getSimpleValueType(); | |||
2301 | MVT SrcEltVT = SrcVT.getVectorElementType(); | |||
2302 | ||||
2303 | assert(DstEltVT.bitsLT(SrcEltVT) && | |||
2304 | isPowerOf2_64(DstEltVT.getSizeInBits()) && | |||
2305 | isPowerOf2_64(SrcEltVT.getSizeInBits()) && | |||
2306 | "Unexpected vector truncate lowering"); | |||
2307 | ||||
2308 | MVT ContainerVT = SrcVT; | |||
2309 | if (SrcVT.isFixedLengthVector()) { | |||
2310 | ContainerVT = getContainerForFixedLengthVector(SrcVT); | |||
2311 | Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget); | |||
2312 | } | |||
2313 | ||||
2314 | SDValue Result = Src; | |||
2315 | SDValue Mask, VL; | |||
2316 | std::tie(Mask, VL) = | |||
2317 | getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget); | |||
2318 | LLVMContext &Context = *DAG.getContext(); | |||
2319 | const ElementCount Count = ContainerVT.getVectorElementCount(); | |||
2320 | do { | |||
2321 | SrcEltVT = MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2); | |||
2322 | EVT ResultVT = EVT::getVectorVT(Context, SrcEltVT, Count); | |||
2323 | Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, ResultVT, Result, | |||
2324 | Mask, VL); | |||
2325 | } while (SrcEltVT != DstEltVT); | |||
2326 | ||||
2327 | if (SrcVT.isFixedLengthVector()) | |||
2328 | Result = convertFromScalableVector(VT, Result, DAG, Subtarget); | |||
2329 | ||||
2330 | return Result; | |||
2331 | } | |||
2332 | case ISD::ANY_EXTEND: | |||
2333 | case ISD::ZERO_EXTEND: | |||
2334 | if (Op.getOperand(0).getValueType().isVector() && | |||
2335 | Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1) | |||
2336 | return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ 1); | |||
2337 | return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VZEXT_VL); | |||
2338 | case ISD::SIGN_EXTEND: | |||
2339 | if (Op.getOperand(0).getValueType().isVector() && | |||
2340 | Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1) | |||
2341 | return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ -1); | |||
2342 | return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VSEXT_VL); | |||
2343 | case ISD::SPLAT_VECTOR_PARTS: | |||
2344 | return lowerSPLAT_VECTOR_PARTS(Op, DAG); | |||
2345 | case ISD::INSERT_VECTOR_ELT: | |||
2346 | return lowerINSERT_VECTOR_ELT(Op, DAG); | |||
2347 | case ISD::EXTRACT_VECTOR_ELT: | |||
2348 | return lowerEXTRACT_VECTOR_ELT(Op, DAG); | |||
2349 | case ISD::VSCALE: { | |||
2350 | MVT VT = Op.getSimpleValueType(); | |||
2351 | SDLoc DL(Op); | |||
2352 | SDValue VLENB = DAG.getNode(RISCVISD::READ_VLENB, DL, VT); | |||
2353 | // We define our scalable vector types for LMUL=1 to use a 64-bit known | |||
2354 | // minimum size, e.g. <vscale x 2 x i32>. VLENB is in bytes, so we calculate | |||
2355 | // vscale as VLENB / 8. | |||
2356 | assert(RISCV::RVVBitsPerBlock == 64 && "Unexpected bits per block!"); | |||
2357 | if (isa<ConstantSDNode>(Op.getOperand(0))) { | |||
2358 | // We assume VLENB is a multiple of 8. We manually choose the best shift | |||
2359 | // here because SimplifyDemandedBits isn't always able to simplify it. | |||
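// For example, vscale*4 becomes (srl VLENB, 1) since Log2(4) = 2 < 3,
// vscale*16 becomes (shl VLENB, 1) since Log2(16) = 4 > 3, and vscale*8 is
// VLENB itself.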
2360 | uint64_t Val = Op.getConstantOperandVal(0); | |||
2361 | if (isPowerOf2_64(Val)) { | |||
2362 | uint64_t Log2 = Log2_64(Val); | |||
2363 | if (Log2 < 3) | |||
2364 | return DAG.getNode(ISD::SRL, DL, VT, VLENB, | |||
2365 | DAG.getConstant(3 - Log2, DL, VT)); | |||
2366 | if (Log2 > 3) | |||
2367 | return DAG.getNode(ISD::SHL, DL, VT, VLENB, | |||
2368 | DAG.getConstant(Log2 - 3, DL, VT)); | |||
2369 | return VLENB; | |||
2370 | } | |||
2371 | // If the multiplier is a multiple of 8, scale it down to avoid needing | |||
2372 | // to shift the VLENB value. | |||
2373 | if ((Val % 8) == 0) | |||
2374 | return DAG.getNode(ISD::MUL, DL, VT, VLENB, | |||
2375 | DAG.getConstant(Val / 8, DL, VT)); | |||
2376 | } | |||
2377 | ||||
2378 | SDValue VScale = DAG.getNode(ISD::SRL, DL, VT, VLENB, | |||
2379 | DAG.getConstant(3, DL, VT)); | |||
2380 | return DAG.getNode(ISD::MUL, DL, VT, VScale, Op.getOperand(0)); | |||
2381 | } | |||
2382 | case ISD::FP_EXTEND: { | |||
2383 | // RVV can only do fp_extend to types double the size of the source. We | |||
2384 | // custom-lower f16->f64 extensions to two hops of ISD::FP_EXTEND, going | |||
2385 | // via f32. | |||
2386 | SDLoc DL(Op); | |||
2387 | MVT VT = Op.getSimpleValueType(); | |||
2388 | SDValue Src = Op.getOperand(0); | |||
2389 | MVT SrcVT = Src.getSimpleValueType(); | |||
2390 | ||||
2391 | // Prepare any fixed-length vector operands. | |||
2392 | MVT ContainerVT = VT; | |||
2393 | if (SrcVT.isFixedLengthVector()) { | |||
2394 | ContainerVT = getContainerForFixedLengthVector(VT); | |||
2395 | MVT SrcContainerVT = | |||
2396 | ContainerVT.changeVectorElementType(SrcVT.getVectorElementType()); | |||
2397 | Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget); | |||
2398 | } | |||
2399 | ||||
2400 | if (!VT.isVector() || VT.getVectorElementType() != MVT::f64 || | |||
2401 | SrcVT.getVectorElementType() != MVT::f16) { | |||
2402 | // For scalable vectors, we only need to close the gap between | |||
2403 | // vXf16->vXf64. | |||
2404 | if (!VT.isFixedLengthVector()) | |||
2405 | return Op; | |||
2406 | // For fixed-length vectors, lower the FP_EXTEND to a custom "VL" version. | |||
2407 | Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget); | |||
2408 | return convertFromScalableVector(VT, Src, DAG, Subtarget); | |||
2409 | } | |||
2410 | ||||
2411 | MVT InterVT = VT.changeVectorElementType(MVT::f32); | |||
2412 | MVT InterContainerVT = ContainerVT.changeVectorElementType(MVT::f32); | |||
2413 | SDValue IntermediateExtend = getRVVFPExtendOrRound( | |||
2414 | Src, InterVT, InterContainerVT, DL, DAG, Subtarget); | |||
2415 | ||||
2416 | SDValue Extend = getRVVFPExtendOrRound(IntermediateExtend, VT, ContainerVT, | |||
2417 | DL, DAG, Subtarget); | |||
2418 | if (VT.isFixedLengthVector()) | |||
2419 | return convertFromScalableVector(VT, Extend, DAG, Subtarget); | |||
2420 | return Extend; | |||
2421 | } | |||
2422 | case ISD::FP_ROUND: { | |||
2423 | // RVV can only do fp_round to types half the size of the source. We | |||
2424 | // custom-lower f64->f16 rounds via RVV's round-to-odd float | |||
2425 | // conversion instruction. | |||
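// Using round-to-odd for the first narrowing step (f64->f32) preserves enough
// sticky information that the second step (f32->f16) cannot introduce a
// double-rounding error.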
2426 | SDLoc DL(Op); | |||
2427 | MVT VT = Op.getSimpleValueType(); | |||
2428 | SDValue Src = Op.getOperand(0); | |||
2429 | MVT SrcVT = Src.getSimpleValueType(); | |||
2430 | ||||
2431 | // Prepare any fixed-length vector operands. | |||
2432 | MVT ContainerVT = VT; | |||
2433 | if (VT.isFixedLengthVector()) { | |||
2434 | MVT SrcContainerVT = getContainerForFixedLengthVector(SrcVT); | |||
2435 | ContainerVT = | |||
2436 | SrcContainerVT.changeVectorElementType(VT.getVectorElementType()); | |||
2437 | Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget); | |||
2438 | } | |||
2439 | ||||
2440 | if (!VT.isVector() || VT.getVectorElementType() != MVT::f16 || | |||
2441 | SrcVT.getVectorElementType() != MVT::f64) { | |||
2442 | // For scalable vectors, we only need to close the gap between | |||
2443 | // vXf64->vXf16. | |||
2444 | if (!VT.isFixedLengthVector()) | |||
2445 | return Op; | |||
2446 | // For fixed-length vectors, lower the FP_ROUND to a custom "VL" version. | |||
2447 | Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget); | |||
2448 | return convertFromScalableVector(VT, Src, DAG, Subtarget); | |||
2449 | } | |||
2450 | ||||
2451 | SDValue Mask, VL; | |||
2452 | std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); | |||
2453 | ||||
2454 | MVT InterVT = ContainerVT.changeVectorElementType(MVT::f32); | |||
2455 | SDValue IntermediateRound = | |||
2456 | DAG.getNode(RISCVISD::VFNCVT_ROD_VL, DL, InterVT, Src, Mask, VL); | |||
2457 | SDValue Round = getRVVFPExtendOrRound(IntermediateRound, VT, ContainerVT, | |||
2458 | DL, DAG, Subtarget); | |||
2459 | ||||
2460 | if (VT.isFixedLengthVector()) | |||
2461 | return convertFromScalableVector(VT, Round, DAG, Subtarget); | |||
2462 | return Round; | |||
2463 | } | |||
2464 | case ISD::FP_TO_SINT: | |||
2465 | case ISD::FP_TO_UINT: | |||
2466 | case ISD::SINT_TO_FP: | |||
2467 | case ISD::UINT_TO_FP: { | |||
2468 | // RVV can only do fp<->int conversions to types half/double the size of | |||
2469 | // the source. We custom-lower any conversions that need two hops into | |||
2470 | // sequences. | |||
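// For example, v2f16 -> v2i64 is a 4x widening: we first fp_extend to v2f32,
// then let the remaining 2x-widening f32 -> i64 conversion proceed as normal.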
2471 | MVT VT = Op.getSimpleValueType(); | |||
2472 | if (!VT.isVector()) | |||
2473 | return Op; | |||
2474 | SDLoc DL(Op); | |||
2475 | SDValue Src = Op.getOperand(0); | |||
2476 | MVT EltVT = VT.getVectorElementType(); | |||
2477 | MVT SrcVT = Src.getSimpleValueType(); | |||
2478 | MVT SrcEltVT = SrcVT.getVectorElementType(); | |||
2479 | unsigned EltSize = EltVT.getSizeInBits(); | |||
2480 | unsigned SrcEltSize = SrcEltVT.getSizeInBits(); | |||
2481 | assert(isPowerOf2_32(EltSize) && isPowerOf2_32(SrcEltSize) && | |||
2482 | "Unexpected vector element types"); | |||
2483 | ||||
2484 | bool IsInt2FP = SrcEltVT.isInteger(); | |||
2485 | // Widening conversions | |||
2486 | if (EltSize > SrcEltSize && (EltSize / SrcEltSize >= 4)) { | |||
2487 | if (IsInt2FP) { | |||
2488 | // Do a regular integer sign/zero extension then convert to float. | |||
2489 | MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(EltVT.getSizeInBits()), | |||
2490 | VT.getVectorElementCount()); | |||
2491 | unsigned ExtOpcode = Op.getOpcode() == ISD::UINT_TO_FP | |||
2492 | ? ISD::ZERO_EXTEND | |||
2493 | : ISD::SIGN_EXTEND; | |||
2494 | SDValue Ext = DAG.getNode(ExtOpcode, DL, IVecVT, Src); | |||
2495 | return DAG.getNode(Op.getOpcode(), DL, VT, Ext); | |||
2496 | } | |||
2497 | // FP2Int | |||
2498 | assert(SrcEltVT == MVT::f16 && "Unexpected FP_TO_[US]INT lowering"); | |||
2499 | // Do one doubling fp_extend then complete the operation by converting | |||
2500 | // to int. | |||
2501 | MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount()); | |||
2502 | SDValue FExt = DAG.getFPExtendOrRound(Src, DL, InterimFVT); | |||
2503 | return DAG.getNode(Op.getOpcode(), DL, VT, FExt); | |||
2504 | } | |||
2505 | ||||
2506 | // Narrowing conversions | |||
2507 | if (SrcEltSize > EltSize && (SrcEltSize / EltSize >= 4)) { | |||
2508 | if (IsInt2FP) { | |||
2509 | // One narrowing int_to_fp, then an fp_round. | |||
2510 | assert(EltVT == MVT::f16 && "Unexpected [US]_TO_FP lowering"); | |||
2511 | MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount()); | |||
2512 | SDValue Int2FP = DAG.getNode(Op.getOpcode(), DL, InterimFVT, Src); | |||
2513 | return DAG.getFPExtendOrRound(Int2FP, DL, VT); | |||
2514 | } | |||
2515 | // FP2Int | |||
2516 | // One narrowing fp_to_int, then truncate the integer. If the float isn't | |||
2517 | // representable by the integer, the result is poison. | |||
2518 | MVT IVecVT = | |||
2519 | MVT::getVectorVT(MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2), | |||
2520 | VT.getVectorElementCount()); | |||
2521 | SDValue FP2Int = DAG.getNode(Op.getOpcode(), DL, IVecVT, Src); | |||
2522 | return DAG.getNode(ISD::TRUNCATE, DL, VT, FP2Int); | |||
2523 | } | |||
2524 | ||||
2525 | // Scalable vectors can exit here. Patterns will handle equally-sized | |||
2526 | // conversions as well as halving/doubling ones. | |||
2527 | if (!VT.isFixedLengthVector()) | |||
2528 | return Op; | |||
2529 | ||||
2530 | // For fixed-length vectors we lower to a custom "VL" node. | |||
2531 | unsigned RVVOpc = 0; | |||
2532 | switch (Op.getOpcode()) { | |||
2533 | default: | |||
2534 | llvm_unreachable("Impossible opcode")::llvm::llvm_unreachable_internal("Impossible opcode", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/RISCV/RISCVISelLowering.cpp" , 2534); | |||
2535 | case ISD::FP_TO_SINT: | |||
2536 | RVVOpc = RISCVISD::FP_TO_SINT_VL; | |||
2537 | break; | |||
2538 | case ISD::FP_TO_UINT: | |||
2539 | RVVOpc = RISCVISD::FP_TO_UINT_VL; | |||
2540 | break; | |||
2541 | case ISD::SINT_TO_FP: | |||
2542 | RVVOpc = RISCVISD::SINT_TO_FP_VL; | |||
2543 | break; | |||
2544 | case ISD::UINT_TO_FP: | |||
2545 | RVVOpc = RISCVISD::UINT_TO_FP_VL; | |||
2546 | break; | |||
2547 | } | |||
2548 | ||||
2549 | MVT ContainerVT, SrcContainerVT; | |||
2550 | // Derive the reference container type from the larger vector type. | |||
2551 | if (SrcEltSize > EltSize) { | |||
2552 | SrcContainerVT = getContainerForFixedLengthVector(SrcVT); | |||
2553 | ContainerVT = | |||
2554 | SrcContainerVT.changeVectorElementType(VT.getVectorElementType()); | |||
2555 | } else { | |||
2556 | ContainerVT = getContainerForFixedLengthVector(VT); | |||
2557 | SrcContainerVT = ContainerVT.changeVectorElementType(SrcEltVT); | |||
2558 | } | |||
2559 | ||||
2560 | SDValue Mask, VL; | |||
2561 | std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); | |||
2562 | ||||
2563 | Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget); | |||
2564 | Src = DAG.getNode(RVVOpc, DL, ContainerVT, Src, Mask, VL); | |||
2565 | return convertFromScalableVector(VT, Src, DAG, Subtarget); | |||
2566 | } | |||
2567 | case ISD::FP_TO_SINT_SAT: | |||
2568 | case ISD::FP_TO_UINT_SAT: | |||
2569 | return lowerFP_TO_INT_SAT(Op, DAG); | |||
2570 | case ISD::VECREDUCE_ADD: | |||
2571 | case ISD::VECREDUCE_UMAX: | |||
2572 | case ISD::VECREDUCE_SMAX: | |||
2573 | case ISD::VECREDUCE_UMIN: | |||
2574 | case ISD::VECREDUCE_SMIN: | |||
2575 | return lowerVECREDUCE(Op, DAG); | |||
2576 | case ISD::VECREDUCE_AND: | |||
2577 | case ISD::VECREDUCE_OR: | |||
2578 | case ISD::VECREDUCE_XOR: | |||
2579 | if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1) | |||
2580 | return lowerVectorMaskVECREDUCE(Op, DAG); | |||
2581 | return lowerVECREDUCE(Op, DAG); | |||
2582 | case ISD::VECREDUCE_FADD: | |||
2583 | case ISD::VECREDUCE_SEQ_FADD: | |||
2584 | case ISD::VECREDUCE_FMIN: | |||
2585 | case ISD::VECREDUCE_FMAX: | |||
2586 | return lowerFPVECREDUCE(Op, DAG); | |||
2587 | case ISD::INSERT_SUBVECTOR: | |||
2588 | return lowerINSERT_SUBVECTOR(Op, DAG); | |||
2589 | case ISD::EXTRACT_SUBVECTOR: | |||
2590 | return lowerEXTRACT_SUBVECTOR(Op, DAG); | |||
2591 | case ISD::STEP_VECTOR: | |||
2592 | return lowerSTEP_VECTOR(Op, DAG); | |||
2593 | case ISD::VECTOR_REVERSE: | |||
2594 | return lowerVECTOR_REVERSE(Op, DAG); | |||
2595 | case ISD::BUILD_VECTOR: | |||
2596 | return lowerBUILD_VECTOR(Op, DAG, Subtarget); | |||
2597 | case ISD::SPLAT_VECTOR: | |||
2598 | if (Op.getValueType().getVectorElementType() == MVT::i1) | |||
2599 | return lowerVectorMaskSplat(Op, DAG); | |||
2600 | return lowerSPLAT_VECTOR(Op, DAG, Subtarget); | |||
2601 | case ISD::VECTOR_SHUFFLE: | |||
2602 | return lowerVECTOR_SHUFFLE(Op, DAG, Subtarget); | |||
2603 | case ISD::CONCAT_VECTORS: { | |||
2604 | // Split CONCAT_VECTORS into a series of INSERT_SUBVECTOR nodes. This is | |||
2605 | // better than going through the stack, as the default expansion does. | |||
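// For example, (concat_vectors v4i32:A, v4i32:B) becomes an INSERT_SUBVECTOR
// of A at index 0 and of B at index 4 into a v8i32 undef.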
2606 | SDLoc DL(Op); | |||
2607 | MVT VT = Op.getSimpleValueType(); | |||
2608 | unsigned NumOpElts = | |||
2609 | Op.getOperand(0).getSimpleValueType().getVectorMinNumElements(); | |||
2610 | SDValue Vec = DAG.getUNDEF(VT); | |||
2611 | for (const auto &OpIdx : enumerate(Op->ops())) | |||
2612 | Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, OpIdx.value(), | |||
2613 | DAG.getIntPtrConstant(OpIdx.index() * NumOpElts, DL)); | |||
2614 | return Vec; | |||
2615 | } | |||
2616 | case ISD::LOAD: | |||
2617 | if (auto V = expandUnalignedRVVLoad(Op, DAG)) | |||
2618 | return V; | |||
2619 | if (Op.getValueType().isFixedLengthVector()) | |||
2620 | return lowerFixedLengthVectorLoadToRVV(Op, DAG); | |||
2621 | return Op; | |||
2622 | case ISD::STORE: | |||
2623 | if (auto V = expandUnalignedRVVStore(Op, DAG)) | |||
2624 | return V; | |||
2625 | if (Op.getOperand(1).getValueType().isFixedLengthVector()) | |||
2626 | return lowerFixedLengthVectorStoreToRVV(Op, DAG); | |||
2627 | return Op; | |||
2628 | case ISD::MLOAD: | |||
2629 | return lowerMLOAD(Op, DAG); | |||
2630 | case ISD::MSTORE: | |||
2631 | return lowerMSTORE(Op, DAG); | |||
2632 | case ISD::SETCC: | |||
2633 | return lowerFixedLengthVectorSetccToRVV(Op, DAG); | |||
2634 | case ISD::ADD: | |||
2635 | return lowerToScalableOp(Op, DAG, RISCVISD::ADD_VL); | |||
2636 | case ISD::SUB: | |||
2637 | return lowerToScalableOp(Op, DAG, RISCVISD::SUB_VL); | |||
2638 | case ISD::MUL: | |||
2639 | return lowerToScalableOp(Op, DAG, RISCVISD::MUL_VL); | |||
2640 | case ISD::MULHS: | |||
2641 | return lowerToScalableOp(Op, DAG, RISCVISD::MULHS_VL); | |||
2642 | case ISD::MULHU: | |||
2643 | return lowerToScalableOp(Op, DAG, RISCVISD::MULHU_VL); | |||
2644 | case ISD::AND: | |||
2645 | return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMAND_VL, | |||
2646 | RISCVISD::AND_VL); | |||
2647 | case ISD::OR: | |||
2648 | return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMOR_VL, | |||
2649 | RISCVISD::OR_VL); | |||
2650 | case ISD::XOR: | |||
2651 | return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMXOR_VL, | |||
2652 | RISCVISD::XOR_VL); | |||
2653 | case ISD::SDIV: | |||
2654 | return lowerToScalableOp(Op, DAG, RISCVISD::SDIV_VL); | |||
2655 | case ISD::SREM: | |||
2656 | return lowerToScalableOp(Op, DAG, RISCVISD::SREM_VL); | |||
2657 | case ISD::UDIV: | |||
2658 | return lowerToScalableOp(Op, DAG, RISCVISD::UDIV_VL); | |||
2659 | case ISD::UREM: | |||
2660 | return lowerToScalableOp(Op, DAG, RISCVISD::UREM_VL); | |||
2661 | case ISD::SHL: | |||
2662 | case ISD::SRA: | |||
2663 | case ISD::SRL: | |||
2664 | if (Op.getSimpleValueType().isFixedLengthVector()) | |||
2665 | return lowerFixedLengthVectorShiftToRVV(Op, DAG); | |||
2666 | // This can be called for an i32 shift amount that needs to be promoted. | |||
2667 | assert(Op.getOperand(1).getValueType() == MVT::i32 && Subtarget.is64Bit() && | |||
2668 | "Unexpected custom legalisation"); | |||
2669 | return SDValue(); | |||
2670 | case ISD::SADDSAT: | |||
2671 | return lowerToScalableOp(Op, DAG, RISCVISD::SADDSAT_VL); | |||
2672 | case ISD::UADDSAT: | |||
2673 | return lowerToScalableOp(Op, DAG, RISCVISD::UADDSAT_VL); | |||
2674 | case ISD::SSUBSAT: | |||
2675 | return lowerToScalableOp(Op, DAG, RISCVISD::SSUBSAT_VL); | |||
2676 | case ISD::USUBSAT: | |||
2677 | return lowerToScalableOp(Op, DAG, RISCVISD::USUBSAT_VL); | |||
2678 | case ISD::FADD: | |||
2679 | return lowerToScalableOp(Op, DAG, RISCVISD::FADD_VL); | |||
2680 | case ISD::FSUB: | |||
2681 | return lowerToScalableOp(Op, DAG, RISCVISD::FSUB_VL); | |||
2682 | case ISD::FMUL: | |||
2683 | return lowerToScalableOp(Op, DAG, RISCVISD::FMUL_VL); | |||
2684 | case ISD::FDIV: | |||
2685 | return lowerToScalableOp(Op, DAG, RISCVISD::FDIV_VL); | |||
2686 | case ISD::FNEG: | |||
2687 | return lowerToScalableOp(Op, DAG, RISCVISD::FNEG_VL); | |||
2688 | case ISD::FABS: | |||
2689 | return lowerToScalableOp(Op, DAG, RISCVISD::FABS_VL); | |||
2690 | case ISD::FSQRT: | |||
2691 | return lowerToScalableOp(Op, DAG, RISCVISD::FSQRT_VL); | |||
2692 | case ISD::FMA: | |||
2693 | return lowerToScalableOp(Op, DAG, RISCVISD::FMA_VL); | |||
2694 | case ISD::SMIN: | |||
2695 | return lowerToScalableOp(Op, DAG, RISCVISD::SMIN_VL); | |||
2696 | case ISD::SMAX: | |||
2697 | return lowerToScalableOp(Op, DAG, RISCVISD::SMAX_VL); | |||
2698 | case ISD::UMIN: | |||
2699 | return lowerToScalableOp(Op, DAG, RISCVISD::UMIN_VL); | |||
2700 | case ISD::UMAX: | |||
2701 | return lowerToScalableOp(Op, DAG, RISCVISD::UMAX_VL); | |||
2702 | case ISD::FMINNUM: | |||
2703 | return lowerToScalableOp(Op, DAG, RISCVISD::FMINNUM_VL); | |||
2704 | case ISD::FMAXNUM: | |||
2705 | return lowerToScalableOp(Op, DAG, RISCVISD::FMAXNUM_VL); | |||
2706 | case ISD::ABS: | |||
2707 | return lowerABS(Op, DAG); | |||
2708 | case ISD::VSELECT: | |||
2709 | return lowerFixedLengthVectorSelectToRVV(Op, DAG); | |||
2710 | case ISD::FCOPYSIGN: | |||
2711 | return lowerFixedLengthVectorFCOPYSIGNToRVV(Op, DAG); | |||
2712 | case ISD::MGATHER: | |||
2713 | return lowerMGATHER(Op, DAG); | |||
2714 | case ISD::MSCATTER: | |||
2715 | return lowerMSCATTER(Op, DAG); | |||
2716 | case ISD::FLT_ROUNDS_: | |||
2717 | return lowerGET_ROUNDING(Op, DAG); | |||
2718 | case ISD::SET_ROUNDING: | |||
2719 | return lowerSET_ROUNDING(Op, DAG); | |||
2720 | case ISD::VP_ADD: | |||
2721 | return lowerVPOp(Op, DAG, RISCVISD::ADD_VL); | |||
2722 | case ISD::VP_SUB: | |||
2723 | return lowerVPOp(Op, DAG, RISCVISD::SUB_VL); | |||
2724 | case ISD::VP_MUL: | |||
2725 | return lowerVPOp(Op, DAG, RISCVISD::MUL_VL); | |||
2726 | case ISD::VP_SDIV: | |||
2727 | return lowerVPOp(Op, DAG, RISCVISD::SDIV_VL); | |||
2728 | case ISD::VP_UDIV: | |||
2729 | return lowerVPOp(Op, DAG, RISCVISD::UDIV_VL); | |||
2730 | case ISD::VP_SREM: | |||
2731 | return lowerVPOp(Op, DAG, RISCVISD::SREM_VL); | |||
2732 | case ISD::VP_UREM: | |||
2733 | return lowerVPOp(Op, DAG, RISCVISD::UREM_VL); | |||
2734 | case ISD::VP_AND: | |||
2735 | return lowerVPOp(Op, DAG, RISCVISD::AND_VL); | |||
2736 | case ISD::VP_OR: | |||
2737 | return lowerVPOp(Op, DAG, RISCVISD::OR_VL); | |||
2738 | case ISD::VP_XOR: | |||
2739 | return lowerVPOp(Op, DAG, RISCVISD::XOR_VL); | |||
2740 | case ISD::VP_ASHR: | |||
2741 | return lowerVPOp(Op, DAG, RISCVISD::SRA_VL); | |||
2742 | case ISD::VP_LSHR: | |||
2743 | return lowerVPOp(Op, DAG, RISCVISD::SRL_VL); | |||
2744 | case ISD::VP_SHL: | |||
2745 | return lowerVPOp(Op, DAG, RISCVISD::SHL_VL); | |||
2746 | case ISD::VP_FADD: | |||
2747 | return lowerVPOp(Op, DAG, RISCVISD::FADD_VL); | |||
2748 | case ISD::VP_FSUB: | |||
2749 | return lowerVPOp(Op, DAG, RISCVISD::FSUB_VL); | |||
2750 | case ISD::VP_FMUL: | |||
2751 | return lowerVPOp(Op, DAG, RISCVISD::FMUL_VL); | |||
2752 | case ISD::VP_FDIV: | |||
2753 | return lowerVPOp(Op, DAG, RISCVISD::FDIV_VL); | |||
2754 | } | |||
2755 | } | |||
2756 | ||||
2757 | static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty, | |||
2758 | SelectionDAG &DAG, unsigned Flags) { | |||
2759 | return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags); | |||
2760 | } | |||
2761 | ||||
2762 | static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty, | |||
2763 | SelectionDAG &DAG, unsigned Flags) { | |||
2764 | return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(), | |||
2765 | Flags); | |||
2766 | } | |||
2767 | ||||
2768 | static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty, | |||
2769 | SelectionDAG &DAG, unsigned Flags) { | |||
2770 | return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(), | |||
2771 | N->getOffset(), Flags); | |||
2772 | } | |||
2773 | ||||
2774 | static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty, | |||
2775 | SelectionDAG &DAG, unsigned Flags) { | |||
2776 | return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags); | |||
2777 | } | |||
2778 | ||||
2779 | template <class NodeTy> | |||
2780 | SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG, | |||
2781 | bool IsLocal) const { | |||
2782 | SDLoc DL(N); | |||
2783 | EVT Ty = getPointerTy(DAG.getDataLayout()); | |||
2784 | ||||
2785 | if (isPositionIndependent()) { | |||
2786 | SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0); | |||
2787 | if (IsLocal) | |||
2788 | // Use PC-relative addressing to access the symbol. This generates the | |||
2789 | // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym)) | |||
2790 | // %pcrel_lo(auipc)). | |||
2791 | return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0); | |||
2792 | ||||
2793 | // Use PC-relative addressing to access the GOT for this symbol, then load | |||
2794 | // the address from the GOT. This generates the pattern (PseudoLA sym), | |||
2795 | // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))). | |||
2796 | return SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0); | |||
2797 | } | |||
2798 | ||||
2799 | switch (getTargetMachine().getCodeModel()) { | |||
2800 | default: | |||
2801 | report_fatal_error("Unsupported code model for lowering"); | |||
2802 | case CodeModel::Small: { | |||
2803 | // Generate a sequence for accessing addresses within the first 2 GiB of | |||
2804 | // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)). | |||
2805 | SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI); | |||
2806 | SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO); | |||
2807 | SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0); | |||
2808 | return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0); | |||
2809 | } | |||
2810 | case CodeModel::Medium: { | |||
2811 | // Generate a sequence for accessing addresses within any 2GiB range within | |||
2812 | // the address space. This generates the pattern (PseudoLLA sym), which | |||
2813 | // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)). | |||
2814 | SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0); | |||
2815 | return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0); | |||
2816 | } | |||
2817 | } | |||
2818 | } | |||
2819 | ||||
2820 | SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op, | |||
2821 | SelectionDAG &DAG) const { | |||
2822 | SDLoc DL(Op); | |||
2823 | EVT Ty = Op.getValueType(); | |||
2824 | GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op); | |||
2825 | int64_t Offset = N->getOffset(); | |||
2826 | MVT XLenVT = Subtarget.getXLenVT(); | |||
2827 | ||||
2828 | const GlobalValue *GV = N->getGlobal(); | |||
2829 | bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV); | |||
2830 | SDValue Addr = getAddr(N, DAG, IsLocal); | |||
2831 | ||||
2832 | // In order to maximise the opportunity for common subexpression elimination, | |||
2833 | // emit a separate ADD node for the global address offset instead of folding | |||
2834 | // it in the global address node. Later peephole optimisations may choose to | |||
2835 | // fold it back in when profitable. | |||
2836 | if (Offset != 0) | |||
2837 | return DAG.getNode(ISD::ADD, DL, Ty, Addr, | |||
2838 | DAG.getConstant(Offset, DL, XLenVT)); | |||
2839 | return Addr; | |||
2840 | } | |||
2841 | ||||
2842 | SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op, | |||
2843 | SelectionDAG &DAG) const { | |||
2844 | BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op); | |||
2845 | ||||
2846 | return getAddr(N, DAG); | |||
2847 | } | |||
2848 | ||||
2849 | SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op, | |||
2850 | SelectionDAG &DAG) const { | |||
2851 | ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op); | |||
2852 | ||||
2853 | return getAddr(N, DAG); | |||
2854 | } | |||
2855 | ||||
2856 | SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op, | |||
2857 | SelectionDAG &DAG) const { | |||
2858 | JumpTableSDNode *N = cast<JumpTableSDNode>(Op); | |||
2859 | ||||
2860 | return getAddr(N, DAG); | |||
2861 | } | |||
2862 | ||||
2863 | SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N, | |||
2864 | SelectionDAG &DAG, | |||
2865 | bool UseGOT) const { | |||
2866 | SDLoc DL(N); | |||
2867 | EVT Ty = getPointerTy(DAG.getDataLayout()); | |||
2868 | const GlobalValue *GV = N->getGlobal(); | |||
2869 | MVT XLenVT = Subtarget.getXLenVT(); | |||
2870 | ||||
2871 | if (UseGOT) { | |||
2872 | // Use PC-relative addressing to access the GOT for this TLS symbol, then | |||
2873 | // load the address from the GOT and add the thread pointer. This generates | |||
2874 | // the pattern (PseudoLA_TLS_IE sym), which expands to | |||
2875 | // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)). | |||
2876 | SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0); | |||
2877 | SDValue Load = | |||
2878 | SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0); | |||
2879 | ||||
2880 | // Add the thread pointer. | |||
2881 | SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT); | |||
2882 | return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg); | |||
2883 | } | |||
2884 | ||||
2885 | // Generate a sequence for accessing the address relative to the thread | |||
2886 | // pointer, with the appropriate adjustment for the thread pointer offset. | |||
2887 | // This generates the pattern | |||
2888 | // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym)) | |||
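// In assembly this is the usual local-exec sequence:
//   lui  rd, %tprel_hi(sym)
//   add  rd, rd, tp, %tprel_add(sym)
//   addi rd, rd, %tprel_lo(sym)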
2889 | SDValue AddrHi = | |||
2890 | DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI); | |||
2891 | SDValue AddrAdd = | |||
2892 | DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD); | |||
2893 | SDValue AddrLo = | |||
2894 | DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO); | |||
2895 | ||||
2896 | SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0); | |||
2897 | SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT); | |||
2898 | SDValue MNAdd = SDValue( | |||
2899 | DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd), | |||
2900 | 0); | |||
2901 | return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0); | |||
2902 | } | |||
2903 | ||||
2904 | SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N, | |||
2905 | SelectionDAG &DAG) const { | |||
2906 | SDLoc DL(N); | |||
2907 | EVT Ty = getPointerTy(DAG.getDataLayout()); | |||
2908 | IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits()); | |||
2909 | const GlobalValue *GV = N->getGlobal(); | |||
2910 | ||||
2911 | // Use a PC-relative addressing mode to access the global dynamic GOT address. | |||
2912 | // This generates the pattern (PseudoLA_TLS_GD sym), which expands to | |||
2913 | // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)). | |||
2914 | SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0); | |||
2915 | SDValue Load = | |||
2916 | SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0); | |||
2917 | ||||
2918 | // Prepare argument list to generate call. | |||
2919 | ArgListTy Args; | |||
2920 | ArgListEntry Entry; | |||
2921 | Entry.Node = Load; | |||
2922 | Entry.Ty = CallTy; | |||
2923 | Args.push_back(Entry); | |||
2924 | ||||
2925 | // Setup call to __tls_get_addr. | |||
2926 | TargetLowering::CallLoweringInfo CLI(DAG); | |||
2927 | CLI.setDebugLoc(DL) | |||
2928 | .setChain(DAG.getEntryNode()) | |||
2929 | .setLibCallee(CallingConv::C, CallTy, | |||
2930 | DAG.getExternalSymbol("__tls_get_addr", Ty), | |||
2931 | std::move(Args)); | |||
2932 | ||||
2933 | return LowerCallTo(CLI).first; | |||
2934 | } | |||
2935 | ||||
2936 | SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op, | |||
2937 | SelectionDAG &DAG) const { | |||
2938 | SDLoc DL(Op); | |||
2939 | EVT Ty = Op.getValueType(); | |||
2940 | GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op); | |||
2941 | int64_t Offset = N->getOffset(); | |||
2942 | MVT XLenVT = Subtarget.getXLenVT(); | |||
2943 | ||||
2944 | TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal()); | |||
2945 | ||||
2946 | if (DAG.getMachineFunction().getFunction().getCallingConv() == | |||
2947 | CallingConv::GHC) | |||
2948 | report_fatal_error("In GHC calling convention TLS is not supported"); | |||
2949 | ||||
2950 | SDValue Addr; | |||
2951 | switch (Model) { | |||
2952 | case TLSModel::LocalExec: | |||
2953 | Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false); | |||
2954 | break; | |||
2955 | case TLSModel::InitialExec: | |||
2956 | Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true); | |||
2957 | break; | |||
2958 | case TLSModel::LocalDynamic: | |||
2959 | case TLSModel::GeneralDynamic: | |||
2960 | Addr = getDynamicTLSAddr(N, DAG); | |||
2961 | break; | |||
2962 | } | |||
2963 | ||||
2964 | // In order to maximise the opportunity for common subexpression elimination, | |||
2965 | // emit a separate ADD node for the global address offset instead of folding | |||
2966 | // it in the global address node. Later peephole optimisations may choose to | |||
2967 | // fold it back in when profitable. | |||
2968 | if (Offset != 0) | |||
2969 | return DAG.getNode(ISD::ADD, DL, Ty, Addr, | |||
2970 | DAG.getConstant(Offset, DL, XLenVT)); | |||
2971 | return Addr; | |||
2972 | } | |||
2973 | ||||
2974 | SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const { | |||
2975 | SDValue CondV = Op.getOperand(0); | |||
2976 | SDValue TrueV = Op.getOperand(1); | |||
2977 | SDValue FalseV = Op.getOperand(2); | |||
2978 | SDLoc DL(Op); | |||
2979 | MVT VT = Op.getSimpleValueType(); | |||
2980 | MVT XLenVT = Subtarget.getXLenVT(); | |||
2981 | ||||
2982 | // Lower vector SELECTs to VSELECTs by splatting the condition. | |||
2983 | if (VT.isVector()) { | |||
2984 | MVT SplatCondVT = VT.changeVectorElementType(MVT::i1); | |||
2985 | SDValue CondSplat = VT.isScalableVector() | |||
2986 | ? DAG.getSplatVector(SplatCondVT, DL, CondV) | |||
2987 | : DAG.getSplatBuildVector(SplatCondVT, DL, CondV); | |||
2988 | return DAG.getNode(ISD::VSELECT, DL, VT, CondSplat, TrueV, FalseV); | |||
2989 | } | |||
2990 | ||||
2991 | // If the result type is XLenVT and CondV is the output of a SETCC node | |||
2992 | // which also operated on XLenVT inputs, then merge the SETCC node into the | |||
2993 | // lowered RISCVISD::SELECT_CC to take advantage of the integer | |||
2994 | // compare+branch instructions. i.e.: | |||
2995 | // (select (setcc lhs, rhs, cc), truev, falsev) | |||
2996 | // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev) | |||
2997 | if (VT == XLenVT && CondV.getOpcode() == ISD::SETCC && | |||
2998 | CondV.getOperand(0).getSimpleValueType() == XLenVT) { | |||
2999 | SDValue LHS = CondV.getOperand(0); | |||
3000 | SDValue RHS = CondV.getOperand(1); | |||
3001 | const auto *CC = cast<CondCodeSDNode>(CondV.getOperand(2)); | |||
3002 | ISD::CondCode CCVal = CC->get(); | |||
3003 | ||||
3004 | // Special case for a select of 2 constants that have a difference of 1. | |||
3005 | // Normally this is done by DAGCombine, but if the select is introduced by | |||
3006 | // type legalization or op legalization, we miss it. Restricting to SETLT | |||
3007 | // case for now because that is what signed saturating add/sub need. | |||
3008 | // FIXME: We don't need the condition to be SETLT or even a SETCC, | |||
3009 | // but we would probably want to swap the true/false values if the condition | |||
3010 | // is SETGE/SETLE to avoid an XORI. | |||
3011 | if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV) && | |||
3012 | CCVal == ISD::SETLT) { | |||
3013 | const APInt &TrueVal = cast<ConstantSDNode>(TrueV)->getAPIntValue(); | |||
3014 | const APInt &FalseVal = cast<ConstantSDNode>(FalseV)->getAPIntValue(); | |||
3015 | if (TrueVal - 1 == FalseVal) | |||
3016 | return DAG.getNode(ISD::ADD, DL, Op.getValueType(), CondV, FalseV); | |||
3017 | if (TrueVal + 1 == FalseVal) | |||
3018 | return DAG.getNode(ISD::SUB, DL, Op.getValueType(), FalseV, CondV); | |||
3019 | } | |||
3020 | ||||
3021 | translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG); | |||
3022 | ||||
3023 | SDValue TargetCC = DAG.getCondCode(CCVal); | |||
3024 | SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV}; | |||
3025 | return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops); | |||
3026 | } | |||
3027 | ||||
3028 | // Otherwise: | |||
3029 | // (select condv, truev, falsev) | |||
3030 | // -> (riscvisd::select_cc condv, zero, setne, truev, falsev) | |||
3031 | SDValue Zero = DAG.getConstant(0, DL, XLenVT); | |||
3032 | SDValue SetNE = DAG.getCondCode(ISD::SETNE); | |||
3033 | ||||
3034 | SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV}; | |||
3035 | ||||
3036 | return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops); | |||
3037 | } | |||
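
// A concrete instance of the constant-difference fold above: since a
// legalized SETLT materialises as 0 or 1 in a scalar register,
//   (select (setlt a, b), 4, 3) --> (add (setlt a, b), 3)
//   (select (setlt a, b), 3, 4) --> (sub 4, (setlt a, b))
// avoiding both a branch and a SELECT_CC pseudo for this pattern.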

SDValue RISCVTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
  SDValue CondV = Op.getOperand(1);
  SDLoc DL(Op);
  MVT XLenVT = Subtarget.getXLenVT();

  if (CondV.getOpcode() == ISD::SETCC &&
      CondV.getOperand(0).getValueType() == XLenVT) {
    SDValue LHS = CondV.getOperand(0);
    SDValue RHS = CondV.getOperand(1);
    ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get();

    translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);

    SDValue TargetCC = DAG.getCondCode(CCVal);
    return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(),
                       Op.getOperand(0), LHS, RHS, TargetCC, Op.getOperand(2));
  }

  return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
                     CondV, DAG.getConstant(0, DL, XLenVT),
                     DAG.getCondCode(ISD::SETNE), Op.getOperand(2));
}

SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();

  SDLoc DL(Op);
  SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
                                 getPointerTy(MF.getDataLayout()));

  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
                      MachinePointerInfo(SV));
}

SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
                                            SelectionDAG &DAG) const {
  const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setFrameAddressIsTaken(true);
  Register FrameReg = RI.getFrameRegister(MF);
  int XLenInBytes = Subtarget.getXLen() / 8;

  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  while (Depth--) {
    int Offset = -(XLenInBytes * 2);
    SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
                              DAG.getIntPtrConstant(Offset, DL));
    FrameAddr =
        DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
  }
  return FrameAddr;
}
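
// The -(XLenInBytes * 2) offset above, and the -XLenInBytes offset in
// lowerRETURNADDR below, encode the frame record this lowering relies on:
// with a frame pointer present, the prologue saves the return address at
// fp - XLenInBytes and the caller's frame pointer at fp - 2*XLenInBytes, so
// each iteration of the Depth loop loads the next outer frame pointer.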

SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
                                             SelectionDAG &DAG) const {
  const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);
  MVT XLenVT = Subtarget.getXLenVT();
  int XLenInBytes = Subtarget.getXLen() / 8;

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  if (Depth) {
    int Off = -XLenInBytes;
    SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
    SDValue Offset = DAG.getConstant(Off, DL, VT);
    return DAG.getLoad(VT, DL, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
                       MachinePointerInfo());
  }

  // Return the value of the return address register, marking it an implicit
  // live-in.
  Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
  return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
}

SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);
  SDValue Shamt = Op.getOperand(2);
  EVT VT = Lo.getValueType();

  // if Shamt-XLEN < 0: // Shamt < XLEN
  //   Lo = Lo << Shamt
  //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 - Shamt))
  // else:
  //   Lo = 0
  //   Hi = Lo << (Shamt-XLEN)

  SDValue Zero = DAG.getConstant(0, DL, VT);
  SDValue One = DAG.getConstant(1, DL, VT);
  SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
  SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
  SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
  SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);

  SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
  SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
  SDValue ShiftRightLo =
      DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
  SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
  SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
  SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);

  SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);

  Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
  Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);

  SDValue Parts[2] = {Lo, Hi};
  return DAG.getMergeValues(Parts, DL);
}
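
// Worked example (XLEN=32): for Shamt=8, Lo' = Lo << 8 and
// Hi' = (Hi << 8) | (Lo >> 24); for Shamt=40, ShamtMinusXLen is 8, so
// Lo' = 0 and Hi' = Lo << 8. The (Lo >>u 1) >>u (XLEN-1 - Shamt) split is
// deliberate: a single Lo >>u (XLEN - Shamt) would shift by XLEN when
// Shamt=0, and since RISC-V shifts wrap the shift amount, that would
// produce Lo rather than the required 0.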

SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
                                                  bool IsSRA) const {
  SDLoc DL(Op);
  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);
  SDValue Shamt = Op.getOperand(2);
  EVT VT = Lo.getValueType();

  // SRA expansion:
  //   if Shamt-XLEN < 0: // Shamt < XLEN
  //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
  //     Hi = Hi >>s Shamt
  //   else:
  //     Lo = Hi >>s (Shamt-XLEN);
  //     Hi = Hi >>s (XLEN-1)
  //
  // SRL expansion:
  //   if Shamt-XLEN < 0: // Shamt < XLEN
  //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
  //     Hi = Hi >>u Shamt
  //   else:
  //     Lo = Hi >>u (Shamt-XLEN);
  //     Hi = 0;

  unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;

  SDValue Zero = DAG.getConstant(0, DL, VT);
  SDValue One = DAG.getConstant(1, DL, VT);
  SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
  SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
  SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
  SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);

  SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
  SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
  SDValue ShiftLeftHi =
      DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
  SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
  SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
  SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
  SDValue HiFalse =
      IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;

  SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);

  Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
  Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);

  SDValue Parts[2] = {Lo, Hi};
  return DAG.getMergeValues(Parts, DL);
}
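
// In the Shamt >= XLEN arm above, the SRA form fills Hi with copies of the
// sign bit via Hi >>s (XLEN-1): e.g. an i64 arithmetic shift right by 40 on
// RV32 yields Lo = Hi >>s 8 and Hi = all zeros or all ones, whereas the SRL
// form simply zeroes Hi.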

// Lower splats of i1 types to SETCC. For each mask vector type, we have a
// legal equivalently-sized i8 type, so we can use that as a go-between.
SDValue RISCVTargetLowering::lowerVectorMaskSplat(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc DL(Op);
  MVT VT = Op.getSimpleValueType();
  SDValue SplatVal = Op.getOperand(0);
  // All-zeros or all-ones splats are handled specially.
  if (ISD::isConstantSplatVectorAllOnes(Op.getNode())) {
    SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
    return DAG.getNode(RISCVISD::VMSET_VL, DL, VT, VL);
  }
  if (ISD::isConstantSplatVectorAllZeros(Op.getNode())) {
    SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
    return DAG.getNode(RISCVISD::VMCLR_VL, DL, VT, VL);
  }
  MVT XLenVT = Subtarget.getXLenVT();
  assert(SplatVal.getValueType() == XLenVT &&
         "Unexpected type for i1 splat value");
  MVT InterVT = VT.changeVectorElementType(MVT::i8);
  SplatVal = DAG.getNode(ISD::AND, DL, XLenVT, SplatVal,
                         DAG.getConstant(1, DL, XLenVT));
  SDValue LHS = DAG.getSplatVector(InterVT, DL, SplatVal);
  SDValue Zero = DAG.getConstant(0, DL, InterVT);
  return DAG.getSetCC(DL, VT, LHS, Zero, ISD::SETNE);
}
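
// The AND with 1 above is what makes the splat safe for an i1 value that is
// only guaranteed in bit 0 of an XLenVT register: a splat value with garbage
// upper bits such as 0xfe reduces to 0, so the final SETNE produces the
// all-false mask rather than an incorrect all-true one.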

// Custom-lower a SPLAT_VECTOR_PARTS where XLEN<SEW, as the SEW element type is
// illegal (currently only vXi64 RV32).
// FIXME: We could also catch non-constant sign-extended i32 values and lower
// them to SPLAT_VECTOR_I64
SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op,
                                                     SelectionDAG &DAG) const {
  SDLoc DL(Op);
  MVT VecVT = Op.getSimpleValueType();
  assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 &&
         "Unexpected SPLAT_VECTOR_PARTS lowering");

  assert(Op.getNumOperands() == 2 && "Unexpected number of operands!");
  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);

  if (VecVT.isFixedLengthVector()) {
    MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
    SDLoc DL(Op);
    SDValue Mask, VL;
    std::tie(Mask, VL) =
        getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);

    SDValue Res = splatPartsI64WithVL(DL, ContainerVT, Lo, Hi, VL, DAG);
    return convertFromScalableVector(VecVT, Res, DAG, Subtarget);
  }

  if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
    int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
    int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
    // If Hi constant is all the same sign bit as Lo, lower this as a custom
    // node in order to try and match RVV vector/scalar instructions.
    if ((LoC >> 31) == HiC)
      return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
  }

  // Detect cases where Hi is (SRA Lo, 31) which means Hi is Lo sign extended.
  if (Hi.getOpcode() == ISD::SRA && Hi.getOperand(0) == Lo &&
      isa<ConstantSDNode>(Hi.getOperand(1)) &&
      Hi.getConstantOperandVal(1) == 31)
    return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);

  // Fall back to use a stack store and stride x0 vector load. Use X0 as VL.
  return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VecVT, Lo, Hi,
                     DAG.getRegister(RISCV::X0, MVT::i64));
}
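
// The (LoC >> 31) == HiC check above exploits that LoC is a signed 32-bit
// value: the arithmetic shift yields 0 or -1, so the test matches exactly
// when the 64-bit constant is the sign-extension of its low half (e.g.
// Lo=-5, Hi=-1), in which case splatting just Lo via SPLAT_VECTOR_I64
// (which sign-extends) is lossless.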

// Custom-lower extensions from mask vectors by using a vselect either with 1
// for zero/any-extension or -1 for sign-extension:
//   (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
// Note that any-extension is lowered identically to zero-extension.
SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
                                                int64_t ExtTrueVal) const {
  SDLoc DL(Op);
  MVT VecVT = Op.getSimpleValueType();
  SDValue Src = Op.getOperand(0);
  // Only custom-lower extensions from mask types
  assert(Src.getValueType().isVector() &&
         Src.getValueType().getVectorElementType() == MVT::i1);

  MVT XLenVT = Subtarget.getXLenVT();
  SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
  SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, XLenVT);

  if (VecVT.isScalableVector()) {
    // Be careful not to introduce illegal scalar types at this stage, and be
    // careful also about splatting constants as on RV32, vXi64 SPLAT_VECTOR is
    // illegal and must be expanded. Since we know that the constants are
    // sign-extended 32-bit values, we use SPLAT_VECTOR_I64 directly.
    bool IsRV32E64 =
        !Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64;

    if (!IsRV32E64) {
      SplatZero = DAG.getSplatVector(VecVT, DL, SplatZero);
      SplatTrueVal = DAG.getSplatVector(VecVT, DL, SplatTrueVal);
    } else {
      SplatZero = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatZero);
      SplatTrueVal =
          DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatTrueVal);
    }

    return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero);
  }

  MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
  MVT I1ContainerVT =
      MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());

  SDValue CC = convertToScalableVector(I1ContainerVT, Src, DAG, Subtarget);

  SDValue Mask, VL;
  std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);

  SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero, VL);
  SplatTrueVal =
      DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatTrueVal, VL);
  SDValue Select = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC,
                               SplatTrueVal, SplatZero, VL);

  return convertFromScalableVector(VecVT, Select, DAG, Subtarget);
}

SDValue RISCVTargetLowering::lowerFixedLengthVectorExtendToRVV(
    SDValue Op, SelectionDAG &DAG, unsigned ExtendOpc) const {
  MVT ExtVT = Op.getSimpleValueType();
  // Only custom-lower extensions from fixed-length vector types.
  if (!ExtVT.isFixedLengthVector())
    return Op;
  MVT VT = Op.getOperand(0).getSimpleValueType();
  // Grab the canonical container type for the extended type. Infer the smaller
  // type from that to ensure the same number of vector elements, as we know
  // the LMUL will be sufficient to hold the smaller type.
  MVT ContainerExtVT = getContainerForFixedLengthVector(ExtVT);
  // Get the extended container type manually to ensure the same number of
  // vector elements between source and dest.
  MVT ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
                                     ContainerExtVT.getVectorElementCount());

  SDValue Op1 =
      convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);

  SDLoc DL(Op);
  SDValue Mask, VL;
  std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);

  SDValue Ext = DAG.getNode(ExtendOpc, DL, ContainerExtVT, Op1, Mask, VL);

  return convertFromScalableVector(ExtVT, Ext, DAG, Subtarget);
}

// Custom-lower truncations from vectors to mask vectors by using a mask and a
// setcc operation:
//   (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne)
SDValue RISCVTargetLowering::lowerVectorMaskTrunc(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT MaskVT = Op.getValueType();
  // Only expect to custom-lower truncations to mask types
  assert(MaskVT.isVector() && MaskVT.getVectorElementType() == MVT::i1 &&
         "Unexpected type for vector mask lowering");
  SDValue Src = Op.getOperand(0);
  MVT VecVT = Src.getSimpleValueType();

  // If this is a fixed vector, we need to convert it to a scalable vector.
  MVT ContainerVT = VecVT;
  if (VecVT.isFixedLengthVector()) {
    ContainerVT = getContainerForFixedLengthVector(VecVT);
    Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
  }

  SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT());
  SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT());

  SplatOne = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatOne);
  SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero);

  if (VecVT.isScalableVector()) {
    SDValue Trunc = DAG.getNode(ISD::AND, DL, VecVT, Src, SplatOne);
    return DAG.getSetCC(DL, MaskVT, Trunc, SplatZero, ISD::SETNE);
  }

  SDValue Mask, VL;
  std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);

  MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
  SDValue Trunc =
      DAG.getNode(RISCVISD::AND_VL, DL, ContainerVT, Src, SplatOne, Mask, VL);
  Trunc = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskContainerVT, Trunc, SplatZero,
                      DAG.getCondCode(ISD::SETNE), Mask, VL);
  return convertFromScalableVector(MaskVT, Trunc, DAG, Subtarget);
}

// Custom-legalize INSERT_VECTOR_ELT so that the value is inserted into the
// first position of a vector, and that vector is slid up to the insert index.
// By limiting the active vector length to index+1 and merging with the
// original vector (with an undisturbed tail policy for elements >= VL), we
// achieve the desired result of leaving all elements untouched except the one
// at VL-1, which is replaced with the desired value.
SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
                                                    SelectionDAG &DAG) const {
  SDLoc DL(Op);
  MVT VecVT = Op.getSimpleValueType();
  SDValue Vec = Op.getOperand(0);
  SDValue Val = Op.getOperand(1);
  SDValue Idx = Op.getOperand(2);

  if (VecVT.getVectorElementType() == MVT::i1) {
    // FIXME: For now we just promote to an i8 vector and insert into that,
    // but this is probably not optimal.
    MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
    Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
    Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, WideVT, Vec, Val, Idx);
    return DAG.getNode(ISD::TRUNCATE, DL, VecVT, Vec);
  }

  MVT ContainerVT = VecVT;
  // If the operand is a fixed-length vector, convert to a scalable one.
  if (VecVT.isFixedLengthVector()) {
    ContainerVT = getContainerForFixedLengthVector(VecVT);
    Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
  }

  MVT XLenVT = Subtarget.getXLenVT();

  SDValue Zero = DAG.getConstant(0, DL, XLenVT);
  bool IsLegalInsert = Subtarget.is64Bit() || Val.getValueType() != MVT::i64;
  // Even i64-element vectors on RV32 can be lowered without scalar
  // legalization if the most-significant 32 bits of the value are not affected
  // by the sign-extension of the lower 32 bits.
  // TODO: We could also catch sign extensions of a 32-bit value.
  if (!IsLegalInsert && isa<ConstantSDNode>(Val)) {
    const auto *CVal = cast<ConstantSDNode>(Val);
    if (isInt<32>(CVal->getSExtValue())) {
      IsLegalInsert = true;
      Val = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
    }
  }

  SDValue Mask, VL;
  std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);

  SDValue ValInVec;

  if (IsLegalInsert) {
    unsigned Opc =
        VecVT.isFloatingPoint() ? RISCVISD::VFMV_S_F_VL : RISCVISD::VMV_S_X_VL;
    if (isNullConstant(Idx)) {
      Vec = DAG.getNode(Opc, DL, ContainerVT, Vec, Val, VL);
      if (!VecVT.isFixedLengthVector())
        return Vec;
      return convertFromScalableVector(VecVT, Vec, DAG, Subtarget);
    }
    ValInVec =
        DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Val, VL);
  } else {
    // On RV32, i64-element vectors must be specially handled to place the
    // value at element 0, by using two vslide1up instructions in sequence on
    // the i32 split lo/hi value. Use an equivalently-sized i32 vector for
    // this.
    SDValue One = DAG.getConstant(1, DL, XLenVT);
    SDValue ValLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, Zero);
    SDValue ValHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, One);
    MVT I32ContainerVT =
        MVT::getVectorVT(MVT::i32, ContainerVT.getVectorElementCount() * 2);
    SDValue I32Mask =
        getDefaultScalableVLOps(I32ContainerVT, DL, DAG, Subtarget).first;
    // Limit the active VL to two.
    SDValue InsertI64VL = DAG.getConstant(2, DL, XLenVT);
    // Note: we can't pass an UNDEF to the first VSLIDE1UP_VL since an untied
    // undef doesn't obey the earlyclobber constraint. Just splat a zero value.
    ValInVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, I32ContainerVT, Zero,
                           InsertI64VL);
    // First slide in the hi value, then the lo in underneath it.
    ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
                           ValHi, I32Mask, InsertI64VL);
    ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
                           ValLo, I32Mask, InsertI64VL);
    // Bitcast back to the right container type.
    ValInVec = DAG.getBitcast(ContainerVT, ValInVec);
  }

  // Now that the value is in a vector, slide it into position.
  SDValue InsertVL =
      DAG.getNode(ISD::ADD, DL, XLenVT, Idx, DAG.getConstant(1, DL, XLenVT));
  SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
                                ValInVec, Idx, Mask, InsertVL);
  if (!VecVT.isFixedLengthVector())
    return Slideup;
  return convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
}
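
// Worked example of the slideup step above: inserting at index 2 gives
// InsertVL = 3, so the VSLIDEUP_VL moves ValInVec (which holds the value at
// element 0) up by 2 positions. A slideup leaves destination elements below
// the offset untouched, and the tail-undisturbed policy protects elements at
// index >= VL, so only element 2 of Vec is written.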

// Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then
// extract the first element: (extractelt (slidedown vec, idx), 0). For integer
// types this is done using VMV_X_S to allow us to glean information about the
// sign bits of the result.
SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
                                                     SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Idx = Op.getOperand(1);
  SDValue Vec = Op.getOperand(0);
  EVT EltVT = Op.getValueType();
  MVT VecVT = Vec.getSimpleValueType();
  MVT XLenVT = Subtarget.getXLenVT();

  if (VecVT.getVectorElementType() == MVT::i1) {
    // FIXME: For now we just promote to an i8 vector and extract from that,
    // but this is probably not optimal.
    MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
    Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, Idx);
  }

  // If this is a fixed vector, we need to convert it to a scalable vector.
  MVT ContainerVT = VecVT;
  if (VecVT.isFixedLengthVector()) {
    ContainerVT = getContainerForFixedLengthVector(VecVT);
    Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
  }

  // If the index is 0, the vector is already in the right position.
  if (!isNullConstant(Idx)) {
    // Use a VL of 1 to avoid processing more elements than we need.
    SDValue VL = DAG.getConstant(1, DL, XLenVT);
    MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
    SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
    Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
                      DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
  }

  if (!EltVT.isInteger()) {
    // Floating-point extracts are handled in TableGen.
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec,
                       DAG.getConstant(0, DL, XLenVT));
  }

  SDValue Elt0 = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
  return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Elt0);
}

// Some RVV intrinsics may claim that they want an integer operand to be
// promoted or expanded.
static SDValue lowerVectorIntrinsicSplats(SDValue Op, SelectionDAG &DAG,
                                          const RISCVSubtarget &Subtarget) {
  assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
         "Unexpected opcode");

  if (!Subtarget.hasStdExtV())
    return SDValue();

  bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
  unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
  SDLoc DL(Op);

  const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
      RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
  if (!II || !II->SplatOperand)
    return SDValue();

  unsigned SplatOp = II->SplatOperand + HasChain;
  assert(SplatOp < Op.getNumOperands());

  SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
  SDValue &ScalarOp = Operands[SplatOp];
  MVT OpVT = ScalarOp.getSimpleValueType();
  MVT XLenVT = Subtarget.getXLenVT();

  // If this isn't a scalar, or if its type is XLenVT, we're done.
  if (!OpVT.isScalarInteger() || OpVT == XLenVT)
    return SDValue();

  // The simplest case is that the operand needs to be promoted to XLenVT.
  if (OpVT.bitsLT(XLenVT)) {
    // If the operand is a constant, sign extend to increase our chances
    // of being able to use a .vi instruction. ANY_EXTEND would become a
    // zero extend and the simm5 check in isel would fail.
    // FIXME: Should we ignore the upper bits in isel instead?
    unsigned ExtOpc =
        isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
    ScalarOp = DAG.getNode(ExtOpc, DL, XLenVT, ScalarOp);
    return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
  }

  // Use the previous operand to get the vXi64 VT. The result might be a mask
  // VT for compares. Using the previous operand assumes that the previous
  // operand will never have a smaller element size than a scalar operand and
  // that a widening operation never uses SEW=64.
  // NOTE: If this fails the below assert, we can probably just find the
  // element count from any operand or result and use it to construct the VT.
  assert(II->SplatOperand > 1 && "Unexpected splat operand!");
  MVT VT = Op.getOperand(SplatOp - 1).getSimpleValueType();

  // The more complex case is when the scalar is larger than XLenVT.
  assert(XLenVT == MVT::i32 && OpVT == MVT::i64 &&
         VT.getVectorElementType() == MVT::i64 && "Unexpected VTs!");

  // If this is a sign-extended 32-bit constant, we can truncate it and rely
  // on the instruction to sign-extend since SEW>XLEN.
  if (auto *CVal = dyn_cast<ConstantSDNode>(ScalarOp)) {
    if (isInt<32>(CVal->getSExtValue())) {
      ScalarOp = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
      return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
    }
  }

  // We need to convert the scalar to a splat vector.
  // FIXME: Can we implicitly truncate the scalar if it is known to
  // be sign extended?
  // VL should be the last operand.
  SDValue VL = Op.getOperand(Op.getNumOperands() - 1);
  assert(VL.getValueType() == XLenVT);
  ScalarOp = splatSplitI64WithVL(DL, VT, ScalarOp, VL, DAG);
  return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
}
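
// Example of the promotion path above: on RV64, a vadd.vx-style intrinsic on
// a vXi8 vector carries an i8 scalar operand, so OpVT (i8) < XLenVT (i64)
// and the scalar is ANY_EXTENDed (or SIGN_EXTENDed when constant, keeping
// the simm5 .vi form matchable) before the intrinsic node is rebuilt.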

SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                     SelectionDAG &DAG) const {
  unsigned IntNo = Op.getConstantOperandVal(0);
  SDLoc DL(Op);
  MVT XLenVT = Subtarget.getXLenVT();

  switch (IntNo) {
  default:
    break; // Don't custom lower most intrinsics.
  case Intrinsic::thread_pointer: {
    EVT PtrVT = getPointerTy(DAG.getDataLayout());
    return DAG.getRegister(RISCV::X4, PtrVT);
  }
  case Intrinsic::riscv_orc_b:
    // Lower to the GORCI encoding for orc.b.
    return DAG.getNode(RISCVISD::GORC, DL, XLenVT, Op.getOperand(1),
                       DAG.getConstant(7, DL, XLenVT));
  case Intrinsic::riscv_grev:
  case Intrinsic::riscv_gorc: {
    unsigned Opc =
        IntNo == Intrinsic::riscv_grev ? RISCVISD::GREV : RISCVISD::GORC;
    return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
  }
  case Intrinsic::riscv_shfl:
  case Intrinsic::riscv_unshfl: {
    unsigned Opc =
        IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
    return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
  }
  case Intrinsic::riscv_bcompress:
  case Intrinsic::riscv_bdecompress: {
    unsigned Opc = IntNo == Intrinsic::riscv_bcompress ? RISCVISD::BCOMPRESS
                                                       : RISCVISD::BDECOMPRESS;
    return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
  }
  case Intrinsic::riscv_vmv_x_s:
    assert(Op.getValueType() == XLenVT && "Unexpected VT!");
    return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
                       Op.getOperand(1));
  case Intrinsic::riscv_vmv_v_x:
    return lowerScalarSplat(Op.getOperand(1), Op.getOperand(2),
                            Op.getSimpleValueType(), DL, DAG, Subtarget);
  case Intrinsic::riscv_vfmv_v_f:
    return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::riscv_vmv_s_x: {
    SDValue Scalar = Op.getOperand(2);

    if (Scalar.getValueType().bitsLE(XLenVT)) {
      Scalar = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Scalar);
      return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, Op.getValueType(),
                         Op.getOperand(1), Scalar, Op.getOperand(3));
    }

    assert(Scalar.getValueType() == MVT::i64 && "Unexpected scalar VT!");

    // This is an i64 value that lives in two scalar registers. We have to
    // insert this in a convoluted way. First we build vXi64 splat containing
    // the two values that we assemble using some bit math. Next we'll use
    // vid.v and vmseq to build a mask with bit 0 set. Then we'll use that mask
    // to merge element 0 from our splat into the source vector.
    // FIXME: This is probably not the best way to do this, but it is
    // consistent with INSERT_VECTOR_ELT lowering so it is a good starting
    // point.
    //   sw lo, (a0)
    //   sw hi, 4(a0)
    //   vlse vX, (a0)
    //
    //   vid.v vVid
    //   vmseq.vx mMask, vVid, 0
    //   vmerge.vvm vDest, vSrc, vVal, mMask
    MVT VT = Op.getSimpleValueType();
    SDValue Vec = Op.getOperand(1);
    SDValue VL = Op.getOperand(3);

    SDValue SplattedVal = splatSplitI64WithVL(DL, VT, Scalar, VL, DAG);
    SDValue SplattedIdx = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT,
                                      DAG.getConstant(0, DL, MVT::i32), VL);

    MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
    SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
    SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
    SDValue SelectCond =
        DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, VID, SplattedIdx,
                    DAG.getCondCode(ISD::SETEQ), Mask, VL);
    return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, SelectCond, SplattedVal,
                       Vec, VL);
  }
  case Intrinsic::riscv_vslide1up:
  case Intrinsic::riscv_vslide1down:
  case Intrinsic::riscv_vslide1up_mask:
  case Intrinsic::riscv_vslide1down_mask: {
    // We need to special case these when the scalar is larger than XLen.
    unsigned NumOps = Op.getNumOperands();
    bool IsMasked = NumOps == 6;
    unsigned OpOffset = IsMasked ? 1 : 0;
    SDValue Scalar = Op.getOperand(2 + OpOffset);
    if (Scalar.getValueType().bitsLE(XLenVT))
      break;

    // Splatting a sign extended constant is fine.
    if (auto *CVal = dyn_cast<ConstantSDNode>(Scalar))
      if (isInt<32>(CVal->getSExtValue()))
        break;

    MVT VT = Op.getSimpleValueType();
    assert(VT.getVectorElementType() == MVT::i64 &&
           Scalar.getValueType() == MVT::i64 && "Unexpected VTs");

    // Convert the vector source to the equivalent nxvXi32 vector.
    MVT I32VT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
    SDValue Vec = DAG.getBitcast(I32VT, Op.getOperand(1 + OpOffset));

    SDValue ScalarLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
                                   DAG.getConstant(0, DL, XLenVT));
    SDValue ScalarHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
                                   DAG.getConstant(1, DL, XLenVT));

    // Double the VL since we halved SEW.
    SDValue VL = Op.getOperand(NumOps - 1);
    SDValue I32VL =
        DAG.getNode(ISD::SHL, DL, XLenVT, VL, DAG.getConstant(1, DL, XLenVT));

    MVT I32MaskVT = MVT::getVectorVT(MVT::i1, I32VT.getVectorElementCount());
    SDValue I32Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, I32MaskVT, VL);

    // Shift the two scalar parts in using SEW=32 slide1up/slide1down
    // instructions.
    if (IntNo == Intrinsic::riscv_vslide1up ||
        IntNo == Intrinsic::riscv_vslide1up_mask) {
      Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarHi,
                        I32Mask, I32VL);
      Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarLo,
                        I32Mask, I32VL);
    } else {
      Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarLo,
                        I32Mask, I32VL);
      Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarHi,
                        I32Mask, I32VL);
    }

    // Convert back to nxvXi64.
    Vec = DAG.getBitcast(VT, Vec);

    if (!IsMasked)
      return Vec;

    // Apply mask after the operation.
    SDValue Mask = Op.getOperand(NumOps - 2);
    SDValue MaskedOff = Op.getOperand(1);
    return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff, VL);
  }
  }

  return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
}
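
// The vslide1up/vslide1down special case above handles an i64 scalar on RV32
// by reinterpreting the vector as one with twice as many i32 elements: VL is
// doubled (VL << 1) so the same bytes are covered at SEW=32, and the two
// 32-bit halves are slid in one at a time. For slide1up the high half goes
// first so that, after the second slide, the low half sits at the
// even-numbered (lower) element of each i64 pair.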

SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
                                                    SelectionDAG &DAG) const {
  return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
}

static MVT getLMUL1VT(MVT VT) {
  assert(VT.getVectorElementType().getSizeInBits() <= 64 &&
         "Unexpected vector MVT");
  return MVT::getScalableVectorVT(
      VT.getVectorElementType(),
      RISCV::RVVBitsPerBlock / VT.getVectorElementType().getSizeInBits());
}
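
// For example, given RISCV::RVVBitsPerBlock = 64, getLMUL1VT(nxv8i32) is
// nxv2i32: an LMUL=1 register group holds 64/32 = 2 i32 elements per vector
// block, which lets the reduction lowerings below operate in a single
// register regardless of the input's LMUL.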

static unsigned getRVVReductionOp(unsigned ISDOpcode) {
  switch (ISDOpcode) {
  default:
    llvm_unreachable("Unhandled reduction");
  case ISD::VECREDUCE_ADD:
    return RISCVISD::VECREDUCE_ADD_VL;
  case ISD::VECREDUCE_UMAX:
    return RISCVISD::VECREDUCE_UMAX_VL;
  case ISD::VECREDUCE_SMAX:
    return RISCVISD::VECREDUCE_SMAX_VL;
  case ISD::VECREDUCE_UMIN:
    return RISCVISD::VECREDUCE_UMIN_VL;
  case ISD::VECREDUCE_SMIN:
    return RISCVISD::VECREDUCE_SMIN_VL;
  case ISD::VECREDUCE_AND:
    return RISCVISD::VECREDUCE_AND_VL;
  case ISD::VECREDUCE_OR:
    return RISCVISD::VECREDUCE_OR_VL;
  case ISD::VECREDUCE_XOR:
    return RISCVISD::VECREDUCE_XOR_VL;
  }
}

SDValue RISCVTargetLowering::lowerVectorMaskVECREDUCE(SDValue Op,
                                                      SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Vec = Op.getOperand(0);
  MVT VecVT = Vec.getSimpleValueType();
  assert((Op.getOpcode() == ISD::VECREDUCE_AND ||
          Op.getOpcode() == ISD::VECREDUCE_OR ||
          Op.getOpcode() == ISD::VECREDUCE_XOR) &&
         "Unexpected reduction lowering");

  MVT XLenVT = Subtarget.getXLenVT();
  assert(Op.getValueType() == XLenVT &&
         "Expected reduction output to be legalized to XLenVT");

  MVT ContainerVT = VecVT;
  if (VecVT.isFixedLengthVector()) {
    ContainerVT = getContainerForFixedLengthVector(VecVT);
    Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
  }

  SDValue Mask, VL;
  std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
  SDValue Zero = DAG.getConstant(0, DL, XLenVT);

  switch (Op.getOpcode()) {
  default:
    llvm_unreachable("Unhandled reduction");
  case ISD::VECREDUCE_AND:
    // vpopc ~x == 0
    Vec = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Vec, Mask, VL);
    Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
    return DAG.getSetCC(DL, XLenVT, Vec, Zero, ISD::SETEQ);
  case ISD::VECREDUCE_OR:
    // vpopc x != 0
    Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
    return DAG.getSetCC(DL, XLenVT, Vec, Zero, ISD::SETNE);
  case ISD::VECREDUCE_XOR: {
    // ((vpopc x) & 1) != 0
    SDValue One = DAG.getConstant(1, DL, XLenVT);
    Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
    Vec = DAG.getNode(ISD::AND, DL, XLenVT, Vec, One);
    return DAG.getSetCC(DL, XLenVT, Vec, Zero, ISD::SETNE);
  }
  }
}
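
// All three mask reductions above reduce to a population count of the mask:
//   and: all lanes set  <=>  vpopc of the complemented mask is zero
//   or:  any lane set   <=>  vpopc of the mask is non-zero
//   xor: lane parity    <=>  the low bit of the vpopc result is non-zero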

SDValue RISCVTargetLowering::lowerVECREDUCE(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Vec = Op.getOperand(0);
  EVT VecEVT = Vec.getValueType();

  unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Op.getOpcode());

  // Due to ordering in legalize types we may have a vector type that needs to
  // be split. Do that manually so we can get down to a legal type.
  while (getTypeAction(*DAG.getContext(), VecEVT) ==
         TargetLowering::TypeSplitVector) {
    SDValue Lo, Hi;
    std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL);
    VecEVT = Lo.getValueType();
    Vec = DAG.getNode(BaseOpc, DL, VecEVT, Lo, Hi);
  }

  // TODO: The type may need to be widened rather than split. Or widened before
  // it can be split.
  if (!isTypeLegal(VecEVT))
    return SDValue();

  MVT VecVT = VecEVT.getSimpleVT();
  MVT VecEltVT = VecVT.getVectorElementType();
  unsigned RVVOpcode = getRVVReductionOp(Op.getOpcode());

  MVT ContainerVT = VecVT;
  if (VecVT.isFixedLengthVector()) {
    ContainerVT = getContainerForFixedLengthVector(VecVT);
    Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
  }

  MVT M1VT = getLMUL1VT(ContainerVT);

  SDValue Mask, VL;
  std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);

  // FIXME: This is a VLMAX splat which might be too large and can prevent
  // vsetvli removal.
  SDValue NeutralElem =
      DAG.getNeutralElement(BaseOpc, DL, VecEltVT, SDNodeFlags());
  SDValue IdentitySplat = DAG.getSplatVector(M1VT, DL, NeutralElem);
  SDValue Reduction =
      DAG.getNode(RVVOpcode, DL, M1VT, Vec, IdentitySplat, Mask, VL);
  SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
                             DAG.getConstant(0, DL, Subtarget.getXLenVT()));
  return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
}
3930 | ||||
3931 | // Given a reduction op, this function returns the matching reduction opcode, | |||
3932 | // the vector SDValue and the scalar SDValue required to lower this to a | |||
3933 | // RISCVISD node. | |||
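// For example (a sketch of the mapping below): ISD::VECREDUCE_SEQ_FADD
// carries its scalar start value as operand 0 and its vector as operand 1,
// so it maps to {VECREDUCE_SEQ_FADD_VL, Op(1), Op(0)}; the unordered
// reductions instead pair operand 0 with the neutral element of the base
// opcode (e.g. -0.0 for fadd, assuming the usual DAG neutral elements).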
3934 | static std::tuple<unsigned, SDValue, SDValue> | |||
3935 | getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT) { | |||
3936 | SDLoc DL(Op); | |||
3937 | auto Flags = Op->getFlags(); | |||
3938 | unsigned Opcode = Op.getOpcode(); | |||
3939 | unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Opcode); | |||
3940 | switch (Opcode) { | |||
3941 | default: | |||
3942 | llvm_unreachable("Unhandled reduction"); | |||
3943 | case ISD::VECREDUCE_FADD: | |||
3944 | return std::make_tuple(RISCVISD::VECREDUCE_FADD_VL, Op.getOperand(0), | |||
3945 | DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags)); | |||
3946 | case ISD::VECREDUCE_SEQ_FADD: | |||
3947 | return std::make_tuple(RISCVISD::VECREDUCE_SEQ_FADD_VL, Op.getOperand(1), | |||
3948 | Op.getOperand(0)); | |||
3949 | case ISD::VECREDUCE_FMIN: | |||
3950 | return std::make_tuple(RISCVISD::VECREDUCE_FMIN_VL, Op.getOperand(0), | |||
3951 | DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags)); | |||
3952 | case ISD::VECREDUCE_FMAX: | |||
3953 | return std::make_tuple(RISCVISD::VECREDUCE_FMAX_VL, Op.getOperand(0), | |||
3954 | DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags)); | |||
3955 | } | |||
3956 | } | |||
3957 | ||||
3958 | SDValue RISCVTargetLowering::lowerFPVECREDUCE(SDValue Op, | |||
3959 | SelectionDAG &DAG) const { | |||
3960 | SDLoc DL(Op); | |||
3961 | MVT VecEltVT = Op.getSimpleValueType(); | |||
3962 | ||||
3963 | unsigned RVVOpcode; | |||
3964 | SDValue VectorVal, ScalarVal; | |||
3965 | std::tie(RVVOpcode, VectorVal, ScalarVal) = | |||
3966 | getRVVFPReductionOpAndOperands(Op, DAG, VecEltVT); | |||
3967 | MVT VecVT = VectorVal.getSimpleValueType(); | |||
3968 | ||||
3969 | MVT ContainerVT = VecVT; | |||
3970 | if (VecVT.isFixedLengthVector()) { | |||
3971 | ContainerVT = getContainerForFixedLengthVector(VecVT); | |||
3972 | VectorVal = convertToScalableVector(ContainerVT, VectorVal, DAG, Subtarget); | |||
3973 | } | |||
3974 | ||||
3975 | MVT M1VT = getLMUL1VT(VectorVal.getSimpleValueType()); | |||
3976 | ||||
3977 | SDValue Mask, VL; | |||
3978 | std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget); | |||
3979 | ||||
3980 | // FIXME: This is a VLMAX splat which might be too large and can prevent | |||
3981 | // vsetvli removal. | |||
3982 | SDValue ScalarSplat = DAG.getSplatVector(M1VT, DL, ScalarVal); | |||
3983 | SDValue Reduction = | |||
3984 | DAG.getNode(RVVOpcode, DL, M1VT, VectorVal, ScalarSplat, Mask, VL); | |||
3985 | return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction, | |||
3986 | DAG.getConstant(0, DL, Subtarget.getXLenVT())); | |||
3987 | } | |||
3988 | ||||
3989 | SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op, | |||
3990 | SelectionDAG &DAG) const { | |||
3991 | SDValue Vec = Op.getOperand(0); | |||
3992 | SDValue SubVec = Op.getOperand(1); | |||
3993 | MVT VecVT = Vec.getSimpleValueType(); | |||
3994 | MVT SubVecVT = SubVec.getSimpleValueType(); | |||
3995 | ||||
3996 | SDLoc DL(Op); | |||
3997 | MVT XLenVT = Subtarget.getXLenVT(); | |||
3998 | unsigned OrigIdx = Op.getConstantOperandVal(2); | |||
3999 | const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo(); | |||
4000 | ||||
4001 | // We don't have the ability to slide mask vectors up indexed by their i1 | |||
4002 | // elements; the smallest we can do is i8. Often we are able to bitcast to | |||
4003 | // equivalent i8 vectors. Note that when inserting a fixed-length vector | |||
4004 | // into a scalable one, we might not necessarily have enough scalable | |||
4005 | // elements to safely divide by 8: nxv1i1 = insert nxv1i1, v4i1 is valid. | |||
4006 | if (SubVecVT.getVectorElementType() == MVT::i1 && | |||
4007 | (OrigIdx != 0 || !Vec.isUndef())) { | |||
4008 | if (VecVT.getVectorMinNumElements() >= 8 && | |||
4009 | SubVecVT.getVectorMinNumElements() >= 8) { | |||
4010 | assert(OrigIdx % 8 == 0 && "Invalid index"); | |||
4011 | assert(VecVT.getVectorMinNumElements() % 8 == 0 && | |||
4012 | SubVecVT.getVectorMinNumElements() % 8 == 0 && | |||
4013 | "Unexpected mask vector lowering"); | |||
4014 | OrigIdx /= 8; | |||
4015 | SubVecVT = | |||
4016 | MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8, | |||
4017 | SubVecVT.isScalableVector()); | |||
4018 | VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8, | |||
4019 | VecVT.isScalableVector()); | |||
4020 | Vec = DAG.getBitcast(VecVT, Vec); | |||
4021 | SubVec = DAG.getBitcast(SubVecVT, SubVec); | |||
4022 | } else { | |||
4023 | // We can't slide this mask vector up indexed by its i1 elements. | |||
4024 | // This poses a problem when we wish to insert a scalable vector which | |||
4025 | // can't be re-expressed as a larger type. Just choose the slow path and | |||
4026 | // extend to a larger type, then truncate back down. | |||
4027 | MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8); | |||
4028 | MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8); | |||
4029 | Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec); | |||
4030 | SubVec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtSubVecVT, SubVec); | |||
4031 | Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ExtVecVT, Vec, SubVec, | |||
4032 | Op.getOperand(2)); | |||
4033 | SDValue SplatZero = DAG.getConstant(0, DL, ExtVecVT); | |||
4034 | return DAG.getSetCC(DL, VecVT, Vec, SplatZero, ISD::SETNE); | |||
4035 | } | |||
4036 | } | |||
4037 | ||||
4038 | // If the subvector is a fixed-length type, we cannot use subregister | |||
4039 | // manipulation to simplify the codegen; we don't know which register of an | |||
4040 | // LMUL group contains the specific subvector, as we only know the minimum | |||
4041 | // register size. Therefore we must slide the vector group up the full | |||
4042 | // amount. | |||
4043 | if (SubVecVT.isFixedLengthVector()) { | |||
4044 | if (OrigIdx == 0 && Vec.isUndef()) | |||
4045 | return Op; | |||
4046 | MVT ContainerVT = VecVT; | |||
4047 | if (VecVT.isFixedLengthVector()) { | |||
4048 | ContainerVT = getContainerForFixedLengthVector(VecVT); | |||
4049 | Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget); | |||
4050 | } | |||
4051 | SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT, | |||
4052 | DAG.getUNDEF(ContainerVT), SubVec, | |||
4053 | DAG.getConstant(0, DL, XLenVT)); | |||
4054 | SDValue Mask = | |||
4055 | getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first; | |||
4056 | // Set the vector length to only the number of elements we care about. Note | |||
4057 | // that for slideup this includes the offset. | |||
4058 | SDValue VL = | |||
4059 | DAG.getConstant(OrigIdx + SubVecVT.getVectorNumElements(), DL, XLenVT); | |||
4060 | SDValue SlideupAmt = DAG.getConstant(OrigIdx, DL, XLenVT); | |||
4061 | SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec, | |||
4062 | SubVec, SlideupAmt, Mask, VL); | |||
4063 | if (VecVT.isFixedLengthVector()) | |||
4064 | Slideup = convertFromScalableVector(VecVT, Slideup, DAG, Subtarget); | |||
4065 | return DAG.getBitcast(Op.getValueType(), Slideup); | |||
4066 | } | |||
4067 | ||||
4068 | unsigned SubRegIdx, RemIdx; | |||
4069 | std::tie(SubRegIdx, RemIdx) = | |||
4070 | RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs( | |||
4071 | VecVT, SubVecVT, OrigIdx, TRI); | |||
4072 | ||||
4073 | RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecVT); | |||
4074 | bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 || | |||
4075 | SubVecLMUL == RISCVII::VLMUL::LMUL_F4 || | |||
4076 | SubVecLMUL == RISCVII::VLMUL::LMUL_F8; | |||
4077 | ||||
4078 | // 1. If the Idx has been completely eliminated and this subvector's size is | |||
4079 | // a vector register or a multiple thereof, or the surrounding elements are | |||
4080 | // undef, then this is a subvector insert which naturally aligns to a vector | |||
4081 | // register. These can easily be handled using subregister manipulation. | |||
4082 | // 2. If the subvector is smaller than a vector register, then the insertion | |||
4083 | // must preserve the undisturbed elements of the register. We do this by | |||
4084 | // lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1 vector type | |||
4085 | // (which resolves to a subregister copy), performing a VSLIDEUP to place the | |||
4086 | // subvector within the vector register, and an INSERT_SUBVECTOR of that | |||
4087 | // LMUL=1 type back into the larger vector (resolving to another subregister | |||
4088 | // operation). See below for how our VSLIDEUP works. We go via a LMUL=1 type | |||
4089 | // to avoid allocating a large register group to hold our subvector. | |||
4090 | if (RemIdx == 0 && (!IsSubVecPartReg || Vec.isUndef())) | |||
4091 | return Op; | |||
4092 | ||||
4093 | // VSLIDEUP works by leaving elements 0<=i<OFFSET undisturbed, elements | |||
4094 | // OFFSET<=i<VL set to the "subvector" and VL<=i<VLMAX set to the tail policy | |||
4095 | // (in our case undisturbed). This means we can set up a subvector insertion | |||
4096 | // where OFFSET is the insertion offset, and the VL is the OFFSET plus the | |||
4097 | // size of the subvector. | |||
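// For example (illustrative): inserting a 2-element subvector at element 3
// uses OFFSET=3 and VL=3+2=5, so elements 0..2 keep their old values,
// elements 3..4 receive the subvector, and elements 5..VLMAX-1 fall under
// the (here undisturbed) tail policy.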
4098 | MVT InterSubVT = VecVT; | |||
4099 | SDValue AlignedExtract = Vec; | |||
4100 | unsigned AlignedIdx = OrigIdx - RemIdx; | |||
4101 | if (VecVT.bitsGT(getLMUL1VT(VecVT))) { | |||
4102 | InterSubVT = getLMUL1VT(VecVT); | |||
4103 | // Extract a subvector equal to the nearest full vector register type. This | |||
4104 | // should resolve to an EXTRACT_SUBREG instruction. | |||
4105 | AlignedExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec, | |||
4106 | DAG.getConstant(AlignedIdx, DL, XLenVT)); | |||
4107 | } | |||
4108 | ||||
4109 | SDValue SlideupAmt = DAG.getConstant(RemIdx, DL, XLenVT); | |||
4110 | // For scalable vectors this must be further multiplied by vscale. | |||
4111 | SlideupAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlideupAmt); | |||
4112 | ||||
4113 | SDValue Mask, VL; | |||
4114 | std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget); | |||
4115 | ||||
4116 | // Construct the vector length corresponding to RemIdx + length(SubVecVT). | |||
4117 | VL = DAG.getConstant(SubVecVT.getVectorMinNumElements(), DL, XLenVT); | |||
4118 | VL = DAG.getNode(ISD::VSCALE, DL, XLenVT, VL); | |||
4119 | VL = DAG.getNode(ISD::ADD, DL, XLenVT, SlideupAmt, VL); | |||
4120 | ||||
4121 | SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InterSubVT, | |||
4122 | DAG.getUNDEF(InterSubVT), SubVec, | |||
4123 | DAG.getConstant(0, DL, XLenVT)); | |||
4124 | ||||
4125 | SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, InterSubVT, | |||
4126 | AlignedExtract, SubVec, SlideupAmt, Mask, VL); | |||
4127 | ||||
4128 | // If required, insert this subvector back into the correct vector register. | |||
4129 | // This should resolve to an INSERT_SUBREG instruction. | |||
4130 | if (VecVT.bitsGT(InterSubVT)) | |||
4131 | Slideup = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, Vec, Slideup, | |||
4132 | DAG.getConstant(AlignedIdx, DL, XLenVT)); | |||
4133 | ||||
4134 | // We might have bitcast from a mask type: cast back to the original type if | |||
4135 | // required. | |||
4136 | return DAG.getBitcast(Op.getSimpleValueType(), Slideup); | |||
4137 | } | |||
4138 | ||||
4139 | SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op, | |||
4140 | SelectionDAG &DAG) const { | |||
4141 | SDValue Vec = Op.getOperand(0); | |||
4142 | MVT SubVecVT = Op.getSimpleValueType(); | |||
4143 | MVT VecVT = Vec.getSimpleValueType(); | |||
4144 | ||||
4145 | SDLoc DL(Op); | |||
4146 | MVT XLenVT = Subtarget.getXLenVT(); | |||
4147 | unsigned OrigIdx = Op.getConstantOperandVal(1); | |||
4148 | const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo(); | |||
4149 | ||||
4150 | // We don't have the ability to slide mask vectors down indexed by their i1 | |||
4151 | // elements; the smallest we can do is i8. Often we are able to bitcast to | |||
4152 | // equivalent i8 vectors. Note that when extracting a fixed-length vector | |||
4153 | // from a scalable one, we might not necessarily have enough scalable | |||
4154 | // elements to safely divide by 8: v8i1 = extract nxv1i1 is valid. | |||
4155 | if (SubVecVT.getVectorElementType() == MVT::i1 && OrigIdx != 0) { | |||
4156 | if (VecVT.getVectorMinNumElements() >= 8 && | |||
4157 | SubVecVT.getVectorMinNumElements() >= 8) { | |||
4158 | assert(OrigIdx % 8 == 0 && "Invalid index"); | |||
4159 | assert(VecVT.getVectorMinNumElements() % 8 == 0 && | |||
4160 | SubVecVT.getVectorMinNumElements() % 8 == 0 && | |||
4161 | "Unexpected mask vector lowering"); | |||
4162 | OrigIdx /= 8; | |||
4163 | SubVecVT = | |||
4164 | MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8, | |||
4165 | SubVecVT.isScalableVector()); | |||
4166 | VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8, | |||
4167 | VecVT.isScalableVector()); | |||
4168 | Vec = DAG.getBitcast(VecVT, Vec); | |||
4169 | } else { | |||
4170 | // We can't slide this mask vector down, indexed by its i1 elements. | |||
4171 | // This poses a problem when we wish to extract a scalable vector which | |||
4172 | // can't be re-expressed as a larger type. Just choose the slow path and | |||
4173 | // extend to a larger type, then truncate back down. | |||
4174 | // TODO: We could probably improve this when extracting certain fixed-length | |||
4175 | // vectors from fixed-length vectors, where we could extract as i8 and shift | |||
4176 | // the correct element right to reach the desired subvector. | |||
4177 | MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8); | |||
4178 | MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8); | |||
4179 | Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec); | |||
4180 | Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtSubVecVT, Vec, | |||
4181 | Op.getOperand(1)); | |||
4182 | SDValue SplatZero = DAG.getConstant(0, DL, ExtSubVecVT); | |||
4183 | return DAG.getSetCC(DL, SubVecVT, Vec, SplatZero, ISD::SETNE); | |||
4184 | } | |||
4185 | } | |||
4186 | ||||
4187 | // If the subvector is a fixed-length type, we cannot use subregister | |||
4188 | // manipulation to simplify the codegen; we don't know which register of an | |||
4189 | // LMUL group contains the specific subvector, as we only know the minimum | |||
4190 | // register size. Therefore we must slide the vector group down the full | |||
4191 | // amount. | |||
4192 | if (SubVecVT.isFixedLengthVector()) { | |||
4193 | // With an index of 0 this is a cast-like subvector, which can be performed | |||
4194 | // with subregister operations. | |||
4195 | if (OrigIdx == 0) | |||
4196 | return Op; | |||
4197 | MVT ContainerVT = VecVT; | |||
4198 | if (VecVT.isFixedLengthVector()) { | |||
4199 | ContainerVT = getContainerForFixedLengthVector(VecVT); | |||
4200 | Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget); | |||
4201 | } | |||
4202 | SDValue Mask = | |||
4203 | getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first; | |||
4204 | // Set the vector length to only the number of elements we care about. This | |||
4205 | // avoids sliding down elements we're going to discard straight away. | |||
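// For example (illustrative): extracting a 2-element subvector at index 6
// slides down by 6 with VL=2, so only the two surviving elements are moved;
// unlike the slideup in the insert path, the offset is not part of VL here.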
4206 | SDValue VL = DAG.getConstant(SubVecVT.getVectorNumElements(), DL, XLenVT); | |||
4207 | SDValue SlidedownAmt = DAG.getConstant(OrigIdx, DL, XLenVT); | |||
4208 | SDValue Slidedown = | |||
4209 | DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT, | |||
4210 | DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL); | |||
4211 | // Now we can use a cast-like subvector extract to get the result. | |||
4212 | Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown, | |||
4213 | DAG.getConstant(0, DL, XLenVT)); | |||
4214 | return DAG.getBitcast(Op.getValueType(), Slidedown); | |||
4215 | } | |||
4216 | ||||
4217 | unsigned SubRegIdx, RemIdx; | |||
4218 | std::tie(SubRegIdx, RemIdx) = | |||
4219 | RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs( | |||
4220 | VecVT, SubVecVT, OrigIdx, TRI); | |||
4221 | ||||
4222 | // If the Idx has been completely eliminated then this is a subvector extract | |||
4223 | // which naturally aligns to a vector register. These can easily be handled | |||
4224 | // using subregister manipulation. | |||
4225 | if (RemIdx == 0) | |||
4226 | return Op; | |||
4227 | ||||
4228 | // Else we must shift our vector register directly to extract the subvector. | |||
4229 | // Do this using VSLIDEDOWN. | |||
4230 | ||||
4231 | // If the vector type is an LMUL-group type, extract a subvector equal to the | |||
4232 | // nearest full vector register type. This should resolve to an EXTRACT_SUBREG | |||
4233 | // instruction. | |||
4234 | MVT InterSubVT = VecVT; | |||
4235 | if (VecVT.bitsGT(getLMUL1VT(VecVT))) { | |||
4236 | InterSubVT = getLMUL1VT(VecVT); | |||
4237 | Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec, | |||
4238 | DAG.getConstant(OrigIdx - RemIdx, DL, XLenVT)); | |||
4239 | } | |||
4240 | ||||
4241 | // Slide this vector register down by the desired number of elements in order | |||
4242 | // to place the desired subvector starting at element 0. | |||
4243 | SDValue SlidedownAmt = DAG.getConstant(RemIdx, DL, XLenVT); | |||
4244 | // For scalable vectors this must be further multiplied by vscale. | |||
4245 | SlidedownAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlidedownAmt); | |||
4246 | ||||
4247 | SDValue Mask, VL; | |||
4248 | std::tie(Mask, VL) = getDefaultScalableVLOps(InterSubVT, DL, DAG, Subtarget); | |||
4249 | SDValue Slidedown = | |||
4250 | DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, InterSubVT, | |||
4251 | DAG.getUNDEF(InterSubVT), Vec, SlidedownAmt, Mask, VL); | |||
4252 | ||||
4253 | // Now the vector is in the right position, extract our final subvector. This | |||
4254 | // should resolve to a COPY. | |||
4255 | Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown, | |||
4256 | DAG.getConstant(0, DL, XLenVT)); | |||
4257 | ||||
4258 | // We might have bitcast from a mask type: cast back to the original type if | |||
4259 | // required. | |||
4260 | return DAG.getBitcast(Op.getSimpleValueType(), Slidedown); | |||
4261 | } | |||
4262 | ||||
4263 | // Lower step_vector to the vid instruction. Any non-identity step value must | |||
4264 | // be accounted for by manual expansion. | |||
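// For example (a sketch of the expansion below): a step of 4 becomes
// (vid << splat(2)), while a non-power-of-two step such as 3 becomes
// (vid * splat(3)).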
4265 | SDValue RISCVTargetLowering::lowerSTEP_VECTOR(SDValue Op, | |||
4266 | SelectionDAG &DAG) const { | |||
4267 | SDLoc DL(Op); | |||
4268 | MVT VT = Op.getSimpleValueType(); | |||
4269 | MVT XLenVT = Subtarget.getXLenVT(); | |||
4270 | SDValue Mask, VL; | |||
4271 | std::tie(Mask, VL) = getDefaultScalableVLOps(VT, DL, DAG, Subtarget); | |||
4272 | SDValue StepVec = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL); | |||
4273 | uint64_t StepValImm = Op.getConstantOperandVal(0); | |||
4274 | if (StepValImm != 1) { | |||
4275 | if (isPowerOf2_64(StepValImm)) { | |||
4276 | SDValue StepVal = | |||
4277 | DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, | |||
4278 | DAG.getConstant(Log2_64(StepValImm), DL, XLenVT)); | |||
4279 | StepVec = DAG.getNode(ISD::SHL, DL, VT, StepVec, StepVal); | |||
4280 | } else { | |||
4281 | SDValue StepVal = lowerScalarSplat( | |||
4282 | DAG.getConstant(StepValImm, DL, VT.getVectorElementType()), VL, VT, | |||
4283 | DL, DAG, Subtarget); | |||
4284 | StepVec = DAG.getNode(ISD::MUL, DL, VT, StepVec, StepVal); | |||
4285 | } | |||
4286 | } | |||
4287 | return StepVec; | |||
4288 | } | |||
4289 | ||||
4290 | // Implement vector_reverse using vrgather.vv with indices determined by | |||
4291 | // subtracting the id of each element from (VLMAX-1). This will convert | |||
4292 | // the indices like so: | |||
4293 | // (0, 1,..., VLMAX-2, VLMAX-1) -> (VLMAX-1, VLMAX-2,..., 1, 0). | |||
4294 | // TODO: This code assumes VLMAX <= 65536 for LMUL=8 SEW=16. | |||
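// For example (illustrative): with VLMAX=4 the gather indices are computed
// as splat(VLMAX-1) - vid = (3,3,3,3) - (0,1,2,3) = (3,2,1,0).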
4295 | SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op, | |||
4296 | SelectionDAG &DAG) const { | |||
4297 | SDLoc DL(Op); | |||
4298 | MVT VecVT = Op.getSimpleValueType(); | |||
4299 | unsigned EltSize = VecVT.getScalarSizeInBits(); | |||
4300 | unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue(); | |||
4301 | ||||
4302 | unsigned MaxVLMAX = 0; | |||
4303 | unsigned VectorBitsMax = Subtarget.getMaxRVVVectorSizeInBits(); | |||
4304 | if (VectorBitsMax != 0) | |||
4305 | MaxVLMAX = ((VectorBitsMax / EltSize) * MinSize) / RISCV::RVVBitsPerBlock; | |||
4306 | ||||
4307 | unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL; | |||
4308 | MVT IntVT = VecVT.changeVectorElementTypeToInteger(); | |||
4309 | ||||
4310 | // If this is SEW=8 and VLMAX is unknown or more than 256, we need | |||
4311 | // to use vrgatherei16.vv. | |||
4312 | // TODO: It's also possible to use vrgatherei16.vv for other types to | |||
4313 | // decrease register width for the index calculation. | |||
4314 | if ((MaxVLMAX == 0 || MaxVLMAX > 256) && EltSize == 8) { | |||
4315 | // If this is LMUL=8, we have to split before we can use vrgatherei16.vv. | |||
4316 | // Reverse each half, then reassemble them in reverse order. | |||
4317 | // NOTE: It's also possible that after splitting, VLMAX no longer | |||
4318 | // requires vrgatherei16.vv. | |||
4319 | if (MinSize == (8 * RISCV::RVVBitsPerBlock)) { | |||
4320 | SDValue Lo, Hi; | |||
4321 | std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0); | |||
4322 | EVT LoVT, HiVT; | |||
4323 | std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VecVT); | |||
4324 | Lo = DAG.getNode(ISD::VECTOR_REVERSE, DL, LoVT, Lo); | |||
4325 | Hi = DAG.getNode(ISD::VECTOR_REVERSE, DL, HiVT, Hi); | |||
4326 | // Reassemble the low and high pieces reversed. | |||
4327 | // FIXME: This is a CONCAT_VECTORS. | |||
4328 | SDValue Res = | |||
4329 | DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, DAG.getUNDEF(VecVT), Hi, | |||
4330 | DAG.getIntPtrConstant(0, DL)); | |||
4331 | return DAG.getNode( | |||
4332 | ISD::INSERT_SUBVECTOR, DL, VecVT, Res, Lo, | |||
4333 | DAG.getIntPtrConstant(LoVT.getVectorMinNumElements(), DL)); | |||
4334 | } | |||
4335 | ||||
4336 | // Just promote the int type to i16 which will double the LMUL. | |||
4337 | IntVT = MVT::getVectorVT(MVT::i16, VecVT.getVectorElementCount()); | |||
4338 | GatherOpc = RISCVISD::VRGATHEREI16_VV_VL; | |||
4339 | } | |||
4340 | ||||
4341 | MVT XLenVT = Subtarget.getXLenVT(); | |||
4342 | SDValue Mask, VL; | |||
4343 | std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget); | |||
4344 | ||||
4345 | // Calculate VLMAX-1 for the desired SEW. | |||
4346 | unsigned MinElts = VecVT.getVectorMinNumElements(); | |||
4347 | SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT, | |||
4348 | DAG.getConstant(MinElts, DL, XLenVT)); | |||
4349 | SDValue VLMinus1 = | |||
4350 | DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DAG.getConstant(1, DL, XLenVT)); | |||
4351 | ||||
4352 | // Splat VLMAX-1 taking care to handle SEW==64 on RV32. | |||
4353 | bool IsRV32E64 = | |||
4354 | !Subtarget.is64Bit() && IntVT.getVectorElementType() == MVT::i64; | |||
4355 | SDValue SplatVL; | |||
4356 | if (!IsRV32E64) | |||
4357 | SplatVL = DAG.getSplatVector(IntVT, DL, VLMinus1); | |||
4358 | else | |||
4359 | SplatVL = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, IntVT, VLMinus1); | |||
4360 | ||||
4361 | SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, IntVT, Mask, VL); | |||
4362 | SDValue Indices = | |||
4363 | DAG.getNode(RISCVISD::SUB_VL, DL, IntVT, SplatVL, VID, Mask, VL); | |||
4364 | ||||
4365 | return DAG.getNode(GatherOpc, DL, VecVT, Op.getOperand(0), Indices, Mask, VL); | |||
4366 | } | |||
4367 | ||||
4368 | SDValue | |||
4369 | RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op, | |||
4370 | SelectionDAG &DAG) const { | |||
4371 | SDLoc DL(Op); | |||
4372 | auto *Load = cast<LoadSDNode>(Op); | |||
4373 | ||||
4374 | assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(), | |||
4375 | Load->getMemoryVT(), | |||
4376 | *Load->getMemOperand()) && | |||
4377 | "Expecting a correctly-aligned load"); | |||
4378 | ||||
4379 | MVT VT = Op.getSimpleValueType(); | |||
4380 | MVT ContainerVT = getContainerForFixedLengthVector(VT); | |||
4381 | ||||
4382 | SDValue VL = | |||
4383 | DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT()); | |||
4384 | ||||
4385 | SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other}); | |||
4386 | SDValue NewLoad = DAG.getMemIntrinsicNode( | |||
4387 | RISCVISD::VLE_VL, DL, VTs, {Load->getChain(), Load->getBasePtr(), VL}, | |||
4388 | Load->getMemoryVT(), Load->getMemOperand()); | |||
4389 | ||||
4390 | SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget); | |||
4391 | return DAG.getMergeValues({Result, Load->getChain()}, DL); | |||
4392 | } | |||
4393 | ||||
4394 | SDValue | |||
4395 | RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op, | |||
4396 | SelectionDAG &DAG) const { | |||
4397 | SDLoc DL(Op); | |||
4398 | auto *Store = cast<StoreSDNode>(Op); | |||
4399 | ||||
4400 | assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(), | |||
4401 | Store->getMemoryVT(), | |||
4402 | *Store->getMemOperand()) && | |||
4403 | "Expecting a correctly-aligned store"); | |||
4404 | ||||
4405 | SDValue StoreVal = Store->getValue(); | |||
4406 | MVT VT = StoreVal.getSimpleValueType(); | |||
4407 | ||||
4408 | // If the size is less than a byte, we need to pad with zeros to make a byte. | |||
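// For example (illustrative): a v4i1 store value is widened to v8i1 with
// zeros in lanes 4..7 so that a whole byte can be stored.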
4409 | if (VT.getVectorElementType() == MVT::i1 && VT.getVectorNumElements() < 8) { | |||
4410 | VT = MVT::v8i1; | |||
4411 | StoreVal = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, | |||
4412 | DAG.getConstant(0, DL, VT), StoreVal, | |||
4413 | DAG.getIntPtrConstant(0, DL)); | |||
4414 | } | |||
4415 | ||||
4416 | MVT ContainerVT = getContainerForFixedLengthVector(VT); | |||
4417 | ||||
4418 | SDValue VL = | |||
4419 | DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT()); | |||
4420 | ||||
4421 | SDValue NewValue = | |||
4422 | convertToScalableVector(ContainerVT, StoreVal, DAG, Subtarget); | |||
4423 | return DAG.getMemIntrinsicNode( | |||
4424 | RISCVISD::VSE_VL, DL, DAG.getVTList(MVT::Other), | |||
4425 | {Store->getChain(), NewValue, Store->getBasePtr(), VL}, | |||
4426 | Store->getMemoryVT(), Store->getMemOperand()); | |||
4427 | } | |||
4428 | ||||
4429 | SDValue RISCVTargetLowering::lowerMLOAD(SDValue Op, SelectionDAG &DAG) const { | |||
4430 | auto *Load = cast<MaskedLoadSDNode>(Op); | |||
4431 | ||||
4432 | SDLoc DL(Op); | |||
4433 | MVT VT = Op.getSimpleValueType(); | |||
4434 | MVT XLenVT = Subtarget.getXLenVT(); | |||
4435 | ||||
4436 | SDValue Mask = Load->getMask(); | |||
4437 | SDValue PassThru = Load->getPassThru(); | |||
4438 | SDValue VL; | |||
4439 | ||||
4440 | MVT ContainerVT = VT; | |||
4441 | if (VT.isFixedLengthVector()) { | |||
4442 | ContainerVT = getContainerForFixedLengthVector(VT); | |||
4443 | MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount()); | |||
4444 | ||||
4445 | Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget); | |||
4446 | PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget); | |||
4447 | VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT); | |||
4448 | } else | |||
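// Passing X0 as the VL operand requests operation on the whole scalable
// register (an assumption based on RVV's vsetvli x0 convention for VLMAX).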
4449 | VL = DAG.getRegister(RISCV::X0, XLenVT); | |||
4450 | ||||
4451 | SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other}); | |||
4452 | SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vle_mask, DL, XLenVT); | |||
4453 | SDValue Ops[] = {Load->getChain(), IntID, PassThru, | |||
4454 | Load->getBasePtr(), Mask, VL}; | |||
4455 | SDValue Result = | |||
4456 | DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, | |||
4457 | Load->getMemoryVT(), Load->getMemOperand()); | |||
4458 | SDValue Chain = Result.getValue(1); | |||
4459 | ||||
4460 | if (VT.isFixedLengthVector()) | |||
4461 | Result = convertFromScalableVector(VT, Result, DAG, Subtarget); | |||
4462 | ||||
4463 | return DAG.getMergeValues({Result, Chain}, DL); | |||
4464 | } | |||
4465 | ||||
4466 | SDValue RISCVTargetLowering::lowerMSTORE(SDValue Op, SelectionDAG &DAG) const { | |||
4467 | auto *Store = cast<MaskedStoreSDNode>(Op); | |||
4468 | ||||
4469 | SDLoc DL(Op); | |||
4470 | SDValue Val = Store->getValue(); | |||
4471 | SDValue Mask = Store->getMask(); | |||
4472 | MVT VT = Val.getSimpleValueType(); | |||
4473 | MVT XLenVT = Subtarget.getXLenVT(); | |||
4474 | SDValue VL; | |||
4475 | ||||
4476 | MVT ContainerVT = VT; | |||
4477 | if (VT.isFixedLengthVector()) { | |||
4478 | ContainerVT = getContainerForFixedLengthVector(VT); | |||
4479 | MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount()); | |||
4480 | ||||
4481 | Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget); | |||
4482 | Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget); | |||
4483 | VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT); | |||
4484 | } else | |||
4485 | VL = DAG.getRegister(RISCV::X0, XLenVT); | |||
4486 | ||||
4487 | SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vse_mask, DL, XLenVT); | |||
4488 | return DAG.getMemIntrinsicNode( | |||
4489 | ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other), | |||
4490 | {Store->getChain(), IntID, Val, Store->getBasePtr(), Mask, VL}, | |||
4491 | Store->getMemoryVT(), Store->getMemOperand()); | |||
4492 | } | |||
4493 | ||||
4494 | SDValue | |||
4495 | RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op, | |||
4496 | SelectionDAG &DAG) const { | |||
4497 | MVT InVT = Op.getOperand(0).getSimpleValueType(); | |||
4498 | MVT ContainerVT = getContainerForFixedLengthVector(InVT); | |||
4499 | ||||
4500 | MVT VT = Op.getSimpleValueType(); | |||
4501 | ||||
4502 | SDValue Op1 = | |||
4503 | convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget); | |||
4504 | SDValue Op2 = | |||
4505 | convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget); | |||
4506 | ||||
4507 | SDLoc DL(Op); | |||
4508 | SDValue VL = | |||
4509 | DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT()); | |||
4510 | ||||
4511 | MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount()); | |||
4512 | SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL); | |||
4513 | ||||
4514 | SDValue Cmp = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op1, Op2, | |||
4515 | Op.getOperand(2), Mask, VL); | |||
4516 | ||||
4517 | return convertFromScalableVector(VT, Cmp, DAG, Subtarget); | |||
4518 | } | |||
4519 | ||||
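// Usage sketch for the dispatch below (the opcode pairings are assumptions
// based on caller naming): for a fixed-length ISD::AND a caller would pass
// a mask opcode such as RISCVISD::VMAND_VL for i1 elements and a vector
// opcode such as RISCVISD::AND_VL otherwise; note that the mask form is
// lowered without a separate mask operand.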
4520 | SDValue RISCVTargetLowering::lowerFixedLengthVectorLogicOpToRVV( | |||
4521 | SDValue Op, SelectionDAG &DAG, unsigned MaskOpc, unsigned VecOpc) const { | |||
4522 | MVT VT = Op.getSimpleValueType(); | |||
4523 | ||||
4524 | if (VT.getVectorElementType() == MVT::i1) | |||
4525 | return lowerToScalableOp(Op, DAG, MaskOpc, /*HasMask*/ false); | |||
4526 | ||||
4527 | return lowerToScalableOp(Op, DAG, VecOpc, /*HasMask*/ true); | |||
4528 | } | |||
4529 | ||||
4530 | SDValue | |||
4531 | RISCVTargetLowering::lowerFixedLengthVectorShiftToRVV(SDValue Op, | |||
4532 | SelectionDAG &DAG) const { | |||
4533 | unsigned Opc; | |||
4534 | switch (Op.getOpcode()) { | |||
4535 | default: llvm_unreachable("Unexpected opcode!"); | |||
4536 | case ISD::SHL: Opc = RISCVISD::SHL_VL; break; | |||
4537 | case ISD::SRA: Opc = RISCVISD::SRA_VL; break; | |||
4538 | case ISD::SRL: Opc = RISCVISD::SRL_VL; break; | |||
4539 | } | |||
4540 | ||||
4541 | return lowerToScalableOp(Op, DAG, Opc); | |||
4542 | } | |||
4543 | ||||
4544 | // Lower vector ABS to smax(X, sub(0, X)). | |||
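// For example: abs(-5) = smax(-5, 0 - (-5)) = smax(-5, 5) = 5, and
// abs(7) = smax(7, -7) = 7.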
4545 | SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const { | |||
4546 | SDLoc DL(Op); | |||
4547 | MVT VT = Op.getSimpleValueType(); | |||
4548 | SDValue X = Op.getOperand(0); | |||
4549 | ||||
4550 | assert(VT.isFixedLengthVector() && "Unexpected type"); | |||
4551 | ||||
4552 | MVT ContainerVT = getContainerForFixedLengthVector(VT); | |||
4553 | X = convertToScalableVector(ContainerVT, X, DAG, Subtarget); | |||
4554 | ||||
4555 | SDValue Mask, VL; | |||
4556 | std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); | |||
4557 | ||||
4558 | SDValue SplatZero = | |||
4559 | DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, | |||
4560 | DAG.getConstant(0, DL, Subtarget.getXLenVT())); | |||
4561 | SDValue NegX = | |||
4562 | DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X, Mask, VL); | |||
4563 | SDValue Max = | |||
4564 | DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX, Mask, VL); | |||
4565 | ||||
4566 | return convertFromScalableVector(VT, Max, DAG, Subtarget); | |||
4567 | } | |||
4568 | ||||
4569 | SDValue RISCVTargetLowering::lowerFixedLengthVectorFCOPYSIGNToRVV( | |||
4570 | SDValue Op, SelectionDAG &DAG) const { | |||
4571 | SDLoc DL(Op); | |||
4572 | MVT VT = Op.getSimpleValueType(); | |||
4573 | SDValue Mag = Op.getOperand(0); | |||
4574 | SDValue Sign = Op.getOperand(1); | |||
4575 | assert(Mag.getValueType() == Sign.getValueType() && | |||
4576 | "Can only handle COPYSIGN with matching types."); | |||
4577 | ||||
4578 | MVT ContainerVT = getContainerForFixedLengthVector(VT); | |||
4579 | Mag = convertToScalableVector(ContainerVT, Mag, DAG, Subtarget); | |||
4580 | Sign = convertToScalableVector(ContainerVT, Sign, DAG, Subtarget); | |||
4581 | ||||
4582 | SDValue Mask, VL; | |||
4583 | std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); | |||
4584 | ||||
4585 | SDValue CopySign = | |||
4586 | DAG.getNode(RISCVISD::FCOPYSIGN_VL, DL, ContainerVT, Mag, Sign, Mask, VL); | |||
4587 | ||||
4588 | return convertFromScalableVector(VT, CopySign, DAG, Subtarget); | |||
4589 | } | |||
4590 | ||||
4591 | SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV( | |||
4592 | SDValue Op, SelectionDAG &DAG) const { | |||
4593 | MVT VT = Op.getSimpleValueType(); | |||
4594 | MVT ContainerVT = getContainerForFixedLengthVector(VT); | |||
4595 | ||||
4596 | MVT I1ContainerVT = | |||
4597 | MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount()); | |||
4598 | ||||
4599 | SDValue CC = | |||
4600 | convertToScalableVector(I1ContainerVT, Op.getOperand(0), DAG, Subtarget); | |||
4601 | SDValue Op1 = | |||
4602 | convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget); | |||
4603 | SDValue Op2 = | |||
4604 | convertToScalableVector(ContainerVT, Op.getOperand(2), DAG, Subtarget); | |||
4605 | ||||
4606 | SDLoc DL(Op); | |||
4607 | SDValue Mask, VL; | |||
4608 | std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); | |||
4609 | ||||
4610 | SDValue Select = | |||
4611 | DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, Op1, Op2, VL); | |||
4612 | ||||
4613 | return convertFromScalableVector(VT, Select, DAG, Subtarget); | |||
4614 | } | |||
4615 | ||||
4616 | SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG, | |||
4617 | unsigned NewOpc, | |||
4618 | bool HasMask) const { | |||
4619 | MVT VT = Op.getSimpleValueType(); | |||
4620 | MVT ContainerVT = getContainerForFixedLengthVector(VT); | |||
4621 | ||||
4622 | // Create list of operands by converting existing ones to scalable types. | |||
4623 | SmallVector<SDValue, 6> Ops; | |||
4624 | for (const SDValue &V : Op->op_values()) { | |||
4625 | assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!"); | |||
4626 | ||||
4627 | // Pass through non-vector operands. | |||
4628 | if (!V.getValueType().isVector()) { | |||
4629 | Ops.push_back(V); | |||
4630 | continue; | |||
4631 | } | |||
4632 | ||||
4633 | // "cast" fixed length vector to a scalable vector. | |||
4634 | assert(useRVVForFixedLengthVectorVT(V.getSimpleValueType()) && | |||
4635 | "Only fixed length vectors are supported!"); | |||
4636 | Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget)); | |||
4637 | } | |||
4638 | ||||
4639 | SDLoc DL(Op); | |||
4640 | SDValue Mask, VL; | |||
4641 | std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); | |||
4642 | if (HasMask) | |||
4643 | Ops.push_back(Mask); | |||
4644 | Ops.push_back(VL); | |||
4645 | ||||
4646 | SDValue ScalableRes = DAG.getNode(NewOpc, DL, ContainerVT, Ops); | |||
4647 | return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget); | |||
4648 | } | |||
4649 | ||||
4650 | // Lower a VP_* ISD node to the corresponding RISCVISD::*_VL node: | |||
4651 | // * Operands of each node are assumed to be in the same order. | |||
4652 | // * The EVL operand is promoted from i32 to i64 on RV64. | |||
4653 | // * Fixed-length vectors are converted to their scalable-vector container | |||
4654 | // types. | |||
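// For example (a sketch; the operand order is taken from the VP node
// itself): a fixed-length VP add (lhs, rhs, mask, evl) is rebuilt over the
// scalable container type with lhs, rhs and the mask converted to container
// types and the EVL promoted to i64 on RV64, then converted back to the
// fixed-length result type.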
4655 | SDValue RISCVTargetLowering::lowerVPOp(SDValue Op, SelectionDAG &DAG, | |||
4656 | unsigned RISCVISDOpc) const { | |||
4657 | SDLoc DL(Op); | |||
4658 | MVT VT = Op.getSimpleValueType(); | |||
4659 | SmallVector<SDValue, 4> Ops; | |||
4660 | ||||
4661 | for (const auto &OpIdx : enumerate(Op->ops())) { | |||
4662 | SDValue V = OpIdx.value(); | |||
4663 | assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!"); | |||
4664 | // Pass through operands which aren't fixed-length vectors. | |||
4665 | if (!V.getValueType().isFixedLengthVector()) { | |||
4666 | Ops.push_back(V); | |||
4667 | continue; | |||
4668 | } | |||
4669 | // "cast" fixed length vector to a scalable vector. | |||
4670 | MVT OpVT = V.getSimpleValueType(); | |||
4671 | MVT ContainerVT = getContainerForFixedLengthVector(OpVT); | |||
4672 | assert(useRVVForFixedLengthVectorVT(OpVT) && | |||
4673 | "Only fixed length vectors are supported!"); | |||
4674 | Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget)); | |||
4675 | } | |||
4676 | ||||
4677 | if (!VT.isFixedLengthVector()) | |||
4678 | return DAG.getNode(RISCVISDOpc, DL, VT, Ops); | |||
4679 | ||||
4680 | MVT ContainerVT = getContainerForFixedLengthVector(VT); | |||
4681 | ||||
4682 | SDValue VPOp = DAG.getNode(RISCVISDOpc, DL, ContainerVT, Ops); | |||
4683 | ||||
4684 | return convertFromScalableVector(VT, VPOp, DAG, Subtarget); | |||
4685 | } | |||
4686 | ||||
4687 | // Custom lower MGATHER to a legalized form for RVV. It will then be matched to | |||
4688 | // a RVV indexed load. The RVV indexed load instructions only support the | |||
4689 | // "unsigned unscaled" addressing mode; indices are implicitly zero-extended or | |||
4690 | // truncated to XLEN and are treated as byte offsets. Any signed or scaled | |||
4691 | // indexing is extended to the XLEN value type and scaled accordingly. | |||
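// Byte-offset example (illustrative): with base pointer B and an index
// vector already scaled to (0, 4, 8, 12), the four lanes load from B+0,
// B+4, B+8 and B+12 after each index is zero-extended or truncated to XLEN.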
4692 | SDValue RISCVTargetLowering::lowerMGATHER(SDValue Op, SelectionDAG &DAG) const { | |||
4693 | auto *MGN = cast<MaskedGatherSDNode>(Op.getNode()); | |||
4694 | SDLoc DL(Op); | |||
4695 | ||||
4696 | SDValue Index = MGN->getIndex(); | |||
4697 | SDValue Mask = MGN->getMask(); | |||
4698 | SDValue PassThru = MGN->getPassThru(); | |||
4699 | ||||
4700 | MVT VT = Op.getSimpleValueType(); | |||
4701 | MVT IndexVT = Index.getSimpleValueType(); | |||
4702 | MVT XLenVT = Subtarget.getXLenVT(); | |||
4703 | ||||
4704 | assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() && | |||
4705 | "Unexpected VTs!"); | |||
4706 | assert(MGN->getBasePtr().getSimpleValueType() == XLenVT && | |||
4707 | "Unexpected pointer type"); | |||
4708 | // Targets have to explicitly opt-in for extending vector loads. | |||
4709 | assert(MGN->getExtensionType() == ISD::NON_EXTLOAD && | |||
4710 | "Unexpected extending MGATHER"); | |||
4711 | ||||
4712 | // If the mask is known to be all ones, optimize to an unmasked intrinsic; | |||
4713 | // the selection of the masked intrinsics doesn't do this for us. | |||
4714 | bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode()); | |||
4715 | ||||
4716 | SDValue VL; | |||
4717 | MVT ContainerVT = VT; | |||
4718 | if (VT.isFixedLengthVector()) { | |||
4719 | // We need to use the larger of the result and index type to determine the | |||
4720 | // scalable type to use so we don't increase LMUL for any operand/result. | |||
4721 | if (VT.bitsGE(IndexVT)) { | |||
4722 | ContainerVT = getContainerForFixedLengthVector(VT); | |||
4723 | IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), | |||
4724 | ContainerVT.getVectorElementCount()); | |||
4725 | } else { | |||
4726 | IndexVT = getContainerForFixedLengthVector(IndexVT); | |||
4727 | ContainerVT = MVT::getVectorVT(ContainerVT.getVectorElementType(), | |||
4728 | IndexVT.getVectorElementCount()); | |||
4729 | } | |||
4730 | ||||
4731 | Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget); | |||
4732 | ||||
4733 | if (!IsUnmasked) { | |||
4734 | MVT MaskVT = | |||
4735 | MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount()); | |||
4736 | Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget); | |||
4737 | PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget); | |||
4738 | } | |||
4739 | ||||
4740 | VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT); | |||
4741 | } else | |||
4742 | VL = DAG.getRegister(RISCV::X0, XLenVT); | |||
4743 | ||||
4744 | unsigned IntID = | |||
4745 | IsUnmasked ? Intrinsic::riscv_vluxei : Intrinsic::riscv_vluxei_mask; | |||
4746 | SmallVector<SDValue, 8> Ops{MGN->getChain(), | |||
4747 | DAG.getTargetConstant(IntID, DL, XLenVT)}; | |||
4748 | if (!IsUnmasked) | |||
4749 | Ops.push_back(PassThru); | |||
4750 | Ops.push_back(MGN->getBasePtr()); | |||
4751 | Ops.push_back(Index); | |||
4752 | if (!IsUnmasked) | |||
4753 | Ops.push_back(Mask); | |||
4754 | Ops.push_back(VL); | |||
4755 | ||||
4756 | SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other}); | |||
4757 | SDValue Result = | |||
4758 | DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, | |||
4759 | MGN->getMemoryVT(), MGN->getMemOperand()); | |||
4760 | SDValue Chain = Result.getValue(1); | |||
4761 | ||||
4762 | if (VT.isFixedLengthVector()) | |||
4763 | Result = convertFromScalableVector(VT, Result, DAG, Subtarget); | |||
4764 | ||||
4765 | return DAG.getMergeValues({Result, Chain}, DL); | |||
4766 | } | |||
4767 | ||||
4768 | // Custom lower MSCATTER to a legalized form for RVV. It will then be matched to | |||
4769 | // an RVV indexed store. The RVV indexed store instructions only support the | |||
4770 | // "unsigned unscaled" addressing mode; indices are implicitly zero-extended or | |||
4771 | // truncated to XLEN and are treated as byte offsets. Any signed or scaled | |||
4772 | // indexing is extended to the XLEN value type and scaled accordingly. | |||
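// As an illustration of that addressing mode (hypothetical values, not taken
// from this source): for a scatter with base pointer %p and i8 indices
// <0, 4, 8>, each index is zero-extended to XLEN and added to %p as a raw
// byte offset, so lane i is stored to %p + index[i]; any element-size scaling
// must already have been folded into the indices before this point.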
4773 | SDValue RISCVTargetLowering::lowerMSCATTER(SDValue Op, | |||
4774 | SelectionDAG &DAG) const { | |||
4775 | auto *MSN = cast<MaskedScatterSDNode>(Op.getNode()); | |||
4776 | SDLoc DL(Op); | |||
4777 | SDValue Index = MSN->getIndex(); | |||
4778 | SDValue Mask = MSN->getMask(); | |||
4779 | SDValue Val = MSN->getValue(); | |||
4780 | ||||
4781 | MVT VT = Val.getSimpleValueType(); | |||
4782 | MVT IndexVT = Index.getSimpleValueType(); | |||
4783 | MVT XLenVT = Subtarget.getXLenVT(); | |||
4784 | ||||
4785 | assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() && | |||
4786 | "Unexpected VTs!"); | |||
4787 | assert(MSN->getBasePtr().getSimpleValueType() == XLenVT && | |||
4788 | "Unexpected pointer type"); | |||
4789 | // Targets have to explicitly opt in to extending vector loads and | |||
4790 | // truncating vector stores. | |||
4791 | assert(!MSN->isTruncatingStore() && "Unexpected truncating MSCATTER"); | |||
4792 | ||||
4793 | // If the mask is known to be all ones, optimize to an unmasked intrinsic; | |||
4794 | // the selection of the masked intrinsics doesn't do this for us. | |||
4795 | bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode()); | |||
4796 | ||||
4797 | SDValue VL; | |||
4798 | if (VT.isFixedLengthVector()) { | |||
4799 | // We need to use the larger of the value and index type to determine the | |||
4800 | // scalable type to use so we don't increase LMUL for any operand/result. | |||
4801 | MVT ContainerVT; | |||
4802 | if (VT.bitsGE(IndexVT)) { | |||
4803 | ContainerVT = getContainerForFixedLengthVector(VT); | |||
4804 | IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), | |||
4805 | ContainerVT.getVectorElementCount()); | |||
4806 | } else { | |||
4807 | IndexVT = getContainerForFixedLengthVector(IndexVT); | |||
4808 | ContainerVT = MVT::getVectorVT(VT.getVectorElementType(), | |||
4809 | IndexVT.getVectorElementCount()); | |||
4810 | } | |||
4811 | ||||
4812 | Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget); | |||
4813 | Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget); | |||
4814 | ||||
4815 | if (!IsUnmasked) { | |||
4816 | MVT MaskVT = | |||
4817 | MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount()); | |||
4818 | Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget); | |||
4819 | } | |||
4820 | ||||
4821 | VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT); | |||
4822 | } else | |||
4823 | VL = DAG.getRegister(RISCV::X0, XLenVT); | |||
4824 | ||||
4825 | unsigned IntID = | |||
4826 | IsUnmasked ? Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask; | |||
4827 | SmallVector<SDValue, 8> Ops{MSN->getChain(), | |||
4828 | DAG.getTargetConstant(IntID, DL, XLenVT)}; | |||
4829 | Ops.push_back(Val); | |||
4830 | Ops.push_back(MSN->getBasePtr()); | |||
4831 | Ops.push_back(Index); | |||
4832 | if (!IsUnmasked) | |||
4833 | Ops.push_back(Mask); | |||
4834 | Ops.push_back(VL); | |||
4835 | ||||
4836 | return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, MSN->getVTList(), Ops, | |||
4837 | MSN->getMemoryVT(), MSN->getMemOperand()); | |||
4838 | } | |||
4839 | ||||
4840 | SDValue RISCVTargetLowering::lowerGET_ROUNDING(SDValue Op, | |||
4841 | SelectionDAG &DAG) const { | |||
4842 | const MVT XLenVT = Subtarget.getXLenVT(); | |||
4843 | SDLoc DL(Op); | |||
4844 | SDValue Chain = Op->getOperand(0); | |||
4845 | SDValue SysRegNo = DAG.getConstant( | |||
4846 | RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT); | |||
4847 | SDVTList VTs = DAG.getVTList(XLenVT, MVT::Other); | |||
4848 | SDValue RM = DAG.getNode(RISCVISD::READ_CSR, DL, VTs, Chain, SysRegNo); | |||
4849 | ||||
4850 | // The encoding used for the rounding mode in RISCV differs from the one used | |||
4851 | // by FLT_ROUNDS. To convert between them, the RISCV rounding mode is used as | |||
4852 | // an index into a table consisting of a sequence of 4-bit fields, each of | |||
4853 | // which holds the corresponding FLT_ROUNDS mode. | |||
4854 | static const int Table = | |||
4855 | (int(RoundingMode::NearestTiesToEven) << 4 * RISCVFPRndMode::RNE) | | |||
4856 | (int(RoundingMode::TowardZero) << 4 * RISCVFPRndMode::RTZ) | | |||
4857 | (int(RoundingMode::TowardNegative) << 4 * RISCVFPRndMode::RDN) | | |||
4858 | (int(RoundingMode::TowardPositive) << 4 * RISCVFPRndMode::RUP) | | |||
4859 | (int(RoundingMode::NearestTiesToAway) << 4 * RISCVFPRndMode::RMM); | |||
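// As a worked example (assuming the standard encodings RNE=0, RTZ=1, RDN=2,
// RUP=3, RMM=4 and TowardZero=0, NearestTiesToEven=1, TowardPositive=2,
// TowardNegative=3, NearestTiesToAway=4), Table evaluates to 0x42301. Reading
// FRM = RDN (2) then selects bits [11:8] via the shift/mask below:
// ((0x42301 >> 8) & 7) == 3, i.e. RoundingMode::TowardNegative, which is the
// matching FLT_ROUNDS value.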
4860 | ||||
4861 | SDValue Shift = | |||
4862 | DAG.getNode(ISD::SHL, DL, XLenVT, RM, DAG.getConstant(2, DL, XLenVT)); | |||
4863 | SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT, | |||
4864 | DAG.getConstant(Table, DL, XLenVT), Shift); | |||
4865 | SDValue Masked = DAG.getNode(ISD::AND, DL, XLenVT, Shifted, | |||
4866 | DAG.getConstant(7, DL, XLenVT)); | |||
4867 | ||||
4868 | return DAG.getMergeValues({Masked, Chain}, DL); | |||
4869 | } | |||
4870 | ||||
4871 | SDValue RISCVTargetLowering::lowerSET_ROUNDING(SDValue Op, | |||
4872 | SelectionDAG &DAG) const { | |||
4873 | const MVT XLenVT = Subtarget.getXLenVT(); | |||
4874 | SDLoc DL(Op); | |||
4875 | SDValue Chain = Op->getOperand(0); | |||
4876 | SDValue RMValue = Op->getOperand(1); | |||
4877 | SDValue SysRegNo = DAG.getConstant( | |||
4878 | RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT); | |||
4879 | ||||
4880 | // The encoding used for the rounding mode in RISCV differs from the one used | |||
4881 | // by FLT_ROUNDS. To convert between them, the C rounding mode is used as an | |||
4882 | // index into a table consisting of a sequence of 4-bit fields, each of which | |||
4883 | // holds the corresponding RISCV mode. | |||
4884 | static const unsigned Table = | |||
4885 | (RISCVFPRndMode::RNE << 4 * int(RoundingMode::NearestTiesToEven)) | | |||
4886 | (RISCVFPRndMode::RTZ << 4 * int(RoundingMode::TowardZero)) | | |||
4887 | (RISCVFPRndMode::RDN << 4 * int(RoundingMode::TowardNegative)) | | |||
4888 | (RISCVFPRndMode::RUP << 4 * int(RoundingMode::TowardPositive)) | | |||
4889 | (RISCVFPRndMode::RMM << 4 * int(RoundingMode::NearestTiesToAway)); | |||
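// Under the same standard encodings assumed above, this inverse table also
// evaluates to 0x42301: the FRM <-> FLT_ROUNDS mapping swaps 0 with 1 and 2
// with 3 while fixing 4, so the permutation is its own inverse and the two
// lookup tables coincide.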
4890 | ||||
4891 | SDValue Shift = DAG.getNode(ISD::SHL, DL, XLenVT, RMValue, | |||
4892 | DAG.getConstant(2, DL, XLenVT)); | |||
4893 | SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT, | |||
4894 | DAG.getConstant(Table, DL, XLenVT), Shift); | |||
4895 | RMValue = DAG.getNode(ISD::AND, DL, XLenVT, Shifted, | |||
4896 | DAG.getConstant(0x7, DL, XLenVT)); | |||
4897 | return DAG.getNode(RISCVISD::WRITE_CSR, DL, MVT::Other, Chain, SysRegNo, | |||
4898 | RMValue); | |||
4899 | } | |||
4900 | ||||
4901 | // Returns the opcode of the target-specific SDNode that implements the 32-bit | |||
4902 | // form of the given Opcode. | |||
4903 | static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) { | |||
4904 | switch (Opcode) { | |||
4905 | default: | |||
4906 | llvm_unreachable("Unexpected opcode")::llvm::llvm_unreachable_internal("Unexpected opcode", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/RISCV/RISCVISelLowering.cpp" , 4906); | |||
4907 | case ISD::SHL: | |||
4908 | return RISCVISD::SLLW; | |||
4909 | case ISD::SRA: | |||
4910 | return RISCVISD::SRAW; | |||
4911 | case ISD::SRL: | |||
4912 | return RISCVISD::SRLW; | |||
4913 | case ISD::SDIV: | |||
4914 | return RISCVISD::DIVW; | |||
4915 | case ISD::UDIV: | |||
4916 | return RISCVISD::DIVUW; | |||
4917 | case ISD::UREM: | |||
4918 | return RISCVISD::REMUW; | |||
4919 | case ISD::ROTL: | |||
4920 | return RISCVISD::ROLW; | |||
4921 | case ISD::ROTR: | |||
4922 | return RISCVISD::RORW; | |||
4923 | case RISCVISD::GREV: | |||
4924 | return RISCVISD::GREVW; | |||
4925 | case RISCVISD::GORC: | |||
4926 | return RISCVISD::GORCW; | |||
4927 | } | |||
4928 | } | |||
4929 | ||||
4930 | // Converts the given i8/i16/i32 operation to a target-specific SelectionDAG | |||
4931 | // node. Because i8/i16/i32 isn't a legal type for RV64, these operations would | |||
4932 | // otherwise be promoted to i64, making it difficult to select the | |||
4933 | // SLLW/DIVUW/.../*W instructions later on, because the fact that the | |||
4934 | // operation was originally of type i8/i16/i32 is lost. | |||
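// For example, an i32 ISD::SRA on RV64 would be promoted to a plain 64-bit
// SRA, after which nothing records that only the low 32 bits mattered;
// emitting RISCVISD::SRAW instead keeps the 32-bit semantics explicit so the
// sraw instruction can be selected.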
4935 | static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG, | |||
4936 | unsigned ExtOpc = ISD::ANY_EXTEND) { | |||
4937 | SDLoc DL(N); | |||
4938 | RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode()); | |||
4939 | SDValue NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0)); | |||
4940 | SDValue NewOp1 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(1)); | |||
4941 | SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1); | |||
4942 | // ReplaceNodeResults requires we maintain the same type for the return value. | |||
4943 | return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes); | |||
4944 | } | |||
4945 | ||||
4946 | // Converts the given 32-bit operation to an i64 operation with sign-extension | |||
4947 | // semantics, reducing the number of sign-extension instructions needed. | |||
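// As a sketch of the transform below, an i32 ADD becomes
//   (trunc (sext_inreg (add (any_extend a), (any_extend b)), i32)),
// which selects to addw and leaves the result visibly sign-extended, so
// redundant sign-extensions of it can later be removed.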
4948 | static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) { | |||
4949 | SDLoc DL(N); | |||
4950 | SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0)); | |||
4951 | SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1)); | |||
4952 | SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1); | |||
4953 | SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp, | |||
4954 | DAG.getValueType(MVT::i32)); | |||
4955 | return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes); | |||
4956 | } | |||
4957 | ||||
4958 | void RISCVTargetLowering::ReplaceNodeResults(SDNode *N, | |||
4959 | SmallVectorImpl<SDValue> &Results, | |||
4960 | SelectionDAG &DAG) const { | |||
4961 | SDLoc DL(N); | |||
4962 | switch (N->getOpcode()) { | |||
4963 | default: | |||
4964 | llvm_unreachable("Don't know how to custom type legalize this operation!")::llvm::llvm_unreachable_internal("Don't know how to custom type legalize this operation!" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/RISCV/RISCVISelLowering.cpp" , 4964); | |||
4965 | case ISD::STRICT_FP_TO_SINT: | |||
4966 | case ISD::STRICT_FP_TO_UINT: | |||
4967 | case ISD::FP_TO_SINT: | |||
4968 | case ISD::FP_TO_UINT: { | |||
4969 | assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && | |||
4970 | "Unexpected custom legalisation"); | |||
4971 | bool IsStrict = N->isStrictFPOpcode(); | |||
4972 | bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT || | |||
4973 | N->getOpcode() == ISD::STRICT_FP_TO_SINT; | |||
4974 | SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0); | |||
4975 | if (getTypeAction(*DAG.getContext(), Op0.getValueType()) != | |||
4976 | TargetLowering::TypeSoftenFloat) { | |||
4977 | // FIXME: Support strict FP. | |||
4978 | if (IsStrict) | |||
4979 | return; | |||
4980 | if (!isTypeLegal(Op0.getValueType())) | |||
4981 | return; | |||
4982 | unsigned Opc = | |||
4983 | IsSigned ? RISCVISD::FCVT_W_RTZ_RV64 : RISCVISD::FCVT_WU_RTZ_RV64; | |||
4984 | SDValue Res = DAG.getNode(Opc, DL, MVT::i64, Op0); | |||
4985 | Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res)); | |||
4986 | return; | |||
4987 | } | |||
4988 | // If the FP type needs to be softened, emit a library call using the 'si' | |||
4989 | // version. If we left it to default legalization we'd end up with 'di'. If | |||
4990 | // the FP type doesn't need to be softened just let generic type | |||
4991 | // legalization promote the result type. | |||
4992 | RTLIB::Libcall LC; | |||
4993 | if (IsSigned) | |||
4994 | LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0)); | |||
4995 | else | |||
4996 | LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0)); | |||
4997 | MakeLibCallOptions CallOptions; | |||
4998 | EVT OpVT = Op0.getValueType(); | |||
4999 | CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true); | |||
5000 | SDValue Chain = IsStrict ? N->getOperand(0) : SDValue(); | |||
5001 | SDValue Result; | |||
5002 | std::tie(Result, Chain) = | |||
5003 | makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain); | |||
5004 | Results.push_back(Result); | |||
5005 | if (IsStrict) | |||
5006 | Results.push_back(Chain); | |||
5007 | break; | |||
5008 | } | |||
5009 | case ISD::READCYCLECOUNTER: { | |||
5010 | assert(!Subtarget.is64Bit() && | |||
5011 | "READCYCLECOUNTER only has custom type legalization on riscv32"); | |||
5012 | ||||
5013 | SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); | |||
5014 | SDValue RCW = | |||
5015 | DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0)); | |||
5016 | ||||
5017 | Results.push_back( | |||
5018 | DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1))); | |||
5019 | Results.push_back(RCW.getValue(2)); | |||
5020 | break; | |||
5021 | } | |||
5022 | case ISD::MUL: { | |||
5023 | unsigned Size = N->getSimpleValueType(0).getSizeInBits(); | |||
5024 | unsigned XLen = Subtarget.getXLen(); | |||
5025 | // This multiply needs to be expanded, try to use MULHSU+MUL if possible. | |||
5026 | if (Size > XLen) { | |||
5027 | assert(Size == (XLen * 2) && "Unexpected custom legalisation"); | |||
5028 | SDValue LHS = N->getOperand(0); | |||
5029 | SDValue RHS = N->getOperand(1); | |||
5030 | APInt HighMask = APInt::getHighBitsSet(Size, XLen); | |||
5031 | ||||
5032 | bool LHSIsU = DAG.MaskedValueIsZero(LHS, HighMask); | |||
5033 | bool RHSIsU = DAG.MaskedValueIsZero(RHS, HighMask); | |||
5034 | // We need exactly one side to be unsigned. | |||
5035 | if (LHSIsU == RHSIsU) | |||
5036 | return; | |||
5037 | ||||
5038 | auto MakeMULPair = [&](SDValue S, SDValue U) { | |||
5039 | MVT XLenVT = Subtarget.getXLenVT(); | |||
5040 | S = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, S); | |||
5041 | U = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, U); | |||
5042 | SDValue Lo = DAG.getNode(ISD::MUL, DL, XLenVT, S, U); | |||
5043 | SDValue Hi = DAG.getNode(RISCVISD::MULHSU, DL, XLenVT, S, U); | |||
5044 | return DAG.getNode(ISD::BUILD_PAIR, DL, N->getValueType(0), Lo, Hi); | |||
5045 | }; | |||
5046 | ||||
5047 | bool LHSIsS = DAG.ComputeNumSignBits(LHS) > XLen; | |||
5048 | bool RHSIsS = DAG.ComputeNumSignBits(RHS) > XLen; | |||
5049 | ||||
5050 | // The other operand should be signed, but we still prefer MULH when possible: | |||
5051 | // if the operand known to be unsigned is also sign-extended, the generic | |||
5052 | // expansion can use MULH instead of our MULHSU. | |||
5052 | if (RHSIsU && LHSIsS && !RHSIsS) | |||
5053 | Results.push_back(MakeMULPair(LHS, RHS)); | |||
5054 | else if (LHSIsU && RHSIsS && !LHSIsS) | |||
5055 | Results.push_back(MakeMULPair(RHS, LHS)); | |||
5056 | ||||
5057 | return; | |||
5058 | } | |||
5059 | LLVM_FALLTHROUGH; | |||
5060 | } | |||
5061 | case ISD::ADD: | |||
5062 | case ISD::SUB: | |||
5063 | assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && | |||
5064 | "Unexpected custom legalisation"); | |||
5065 | Results.push_back(customLegalizeToWOpWithSExt(N, DAG)); | |||
5066 | break; | |||
5067 | case ISD::SHL: | |||
5068 | case ISD::SRA: | |||
5069 | case ISD::SRL: | |||
5070 | assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && | |||
5071 | "Unexpected custom legalisation"); | |||
5072 | if (N->getOperand(1).getOpcode() != ISD::Constant) { | |||
5073 | Results.push_back(customLegalizeToWOp(N, DAG)); | |||
5074 | break; | |||
5075 | } | |||
5076 | ||||
5077 | // Custom legalize ISD::SHL by placing a SIGN_EXTEND_INREG after. This is | |||
5078 | // similar to customLegalizeToWOpWithSExt, but we must zero_extend the | |||
5079 | // shift amount. | |||
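// The zero_extend of the amount is deliberate: with any_extend the promoted
// amount's upper bits would be undefined, and an i64 SHL whose amount might
// exceed 63 has an undefined result, whereas zero-extending pins the amount
// to its original [0, 31] range.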
5080 | if (N->getOpcode() == ISD::SHL) { | |||
5081 | SDLoc DL(N); | |||
5082 | SDValue NewOp0 = | |||
5083 | DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0)); | |||
5084 | SDValue NewOp1 = | |||
5085 | DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1)); | |||
5086 | SDValue NewWOp = DAG.getNode(ISD::SHL, DL, MVT::i64, NewOp0, NewOp1); | |||
5087 | SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp, | |||
5088 | DAG.getValueType(MVT::i32)); | |||
5089 | Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes)); | |||
5090 | } | |||
5091 | ||||
5092 | break; | |||
5093 | case ISD::ROTL: | |||
5094 | case ISD::ROTR: | |||
5095 | assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && | |||
5096 | "Unexpected custom legalisation"); | |||
5097 | Results.push_back(customLegalizeToWOp(N, DAG)); | |||
5098 | break; | |||
5099 | case ISD::CTTZ: | |||
5100 | case ISD::CTTZ_ZERO_UNDEF: | |||
5101 | case ISD::CTLZ: | |||
5102 | case ISD::CTLZ_ZERO_UNDEF: { | |||
5103 | assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && | |||
5104 | "Unexpected custom legalisation"); | |||
5105 | ||||
5106 | SDValue NewOp0 = | |||
5107 | DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0)); | |||
5108 | bool IsCTZ = | |||
5109 | N->getOpcode() == ISD::CTTZ || N->getOpcode() == ISD::CTTZ_ZERO_UNDEF; | |||
5110 | unsigned Opc = IsCTZ ? RISCVISD::CTZW : RISCVISD::CLZW; | |||
5111 | SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp0); | |||
5112 | Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res)); | |||
5113 | return; | |||
5114 | } | |||
5115 | case ISD::SDIV: | |||
5116 | case ISD::UDIV: | |||
5117 | case ISD::UREM: { | |||
5118 | MVT VT = N->getSimpleValueType(0); | |||
5119 | assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) && | |||
5120 | Subtarget.is64Bit() && Subtarget.hasStdExtM() && | |||
5121 | "Unexpected custom legalisation"); | |||
5122 | // Don't promote division/remainder by a constant, since we should expand | |||
5123 | // those to a multiply by a magic constant instead. | |||
5124 | // FIXME: What if the expansion is disabled for minsize? | |||
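// For example, a udiv i32 by 10 is better served by the generic magic-number
// expansion (roughly, a multiply-high by 0xCCCCCCCD followed by a shift) than
// by promotion to divuw, since a multiply is typically much cheaper than a
// divide.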
5125 | if (N->getOperand(1).getOpcode() == ISD::Constant) | |||
5126 | return; | |||
5127 | ||||
5128 | // If the input is i32, use ANY_EXTEND since the W instructions don't read | |||
5129 | // the upper 32 bits. For other types we need to sign or zero extend | |||
5130 | // based on the opcode. | |||
5131 | unsigned ExtOpc = ISD::ANY_EXTEND; | |||
5132 | if (VT != MVT::i32) | |||
5133 | ExtOpc = N->getOpcode() == ISD::SDIV ? ISD::SIGN_EXTEND | |||
5134 | : ISD::ZERO_EXTEND; | |||
5135 | ||||
5136 | Results.push_back(customLegalizeToWOp(N, DAG, ExtOpc)); | |||
5137 | break; | |||
5138 | } | |||
5139 | case ISD::UADDO: | |||
5140 | case ISD::USUBO: { | |||
5141 | assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && | |||
5142 | "Unexpected custom legalisation"); | |||
5143 | bool IsAdd = N->getOpcode() == ISD::UADDO; | |||
5144 | // Create an ADDW or SUBW. | |||
5145 | SDValue LHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0)); | |||
5146 | SDValue RHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1)); | |||
5147 | SDValue Res = | |||
5148 | DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, DL, MVT::i64, LHS, RHS); | |||
5149 | Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Res, | |||
5150 | DAG.getValueType(MVT::i32)); | |||
5151 | ||||
5152 | // Sign extend the LHS and perform an unsigned compare with the ADDW result. | |||
5153 | // Since the inputs are sign extended from i32, this is equivalent to | |||
5154 | // comparing the lower 32 bits. | |||
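// Worked example: for a UADDO of 0xFFFFFFFF + 1, the ADDW result
// sign-extends to 0, while the sign-extended LHS is 0xFFFFFFFFFFFFFFFF;
// 0 is unsigned-less-than that, so SETULT correctly reports overflow.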
5155 | LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0)); | |||
5156 | SDValue Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, LHS, | |||
5157 | IsAdd ? ISD::SETULT : ISD::SETUGT); | |||
5158 | ||||
5159 | Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res)); | |||
5160 | Results.push_back(Overflow); | |||
5161 | return; | |||
5162 | } | |||
5163 | case ISD::UADDSAT: | |||
5164 | case ISD::USUBSAT: { | |||
5165 | assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && | |||
5166 | "Unexpected custom legalisation"); | |||
5167 | if (Subtarget.hasStdExtZbb()) { | |||
5168 | // With Zbb we can sign extend and let LegalizeDAG use minu/maxu. Using | |||
5169 | // sign extend allows overflow of the lower 32 bits to be detected on | |||
5170 | // the promoted size. | |||
5171 | SDValue LHS = | |||
5172 | DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0)); | |||
5173 | SDValue RHS = | |||
5174 | DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(1)); | |||
5175 | SDValue Res = DAG.getNode(N->getOpcode(), DL, MVT::i64, LHS, RHS); | |||
5176 | Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res)); | |||
5177 | return; | |||
5178 | } | |||
5179 | ||||
5180 | // Without Zbb, expand to UADDO/USUBO+select which will trigger our custom | |||
5181 | // promotion for UADDO/USUBO. | |||
5182 | Results.push_back(expandAddSubSat(N, DAG)); | |||
5183 | return; | |||
5184 | } | |||
5185 | case ISD::BITCAST: { | |||
5186 | EVT VT = N->getValueType(0); | |||
5187 | assert(VT.isInteger() && !VT.isVector() && "Unexpected VT!"); | |||
5188 | SDValue Op0 = N->getOperand(0); | |||
5189 | EVT Op0VT = Op0.getValueType(); | |||
5190 | MVT XLenVT = Subtarget.getXLenVT(); | |||
5191 | if (VT == MVT::i16 && Op0VT == MVT::f16 && Subtarget.hasStdExtZfh()) { | |||
5192 | SDValue FPConv = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, XLenVT, Op0); | |||
5193 | Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv)); | |||
5194 | } else if (VT == MVT::i32 && Op0VT == MVT::f32 && Subtarget.is64Bit() && | |||
5195 | Subtarget.hasStdExtF()) { | |||
5196 | SDValue FPConv = | |||
5197 | DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0); | |||
5198 | Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv)); | |||
5199 | } else if (!VT.isVector() && Op0VT.isFixedLengthVector() && | |||
5200 | isTypeLegal(Op0VT)) { | |||
5201 | // Custom-legalize bitcasts from fixed-length vector types to illegal | |||
5202 | // scalar types in order to improve codegen. Bitcast the vector to a | |||
5203 | // one-element vector type whose element type is the same as the result | |||
5204 | // type, and extract the first element. | |||
5205 | LLVMContext &Context = *DAG.getContext(); | |||
5206 | SDValue BVec = DAG.getBitcast(EVT::getVectorVT(Context, VT, 1), Op0); | |||
5207 | Results.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec, | |||
5208 | DAG.getConstant(0, DL, XLenVT))); | |||
5209 | } | |||
5210 | break; | |||
5211 | } | |||
5212 | case RISCVISD::GREV: | |||
5213 | case RISCVISD::GORC: { | |||
5214 | assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && | |||
5215 | "Unexpected custom legalisation"); | |||
5216 | assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant"); | |||
5217 | // This is similar to customLegalizeToWOp, except that we pass the second | |||
5218 | // operand (a TargetConstant) straight through: it is already of type | |||
5219 | // XLenVT. | |||
5220 | RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode()); | |||
5221 | SDValue NewOp0 = | |||
5222 | DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0)); | |||
5223 | SDValue NewOp1 = | |||
5224 | DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1)); | |||
5225 | SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1); | |||
5226 | // ReplaceNodeResults requires we maintain the same type for the return | |||
5227 | // value. | |||
5228 | Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes)); | |||
5229 | break; | |||
5230 | } | |||
5231 | case RISCVISD::SHFL: { | |||
5232 | // There is no SHFLIW instruction, but we can just promote the operation. | |||
5233 | assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && | |||
5234 | "Unexpected custom legalisation"); | |||
5235 | assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant"); | |||
5236 | SDValue NewOp0 = | |||
5237 | DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0)); | |||
5238 | SDValue NewOp1 = | |||
5239 | DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1)); | |||
5240 | SDValue NewRes = DAG.getNode(RISCVISD::SHFL, DL, MVT::i64, NewOp0, NewOp1); | |||
5241 | // ReplaceNodeResults requires we maintain the same type for the return | |||
5242 | // value. | |||
5243 | Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes)); | |||
5244 | break; | |||
5245 | } | |||
5246 | case ISD::BSWAP: | |||
5247 | case ISD::BITREVERSE: { | |||
5248 | MVT VT = N->getSimpleValueType(0); | |||
5249 | MVT XLenVT = Subtarget.getXLenVT(); | |||
5250 | assert((VT == MVT::i8 || VT == MVT::i16 || | |||
5251 | (VT == MVT::i32 && Subtarget.is64Bit())) && | |||
5252 | Subtarget.hasStdExtZbp() && "Unexpected custom legalisation"); | |||
5253 | SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, N->getOperand(0)); | |||
5254 | unsigned Imm = VT.getSizeInBits() - 1; | |||
5255 | // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits. | |||
5256 | if (N->getOpcode() == ISD::BSWAP) | |||
5257 | Imm &= ~0x7U; | |||
5258 | unsigned Opc = Subtarget.is64Bit() ? RISCVISD::GREVW : RISCVISD::GREV; | |||
5259 | SDValue GREVI = | |||
5260 | DAG.getNode(Opc, DL, XLenVT, NewOp0, DAG.getConstant(Imm, DL, XLenVT)); | |||
5261 | // ReplaceNodeResults requires we maintain the same type for the return | |||
5262 | // value. | |||
5263 | Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, GREVI)); | |||
5264 | break; | |||
5265 | } | |||
5266 | case ISD::FSHL: | |||
5267 | case ISD::FSHR: { | |||
5268 | assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && | |||
5269 | Subtarget.hasStdExtZbt() && "Unexpected custom legalisation"); | |||
5270 | SDValue NewOp0 = | |||
5271 | DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0)); | |||
5272 | SDValue NewOp1 = | |||
5273 | DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1)); | |||
5274 | SDValue NewOp2 = | |||
5275 | DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2)); | |||
5276 | // FSLW/FSRW take a 6 bit shift amount but i32 FSHL/FSHR only use 5 bits. | |||
5277 | // Mask the shift amount to 5 bits. | |||
5278 | NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2, | |||
5279 | DAG.getConstant(0x1f, DL, MVT::i64)); | |||
5280 | unsigned Opc = | |||
5281 | N->getOpcode() == ISD::FSHL ? RISCVISD::FSLW : RISCVISD::FSRW; | |||
5282 | SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewOp2); | |||
5283 | Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp)); | |||
5284 | break; | |||
5285 | } | |||
5286 | case ISD::EXTRACT_VECTOR_ELT: { | |||
5287 | // Custom-legalize an EXTRACT_VECTOR_ELT where XLEN<SEW, as the SEW element | |||
5288 | // type is illegal (currently only vXi64 RV32). | |||
5289 | // With vmv.x.s, when SEW > XLEN, only the least-significant XLEN bits are | |||
5290 | // transferred to the destination register. We issue two of these from the | |||
5291 | // upper- and lower- halves of the SEW-bit vector element, slid down to the | |||
5292 | // first element. | |||
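// As an illustrative (not selection-verified) sequence, extracting element 1
// of a v2i64 on RV32 becomes roughly:
//   vslidedown.vi v8, v8, 1
//   vmv.x.s a0, v8            // low 32 bits
//   li a2, 32
//   vsrl.vx v8, v8, a2
//   vmv.x.s a1, v8            // high 32 bits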
5293 | SDValue Vec = N->getOperand(0); | |||
5294 | SDValue Idx = N->getOperand(1); | |||
5295 | ||||
5296 | // The vector type hasn't been legalized yet so we can't issue target | |||
5297 | // specific nodes if it needs legalization. | |||
5298 | // FIXME: We would manually legalize if it's important. | |||
5299 | if (!isTypeLegal(Vec.getValueType())) | |||
5300 | return; | |||
5301 | ||||
5302 | MVT VecVT = Vec.getSimpleValueType(); | |||
5303 | ||||
5304 | assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 && | |||
5305 | VecVT.getVectorElementType() == MVT::i64 && | |||
5306 | "Unexpected EXTRACT_VECTOR_ELT legalization"); | |||
5307 | ||||
5308 | // If this is a fixed vector, we need to convert it to a scalable vector. | |||
5309 | MVT ContainerVT = VecVT; | |||
5310 | if (VecVT.isFixedLengthVector()) { | |||
5311 | ContainerVT = getContainerForFixedLengthVector(VecVT); | |||
5312 | Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget); | |||
5313 | } | |||
5314 | ||||
5315 | MVT XLenVT = Subtarget.getXLenVT(); | |||
5316 | ||||
5317 | // Use a VL of 1 to avoid processing more elements than we need. | |||
5318 | MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount()); | |||
5319 | SDValue VL = DAG.getConstant(1, DL, XLenVT); | |||
5320 | SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL); | |||
5321 | ||||
5322 | // Unless the index is known to be 0, we must slide the vector down to get | |||
5323 | // the desired element into index 0. | |||
5324 | if (!isNullConstant(Idx)) { | |||
5325 | Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT, | |||
5326 | DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL); | |||
5327 | } | |||
5328 | ||||
5329 | // Extract the lower XLEN bits of the correct vector element. | |||
5330 | SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec); | |||
5331 | ||||
5332 | // To extract the upper XLEN bits of the vector element, shift the first | |||
5333 | // element right by 32 bits and re-extract the lower XLEN bits. | |||
5334 | SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, | |||
5335 | DAG.getConstant(32, DL, XLenVT), VL); | |||
5336 | SDValue LShr32 = DAG.getNode(RISCVISD::SRL_VL, DL, ContainerVT, Vec, | |||
5337 | ThirtyTwoV, Mask, VL); | |||
5338 | ||||
5339 | SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32); | |||
5340 | ||||
5341 | Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi)); | |||
5342 | break; | |||
5343 | } | |||
5344 | case ISD::INTRINSIC_WO_CHAIN: { | |||
5345 | unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); | |||
5346 | switch (IntNo) { | |||
5347 | default: | |||
5348 | llvm_unreachable( | |||
5349 | "Don't know how to custom type legalize this intrinsic!"); | |||
5350 | case Intrinsic::riscv_orc_b: { | |||
5351 | // Lower to the GORCI encoding for orc.b with the operand extended. | |||
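// (orc.b is encoded like a gorci with an immediate of 7, i.e. an OR-combine
// within each byte, hence the constant 7 below.)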
5352 | SDValue NewOp = | |||
5353 | DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1)); | |||
5354 | // If Zbp is enabled, use GORCIW which will sign extend the result. | |||
5355 | unsigned Opc = | |||
5356 | Subtarget.hasStdExtZbp() ? RISCVISD::GORCW : RISCVISD::GORC; | |||
5357 | SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp, | |||
5358 | DAG.getConstant(7, DL, MVT::i64)); | |||
5359 | Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res)); | |||
5360 | return; | |||
5361 | } | |||
5362 | case Intrinsic::riscv_grev: | |||
5363 | case Intrinsic::riscv_gorc: { | |||
5364 | assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && | |||
5365 | "Unexpected custom legalisation"); | |||
5366 | SDValue NewOp1 = | |||
5367 | DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1)); | |||
5368 | SDValue NewOp2 = | |||
5369 | DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2)); | |||
5370 | unsigned Opc = | |||
5371 | IntNo == Intrinsic::riscv_grev ? RISCVISD::GREVW : RISCVISD::GORCW; | |||
5372 | SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2); | |||
5373 | Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res)); | |||
5374 | break; | |||
5375 | } | |||
5376 | case Intrinsic::riscv_shfl: | |||
5377 | case Intrinsic::riscv_unshfl: { | |||
5378 | assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && | |||
5379 | "Unexpected custom legalisation"); | |||
5380 | SDValue NewOp1 = | |||
5381 | DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1)); | |||
5382 | SDValue NewOp2 = | |||
5383 | DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2)); | |||
5384 | unsigned Opc = | |||
5385 | IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFLW : RISCVISD::UNSHFLW; | |||
5386 | if (isa<ConstantSDNode>(N->getOperand(2))) { | |||
5387 | NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2, | |||
5388 | DAG.getConstant(0xf, DL, MVT::i64)); | |||
5389 | Opc = | |||
5390 | IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL; | |||
5391 | } | |||
5392 | SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2); | |||
5393 | Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res)); | |||
5394 | break; | |||
5395 | } | |||
5396 | case Intrinsic::riscv_bcompress: | |||
5397 | case Intrinsic::riscv_bdecompress: { | |||
5398 | assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && | |||
5399 | "Unexpected custom legalisation"); | |||
5400 | SDValue NewOp1 = | |||
5401 | DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1)); | |||
5402 | SDValue NewOp2 = | |||
5403 | DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2)); | |||
5404 | unsigned Opc = IntNo == Intrinsic::riscv_bcompress | |||
5405 | ? RISCVISD::BCOMPRESSW | |||
5406 | : RISCVISD::BDECOMPRESSW; | |||
5407 | SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2); | |||
5408 | Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res)); | |||
5409 | break; | |||
5410 | } | |||
5411 | case Intrinsic::riscv_vmv_x_s: { | |||
5412 | EVT VT = N->getValueType(0); | |||
5413 | MVT XLenVT = Subtarget.getXLenVT(); | |||
5414 | if (VT.bitsLT(XLenVT)) { | |||
5415 | // Simple case just extract using vmv.x.s and truncate. | |||
5416 | SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL, | |||
5417 | Subtarget.getXLenVT(), N->getOperand(1)); | |||
5418 | Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract)); | |||
5419 | return; | |||
5420 | } | |||
5421 | ||||
5422 | assert(VT == MVT::i64 && !Subtarget.is64Bit() && | |||
5423 | "Unexpected custom legalization"); | |||
5424 | ||||
5425 | // We need to do the move in two steps. | |||
5426 | SDValue Vec = N->getOperand(1); | |||
5427 | MVT VecVT = Vec.getSimpleValueType(); | |||
5428 | ||||
5429 | // First extract the lower XLEN bits of the element. | |||
5430 | SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec); | |||
5431 | ||||
5432 | // To extract the upper XLEN bits of the vector element, shift the first | |||
5433 | // element right by 32 bits and re-extract the lower XLEN bits. | |||
5434 | SDValue VL = DAG.getConstant(1, DL, XLenVT); | |||
5435 | MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount()); | |||
5436 | SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL); | |||
5437 | SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, | |||
5438 | DAG.getConstant(32, DL, XLenVT), VL); | |||
5439 | SDValue LShr32 = | |||
5440 | DAG.getNode(RISCVISD::SRL_VL, DL, VecVT, Vec, ThirtyTwoV, Mask, VL); | |||
5441 | SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32); | |||
5442 | ||||
5443 | Results.push_back( | |||
5444 | DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi)); | |||
5445 | break; | |||
5446 | } | |||
5447 | } | |||
5448 | break; | |||
5449 | } | |||
5450 | case ISD::VECREDUCE_ADD: | |||
5451 | case ISD::VECREDUCE_AND: | |||
5452 | case ISD::VECREDUCE_OR: | |||
5453 | case ISD::VECREDUCE_XOR: | |||
5454 | case ISD::VECREDUCE_SMAX: | |||
5455 | case ISD::VECREDUCE_UMAX: | |||
5456 | case ISD::VECREDUCE_SMIN: | |||
5457 | case ISD::VECREDUCE_UMIN: | |||
5458 | if (SDValue V = lowerVECREDUCE(SDValue(N, 0), DAG)) | |||
5459 | Results.push_back(V); | |||
5460 | break; | |||
5461 | case ISD::FLT_ROUNDS_: { | |||
5462 | SDVTList VTs = DAG.getVTList(Subtarget.getXLenVT(), MVT::Other); | |||
5463 | SDValue Res = DAG.getNode(ISD::FLT_ROUNDS_, DL, VTs, N->getOperand(0)); | |||
5464 | Results.push_back(Res.getValue(0)); | |||
5465 | Results.push_back(Res.getValue(1)); | |||
5466 | break; | |||
5467 | } | |||
5468 | } | |||
5469 | } | |||
5470 | ||||
5471 | // A structure to hold one of the bit-manipulation patterns below. Together, a | |||
5472 | // SHL and non-SHL pattern may form a bit-manipulation pair on a single source: | |||
5473 | // (or (and (shl x, 1), 0xAAAAAAAA), | |||
5474 | // (and (srl x, 1), 0x55555555)) | |||
5475 | struct RISCVBitmanipPat { | |||
5476 | SDValue Op; | |||
5477 | unsigned ShAmt; | |||
5478 | bool IsSHL; | |||
5479 | ||||
5480 | bool formsPairWith(const RISCVBitmanipPat &Other) const { | |||
5481 | return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL; | |||
5482 | } | |||
5483 | }; | |||
5484 | ||||
5485 | // Matches patterns of the form | |||
5486 | // (and (shl x, C2), (C1 << C2)) | |||
5487 | // (and (srl x, C2), C1) | |||
5488 | // (shl (and x, C1), C2) | |||
5489 | // (srl (and x, (C1 << C2)), C2) | |||
5490 | // Where C2 is a power of 2 and C1 has at least that many leading zeroes. | |||
5491 | // The expected masks for each shift amount are specified in BitmanipMasks where | |||
5492 | // BitmanipMasks[log2(C2)] specifies the expected C1 value. | |||
5493 | // The max allowed shift amount is either XLen/2 or XLen/4 determined by whether | |||
5494 | // BitmanipMasks contains 6 or 5 entries assuming that the maximum possible | |||
5495 | // XLen is 64. | |||
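// For example, with the pair pattern from the header comment above (GREV,
// Width == 32, ShAmt == 1), BitmanipMasks[0] truncated to 32 bits would be
// 0x55555555, and on the SHL side the expected mask is the pre-shifted
// 0xAAAAAAAA.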
5496 | static Optional<RISCVBitmanipPat> | |||
5497 | matchRISCVBitmanipPat(SDValue Op, ArrayRef<uint64_t> BitmanipMasks) { | |||
5498 | assert((BitmanipMasks.size() == 5 || BitmanipMasks.size() == 6) &&(static_cast <bool> ((BitmanipMasks.size() == 5 || BitmanipMasks .size() == 6) && "Unexpected number of masks") ? void (0) : __assert_fail ("(BitmanipMasks.size() == 5 || BitmanipMasks.size() == 6) && \"Unexpected number of masks\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/RISCV/RISCVISelLowering.cpp" , 5499, __extension__ __PRETTY_FUNCTION__)) | |||
5499 | "Unexpected number of masks")(static_cast <bool> ((BitmanipMasks.size() == 5 || BitmanipMasks .size() == 6) && "Unexpected number of masks") ? void (0) : __assert_fail ("(BitmanipMasks.size() == 5 || BitmanipMasks.size() == 6) && \"Unexpected number of masks\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/RISCV/RISCVISelLowering.cpp" , 5499, __extension__ __PRETTY_FUNCTION__)); | |||
  Optional<uint64_t> Mask;
  // Optionally consume a mask around the shift operation.
  if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) {
    Mask = Op.getConstantOperandVal(1);
    Op = Op.getOperand(0);
  }
  if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL)
    return None;
  bool IsSHL = Op.getOpcode() == ISD::SHL;

  if (!isa<ConstantSDNode>(Op.getOperand(1)))
    return None;
  uint64_t ShAmt = Op.getConstantOperandVal(1);

  unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
  if (ShAmt >= Width || !isPowerOf2_64(ShAmt))
    return None;
  // If we don't have enough masks for 64 bit, then we must be trying to
  // match SHFL so we're only allowed to shift 1/4 of the width.
  if (BitmanipMasks.size() == 5 && ShAmt >= (Width / 2))
    return None;

  SDValue Src = Op.getOperand(0);

  // The expected mask is shifted left when the AND is found around SHL
  // patterns.
  //   ((x >> 1) & 0x55555555)
  //   ((x << 1) & 0xAAAAAAAA)
  bool SHLExpMask = IsSHL;

  if (!Mask) {
    // Sometimes LLVM keeps the mask as an operand of the shift, typically when
    // the mask is all ones: consume that now.
    if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) {
      Mask = Src.getConstantOperandVal(1);
      Src = Src.getOperand(0);
      // The expected mask is now in fact shifted left for SRL, so reverse the
      // decision.
      //   ((x & 0xAAAAAAAA) >> 1)
      //   ((x & 0x55555555) << 1)
      SHLExpMask = !SHLExpMask;
    } else {
      // Use a default shifted mask of all-ones if there's no AND, truncated
      // down to the expected width. This simplifies the logic later on.
      Mask = maskTrailingOnes<uint64_t>(Width);
      *Mask &= (IsSHL ? *Mask << ShAmt : *Mask >> ShAmt);
    }
  }

  unsigned MaskIdx = Log2_32(ShAmt);
  uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);

  if (SHLExpMask)
    ExpMask <<= ShAmt;

  if (Mask != ExpMask)
    return None;

  return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL};
}

// Matches any of the following bit-manipulation patterns:
//   (and (shl x, 1), (0x55555555 << 1))
//   (and (srl x, 1), 0x55555555)
//   (shl (and x, 0x55555555), 1)
//   (srl (and x, (0x55555555 << 1)), 1)
// where the shift amount and mask may vary thus:
//   [1]  = 0x55555555 / 0xAAAAAAAA
//   [2]  = 0x33333333 / 0xCCCCCCCC
//   [4]  = 0x0F0F0F0F / 0xF0F0F0F0
//   [8]  = 0x00FF00FF / 0xFF00FF00
//   [16] = 0x0000FFFF / 0xFFFF0000
//   [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64)
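// For example, (and (srl x, 4), 0x0F0F0F0F) matches row [4] above and
// produces {Op = x, ShAmt = 4, IsSHL = false}.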
static Optional<RISCVBitmanipPat> matchGREVIPat(SDValue Op) {
  // These are the unshifted masks which we use to match bit-manipulation
  // patterns. They may be shifted left in certain circumstances.
  static const uint64_t BitmanipMasks[] = {
      0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
      0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};

  return matchRISCVBitmanipPat(Op, BitmanipMasks);
}

// Match the following pattern as a GREVI(W) operation
//   (or (BITMANIP_SHL x), (BITMANIP_SRL x))
static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG,
                               const RISCVSubtarget &Subtarget) {
  assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
  EVT VT = Op.getValueType();

  if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
    auto LHS = matchGREVIPat(Op.getOperand(0));
    auto RHS = matchGREVIPat(Op.getOperand(1));
    if (LHS && RHS && LHS->formsPairWith(*RHS)) {
      SDLoc DL(Op);
      return DAG.getNode(RISCVISD::GREV, DL, VT, LHS->Op,
                         DAG.getConstant(LHS->ShAmt, DL, VT));
    }
  }
  return SDValue();
}

// Matches any of the following patterns as a GORCI(W) operation
// 1.  (or (GREVI x, shamt), x) if shamt is a power of 2
// 2.  (or x, (GREVI x, shamt)) if shamt is a power of 2
// 3.  (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x))
// Note that with the variant of 3.,
//     (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x)
// the inner pattern will first be matched as GREVI and then the outer
// pattern will be matched to GORC via the first rule above.
// 4.  (or (rotl/rotr x, bitwidth/2), x)
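// Rules 1 and 2 rely on the identity gorc(x, shamt) == x | grev(x, shamt),
// which holds whenever shamt is a power of 2, i.e. a single swap stage.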
static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG,
                               const RISCVSubtarget &Subtarget) {
  assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
  EVT VT = Op.getValueType();

  if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
    SDLoc DL(Op);
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) {
      if (Reverse.getOpcode() == RISCVISD::GREV && Reverse.getOperand(0) == X &&
          isa<ConstantSDNode>(Reverse.getOperand(1)) &&
          isPowerOf2_32(Reverse.getConstantOperandVal(1)))
        return DAG.getNode(RISCVISD::GORC, DL, VT, X, Reverse.getOperand(1));
      // We can also form GORCI from ROTL/ROTR by half the bitwidth.
      if ((Reverse.getOpcode() == ISD::ROTL ||
           Reverse.getOpcode() == ISD::ROTR) &&
          Reverse.getOperand(0) == X &&
          isa<ConstantSDNode>(Reverse.getOperand(1))) {
        uint64_t RotAmt = Reverse.getConstantOperandVal(1);
        if (RotAmt == (VT.getSizeInBits() / 2))
          return DAG.getNode(RISCVISD::GORC, DL, VT, X,
                             DAG.getConstant(RotAmt, DL, VT));
      }
      return SDValue();
    };

    // Check for either commutable permutation of (or (GREVI x, shamt), x)
    if (SDValue V = MatchOROfReverse(Op0, Op1))
      return V;
    if (SDValue V = MatchOROfReverse(Op1, Op0))
      return V;

    // OR is commutable so canonicalize its OR operand to the left
    if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR)
      std::swap(Op0, Op1);
    if (Op0.getOpcode() != ISD::OR)
      return SDValue();
    SDValue OrOp0 = Op0.getOperand(0);
    SDValue OrOp1 = Op0.getOperand(1);
    auto LHS = matchGREVIPat(OrOp0);
    // OR is commutable so swap the operands and try again: x might have been
    // on the left
    if (!LHS) {
      std::swap(OrOp0, OrOp1);
      LHS = matchGREVIPat(OrOp0);
    }
    auto RHS = matchGREVIPat(Op1);
    if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) {
      return DAG.getNode(RISCVISD::GORC, DL, VT, LHS->Op,
                         DAG.getConstant(LHS->ShAmt, DL, VT));
    }
  }
  return SDValue();
}

// Matches any of the following bit-manipulation patterns:
//   (and (shl x, 1), (0x22222222 << 1))
//   (and (srl x, 1), 0x22222222)
//   (shl (and x, 0x22222222), 1)
//   (srl (and x, (0x22222222 << 1)), 1)
// where the shift amount and mask may vary thus:
//   [1]  = 0x22222222 / 0x44444444
//   [2]  = 0x0C0C0C0C / 0x30303030
//   [4]  = 0x00F000F0 / 0x0F000F00
//   [8]  = 0x0000FF00 / 0x00FF0000
//   [16] = 0x00000000FFFF0000 / 0x0000FFFF00000000 (for RV64)
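// As a concrete case, row [8] describes the shl/srl halves of a SHFL that
// exchanges the two middle bytes of each 32-bit word.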
static Optional<RISCVBitmanipPat> matchSHFLPat(SDValue Op) {
  // These are the unshifted masks which we use to match bit-manipulation
  // patterns. They may be shifted left in certain circumstances.
  static const uint64_t BitmanipMasks[] = {
      0x2222222222222222ULL, 0x0C0C0C0C0C0C0C0CULL, 0x00F000F000F000F0ULL,
      0x0000FF000000FF00ULL, 0x00000000FFFF0000ULL};

  return matchRISCVBitmanipPat(Op, BitmanipMasks);
}

// Match (or (or (SHFL_SHL x), (SHFL_SHR x)), (SHFL_AND x))
static SDValue combineORToSHFL(SDValue Op, SelectionDAG &DAG,
                               const RISCVSubtarget &Subtarget) {
  assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
  EVT VT = Op.getValueType();

  if (VT != MVT::i32 && VT != Subtarget.getXLenVT())
    return SDValue();

  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);

  // OR is commutable so canonicalize the inner OR to the LHS.
  if (Op0.getOpcode() != ISD::OR)
    std::swap(Op0, Op1);
  if (Op0.getOpcode() != ISD::OR)
    return SDValue();

  // We found an inner OR, so our operands are the operands of the inner OR
  // and the other operand of the outer OR.
  SDValue A = Op0.getOperand(0);
  SDValue B = Op0.getOperand(1);
  SDValue C = Op1;

  auto Match1 = matchSHFLPat(A);
  auto Match2 = matchSHFLPat(B);

  // If neither matched, we failed.
  if (!Match1 && !Match2)
    return SDValue();

  // We had at least one match. If one failed, try the remaining C operand.
  if (!Match1) {
    std::swap(A, C);
    Match1 = matchSHFLPat(A);
    if (!Match1)
      return SDValue();
  } else if (!Match2) {
    std::swap(B, C);
    Match2 = matchSHFLPat(B);
    if (!Match2)
      return SDValue();
  }
  assert(Match1 && Match2);

  // Make sure our matches pair up.
  if (!Match1->formsPairWith(*Match2))
    return SDValue();

  // All that remains is to make sure C is an AND with the same input that
  // masks out the bits that are being shuffled.
  if (C.getOpcode() != ISD::AND || !isa<ConstantSDNode>(C.getOperand(1)) ||
      C.getOperand(0) != Match1->Op)
    return SDValue();

  uint64_t Mask = C.getConstantOperandVal(1);

  static const uint64_t BitmanipMasks[] = {
      0x9999999999999999ULL, 0xC3C3C3C3C3C3C3C3ULL, 0xF00FF00FF00FF00FULL,
      0xFF0000FFFF0000FFULL, 0xFFFF00000000FFFFULL,
  };

  unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
  unsigned MaskIdx = Log2_32(Match1->ShAmt);
  uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);

  if (Mask != ExpMask)
    return SDValue();

  SDLoc DL(Op);
  return DAG.getNode(RISCVISD::SHFL, DL, VT, Match1->Op,
                     DAG.getConstant(Match1->ShAmt, DL, VT));
}

// Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
// non-zero, and to x when C1^C2 is zero, since any repeated GREVI stage
// undoes itself.
// Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). A repeated GORCI
// stage does not undo itself, but it is redundant.
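// For example, (grev (grev x, 1), 3) folds to (grev x, 2), while
// (gorc (gorc x, 1), 3) folds to (gorc x, 3).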
static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) {
  SDValue Src = N->getOperand(0);

  if (Src.getOpcode() != N->getOpcode())
    return SDValue();

  if (!isa<ConstantSDNode>(N->getOperand(1)) ||
      !isa<ConstantSDNode>(Src.getOperand(1)))
    return SDValue();

  unsigned ShAmt1 = N->getConstantOperandVal(1);
  unsigned ShAmt2 = Src.getConstantOperandVal(1);
  Src = Src.getOperand(0);

  unsigned CombinedShAmt;
  if (N->getOpcode() == RISCVISD::GORC || N->getOpcode() == RISCVISD::GORCW)
    CombinedShAmt = ShAmt1 | ShAmt2;
  else
    CombinedShAmt = ShAmt1 ^ ShAmt2;

  if (CombinedShAmt == 0)
    return Src;

  SDLoc DL(N);
  return DAG.getNode(
      N->getOpcode(), DL, N->getValueType(0), Src,
      DAG.getConstant(CombinedShAmt, DL, N->getOperand(1).getValueType()));
}

// Combine a constant select operand into its use:
//
// (and (select cond, -1, c), x)
//   -> (select cond, x, (and x, c))  [AllOnes=1]
// (or  (select cond, 0, c), x)
//   -> (select cond, x, (or x, c))  [AllOnes=0]
// (xor (select cond, 0, c), x)
//   -> (select cond, x, (xor x, c))  [AllOnes=0]
// (add (select cond, 0, c), x)
//   -> (select cond, x, (add x, c))  [AllOnes=0]
// (sub x, (select cond, 0, c))
//   -> (select cond, x, (sub x, c))  [AllOnes=0]
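// AllOnes selects which constant counts as the identity for the outer
// operation: -1 for AND, 0 for OR/XOR/ADD/SUB.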
static SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
                                   SelectionDAG &DAG, bool AllOnes) {
  EVT VT = N->getValueType(0);

  // Skip vectors.
  if (VT.isVector())
    return SDValue();

  if ((Slct.getOpcode() != ISD::SELECT &&
       Slct.getOpcode() != RISCVISD::SELECT_CC) ||
      !Slct.hasOneUse())
    return SDValue();

  auto isZeroOrAllOnes = [](SDValue N, bool AllOnes) {
    return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
  };

  bool SwapSelectOps;
  unsigned OpOffset = Slct.getOpcode() == RISCVISD::SELECT_CC ? 2 : 0;
  SDValue TrueVal = Slct.getOperand(1 + OpOffset);
  SDValue FalseVal = Slct.getOperand(2 + OpOffset);
  SDValue NonConstantVal;
  if (isZeroOrAllOnes(TrueVal, AllOnes)) {
    SwapSelectOps = false;
    NonConstantVal = FalseVal;
  } else if (isZeroOrAllOnes(FalseVal, AllOnes)) {
    SwapSelectOps = true;
    NonConstantVal = TrueVal;
  } else
    return SDValue();

  // Slct is now known to be the desired identity constant when CC is true.
  TrueVal = OtherOp;
  FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT, OtherOp, NonConstantVal);
  // Unless SwapSelectOps says the condition should be false.
  if (SwapSelectOps)
    std::swap(TrueVal, FalseVal);

  if (Slct.getOpcode() == RISCVISD::SELECT_CC)
    return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), VT,
                       {Slct.getOperand(0), Slct.getOperand(1),
                        Slct.getOperand(2), TrueVal, FalseVal});

  return DAG.getNode(ISD::SELECT, SDLoc(N), VT,
                     {Slct.getOperand(0), TrueVal, FalseVal});
}

// Attempt combineSelectAndUse on each operand of a commutative operator N.
static SDValue combineSelectAndUseCommutative(SDNode *N, SelectionDAG &DAG,
                                              bool AllOnes) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  if (SDValue Result = combineSelectAndUse(N, N0, N1, DAG, AllOnes))
    return Result;
  if (SDValue Result = combineSelectAndUse(N, N1, N0, DAG, AllOnes))
    return Result;
  return SDValue();
}

static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG) {
  // fold (add (select lhs, rhs, cc, 0, y), x) ->
  //      (select lhs, rhs, cc, x, (add x, y))
  return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
}

static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG) {
  // fold (sub x, (select lhs, rhs, cc, 0, y)) ->
  //      (select lhs, rhs, cc, x, (sub x, y))
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  return combineSelectAndUse(N, N1, N0, DAG, /*AllOnes*/ false);
}

static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG) {
  // fold (and (select lhs, rhs, cc, -1, y), x) ->
  //      (select lhs, rhs, cc, x, (and x, y))
  return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ true);
}

static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
                                const RISCVSubtarget &Subtarget) {
  if (Subtarget.hasStdExtZbp()) {
    if (auto GREV = combineORToGREV(SDValue(N, 0), DAG, Subtarget))
      return GREV;
    if (auto GORC = combineORToGORC(SDValue(N, 0), DAG, Subtarget))
      return GORC;
    if (auto SHFL = combineORToSHFL(SDValue(N, 0), DAG, Subtarget))
      return SHFL;
  }

  // fold (or (select cond, 0, y), x) ->
  //      (select cond, x, (or x, y))
  return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
}

static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG) {
  // fold (xor (select cond, 0, y), x) ->
  //      (select cond, x, (xor x, y))
  return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
}

// Attempt to turn ANY_EXTEND into SIGN_EXTEND if the input to the ANY_EXTEND
// has users that require SIGN_EXTEND and the SIGN_EXTEND can be done for free
// by an instruction like ADDW/SUBW/MULW. Without this the ANY_EXTEND would be
// removed during type legalization leaving an ADD/SUB/MUL use that won't use
// ADDW/SUBW/MULW.
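// For example, if an i32 (add x, y) feeds both this ANY_EXTEND and an i32
// setcc, converting to SIGN_EXTEND lets isel pick ADDW and lets the setcc
// reuse the already sign-extended value once it is legalized.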
static SDValue performANY_EXTENDCombine(SDNode *N,
                                        TargetLowering::DAGCombinerInfo &DCI,
                                        const RISCVSubtarget &Subtarget) {
  if (!Subtarget.is64Bit())
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;

  SDValue Src = N->getOperand(0);
  EVT VT = N->getValueType(0);
  if (VT != MVT::i64 || Src.getValueType() != MVT::i32)
    return SDValue();

  // The opcode must be one that can implicitly sign_extend.
  // FIXME: Additional opcodes.
  switch (Src.getOpcode()) {
  default:
    return SDValue();
  case ISD::MUL:
    if (!Subtarget.hasStdExtM())
      return SDValue();
    LLVM_FALLTHROUGH;
  case ISD::ADD:
  case ISD::SUB:
    break;
  }

  // Only handle cases where the result is used by a CopyToReg. That likely
  // means the value is a liveout of the basic block. This helps prevent
  // infinite combine loops like PR51206.
  if (none_of(N->uses(),
              [](SDNode *User) { return User->getOpcode() == ISD::CopyToReg; }))
    return SDValue();

  SmallVector<SDNode *, 4> SetCCs;
  for (SDNode::use_iterator UI = Src.getNode()->use_begin(),
                            UE = Src.getNode()->use_end();
       UI != UE; ++UI) {
    SDNode *User = *UI;
    if (User == N)
      continue;
    if (UI.getUse().getResNo() != Src.getResNo())
      continue;
    // All i32 setccs are legalized by sign extending operands.
    if (User->getOpcode() == ISD::SETCC) {
      SetCCs.push_back(User);
      continue;
    }
    // We don't know if we can extend this user.
    break;
  }

  // If we don't have any SetCCs, this isn't worthwhile.
  if (SetCCs.empty())
    return SDValue();

  SDLoc DL(N);
  SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Src);
  DCI.CombineTo(N, SExt);

  // Promote all the setccs.
  for (SDNode *SetCC : SetCCs) {
    SmallVector<SDValue, 4> Ops;

    for (unsigned j = 0; j != 2; ++j) {
      SDValue SOp = SetCC->getOperand(j);
      if (SOp == Src)
        Ops.push_back(SExt);
      else
        Ops.push_back(DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, SOp));
    }

    Ops.push_back(SetCC->getOperand(2));
    DCI.CombineTo(SetCC,
                  DAG.getNode(ISD::SETCC, DL, SetCC->getValueType(0), Ops));
  }
  return SDValue(N, 0);
}

SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  // Helper to call SimplifyDemandedBits on an operand of N where only some low
  // bits are demanded. N will be added to the Worklist if it was not deleted.
  // Caller should return SDValue(N, 0) if this returns true.
  auto SimplifyDemandedLowBitsHelper = [&](unsigned OpNo, unsigned LowBits) {
    SDValue Op = N->getOperand(OpNo);
    APInt Mask = APInt::getLowBitsSet(Op.getValueSizeInBits(), LowBits);
    if (!SimplifyDemandedBits(Op, Mask, DCI))
      return false;

    if (N->getOpcode() != ISD::DELETED_NODE)
      DCI.AddToWorklist(N);
    return true;
  };

  switch (N->getOpcode()) {
  default:
    break;
  case RISCVISD::SplitF64: {
    SDValue Op0 = N->getOperand(0);
    // If the input to SplitF64 is just BuildPairF64 then the operation is
    // redundant. Instead, use BuildPairF64's operands directly.
    if (Op0->getOpcode() == RISCVISD::BuildPairF64)
      return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));

    SDLoc DL(N);

    // It's cheaper to materialise two 32-bit integers than to load a double
    // from the constant pool and transfer it to integer registers through the
    // stack.
    if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
      APInt V = C->getValueAPF().bitcastToAPInt();
      SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
      SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
      return DCI.CombineTo(N, Lo, Hi);
    }

    // This is a target-specific version of a DAGCombine performed in
    // DAGCombiner::visitBITCAST. It performs the equivalent of:
    // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
    // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
    if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
        !Op0.getNode()->hasOneUse())
      break;
    SDValue NewSplitF64 =
        DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
                    Op0.getOperand(0));
    SDValue Lo = NewSplitF64.getValue(0);
    SDValue Hi = NewSplitF64.getValue(1);
    APInt SignBit = APInt::getSignMask(32);
    if (Op0.getOpcode() == ISD::FNEG) {
      SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
                                  DAG.getConstant(SignBit, DL, MVT::i32));
      return DCI.CombineTo(N, Lo, NewHi);
    }
    assert(Op0.getOpcode() == ISD::FABS);
    SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
                                DAG.getConstant(~SignBit, DL, MVT::i32));
    return DCI.CombineTo(N, Lo, NewHi);
  }
  case RISCVISD::SLLW:
  case RISCVISD::SRAW:
  case RISCVISD::SRLW:
  case RISCVISD::ROLW:
  case RISCVISD::RORW: {
    // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
    if (SimplifyDemandedLowBitsHelper(0, 32) ||
        SimplifyDemandedLowBitsHelper(1, 5))
      return SDValue(N, 0);
    break;
  }
  case RISCVISD::CLZW:
  case RISCVISD::CTZW: {
    // Only the lower 32 bits of the first operand are read.
    if (SimplifyDemandedLowBitsHelper(0, 32))
      return SDValue(N, 0);
    break;
  }
  case RISCVISD::FSL:
  case RISCVISD::FSR: {
    // Only the lower log2(Bitwidth)+1 bits of the shift amount are read.
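    // (A funnel shift selects from a 2*Bitwidth-bit concatenation of its two
    // value operands, so the shift amount needs one bit more than a plain
    // shift.)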
    unsigned BitWidth = N->getOperand(2).getValueSizeInBits();
    assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
    if (SimplifyDemandedLowBitsHelper(2, Log2_32(BitWidth) + 1))
      return SDValue(N, 0);
    break;
  }
  case RISCVISD::FSLW:
  case RISCVISD::FSRW: {
    // Only the lower 32 bits of the values and the lower 6 bits of the shift
    // amount are read.
    if (SimplifyDemandedLowBitsHelper(0, 32) ||
        SimplifyDemandedLowBitsHelper(1, 32) ||
        SimplifyDemandedLowBitsHelper(2, 6))
      return SDValue(N, 0);
    break;
  }
  case RISCVISD::GREV:
  case RISCVISD::GORC: {
    // Only the lower log2(Bitwidth) bits of the shift amount are read.
    unsigned BitWidth = N->getOperand(1).getValueSizeInBits();
    assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
    if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth)))
      return SDValue(N, 0);

    return combineGREVI_GORCI(N, DCI.DAG);
  }
  case RISCVISD::GREVW:
  case RISCVISD::GORCW: {
    // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
    if (SimplifyDemandedLowBitsHelper(0, 32) ||
        SimplifyDemandedLowBitsHelper(1, 5))
      return SDValue(N, 0);

    return combineGREVI_GORCI(N, DCI.DAG);
  }
  case RISCVISD::SHFL:
  case RISCVISD::UNSHFL: {
    // Only the lower log2(Bitwidth)-1 bits of the shift amount are read.
    unsigned BitWidth = N->getOperand(1).getValueSizeInBits();
    assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
    if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth) - 1))
      return SDValue(N, 0);

    break;
  }
  case RISCVISD::SHFLW:
  case RISCVISD::UNSHFLW: {
    // Only the lower 32 bits of LHS and lower 4 bits of RHS are read.
    if (SimplifyDemandedLowBitsHelper(0, 32) ||
        SimplifyDemandedLowBitsHelper(1, 4))
      return SDValue(N, 0);

    break;
  }
  case RISCVISD::BCOMPRESSW:
  case RISCVISD::BDECOMPRESSW: {
    // Only the lower 32 bits of LHS and RHS are read.
    if (SimplifyDemandedLowBitsHelper(0, 32) ||
        SimplifyDemandedLowBitsHelper(1, 32))
      return SDValue(N, 0);

    break;
  }
  case RISCVISD::FMV_X_ANYEXTH:
  case RISCVISD::FMV_X_ANYEXTW_RV64: {
    SDLoc DL(N);
    SDValue Op0 = N->getOperand(0);
    MVT VT = N->getSimpleValueType(0);
    // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
    // conversion is unnecessary and can be replaced with the FMV_W_X_RV64
    // operand. Similar for FMV_X_ANYEXTH and FMV_H_X.
    if ((N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 &&
         Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) ||
        (N->getOpcode() == RISCVISD::FMV_X_ANYEXTH &&
         Op0->getOpcode() == RISCVISD::FMV_H_X)) {
      assert(Op0.getOperand(0).getValueType() == VT &&
             "Unexpected value type!");
      return Op0.getOperand(0);
    }

    // This is a target-specific version of a DAGCombine performed in
    // DAGCombiner::visitBITCAST. It performs the equivalent of:
    // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
    // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
    if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
        !Op0.getNode()->hasOneUse())
      break;
    SDValue NewFMV = DAG.getNode(N->getOpcode(), DL, VT, Op0.getOperand(0));
    unsigned FPBits = N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 ? 32 : 16;
    APInt SignBit = APInt::getSignMask(FPBits).sextOrSelf(VT.getSizeInBits());
    if (Op0.getOpcode() == ISD::FNEG)
      return DAG.getNode(ISD::XOR, DL, VT, NewFMV,
                         DAG.getConstant(SignBit, DL, VT));

    assert(Op0.getOpcode() == ISD::FABS);
    return DAG.getNode(ISD::AND, DL, VT, NewFMV,
                       DAG.getConstant(~SignBit, DL, VT));
  }
  case ISD::ADD:
    return performADDCombine(N, DAG);
  case ISD::SUB:
    return performSUBCombine(N, DAG);
  case ISD::AND:
    return performANDCombine(N, DAG);
  case ISD::OR:
    return performORCombine(N, DAG, Subtarget);
  case ISD::XOR:
    return performXORCombine(N, DAG);
  case ISD::ANY_EXTEND:
    return performANY_EXTENDCombine(N, DCI, Subtarget);
  case ISD::ZERO_EXTEND:
    // Fold (zero_extend (fp_to_uint X)) to prevent forming fcvt+zexti32 during
    // type legalization. This is safe because fp_to_uint produces poison if
    // it overflows.
    if (N->getValueType(0) == MVT::i64 && Subtarget.is64Bit() &&
        N->getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
        isTypeLegal(N->getOperand(0).getOperand(0).getValueType()))
      return DAG.getNode(ISD::FP_TO_UINT, SDLoc(N), MVT::i64,
                         N->getOperand(0).getOperand(0));
    return SDValue();
  case RISCVISD::SELECT_CC: {
    SDValue LHS = N->getOperand(0);
    SDValue RHS = N->getOperand(1);
    ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(2))->get();
    if (!ISD::isIntEqualitySetCC(CCVal))
      break;

    // Fold (select_cc (setlt X, Y), 0, ne, trueV, falseV) ->
    //      (select_cc X, Y, lt, trueV, falseV)
    // Sometimes the setcc is introduced after select_cc has been formed.
    if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
        LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
      // If we're looking for eq 0 instead of ne 0, we need to invert the
      // condition.
      bool Invert = CCVal == ISD::SETEQ;
      CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
      if (Invert)
        CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());

      SDLoc DL(N);
      RHS = LHS.getOperand(1);
      LHS = LHS.getOperand(0);
      translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);

      SDValue TargetCC = DAG.getCondCode(CCVal);
      return DAG.getNode(
          RISCVISD::SELECT_CC, DL, N->getValueType(0),
          {LHS, RHS, TargetCC, N->getOperand(3), N->getOperand(4)});
    }

    // Fold (select_cc (xor X, Y), 0, eq/ne, trueV, falseV) ->
    //      (select_cc X, Y, eq/ne, trueV, falseV)
    if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
      return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), N->getValueType(0),
                         {LHS.getOperand(0), LHS.getOperand(1),
                          N->getOperand(2), N->getOperand(3),
                          N->getOperand(4)});

    // (select_cc X, 1, setne, trueV, falseV) ->
    // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1.
    // This can occur when legalizing some floating point comparisons.
    APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
    if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
      SDLoc DL(N);
      CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
      SDValue TargetCC = DAG.getCondCode(CCVal);
      RHS = DAG.getConstant(0, DL, LHS.getValueType());
      return DAG.getNode(
          RISCVISD::SELECT_CC, DL, N->getValueType(0),
          {LHS, RHS, TargetCC, N->getOperand(3), N->getOperand(4)});
    }

    break;
  }
  case RISCVISD::BR_CC: {
    SDValue LHS = N->getOperand(1);
    SDValue RHS = N->getOperand(2);
    ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(3))->get();
    if (!ISD::isIntEqualitySetCC(CCVal))
      break;

    // Fold (br_cc (setlt X, Y), 0, ne, dest) ->
    //      (br_cc X, Y, lt, dest)
    // Sometimes the setcc is introduced after br_cc has been formed.
    if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
        LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
      // If we're looking for eq 0 instead of ne 0, we need to invert the
      // condition.
      bool Invert = CCVal == ISD::SETEQ;
      CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
      if (Invert)
        CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());

      SDLoc DL(N);
      RHS = LHS.getOperand(1);
      LHS = LHS.getOperand(0);
      translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);

      return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
                         N->getOperand(0), LHS, RHS, DAG.getCondCode(CCVal),
                         N->getOperand(4));
    }

    // Fold (br_cc (xor X, Y), 0, eq/ne, dest) ->
    //      (br_cc X, Y, eq/ne, dest)
    if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
      return DAG.getNode(RISCVISD::BR_CC, SDLoc(N), N->getValueType(0),
                         N->getOperand(0), LHS.getOperand(0), LHS.getOperand(1),
                         N->getOperand(3), N->getOperand(4));

    // (br_cc X, 1, setne, dest) ->
    // (br_cc X, 0, seteq, dest) if we can prove X is 0/1.
    // This can occur when legalizing some floating point comparisons.
    APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
    if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
      SDLoc DL(N);
      CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
      SDValue TargetCC = DAG.getCondCode(CCVal);
      RHS = DAG.getConstant(0, DL, LHS.getValueType());
      return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
                         N->getOperand(0), LHS, RHS, TargetCC,
                         N->getOperand(4));
    }
    break;
  }
  case ISD::FCOPYSIGN: {
    EVT VT = N->getValueType(0);
    if (!VT.isVector())
      break;
    // There is a form of VFSGNJ which injects the negated sign of its second
    // operand. Try to bubble any FNEG up after the extend/round to produce
    // this optimized pattern. Avoid modifying cases where the FP_ROUND has
    // TRUNC=1.
    SDValue In2 = N->getOperand(1);
    // Avoid cases where the extend/round has multiple uses, as duplicating
    // those is typically more expensive than removing a fneg.
    if (!In2.hasOneUse())
      break;
    if (In2.getOpcode() != ISD::FP_EXTEND &&
        (In2.getOpcode() != ISD::FP_ROUND || In2.getConstantOperandVal(1) != 0))
      break;
    In2 = In2.getOperand(0);
    if (In2.getOpcode() != ISD::FNEG)
      break;
    SDLoc DL(N);
    SDValue NewFPExtRound = DAG.getFPExtendOrRound(In2.getOperand(0), DL, VT);
    return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N->getOperand(0),
                       DAG.getNode(ISD::FNEG, DL, VT, NewFPExtRound));
  }
  case ISD::MGATHER:
  case ISD::MSCATTER: {
    if (!DCI.isBeforeLegalize())
      break;
    MaskedGatherScatterSDNode *MGSN = cast<MaskedGatherScatterSDNode>(N);
    SDValue Index = MGSN->getIndex();
    EVT IndexVT = Index.getValueType();
    MVT XLenVT = Subtarget.getXLenVT();
    // RISCV indexed loads and stores only support the "unsigned unscaled"
    // addressing mode, so anything else must be manually legalized.
    bool NeedsIdxLegalization = MGSN->isIndexScaled() ||
                                (MGSN->isIndexSigned() &&
                                 IndexVT.getVectorElementType().bitsLT(XLenVT));
    if (!NeedsIdxLegalization)
      break;

    SDLoc DL(N);

    // Any index legalization should first promote to XLenVT, so we don't lose
    // bits when scaling. This may create an illegal index type so we let
    // LLVM's legalization take care of the splitting.
    if (IndexVT.getVectorElementType().bitsLT(XLenVT)) {
      IndexVT = IndexVT.changeVectorElementType(XLenVT);
      Index = DAG.getNode(MGSN->isIndexSigned() ? ISD::SIGN_EXTEND
                                                : ISD::ZERO_EXTEND,
                          DL, IndexVT, Index);
    }

    unsigned Scale = N->getConstantOperandVal(5);
    if (MGSN->isIndexScaled() && Scale != 1) {
      // Manually scale the indices by the element size.
      // TODO: Sanitize the scale operand here?
      assert(isPowerOf2_32(Scale) && "Expecting power-of-two types");
      SDValue SplatScale = DAG.getConstant(Log2_32(Scale), DL, IndexVT);
      Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index, SplatScale);
    }

    ISD::MemIndexType NewIndexTy = ISD::UNSIGNED_UNSCALED;
    if (const auto *MGN = dyn_cast<MaskedGatherSDNode>(N)) {
      return DAG.getMaskedGather(
          N->getVTList(), MGSN->getMemoryVT(), DL,
          {MGSN->getChain(), MGN->getPassThru(), MGSN->getMask(),
           MGSN->getBasePtr(), Index, MGN->getScale()},
          MGN->getMemOperand(), NewIndexTy, MGN->getExtensionType());
    }
    const auto *MSN = cast<MaskedScatterSDNode>(N);
    return DAG.getMaskedScatter(
        N->getVTList(), MGSN->getMemoryVT(), DL,
        {MGSN->getChain(), MSN->getValue(), MGSN->getMask(), MGSN->getBasePtr(),
         Index, MGSN->getScale()},
        MGSN->getMemOperand(), NewIndexTy, MSN->isTruncatingStore());
  }
  case RISCVISD::SRA_VL:
  case RISCVISD::SRL_VL:
  case RISCVISD::SHL_VL: {
    SDValue ShAmt = N->getOperand(1);
    if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
      // We don't need the upper 32 bits of a 64-bit element for a shift amount.
      SDLoc DL(N);
      SDValue VL = N->getOperand(3);
      EVT VT = N->getValueType(0);
      ShAmt =
          DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, ShAmt.getOperand(0), VL);
      return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt,
                         N->getOperand(2), N->getOperand(3));
    }
    break;
  }
  case ISD::SRA:
  case ISD::SRL:
  case ISD::SHL: {
    SDValue ShAmt = N->getOperand(1);
    if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
      // We don't need the upper 32 bits of a 64-bit element for a shift amount.
      SDLoc DL(N);
      EVT VT = N->getValueType(0);
      ShAmt =
          DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VT, ShAmt.getOperand(0));
      return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt);
    }
    break;
  }
  case RISCVISD::MUL_VL: {
    // Try to form VWMUL or VWMULU.
    // FIXME: Look for splat of extended scalar as well.
    // FIXME: Support VWMULSU.
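    // That is, rewrite (mul_vl (vsext_vl x), (vsext_vl y)) as (vwmul_vl x, y)
    // and (mul_vl (vzext_vl x), (vzext_vl y)) as (vwmulu_vl x, y), so the
    // widening multiply reads the narrower source elements directly.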
    SDValue Op0 = N->getOperand(0);
    SDValue Op1 = N->getOperand(1);
    bool IsSignExt = Op0.getOpcode() == RISCVISD::VSEXT_VL;
    bool IsZeroExt = Op0.getOpcode() == RISCVISD::VZEXT_VL;
    if ((!IsSignExt && !IsZeroExt) || Op0.getOpcode() != Op1.getOpcode())
      return SDValue();

    // Make sure the extends have a single use.
    if (!Op0.hasOneUse() || !Op1.hasOneUse())
      return SDValue();

    SDValue Mask = N->getOperand(2);
    SDValue VL = N->getOperand(3);
    if (Op0.getOperand(1) != Mask || Op1.getOperand(1) != Mask ||
        Op0.getOperand(2) != VL || Op1.getOperand(2) != VL)
      return SDValue();

    Op0 = Op0.getOperand(0);
    Op1 = Op1.getOperand(0);

    MVT VT = N->getSimpleValueType(0);
    MVT NarrowVT =
        MVT::getVectorVT(MVT::getIntegerVT(VT.getScalarSizeInBits() / 2),
                         VT.getVectorElementCount());

    SDLoc DL(N);

    // Re-introduce narrower extends if needed.
    unsigned ExtOpc = IsSignExt ? RISCVISD::VSEXT_VL : RISCVISD::VZEXT_VL;
    if (Op0.getValueType() != NarrowVT)
      Op0 = DAG.getNode(ExtOpc, DL, NarrowVT, Op0, Mask, VL);
    if (Op1.getValueType() != NarrowVT)
      Op1 = DAG.getNode(ExtOpc, DL, NarrowVT, Op1, Mask, VL);

    unsigned WMulOpc = IsSignExt ? RISCVISD::VWMUL_VL : RISCVISD::VWMULU_VL;
    return DAG.getNode(WMulOpc, DL, VT, Op0, Op1, Mask, VL);
  }
  }

  return SDValue();
}

bool RISCVTargetLowering::isDesirableToCommuteWithShift(
    const SDNode *N, CombineLevel Level) const {
  // The following folds are only desirable if `(OP _, c1 << c2)` can be
  // materialised in fewer instructions than `(OP _, c1)`:
  //
  //   (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
  //   (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
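  // For example, with c1 = 2047 (a legal ADDI immediate) and c2 = 4, the
  // shifted constant 2047 << 4 = 32752 no longer fits in a 12-bit signed
  // immediate, so the logic below refuses the fold and the add keeps c1.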
  SDValue N0 = N->getOperand(0);
  EVT Ty = N0.getValueType();
  if (Ty.isScalarInteger() &&
      (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
    auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
    auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (C1 && C2) {
      const APInt &C1Int = C1->getAPIntValue();
      APInt ShiftedC1Int = C1Int << C2->getAPIntValue();

      // We can materialise `c1 << c2` into an add immediate, so it's "free",
      // and the combine should happen, to potentially allow further combines
      // later.
      if (ShiftedC1Int.getMinSignedBits() <= 64 &&
          isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
        return true;

      // We can materialise `c1` in an add immediate, so it's "free", and the
      // combine should be prevented.
      if (C1Int.getMinSignedBits() <= 64 &&
          isLegalAddImmediate(C1Int.getSExtValue()))
        return false;

      // Neither constant will fit into an immediate, so find materialisation
      // costs.
      int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
                                              Subtarget.getFeatureBits(),
                                              /*CompressionCost*/ true);
      int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
          ShiftedC1Int, Ty.getSizeInBits(), Subtarget.getFeatureBits(),
          /*CompressionCost*/ true);

      // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
      // combine should be prevented.
      if (C1Cost < ShiftedC1Cost)
        return false;
    }
  }
  return true;
}

bool RISCVTargetLowering::targetShrinkDemandedConstant(
    SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
    TargetLoweringOpt &TLO) const {
  // Delay this optimization as late as possible.
  if (!TLO.LegalOps)
    return false;

  EVT VT = Op.getValueType();
  if (VT.isVector())
    return false;

  // Only handle AND for now.
  if (Op.getOpcode() != ISD::AND)
    return false;

  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
  if (!C)
    return false;

  const APInt &Mask = C->getAPIntValue();

  // Clear all non-demanded bits initially.
  APInt ShrunkMask = Mask & DemandedBits;

  // Try to make a smaller immediate by setting undemanded bits.
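  // For example, if only the low 16 bits are demanded, (and x, 0x8000FF00)
  // can become (and x, 0xFFFFFF00): the new constant fits a 12-bit signed
  // immediate (andi x, -256) instead of needing a longer materialisation.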

  APInt ExpandedMask = Mask | ~DemandedBits;

  auto IsLegalMask = [ShrunkMask, ExpandedMask](const APInt &Mask) -> bool {
    return ShrunkMask.isSubsetOf(Mask) && Mask.isSubsetOf(ExpandedMask);
  };
  auto UseMask = [Mask, Op, VT, &TLO](const APInt &NewMask) -> bool {
    if (NewMask == Mask)
      return true;
    SDLoc DL(Op);
    SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT);
    SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
    return TLO.CombineTo(Op, NewOp);
  };

  // If the shrunk mask fits in sign extended 12 bits, let the target
  // independent code apply it.
  if (ShrunkMask.isSignedIntN(12))
    return false;

  // Preserve (and X, 0xffff) when zext.h is supported.
  if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
    APInt NewMask = APInt(Mask.getBitWidth(), 0xffff);
    if (IsLegalMask(NewMask))
      return UseMask(NewMask);
  }

  // Try to preserve (and X, 0xffffffff), the (zext_inreg X, i32) pattern.
  if (VT == MVT::i64) {
    APInt NewMask = APInt(64, 0xffffffff);
    if (IsLegalMask(NewMask))
      return UseMask(NewMask);
  }

  // For the remaining optimizations, we need to be able to make a negative
  // number through a combination of mask and undemanded bits.
  if (!ExpandedMask.isNegative())
    return false;

  // Compute the fewest number of bits needed to represent the negative number.
  unsigned MinSignedBits = ExpandedMask.getMinSignedBits();

  // Try to make a 12 bit negative immediate. If that fails try to make a 32
  // bit negative immediate unless the shrunk immediate already fits in 32 bits.
  APInt NewMask = ShrunkMask;
  if (MinSignedBits <= 12)
    NewMask.setBitsFrom(11);
  else if (MinSignedBits <= 32 && !ShrunkMask.isSignedIntN(32))
    NewMask.setBitsFrom(31);
  else
    return false;

  // Sanity check that our new mask is a subset of the demanded mask.
  assert(IsLegalMask(NewMask));
  return UseMask(NewMask);
}

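// Helper to evaluate a GREV operation on a constant, applying one swap stage
// per set bit of ShAmt. For example, ShAmt == 24 byte-swaps a 32-bit value
// and ShAmt == 63 bit-reverses a 64-bit value.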
static void computeGREV(APInt &Src, unsigned ShAmt) {
  ShAmt &= Src.getBitWidth() - 1;
  uint64_t x = Src.getZExtValue();
  if (ShAmt & 1)
    x = ((x & 0x5555555555555555LL) << 1) | ((x & 0xAAAAAAAAAAAAAAAALL) >> 1);
  if (ShAmt & 2)
    x = ((x & 0x3333333333333333LL) << 2) | ((x & 0xCCCCCCCCCCCCCCCCLL) >> 2);
  if (ShAmt & 4)
    x = ((x & 0x0F0F0F0F0F0F0F0FLL) << 4) | ((x & 0xF0F0F0F0F0F0F0F0LL) >> 4);
  if (ShAmt & 8)
    x = ((x & 0x00FF00FF00FF00FFLL) << 8) | ((x & 0xFF00FF00FF00FF00LL) >> 8);
  if (ShAmt & 16)
    x = ((x & 0x0000FFFF0000FFFFLL) << 16) | ((x & 0xFFFF0000FFFF0000LL) >> 16);
  if (ShAmt & 32)
    x = ((x & 0x00000000FFFFFFFFLL) << 32) | ((x & 0xFFFFFFFF00000000LL) >> 32);
  Src = x;
}

6606 | void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op, | |||
6607 | KnownBits &Known, | |||
6608 | const APInt &DemandedElts, | |||
6609 | const SelectionDAG &DAG, | |||
6610 | unsigned Depth) const { | |||
6611 | unsigned BitWidth = Known.getBitWidth(); | |||
6612 | unsigned Opc = Op.getOpcode(); | |||
6613 | assert((Opc >= ISD::BUILTIN_OP_END || | |||
6614 | Opc == ISD::INTRINSIC_WO_CHAIN || | |||
6615 | Opc == ISD::INTRINSIC_W_CHAIN || | |||
6616 | Opc == ISD::INTRINSIC_VOID) && | |||
6617 | "Should use MaskedValueIsZero if you don't know whether Op" | |||
6618 | " is a target node!"); | |||
6619 | ||||
6620 | Known.resetAll(); | |||
6621 | switch (Opc) { | |||
6622 | default: break; | |||
6623 | case RISCVISD::SELECT_CC: { | |||
6624 | Known = DAG.computeKnownBits(Op.getOperand(4), Depth + 1); | |||
6625 | // If we don't know any bits, early out. | |||
6626 | if (Known.isUnknown()) | |||
6627 | break; | |||
6628 | KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(3), Depth + 1); | |||
6629 | ||||
6630 | // Only known if known in both the LHS and RHS. | |||
6631 | Known = KnownBits::commonBits(Known, Known2); | |||
6632 | break; | |||
6633 | } | |||
6634 | case RISCVISD::REMUW: { | |||
6635 | KnownBits Known2; | |||
6636 | Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); | |||
6637 | Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); | |||
6638 | // We only care about the lower 32 bits. | |||
6639 | Known = KnownBits::urem(Known.trunc(32), Known2.trunc(32)); | |||
6640 | // Restore the original width by sign extending. | |||
6641 | Known = Known.sext(BitWidth); | |||
6642 | break; | |||
6643 | } | |||
6644 | case RISCVISD::DIVUW: { | |||
6645 | KnownBits Known2; | |||
6646 | Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); | |||
6647 | Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); | |||
6648 | // We only care about the lower 32 bits. | |||
6649 | Known = KnownBits::udiv(Known.trunc(32), Known2.trunc(32)); | |||
6650 | // Restore the original width by sign extending. | |||
6651 | Known = Known.sext(BitWidth); | |||
6652 | break; | |||
6653 | } | |||
6654 | case RISCVISD::CTZW: { | |||
6655 | KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1); | |||
6656 | unsigned PossibleTZ = Known2.trunc(32).countMaxTrailingZeros(); | |||
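// The count is at most PossibleTZ, so the result needs at most
// Log2_32(PossibleTZ) + 1 low bits; all higher bits are known to be zero.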
6657 | unsigned LowBits = Log2_32(PossibleTZ) + 1; | |||
6658 | Known.Zero.setBitsFrom(LowBits); | |||
6659 | break; | |||
6660 | } | |||
6661 | case RISCVISD::CLZW: { | |||
6662 | KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1); | |||
6663 | unsigned PossibleLZ = Known2.trunc(32).countMaxLeadingZeros(); | |||
6664 | unsigned LowBits = Log2_32(PossibleLZ) + 1; | |||
6665 | Known.Zero.setBitsFrom(LowBits); | |||
6666 | break; | |||
6667 | } | |||
6668 | case RISCVISD::GREV: | |||
6669 | case RISCVISD::GREVW: { | |||
6670 | if (auto *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { | |||
6671 | Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1); | |||
6672 | if (Opc == RISCVISD::GREVW) | |||
6673 | Known = Known.trunc(32); | |||
6674 | unsigned ShAmt = C->getZExtValue(); | |||
6675 | computeGREV(Known.Zero, ShAmt); | |||
6676 | computeGREV(Known.One, ShAmt); | |||
6677 | if (Opc == RISCVISD::GREVW) | |||
6678 | Known = Known.sext(BitWidth); | |||
6679 | } | |||
6680 | break; | |||
6681 | } | |||
6682 | case RISCVISD::READ_VLENB: | |||
6683 | // We assume VLENB is at least 16 bytes. | |||
6684 | Known.Zero.setLowBits(4); | |||
6685 | // We assume VLENB is no more than 65536 / 8 bytes. | |||
6686 | Known.Zero.setBitsFrom(14); | |||
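// Together these bounds say VLENB is assumed to lie in [16, 8192], so only
// bits 4..13 of the result can possibly be set.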
6687 | break; | |||
6688 | case ISD::INTRINSIC_W_CHAIN: { | |||
6689 | unsigned IntNo = Op.getConstantOperandVal(1); | |||
6690 | switch (IntNo) { | |||
6691 | default: | |||
6692 | // We can't do anything for most intrinsics. | |||
6693 | break; | |||
6694 | case Intrinsic::riscv_vsetvli: | |||
6695 | case Intrinsic::riscv_vsetvlimax: | |||
6696 | // Assume that VL output is positive and would fit in an int32_t. | |||
6697 | // TODO: VLEN might be capped at 16 bits in a future V spec update. | |||
6698 | if (BitWidth >= 32) | |||
6699 | Known.Zero.setBitsFrom(31); | |||
6700 | break; | |||
6701 | } | |||
6702 | break; | |||
6703 | } | |||
6704 | } | |||
6705 | } | |||
6706 | ||||
6707 | unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode( | |||
6708 | SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, | |||
6709 | unsigned Depth) const { | |||
6710 | switch (Op.getOpcode()) { | |||
6711 | default: | |||
6712 | break; | |||
6713 | case RISCVISD::SELECT_CC: { | |||
6714 | unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth + 1); | |||
6715 | if (Tmp == 1) return 1; // Early out. | |||
6716 | unsigned Tmp2 = DAG.ComputeNumSignBits(Op.getOperand(4), DemandedElts, Depth + 1); | |||
6717 | return std::min(Tmp, Tmp2); | |||
6718 | } | |||
6719 | case RISCVISD::SLLW: | |||
6720 | case RISCVISD::SRAW: | |||
6721 | case RISCVISD::SRLW: | |||
6722 | case RISCVISD::DIVW: | |||
6723 | case RISCVISD::DIVUW: | |||
6724 | case RISCVISD::REMUW: | |||
6725 | case RISCVISD::ROLW: | |||
6726 | case RISCVISD::RORW: | |||
6727 | case RISCVISD::GREVW: | |||
6728 | case RISCVISD::GORCW: | |||
6729 | case RISCVISD::FSLW: | |||
6730 | case RISCVISD::FSRW: | |||
6731 | case RISCVISD::SHFLW: | |||
6732 | case RISCVISD::UNSHFLW: | |||
6733 | case RISCVISD::BCOMPRESSW: | |||
6734 | case RISCVISD::BDECOMPRESSW: | |||
6735 | case RISCVISD::FCVT_W_RTZ_RV64: | |||
6736 | case RISCVISD::FCVT_WU_RTZ_RV64: | |||
6737 | // TODO: As the result is sign-extended, this is conservatively correct. A | |||
6738 | // more precise answer could be calculated for SRAW depending on known | |||
6739 | // bits in the shift amount. | |||
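// Each of these nodes sign-extends its 32-bit result to 64 bits, so bit 31
// is replicated into bits 63..32, guaranteeing at least 33 sign bits.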
6740 | return 33; | |||
6741 | case RISCVISD::SHFL: | |||
6742 | case RISCVISD::UNSHFL: { | |||
6743 | // There is no SHFLIW, but an i64 SHFLI with bit 4 of the control word | |||
6744 | // cleared doesn't affect bit 31. The upper 32 bits will be shuffled, but | |||
6745 | // will stay within the upper 32 bits. If there were more than 32 sign bits | |||
6746 | // before, there will be at least 33 sign bits after. | |||
6747 | if (Op.getValueType() == MVT::i64 && | |||
6748 | isa<ConstantSDNode>(Op.getOperand(1)) && | |||
6749 | (Op.getConstantOperandVal(1) & 0x10) == 0) { | |||
6750 | unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1); | |||
6751 | if (Tmp > 32) | |||
6752 | return 33; | |||
6753 | } | |||
6754 | break; | |||
6755 | } | |||
6756 | case RISCVISD::VMV_X_S: | |||
6757 | // The number of sign bits of the scalar result is computed by obtaining the | |||
6758 | // element type of the input vector operand, subtracting its width from the | |||
6759 | // XLEN, and then adding one (sign bit within the element type). If the | |||
6760 | // element type is wider than XLen, the least-significant XLEN bits are | |||
6761 | // taken. | |||
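// For example, extracting from a vector of i8 elements on RV64 gives
// 64 - 8 + 1 = 57 known sign bits.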
6762 | if (Op.getOperand(0).getScalarValueSizeInBits() > Subtarget.getXLen()) | |||
6763 | return 1; | |||
6764 | return Subtarget.getXLen() - Op.getOperand(0).getScalarValueSizeInBits() + 1; | |||
6765 | } | |||
6766 | ||||
6767 | return 1; | |||
6768 | } | |||
6769 | ||||
6770 | static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI, | |||
6771 | MachineBasicBlock *BB) { | |||
6772 | assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction"); | |||
6773 | ||||
6774 | // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves. | |||
6775 | // Should the count have wrapped while it was being read, we need to try | |||
6776 | // again. | |||
6777 | // ... | |||
6778 | // read: | |||
6779 | // rdcycleh x3 # load high word of cycle | |||
6780 | // rdcycle x2 # load low word of cycle | |||
6781 | // rdcycleh x4 # load high word of cycle | |||
6782 | // bne x3, x4, read # check if high word reads match, otherwise try again | |||
6783 | // ... | |||
6784 | ||||
6785 | MachineFunction &MF = *BB->getParent(); | |||
6786 | const BasicBlock *LLVM_BB = BB->getBasicBlock(); | |||
6787 | MachineFunction::iterator It = ++BB->getIterator(); | |||
6788 | ||||
6789 | MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB); | |||
6790 | MF.insert(It, LoopMBB); | |||
6791 | ||||
6792 | MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB); | |||
6793 | MF.insert(It, DoneMBB); | |||
6794 | ||||
6795 | // Transfer the remainder of BB and its successor edges to DoneMBB. | |||
6796 | DoneMBB->splice(DoneMBB->begin(), BB, | |||
6797 | std::next(MachineBasicBlock::iterator(MI)), BB->end()); | |||
6798 | DoneMBB->transferSuccessorsAndUpdatePHIs(BB); | |||
6799 | ||||
6800 | BB->addSuccessor(LoopMBB); | |||
6801 | ||||
6802 | MachineRegisterInfo &RegInfo = MF.getRegInfo(); | |||
6803 | Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass); | |||
6804 | Register LoReg = MI.getOperand(0).getReg(); | |||
6805 | Register HiReg = MI.getOperand(1).getReg(); | |||
6806 | DebugLoc DL = MI.getDebugLoc(); | |||
6807 | ||||
6808 | const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo(); | |||
6809 | BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg) | |||
6810 | .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding) | |||
6811 | .addReg(RISCV::X0); | |||
6812 | BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg) | |||
6813 | .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding) | |||
6814 | .addReg(RISCV::X0); | |||
6815 | BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg) | |||
6816 | .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding) | |||
6817 | .addReg(RISCV::X0); | |||
6818 | ||||
6819 | BuildMI(LoopMBB, DL, TII->get(RISCV::BNE)) | |||
6820 | .addReg(HiReg) | |||
6821 | .addReg(ReadAgainReg) | |||
6822 | .addMBB(LoopMBB); | |||
6823 | ||||
6824 | LoopMBB->addSuccessor(LoopMBB); | |||
6825 | LoopMBB->addSuccessor(DoneMBB); | |||
6826 | ||||
6827 | MI.eraseFromParent(); | |||
6828 | ||||
6829 | return DoneMBB; | |||
6830 | } | |||
6831 | ||||
6832 | static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI, | |||
6833 | MachineBasicBlock *BB) { | |||
6834 | assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction"); | |||
6835 | ||||
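// RV32D has no direct FPR64-to-GPR-pair move, so the split goes through
// memory: spill the f64 source to a stack slot, then reload the two halves
// with LW at offsets 0 and 4.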
6836 | MachineFunction &MF = *BB->getParent(); | |||
6837 | DebugLoc DL = MI.getDebugLoc(); | |||
6838 | const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); | |||
6839 | const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo(); | |||
6840 | Register LoReg = MI.getOperand(0).getReg(); | |||
6841 | Register HiReg = MI.getOperand(1).getReg(); | |||
6842 | Register SrcReg = MI.getOperand(2).getReg(); | |||
6843 | const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass; | |||
6844 | int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF); | |||
6845 | ||||
6846 | TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC, | |||
6847 | RI); | |||
6848 | MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI); | |||
6849 | MachineMemOperand *MMOLo = | |||
6850 | MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8)); | |||
6851 | MachineMemOperand *MMOHi = MF.getMachineMemOperand( | |||
6852 | MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8)); | |||
6853 | BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg) | |||
6854 | .addFrameIndex(FI) | |||
6855 | .addImm(0) | |||
6856 | .addMemOperand(MMOLo); | |||
6857 | BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg) | |||
6858 | .addFrameIndex(FI) | |||
6859 | .addImm(4) | |||
6860 | .addMemOperand(MMOHi); | |||
6861 | MI.eraseFromParent(); // The pseudo instruction is gone now. | |||
6862 | return BB; | |||
6863 | } | |||
6864 | ||||
6865 | static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI, | |||
6866 | MachineBasicBlock *BB) { | |||
6867 | assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo && | |||
6868 | "Unexpected instruction"); | |||
6869 | ||||
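// The inverse of SplitF64Pseudo: store the two GPR halves with SW at offsets
// 0 and 4, then reload the combined f64 from the same stack slot.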
6870 | MachineFunction &MF = *BB->getParent(); | |||
6871 | DebugLoc DL = MI.getDebugLoc(); | |||
6872 | const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); | |||
6873 | const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo(); | |||
6874 | Register DstReg = MI.getOperand(0).getReg(); | |||
6875 | Register LoReg = MI.getOperand(1).getReg(); | |||
6876 | Register HiReg = MI.getOperand(2).getReg(); | |||
6877 | const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass; | |||
6878 | int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF); | |||
6879 | ||||
6880 | MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI); | |||
6881 | MachineMemOperand *MMOLo = | |||
6882 | MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8)); | |||
6883 | MachineMemOperand *MMOHi = MF.getMachineMemOperand( | |||
6884 | MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8)); | |||
6885 | BuildMI(*BB, MI, DL, TII.get(RISCV::SW)) | |||
6886 | .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill())) | |||
6887 | .addFrameIndex(FI) | |||
6888 | .addImm(0) | |||
6889 | .addMemOperand(MMOLo); | |||
6890 | BuildMI(*BB, MI, DL, TII.get(RISCV::SW)) | |||
6891 | .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill())) | |||
6892 | .addFrameIndex(FI) | |||
6893 | .addImm(4) | |||
6894 | .addMemOperand(MMOHi); | |||
6895 | TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI); | |||
6896 | MI.eraseFromParent(); // The pseudo instruction is gone now. | |||
6897 | return BB; | |||
6898 | } | |||
6899 | ||||
6900 | static bool isSelectPseudo(MachineInstr &MI) { | |||
6901 | switch (MI.getOpcode()) { | |||
6902 | default: | |||
6903 | return false; | |||
6904 | case RISCV::Select_GPR_Using_CC_GPR: | |||
6905 | case RISCV::Select_FPR16_Using_CC_GPR: | |||
6906 | case RISCV::Select_FPR32_Using_CC_GPR: | |||
6907 | case RISCV::Select_FPR64_Using_CC_GPR: | |||
6908 | return true; | |||
6909 | } | |||
6910 | } | |||
6911 | ||||
6912 | static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI, | |||
6913 | MachineBasicBlock *BB, | |||
6914 | const RISCVSubtarget &Subtarget) { | |||
6915 | // To "insert" Select_* instructions, we actually have to insert the triangle | |||
6916 | // control-flow pattern. The incoming instructions know the destination vreg | |||
6917 | // to set, the condition code register to branch on, the true/false values to | |||
6918 | // select between, and the condcode to use to select the appropriate branch. | |||
6919 | // | |||
6920 | // We produce the following control flow: | |||
6921 | // HeadMBB | |||
6922 | // | \ | |||
6923 | // | IfFalseMBB | |||
6924 | // | / | |||
6925 | // TailMBB | |||
6926 | // | |||
6927 | // When we find a sequence of selects we attempt to optimize their emission | |||
6928 | // by sharing the control flow. Currently we only handle cases where we have | |||
6929 | // multiple selects with the exact same condition (same LHS, RHS and CC). | |||
6930 | // The selects may be interleaved with other instructions if the other | |||
6931 | // instructions meet some requirements we deem safe: | |||
6932 | // - They are debug instructions. Otherwise, | |||
6933 | // - They do not have side-effects, do not access memory and their inputs do | |||
6934 | // not depend on the results of the select pseudo-instructions. | |||
6935 | // The TrueV/FalseV operands of the selects cannot depend on the result of | |||
6936 | // previous selects in the sequence. | |||
6937 | // These conditions could be further relaxed. See the X86 target for a | |||
6938 | // related approach and more information. | |||
6939 | Register LHS = MI.getOperand(1).getReg(); | |||
6940 | Register RHS = MI.getOperand(2).getReg(); | |||
6941 | auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm()); | |||
6942 | ||||
6943 | SmallVector<MachineInstr *, 4> SelectDebugValues; | |||
6944 | SmallSet<Register, 4> SelectDests; | |||
6945 | SelectDests.insert(MI.getOperand(0).getReg()); | |||
6946 | ||||
6947 | MachineInstr *LastSelectPseudo = &MI; | |||
6948 | ||||
6949 | for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI); | |||
6950 | SequenceMBBI != E; ++SequenceMBBI) { | |||
6951 | if (SequenceMBBI->isDebugInstr()) | |||
6952 | continue; | |||
6953 | else if (isSelectPseudo(*SequenceMBBI)) { | |||
6954 | if (SequenceMBBI->getOperand(1).getReg() != LHS || | |||
6955 | SequenceMBBI->getOperand(2).getReg() != RHS || | |||
6956 | SequenceMBBI->getOperand(3).getImm() != CC || | |||
6957 | SelectDests.count(SequenceMBBI->getOperand(4).getReg()) || | |||
6958 | SelectDests.count(SequenceMBBI->getOperand(5).getReg())) | |||
6959 | break; | |||
6960 | LastSelectPseudo = &*SequenceMBBI; | |||
6961 | SequenceMBBI->collectDebugValues(SelectDebugValues); | |||
6962 | SelectDests.insert(SequenceMBBI->getOperand(0).getReg()); | |||
6963 | } else { | |||
6964 | if (SequenceMBBI->hasUnmodeledSideEffects() || | |||
6965 | SequenceMBBI->mayLoadOrStore()) | |||
6966 | break; | |||
6967 | if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) { | |||
6968 | return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg()); | |||
6969 | })) | |||
6970 | break; | |||
6971 | } | |||
6972 | } | |||
6973 | ||||
6974 | const RISCVInstrInfo &TII = *Subtarget.getInstrInfo(); | |||
6975 | const BasicBlock *LLVM_BB = BB->getBasicBlock(); | |||
6976 | DebugLoc DL = MI.getDebugLoc(); | |||
6977 | MachineFunction::iterator I = ++BB->getIterator(); | |||
6978 | ||||
6979 | MachineBasicBlock *HeadMBB = BB; | |||
6980 | MachineFunction *F = BB->getParent(); | |||
6981 | MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB); | |||
6982 | MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB); | |||
6983 | ||||
6984 | F->insert(I, IfFalseMBB); | |||
6985 | F->insert(I, TailMBB); | |||
6986 | ||||
6987 | // Transfer debug instructions associated with the selects to TailMBB. | |||
6988 | for (MachineInstr *DebugInstr : SelectDebugValues) { | |||
6989 | TailMBB->push_back(DebugInstr->removeFromParent()); | |||
6990 | } | |||
6991 | ||||
6992 | // Move all instructions after the sequence to TailMBB. | |||
6993 | TailMBB->splice(TailMBB->end(), HeadMBB, | |||
6994 | std::next(LastSelectPseudo->getIterator()), HeadMBB->end()); | |||
6995 | // Update machine-CFG edges by transferring all successors of the current | |||
6996 | // block to the new block which will contain the Phi nodes for the selects. | |||
6997 | TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB); | |||
6998 | // Set the successors for HeadMBB. | |||
6999 | HeadMBB->addSuccessor(IfFalseMBB); | |||
7000 | HeadMBB->addSuccessor(TailMBB); | |||
7001 | ||||
7002 | // Insert appropriate branch. | |||
7003 | BuildMI(HeadMBB, DL, TII.getBrCond(CC)) | |||
7004 | .addReg(LHS) | |||
7005 | .addReg(RHS) | |||
7006 | .addMBB(TailMBB); | |||
7007 | ||||
7008 | // IfFalseMBB just falls through to TailMBB. | |||
7009 | IfFalseMBB->addSuccessor(TailMBB); | |||
7010 | ||||
7011 | // Create PHIs for all of the select pseudo-instructions. | |||
7012 | auto SelectMBBI = MI.getIterator(); | |||
7013 | auto SelectEnd = std::next(LastSelectPseudo->getIterator()); | |||
7014 | auto InsertionPoint = TailMBB->begin(); | |||
7015 | while (SelectMBBI != SelectEnd) { | |||
7016 | auto Next = std::next(SelectMBBI); | |||
7017 | if (isSelectPseudo(*SelectMBBI)) { | |||
7018 | // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ] | |||
7019 | BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(), | |||
7020 | TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg()) | |||
7021 | .addReg(SelectMBBI->getOperand(4).getReg()) | |||
7022 | .addMBB(HeadMBB) | |||
7023 | .addReg(SelectMBBI->getOperand(5).getReg()) | |||
7024 | .addMBB(IfFalseMBB); | |||
7025 | SelectMBBI->eraseFromParent(); | |||
7026 | } | |||
7027 | SelectMBBI = Next; | |||
7028 | } | |||
7029 | ||||
7030 | F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs); | |||
7031 | return TailMBB; | |||
7032 | } | |||
7033 | ||||
7034 | MachineBasicBlock * | |||
7035 | RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, | |||
7036 | MachineBasicBlock *BB) const { | |||
7037 | switch (MI.getOpcode()) { | |||
7038 | default: | |||
7039 | llvm_unreachable("Unexpected instr type to insert"); | |||
7040 | case RISCV::ReadCycleWide: | |||
7041 | assert(!Subtarget.is64Bit() && | |||
7042 | "ReadCycleWide is only to be used on riscv32"); | |||
7043 | return emitReadCycleWidePseudo(MI, BB); | |||
7044 | case RISCV::Select_GPR_Using_CC_GPR: | |||
7045 | case RISCV::Select_FPR16_Using_CC_GPR: | |||
7046 | case RISCV::Select_FPR32_Using_CC_GPR: | |||
7047 | case RISCV::Select_FPR64_Using_CC_GPR: | |||
7048 | return emitSelectPseudo(MI, BB, Subtarget); | |||
7049 | case RISCV::BuildPairF64Pseudo: | |||
7050 | return emitBuildPairF64Pseudo(MI, BB); | |||
7051 | case RISCV::SplitF64Pseudo: | |||
7052 | return emitSplitF64Pseudo(MI, BB); | |||
7053 | } | |||
7054 | } | |||
7055 | ||||
7056 | // Calling Convention Implementation. | |||
7057 | // The expectations for frontend ABI lowering vary from target to target. | |||
7058 | // Ideally, an LLVM frontend would be able to avoid worrying about many ABI | |||
7059 | // details, but this is a longer term goal. For now, we simply try to keep the | |||
7060 | // role of the frontend as simple and well-defined as possible. The rules can | |||
7061 | // be summarised as: | |||
7062 | // * Never split up large scalar arguments. We handle them here. | |||
7063 | // * If a hardfloat calling convention is being used, and the struct may be | |||
7064 | // passed in a pair of registers (fp+fp, int+fp), and both registers are | |||
7065 | // available, then pass as two separate arguments. If either the GPRs or FPRs | |||
7066 | // are exhausted, then pass according to the rule below. | |||
7067 | // * If a struct could never be passed in registers or directly in a stack | |||
7068 | // slot (as it is larger than 2*XLEN and the floating point rules don't | |||
7069 | // apply), then pass it using a pointer with the byval attribute. | |||
7070 | // * If a struct is less than 2*XLEN, then coerce to either a two-element | |||
7071 | // word-sized array or a 2*XLEN scalar (depending on alignment). | |||
7072 | // * The frontend can determine whether a struct is returned by reference or | |||
7073 | // not based on its size and fields. If it will be returned by reference, the | |||
7074 | // frontend must modify the prototype so a pointer with the sret annotation is | |||
7075 | // passed as the first argument. This is not necessary for large scalar | |||
7076 | // returns. | |||
7077 | // * Struct return values and varargs should be coerced to structs containing | |||
7078 | // register-size fields in the same situations they would be for fixed | |||
7079 | // arguments. | |||
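// For instance (illustrative, per the rules above): on RV32 a struct of two
// ints would typically be coerced to [2 x i32], while the same-sized struct
// with 8-byte alignment would be coerced to a single i64.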
7080 | ||||
7081 | static const MCPhysReg ArgGPRs[] = { | |||
7082 | RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, | |||
7083 | RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17 | |||
7084 | }; | |||
7085 | static const MCPhysReg ArgFPR16s[] = { | |||
7086 | RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, | |||
7087 | RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H | |||
7088 | }; | |||
7089 | static const MCPhysReg ArgFPR32s[] = { | |||
7090 | RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, | |||
7091 | RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F | |||
7092 | }; | |||
7093 | static const MCPhysReg ArgFPR64s[] = { | |||
7094 | RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, | |||
7095 | RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D | |||
7096 | }; | |||
7097 | // This is an interim calling convention and it may be changed in the future. | |||
7098 | static const MCPhysReg ArgVRs[] = { | |||
7099 | RISCV::V8, RISCV::V9, RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13, | |||
7100 | RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19, | |||
7101 | RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23}; | |||
7102 | static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2, RISCV::V10M2, RISCV::V12M2, | |||
7103 | RISCV::V14M2, RISCV::V16M2, RISCV::V18M2, | |||
7104 | RISCV::V20M2, RISCV::V22M2}; | |||
7105 | static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4, | |||
7106 | RISCV::V20M4}; | |||
7107 | static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8}; | |||
7108 | ||||
7109 | // Pass a 2*XLEN argument that has been split into two XLEN values through | |||
7110 | // registers or the stack as necessary. | |||
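// For example, on RV32 an i64 split into two i32 halves may be passed
// entirely in GPRs, split between the last free GPR and the stack, or
// entirely on the stack, depending on how many argument GPRs remain.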
7111 | static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1, | |||
7112 | ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2, | |||
7113 | MVT ValVT2, MVT LocVT2, | |||
7114 | ISD::ArgFlagsTy ArgFlags2) { | |||
7115 | unsigned XLenInBytes = XLen / 8; | |||
7116 | if (Register Reg = State.AllocateReg(ArgGPRs)) { | |||
7117 | // At least one half can be passed via register. | |||
7118 | State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg, | |||
7119 | VA1.getLocVT(), CCValAssign::Full)); | |||
7120 | } else { | |||
7121 | // Both halves must be passed on the stack, with proper alignment. | |||
7122 | Align StackAlign = | |||
7123 | std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign()); | |||
7124 | State.addLoc( | |||
7125 | CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(), | |||
7126 | State.AllocateStack(XLenInBytes, StackAlign), | |||
7127 | VA1.getLocVT(), CCValAssign::Full)); | |||
7128 | State.addLoc(CCValAssign::getMem( | |||
7129 | ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)), | |||
7130 | LocVT2, CCValAssign::Full)); | |||
7131 | return false; | |||
7132 | } | |||
7133 | ||||
7134 | if (Register Reg = State.AllocateReg(ArgGPRs)) { | |||
7135 | // The second half can also be passed via register. | |||
7136 | State.addLoc( | |||
7137 | CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full)); | |||
7138 | } else { | |||
7139 | // The second half is passed via the stack, without additional alignment. | |||
7140 | State.addLoc(CCValAssign::getMem( | |||
7141 | ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)), | |||
7142 | LocVT2, CCValAssign::Full)); | |||
7143 | } | |||
7144 | ||||
7145 | return false; | |||
7146 | } | |||
7147 | ||||
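// Note (added): vector values are allocated from the list matching their
// register class, i.e. their LMUL: single registers from ArgVRs, and aligned
// groups of 2, 4 or 8 from ArgVRM2s/ArgVRM4s/ArgVRM8s.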
7148 | static unsigned allocateRVVReg(MVT ValVT, unsigned ValNo, | |||
7149 | Optional<unsigned> FirstMaskArgument, | |||
7150 | CCState &State, const RISCVTargetLowering &TLI) { | |||
7151 | const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT); | |||
7152 | if (RC == &RISCV::VRRegClass) { | |||
7153 | // Assign the first mask argument to V0. | |||
7154 | // This is an interim calling convention and it may be changed in the | |||
7155 | // future. | |||
7156 | if (FirstMaskArgument.hasValue() && ValNo == FirstMaskArgument.getValue()) | |||
7157 | return State.AllocateReg(RISCV::V0); | |||
7158 | return State.AllocateReg(ArgVRs); | |||
7159 | } | |||
7160 | if (RC == &RISCV::VRM2RegClass) | |||
7161 | return State.AllocateReg(ArgVRM2s); | |||
7162 | if (RC == &RISCV::VRM4RegClass) | |||
7163 | return State.AllocateReg(ArgVRM4s); | |||
7164 | if (RC == &RISCV::VRM8RegClass) | |||
7165 | return State.AllocateReg(ArgVRM8s); | |||
7166 | llvm_unreachable("Unhandled register class for ValueType"); | |||
7167 | } | |||
7168 | ||||
7169 | // Implements the RISC-V calling convention. Returns true upon failure. | |||
7170 | static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo, | |||
7171 | MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, | |||
7172 | ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed, | |||
7173 | bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI, | |||
7174 | Optional<unsigned> FirstMaskArgument) { | |||
7175 | unsigned XLen = DL.getLargestLegalIntTypeSizeInBits(); | |||
7176 | assert(XLen == 32 || XLen == 64); | |||
7177 | MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64; | |||
7178 | ||||
7179 | // Any return value split into more than two values can't be returned | |||
7180 | // directly. Vectors are returned via the available vector registers. | |||
7181 | if (!LocVT.isVector() && IsRet && ValNo > 1) | |||
7182 | return true; | |||
7183 | ||||
7184 | // UseGPRForF16_F32 if targeting one of the soft-float ABIs, if passing a | |||
7185 | // variadic argument, or if no F16/F32 argument registers are available. | |||
7186 | bool UseGPRForF16_F32 = true; | |||
7187 | // UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if passing a | |||
7188 | // variadic argument, or if no F64 argument registers are available. | |||
7189 | bool UseGPRForF64 = true; | |||
7190 | ||||
7191 | switch (ABI) { | |||
7192 | default: | |||
7193 | llvm_unreachable("Unexpected ABI"); | |||
7194 | case RISCVABI::ABI_ILP32: | |||
7195 | case RISCVABI::ABI_LP64: | |||
7196 | break; | |||
7197 | case RISCVABI::ABI_ILP32F: | |||
7198 | case RISCVABI::ABI_LP64F: | |||
7199 | UseGPRForF16_F32 = !IsFixed; | |||
7200 | break; | |||
7201 | case RISCVABI::ABI_ILP32D: | |||
7202 | case RISCVABI::ABI_LP64D: | |||
7203 | UseGPRForF16_F32 = !IsFixed; | |||
7204 | UseGPRForF64 = !IsFixed; | |||
7205 | break; | |||
7206 | } | |||
7207 | ||||
7208 | // FPR16, FPR32, and FPR64 alias each other. | |||
7209 | if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) { | |||
7210 | UseGPRForF16_F32 = true; | |||
7211 | UseGPRForF64 = true; | |||
7212 | } | |||
7213 | ||||
7214 | // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and | |||
7215 | // similar local variables rather than directly checking against the target | |||
7216 | // ABI. | |||
7217 | ||||
7218 | if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) { | |||
7219 | LocVT = XLenVT; | |||
7220 | LocInfo = CCValAssign::BCvt; | |||
7221 | } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) { | |||
7222 | LocVT = MVT::i64; | |||
7223 | LocInfo = CCValAssign::BCvt; | |||
7224 | } | |||
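// BCvt here means the FP value travels through an integer register and is
// recovered by a bitcast (or an FMV node) rather than a value conversion.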
7225 | ||||
7226 | // If this is a variadic argument, the RISC-V calling convention requires | |||
7227 | // that it is assigned an 'even' or 'aligned' register if it has 8-byte | |||
7228 | // alignment (RV32) or 16-byte alignment (RV64). An aligned register should | |||
7229 | // be used regardless of whether the original argument was split during | |||
7230 | // legalisation or not. The argument will not be passed by registers if the | |||
7231 | // original type is larger than 2*XLEN, so the register alignment rule does | |||
7232 | // not apply. | |||
7233 | unsigned TwoXLenInBytes = (2 * XLen) / 8; | |||
7234 | if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes && | |||
7235 | DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) { | |||
7236 | unsigned RegIdx = State.getFirstUnallocated(ArgGPRs); | |||
7237 | // Skip 'odd' register if necessary. | |||
7238 | if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1) | |||
7239 | State.AllocateReg(ArgGPRs); | |||
7240 | } | |||
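// For example, a variadic double on RV32 with a1 as the next free GPR skips
// a1 so the value lands in the aligned pair a2/a3.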
7241 | ||||
7242 | SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs(); | |||
7243 | SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags = | |||
7244 | State.getPendingArgFlags(); | |||
7245 | ||||
7246 | assert(PendingLocs.size() == PendingArgFlags.size() && | |||
7247 | "PendingLocs and PendingArgFlags out of sync"); | |||
7248 | ||||
7249 | // Handle passing f64 on RV32D with a soft float ABI or when floating point | |||
7250 | // registers are exhausted. | |||
7251 | if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) { | |||
7252 | assert(!ArgFlags.isSplit() && PendingLocs.empty() && | |||
7253 | "Can't lower f64 if it is split"); | |||
7254 | // Depending on available argument GPRS, f64 may be passed in a pair of | |||
7255 | // GPRs, split between a GPR and the stack, or passed completely on the | |||
7256 | // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these | |||
7257 | // cases. | |||
7258 | Register Reg = State.AllocateReg(ArgGPRs); | |||
7259 | LocVT = MVT::i32; | |||
7260 | if (!Reg) { | |||
7261 | unsigned StackOffset = State.AllocateStack(8, Align(8)); | |||
7262 | State.addLoc( | |||
7263 | CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo)); | |||
7264 | return false; | |||
7265 | } | |||
7266 | if (!State.AllocateReg(ArgGPRs)) | |||
7267 | State.AllocateStack(4, Align(4)); | |||
7268 | State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); | |||
7269 | return false; | |||
7270 | } | |||
7271 | ||||
7272 | // Fixed-length vectors are located in the corresponding scalable-vector | |||
7273 | // container types. | |||
7274 | if (ValVT.isFixedLengthVector()) | |||
7275 | LocVT = TLI.getContainerForFixedLengthVector(LocVT); | |||
7276 | ||||
7277 | // Split arguments might be passed indirectly, so keep track of the pending | |||
7278 | // values. Split vectors are passed via a mix of registers and indirectly, so | |||
7279 | // treat them as we would any other argument. | |||
7280 | if (ValVT.isScalarInteger() && (ArgFlags.isSplit() || !PendingLocs.empty())) { | |||
7281 | LocVT = XLenVT; | |||
7282 | LocInfo = CCValAssign::Indirect; | |||
7283 | PendingLocs.push_back( | |||
7284 | CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo)); | |||
7285 | PendingArgFlags.push_back(ArgFlags); | |||
7286 | if (!ArgFlags.isSplitEnd()) { | |||
7287 | return false; | |||
7288 | } | |||
7289 | } | |||
7290 | ||||
7291 | // If the split argument only had two elements, it should be passed directly | |||
7292 | // in registers or on the stack. | |||
7293 | if (ValVT.isScalarInteger() && ArgFlags.isSplitEnd() && | |||
7294 | PendingLocs.size() <= 2) { | |||
7295 | assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()"); | |||
7296 | // Apply the normal calling convention rules to the first half of the | |||
7297 | // split argument. | |||
7298 | CCValAssign VA = PendingLocs[0]; | |||
7299 | ISD::ArgFlagsTy AF = PendingArgFlags[0]; | |||
7300 | PendingLocs.clear(); | |||
7301 | PendingArgFlags.clear(); | |||
7302 | return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT, | |||
7303 | ArgFlags); | |||
7304 | } | |||
7305 | ||||
7306 | // Allocate to a register if possible, or else a stack slot. | |||
7307 | Register Reg; | |||
7308 | unsigned StoreSizeBytes = XLen / 8; | |||
7309 | Align StackAlign = Align(XLen / 8); | |||
7310 | ||||
7311 | if (ValVT == MVT::f16 && !UseGPRForF16_F32) | |||
7312 | Reg = State.AllocateReg(ArgFPR16s); | |||
7313 | else if (ValVT == MVT::f32 && !UseGPRForF16_F32) | |||
7314 | Reg = State.AllocateReg(ArgFPR32s); | |||
7315 | else if (ValVT == MVT::f64 && !UseGPRForF64) | |||
7316 | Reg = State.AllocateReg(ArgFPR64s); | |||
7317 | else if (ValVT.isVector()) { | |||
7318 | Reg = allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI); | |||
7319 | if (!Reg) { | |||
7320 | // For return values, the vector must be passed fully via registers or | |||
7321 | // via the stack. | |||
7322 | // FIXME: The proposed vector ABI only mandates v8-v15 for return values, | |||
7323 | // but we're using all of them. | |||
7324 | if (IsRet) | |||
7325 | return true; | |||
7326 | // Try using a GPR to pass the address. | |||
7327 | if ((Reg = State.AllocateReg(ArgGPRs))) { | |||
7328 | LocVT = XLenVT; | |||
7329 | LocInfo = CCValAssign::Indirect; | |||
7330 | } else if (ValVT.isScalableVector()) { | |||
7331 | report_fatal_error("Unable to pass scalable vector types on the stack"); | |||
7332 | } else { | |||
7333 | // Pass fixed-length vectors on the stack. | |||
7334 | LocVT = ValVT; | |||
7335 | StoreSizeBytes = ValVT.getStoreSize(); | |||
7336 | // Align vectors to their element sizes, being careful for vXi1 | |||
7337 | // vectors. | |||
7338 | StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne(); | |||
7339 | } | |||
7340 | } | |||
7341 | } else { | |||
7342 | Reg = State.AllocateReg(ArgGPRs); | |||
7343 | } | |||
7344 | ||||
7345 | unsigned StackOffset = | |||
7346 | Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign); | |||
7347 | ||||
7348 | // If we reach this point and PendingLocs is non-empty, we must be at the | |||
7349 | // end of a split argument that must be passed indirectly. | |||
7350 | if (!PendingLocs.empty()) { | |||
7351 | assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()"); | |||
7352 | assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()"); | |||
7353 | ||||
7354 | for (auto &It : PendingLocs) { | |||
7355 | if (Reg) | |||
7356 | It.convertToReg(Reg); | |||
7357 | else | |||
7358 | It.convertToMem(StackOffset); | |||
7359 | State.addLoc(It); | |||
7360 | } | |||
7361 | PendingLocs.clear(); | |||
7362 | PendingArgFlags.clear(); | |||
7363 | return false; | |||
7364 | } | |||
7365 | ||||
7366 | assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT || | |||
7367 | (TLI.getSubtarget().hasStdExtV() && ValVT.isVector())) && | |||
7368 | "Expected an XLenVT or vector types at this stage"); | |||
7369 | ||||
7370 | if (Reg) { | |||
7371 | State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); | |||
7372 | return false; | |||
7373 | } | |||
7374 | ||||
7375 | // When a floating-point value is passed on the stack, no bit-conversion is | |||
7376 | // needed. | |||
7377 | if (ValVT.isFloatingPoint()) { | |||
7378 | LocVT = ValVT; | |||
7379 | LocInfo = CCValAssign::Full; | |||
7380 | } | |||
7381 | State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo)); | |||
7382 | return false; | |||
7383 | } | |||
7384 | ||||
7385 | template <typename ArgTy> | |||
7386 | static Optional<unsigned> preAssignMask(const ArgTy &Args) { | |||
7387 | for (const auto &ArgIdx : enumerate(Args)) { | |||
7388 | MVT ArgVT = ArgIdx.value().VT; | |||
7389 | if (ArgVT.isVector() && ArgVT.getVectorElementType() == MVT::i1) | |||
7390 | return ArgIdx.index(); | |||
7391 | } | |||
7392 | return None; | |||
7393 | } | |||
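// The index found here is the argument that allocateRVVReg assigns to V0,
// matching the convention that the first mask argument is passed in V0.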
7394 | ||||
7395 | void RISCVTargetLowering::analyzeInputArgs( | |||
7396 | MachineFunction &MF, CCState &CCInfo, | |||
7397 | const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet, | |||
7398 | RISCVCCAssignFn Fn) const { | |||
7399 | unsigned NumArgs = Ins.size(); | |||
7400 | FunctionType *FType = MF.getFunction().getFunctionType(); | |||
7401 | ||||
7402 | Optional<unsigned> FirstMaskArgument; | |||
7403 | if (Subtarget.hasStdExtV()) | |||
7404 | FirstMaskArgument = preAssignMask(Ins); | |||
7405 | ||||
7406 | for (unsigned i = 0; i != NumArgs; ++i) { | |||
7407 | MVT ArgVT = Ins[i].VT; | |||
7408 | ISD::ArgFlagsTy ArgFlags = Ins[i].Flags; | |||
7409 | ||||
7410 | Type *ArgTy = nullptr; | |||
7411 | if (IsRet) | |||
7412 | ArgTy = FType->getReturnType(); | |||
7413 | else if (Ins[i].isOrigArg()) | |||
7414 | ArgTy = FType->getParamType(Ins[i].getOrigArgIndex()); | |||
7415 | ||||
7416 | RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI(); | |||
7417 | if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full, | |||
7418 | ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this, | |||
7419 | FirstMaskArgument)) { | |||
7420 | LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type " | |||
7421 | << EVT(ArgVT).getEVTString() << '\n'); | |||
7422 | llvm_unreachable(nullptr); | |||
7423 | } | |||
7424 | } | |||
7425 | } | |||
7426 | ||||
7427 | void RISCVTargetLowering::analyzeOutputArgs( | |||
7428 | MachineFunction &MF, CCState &CCInfo, | |||
7429 | const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet, | |||
7430 | CallLoweringInfo *CLI, RISCVCCAssignFn Fn) const { | |||
7431 | unsigned NumArgs = Outs.size(); | |||
7432 | ||||
7433 | Optional<unsigned> FirstMaskArgument; | |||
7434 | if (Subtarget.hasStdExtV()) | |||
7435 | FirstMaskArgument = preAssignMask(Outs); | |||
7436 | ||||
7437 | for (unsigned i = 0; i != NumArgs; i++) { | |||
7438 | MVT ArgVT = Outs[i].VT; | |||
7439 | ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; | |||
7440 | Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr; | |||
7441 | ||||
7442 | RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI(); | |||
7443 | if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full, | |||
7444 | ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this, | |||
7445 | FirstMaskArgument)) { | |||
7446 | LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type " | |||
7447 | << EVT(ArgVT).getEVTString() << "\n"); | |||
7448 | llvm_unreachable(nullptr); | |||
7449 | } | |||
7450 | } | |||
7451 | } | |||
7452 | ||||
7453 | // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect | |||
7454 | // values. | |||
7455 | static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val, | |||
7456 | const CCValAssign &VA, const SDLoc &DL, | |||
7457 | const RISCVSubtarget &Subtarget) { | |||
7458 | switch (VA.getLocInfo()) { | |||
7459 | default: | |||
7460 | llvm_unreachable("Unexpected CCValAssign::LocInfo"); | |||
7461 | case CCValAssign::Full: | |||
7462 | if (VA.getValVT().isFixedLengthVector() && VA.getLocVT().isScalableVector()) | |||
7463 | Val = convertFromScalableVector(VA.getValVT(), Val, DAG, Subtarget); | |||
7464 | break; | |||
7465 | case CCValAssign::BCvt: | |||
7466 | if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16) | |||
7467 | Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val); | |||
7468 | else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) | |||
7469 | Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val); | |||
7470 | else | |||
7471 | Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val); | |||
7472 | break; | |||
7473 | } | |||
7474 | return Val; | |||
7475 | } | |||
7476 | ||||
7477 | // The caller is responsible for loading the full value if the argument is | |||
7478 | // passed with CCValAssign::Indirect. | |||
7479 | static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain, | |||
7480 | const CCValAssign &VA, const SDLoc &DL, | |||
7481 | const RISCVTargetLowering &TLI) { | |||
7482 | MachineFunction &MF = DAG.getMachineFunction(); | |||
7483 | MachineRegisterInfo &RegInfo = MF.getRegInfo(); | |||
7484 | EVT LocVT = VA.getLocVT(); | |||
7485 | SDValue Val; | |||
7486 | const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT()); | |||
7487 | Register VReg = RegInfo.createVirtualRegister(RC); | |||
7488 | RegInfo.addLiveIn(VA.getLocReg(), VReg); | |||
7489 | Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT); | |||
7490 | ||||
7491 | if (VA.getLocInfo() == CCValAssign::Indirect) | |||
7492 | return Val; | |||
7493 | ||||
7494 | return convertLocVTToValVT(DAG, Val, VA, DL, TLI.getSubtarget()); | |||
7495 | } | |||
7496 | ||||
7497 | static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val, | |||
7498 | const CCValAssign &VA, const SDLoc &DL, | |||
7499 | const RISCVSubtarget &Subtarget) { | |||
7500 | EVT LocVT = VA.getLocVT(); | |||
7501 | ||||
7502 | switch (VA.getLocInfo()) { | |||
7503 | default: | |||
7504 | llvm_unreachable("Unexpected CCValAssign::LocInfo"); | |||
7505 | case CCValAssign::Full: | |||
7506 | if (VA.getValVT().isFixedLengthVector() && LocVT.isScalableVector()) | |||
7507 | Val = convertToScalableVector(LocVT, Val, DAG, Subtarget); | |||
7508 | break; | |||
7509 | case CCValAssign::BCvt: | |||
7510 | if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16) | |||
7511 | Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val); | |||
7512 | else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) | |||
7513 | Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val); | |||
7514 | else | |||
7515 | Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val); | |||
7516 | break; | |||
7517 | } | |||
7518 | return Val; | |||
7519 | } | |||
7520 | ||||
7521 | // The caller is responsible for loading the full value if the argument is | |||
7522 | // passed with CCValAssign::Indirect. | |||
7523 | static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain, | |||
7524 | const CCValAssign &VA, const SDLoc &DL) { | |||
7525 | MachineFunction &MF = DAG.getMachineFunction(); | |||
7526 | MachineFrameInfo &MFI = MF.getFrameInfo(); | |||
7527 | EVT LocVT = VA.getLocVT(); | |||
7528 | EVT ValVT = VA.getValVT(); | |||
7529 | EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0)); | |||
7530 | int FI = MFI.CreateFixedObject(ValVT.getStoreSize(), VA.getLocMemOffset(), | |||
7531 | /*Immutable=*/true); | |||
7532 | SDValue FIN = DAG.getFrameIndex(FI, PtrVT); | |||
7533 | SDValue Val; | |||
7534 | ||||
7535 | ISD::LoadExtType ExtType; | |||
7536 | switch (VA.getLocInfo()) { | |||
7537 | default: | |||
7538 | llvm_unreachable("Unexpected CCValAssign::LocInfo"); | |||
7539 | case CCValAssign::Full: | |||
7540 | case CCValAssign::Indirect: | |||
7541 | case CCValAssign::BCvt: | |||
7542 | ExtType = ISD::NON_EXTLOAD; | |||
7543 | break; | |||
7544 | } | |||
7545 | Val = DAG.getExtLoad( | |||
7546 | ExtType, DL, LocVT, Chain, FIN, | |||
7547 | MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT); | |||
7548 | return Val; | |||
7549 | } | |||
7550 | ||||
7551 | static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain, | |||
7552 | const CCValAssign &VA, const SDLoc &DL) { | |||
7553 | assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 && | |||
7554 | "Unexpected VA"); | |||
7555 | MachineFunction &MF = DAG.getMachineFunction(); | |||
7556 | MachineFrameInfo &MFI = MF.getFrameInfo(); | |||
7557 | MachineRegisterInfo &RegInfo = MF.getRegInfo(); | |||
7558 | ||||
7559 | if (VA.isMemLoc()) { | |||
7560 | // f64 is passed on the stack. | |||
7561 | int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*Immutable=*/true); | |||
7562 | SDValue FIN = DAG.getFrameIndex(FI, MVT::i32); | |||
7563 | return DAG.getLoad(MVT::f64, DL, Chain, FIN, | |||
7564 | MachinePointerInfo::getFixedStack(MF, FI)); | |||
7565 | } | |||
7566 | ||||
7567 | assert(VA.isRegLoc() && "Expected register VA assignment"); | |||
7568 | ||||
7569 | Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass); | |||
7570 | RegInfo.addLiveIn(VA.getLocReg(), LoVReg); | |||
7571 | SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32); | |||
7572 | SDValue Hi; | |||
7573 | if (VA.getLocReg() == RISCV::X17) { | |||
7574 | // Second half of f64 is passed on the stack. | |||
7575 | int FI = MFI.CreateFixedObject(4, 0, /*Immutable=*/true); | |||
7576 | SDValue FIN = DAG.getFrameIndex(FI, MVT::i32); | |||
7577 | Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN, | |||
7578 | MachinePointerInfo::getFixedStack(MF, FI)); | |||
7579 | } else { | |||
7580 | // Second half of f64 is passed in another GPR. | |||
7581 | Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass); | |||
7582 | RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg); | |||
7583 | Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32); | |||
7584 | } | |||
7585 | return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi); | |||
7586 | } | |||
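| // A hedged example: with the D extension but a soft-float ABI on RV32, an | |||
| // f64 whose low half lands in a7 (X17) has its high half passed in the | |||
| // first stack slot; the code above reloads that half and re-pairs it with | |||
| // BuildPairF64. | |||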
7587 | ||||
7588 | // FastCC yields less than a 1% performance improvement on some particular | |||
7589 | // benchmarks, but it may theoretically benefit other cases. | |||
7590 | static bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI, | |||
7591 | unsigned ValNo, MVT ValVT, MVT LocVT, | |||
7592 | CCValAssign::LocInfo LocInfo, | |||
7593 | ISD::ArgFlagsTy ArgFlags, CCState &State, | |||
7594 | bool IsFixed, bool IsRet, Type *OrigTy, | |||
7595 | const RISCVTargetLowering &TLI, | |||
7596 | Optional<unsigned> FirstMaskArgument) { | |||
7597 | ||||
7598 | // X5 and X6 might be used for save-restore libcall. | |||
7599 | static const MCPhysReg GPRList[] = { | |||
7600 | RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14, | |||
7601 | RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7, RISCV::X28, | |||
7602 | RISCV::X29, RISCV::X30, RISCV::X31}; | |||
7603 | ||||
7604 | if (LocVT == MVT::i32 || LocVT == MVT::i64) { | |||
7605 | if (unsigned Reg = State.AllocateReg(GPRList)) { | |||
7606 | State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); | |||
7607 | return false; | |||
7608 | } | |||
7609 | } | |||
7610 | ||||
7611 | if (LocVT == MVT::f16) { | |||
7612 | static const MCPhysReg FPR16List[] = { | |||
7613 | RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H, | |||
7614 | RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H, RISCV::F1_H, | |||
7615 | RISCV::F2_H, RISCV::F3_H, RISCV::F4_H, RISCV::F5_H, RISCV::F6_H, | |||
7616 | RISCV::F7_H, RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H}; | |||
7617 | if (unsigned Reg = State.AllocateReg(FPR16List)) { | |||
7618 | State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); | |||
7619 | return false; | |||
7620 | } | |||
7621 | } | |||
7622 | ||||
7623 | if (LocVT == MVT::f32) { | |||
7624 | static const MCPhysReg FPR32List[] = { | |||
7625 | RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F, | |||
7626 | RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F, RISCV::F1_F, | |||
7627 | RISCV::F2_F, RISCV::F3_F, RISCV::F4_F, RISCV::F5_F, RISCV::F6_F, | |||
7628 | RISCV::F7_F, RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F}; | |||
7629 | if (unsigned Reg = State.AllocateReg(FPR32List)) { | |||
7630 | State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); | |||
7631 | return false; | |||
7632 | } | |||
7633 | } | |||
7634 | ||||
7635 | if (LocVT == MVT::f64) { | |||
7636 | static const MCPhysReg FPR64List[] = { | |||
7637 | RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D, | |||
7638 | RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D, RISCV::F1_D, | |||
7639 | RISCV::F2_D, RISCV::F3_D, RISCV::F4_D, RISCV::F5_D, RISCV::F6_D, | |||
7640 | RISCV::F7_D, RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D}; | |||
7641 | if (unsigned Reg = State.AllocateReg(FPR64List)) { | |||
7642 | State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); | |||
7643 | return false; | |||
7644 | } | |||
7645 | } | |||
7646 | ||||
7647 | if (LocVT == MVT::i32 || LocVT == MVT::f32) { | |||
7648 | unsigned Offset4 = State.AllocateStack(4, Align(4)); | |||
7649 | State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo)); | |||
7650 | return false; | |||
7651 | } | |||
7652 | ||||
7653 | if (LocVT == MVT::i64 || LocVT == MVT::f64) { | |||
7654 | unsigned Offset5 = State.AllocateStack(8, Align(8)); | |||
7655 | State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo)); | |||
7656 | return false; | |||
7657 | } | |||
7658 | ||||
7659 | if (LocVT.isVector()) { | |||
7660 | if (unsigned Reg = | |||
7661 | allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI)) { | |||
7662 | // Fixed-length vectors are located in the corresponding scalable-vector | |||
7663 | // container types. | |||
7664 | if (ValVT.isFixedLengthVector()) | |||
7665 | LocVT = TLI.getContainerForFixedLengthVector(LocVT); | |||
7666 | State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); | |||
7667 | } else { | |||
7668 | // Try to pass the address via a "fast" GPR. | |||
7669 | if (unsigned GPRReg = State.AllocateReg(GPRList)) { | |||
7670 | LocInfo = CCValAssign::Indirect; | |||
7671 | LocVT = TLI.getSubtarget().getXLenVT(); | |||
7672 | State.addLoc(CCValAssign::getReg(ValNo, ValVT, GPRReg, LocVT, LocInfo)); | |||
7673 | } else if (ValVT.isFixedLengthVector()) { | |||
7674 | auto StackAlign = | |||
7675 | MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne(); | |||
7676 | unsigned StackOffset = | |||
7677 | State.AllocateStack(ValVT.getStoreSize(), StackAlign); | |||
7678 | State.addLoc( | |||
7679 | CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo)); | |||
7680 | } else { | |||
7681 | // Can't pass scalable vectors on the stack. | |||
7682 | return true; | |||
7683 | } | |||
7684 | } | |||
7685 | ||||
7686 | return false; | |||
7687 | } | |||
7688 | ||||
7689 | return true; // CC didn't match. | |||
7690 | } | |||
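| // As with the tablegen-generated CCAssignFns, these handlers return true | |||
| // when they fail to assign a location and false on success. | |||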
7691 | ||||
7692 | static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT, | |||
7693 | CCValAssign::LocInfo LocInfo, | |||
7694 | ISD::ArgFlagsTy ArgFlags, CCState &State) { | |||
7695 | ||||
7696 | if (LocVT == MVT::i32 || LocVT == MVT::i64) { | |||
7697 | // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim | |||
7698 | // s1 s2 s3 s4 s5 s6 s7 s8 s9 s10 s11 | |||
7699 | static const MCPhysReg GPRList[] = { | |||
7700 | RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22, | |||
7701 | RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27}; | |||
7702 | if (unsigned Reg = State.AllocateReg(GPRList)) { | |||
7703 | State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); | |||
7704 | return false; | |||
7705 | } | |||
7706 | } | |||
7707 | ||||
7708 | if (LocVT == MVT::f32) { | |||
7709 | // Pass in STG registers: F1, ..., F6 | |||
7710 | // fs0 ... fs5 | |||
7711 | static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F, | |||
7712 | RISCV::F18_F, RISCV::F19_F, | |||
7713 | RISCV::F20_F, RISCV::F21_F}; | |||
7714 | if (unsigned Reg = State.AllocateReg(FPR32List)) { | |||
7715 | State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); | |||
7716 | return false; | |||
7717 | } | |||
7718 | } | |||
7719 | ||||
7720 | if (LocVT == MVT::f64) { | |||
7721 | // Pass in STG registers: D1, ..., D6 | |||
7722 | // fs6 ... fs11 | |||
7723 | static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D, | |||
7724 | RISCV::F24_D, RISCV::F25_D, | |||
7725 | RISCV::F26_D, RISCV::F27_D}; | |||
7726 | if (unsigned Reg = State.AllocateReg(FPR64List)) { | |||
7727 | State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); | |||
7728 | return false; | |||
7729 | } | |||
7730 | } | |||
7731 | ||||
7732 | report_fatal_error("No registers left in GHC calling convention"); | |||
7733 | return true; | |||
7734 | } | |||
7735 | ||||
7736 | // Transform physical registers into virtual registers. | |||
7737 | SDValue RISCVTargetLowering::LowerFormalArguments( | |||
7738 | SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, | |||
7739 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL, | |||
7740 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { | |||
7741 | ||||
7742 | MachineFunction &MF = DAG.getMachineFunction(); | |||
7743 | ||||
7744 | switch (CallConv) { | |||
7745 | default: | |||
7746 | report_fatal_error("Unsupported calling convention"); | |||
7747 | case CallingConv::C: | |||
7748 | case CallingConv::Fast: | |||
7749 | break; | |||
7750 | case CallingConv::GHC: | |||
7751 | if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] || | |||
7752 | !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD]) | |||
7753 | report_fatal_error( | |||
7754 | "GHC calling convention requires the F and D instruction set extensions"); | |||
7755 | } | |||
7756 | ||||
7757 | const Function &Func = MF.getFunction(); | |||
7758 | if (Func.hasFnAttribute("interrupt")) { | |||
7759 | if (!Func.arg_empty()) | |||
7760 | report_fatal_error( | |||
7761 | "Functions with the interrupt attribute cannot have arguments!"); | |||
7762 | ||||
7763 | StringRef Kind = | |||
7764 | MF.getFunction().getFnAttribute("interrupt").getValueAsString(); | |||
7765 | ||||
7766 | if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine")) | |||
7767 | report_fatal_error( | |||
7768 | "Function interrupt attribute argument not supported!"); | |||
7769 | } | |||
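| // For illustration (hedged): IR such as | |||
| //   define void @isr() "interrupt"="machine" { ... } | |||
| // passes these checks, while an ISR that takes arguments or names an | |||
| // unknown kind is rejected by the fatal errors above. | |||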
7770 | ||||
7771 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); | |||
7772 | MVT XLenVT = Subtarget.getXLenVT(); | |||
7773 | unsigned XLenInBytes = Subtarget.getXLen() / 8; | |||
7774 | // Used with varargs to accumulate store chains. | |||
7775 | std::vector<SDValue> OutChains; | |||
7776 | ||||
7777 | // Assign locations to all of the incoming arguments. | |||
7778 | SmallVector<CCValAssign, 16> ArgLocs; | |||
7779 | CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext()); | |||
7780 | ||||
7781 | if (CallConv == CallingConv::GHC) | |||
7782 | CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC); | |||
7783 | else | |||
7784 | analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false, | |||
7785 | CallConv == CallingConv::Fast ? CC_RISCV_FastCC | |||
7786 | : CC_RISCV); | |||
7787 | ||||
7788 | for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { | |||
7789 | CCValAssign &VA = ArgLocs[i]; | |||
7790 | SDValue ArgValue; | |||
7791 | // Passing f64 on RV32D with a soft float ABI must be handled as a special | |||
7792 | // case. | |||
7793 | if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) | |||
7794 | ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL); | |||
7795 | else if (VA.isRegLoc()) | |||
7796 | ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this); | |||
7797 | else | |||
7798 | ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL); | |||
7799 | ||||
7800 | if (VA.getLocInfo() == CCValAssign::Indirect) { | |||
7801 | // If the original argument was split and passed by reference (e.g. i128 | |||
7802 | // on RV32), we need to load all parts of it here (using the same | |||
7803 | // address). Vectors may be partly split to registers and partly to the | |||
7804 | // stack, in which case the base address is partly offset and subsequent | |||
7805 | // loads are relative to that. | |||
7806 | InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue, | |||
7807 | MachinePointerInfo())); | |||
7808 | unsigned ArgIndex = Ins[i].OrigArgIndex; | |||
7809 | unsigned ArgPartOffset = Ins[i].PartOffset; | |||
7810 | assert(VA.getValVT().isVector() || ArgPartOffset == 0); | |||
7811 | while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) { | |||
7812 | CCValAssign &PartVA = ArgLocs[i + 1]; | |||
7813 | unsigned PartOffset = Ins[i + 1].PartOffset - ArgPartOffset; | |||
7814 | SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL); | |||
7815 | if (PartVA.getValVT().isScalableVector()) | |||
7816 | Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset); | |||
7817 | SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue, Offset); | |||
7818 | InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address, | |||
7819 | MachinePointerInfo())); | |||
7820 | ++i; | |||
7821 | } | |||
7822 | continue; | |||
7823 | } | |||
7824 | InVals.push_back(ArgValue); | |||
7825 | } | |||
7826 | ||||
7827 | if (IsVarArg) { | |||
7828 | ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs); | |||
7829 | unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs); | |||
7830 | const TargetRegisterClass *RC = &RISCV::GPRRegClass; | |||
7831 | MachineFrameInfo &MFI = MF.getFrameInfo(); | |||
7832 | MachineRegisterInfo &RegInfo = MF.getRegInfo(); | |||
7833 | RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>(); | |||
7834 | ||||
7835 | // Offset of the first variable argument from stack pointer, and size of | |||
7836 | // the vararg save area. For now, the varargs save area is either zero or | |||
7837 | // large enough to hold a0-a7. | |||
7838 | int VaArgOffset, VarArgsSaveSize; | |||
7839 | ||||
7840 | // If all registers are allocated, then all varargs must be passed on the | |||
7841 | // stack and we don't need to save any argregs. | |||
7842 | if (ArgRegs.size() == Idx) { | |||
7843 | VaArgOffset = CCInfo.getNextStackOffset(); | |||
7844 | VarArgsSaveSize = 0; | |||
7845 | } else { | |||
7846 | VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx); | |||
7847 | VaArgOffset = -VarArgsSaveSize; | |||
7848 | } | |||
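| // Worked example (hedged): on RV32 with five of the eight argument | |||
| // registers already used (Idx == 5), the three remaining registers give | |||
| // VarArgsSaveSize == 12 and VaArgOffset == -12. | |||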
7849 | ||||
7850 | // Record the frame index of the first variable argument, | |||
7851 | // which is needed by the VASTART node. | |||
7852 | int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true); | |||
7853 | RVFI->setVarArgsFrameIndex(FI); | |||
7854 | ||||
7855 | // If saving an odd number of registers, create an extra stack slot to | |||
7856 | // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures | |||
7857 | // offsets to even-numbered registers remain 2*XLEN-aligned. | |||
7858 | if (Idx % 2) { | |||
7859 | MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true); | |||
7860 | VarArgsSaveSize += XLenInBytes; | |||
7861 | } | |||
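| // Continuing the example above: with Idx == 5 (odd), a 4-byte pad at | |||
| // offset -16 grows the save area to 16 bytes, keeping it 2*XLEN-aligned. | |||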
7862 | ||||
7863 | // Copy the integer registers that may have been used for passing varargs | |||
7864 | // to the vararg save area. | |||
7865 | for (unsigned I = Idx; I < ArgRegs.size(); | |||
7866 | ++I, VaArgOffset += XLenInBytes) { | |||
7867 | const Register Reg = RegInfo.createVirtualRegister(RC); | |||
7868 | RegInfo.addLiveIn(ArgRegs[I], Reg); | |||
7869 | SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT); | |||
7870 | FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true); | |||
7871 | SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout())); | |||
7872 | SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff, | |||
7873 | MachinePointerInfo::getFixedStack(MF, FI)); | |||
7874 | cast<StoreSDNode>(Store.getNode()) | |||
7875 | ->getMemOperand() | |||
7876 | ->setValue((Value *)nullptr); | |||
7877 | OutChains.push_back(Store); | |||
7878 | } | |||
7879 | RVFI->setVarArgsSaveSize(VarArgsSaveSize); | |||
7880 | } | |||
7881 | ||||
7882 | // All stores are grouped in one node to allow the matching between | |||
7883 | // the size of Ins and InVals. This only happens for vararg functions. | |||
7884 | if (!OutChains.empty()) { | |||
7885 | OutChains.push_back(Chain); | |||
7886 | Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains); | |||
7887 | } | |||
7888 | ||||
7889 | return Chain; | |||
7890 | } | |||
7891 | ||||
7892 | /// isEligibleForTailCallOptimization - Check whether the call is eligible | |||
7893 | /// for tail call optimization. | |||
7894 | /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization. | |||
7895 | bool RISCVTargetLowering::isEligibleForTailCallOptimization( | |||
7896 | CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF, | |||
7897 | const SmallVector<CCValAssign, 16> &ArgLocs) const { | |||
7898 | ||||
7899 | auto &Callee = CLI.Callee; | |||
7900 | auto CalleeCC = CLI.CallConv; | |||
7901 | auto &Outs = CLI.Outs; | |||
7902 | auto &Caller = MF.getFunction(); | |||
7903 | auto CallerCC = Caller.getCallingConv(); | |||
7904 | ||||
7905 | // Exception-handling functions need a special set of instructions to | |||
7906 | // indicate a return to the hardware. Tail-calling another function would | |||
7907 | // probably break this. | |||
7908 | // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This | |||
7909 | // should be expanded as new function attributes are introduced. | |||
7910 | if (Caller.hasFnAttribute("interrupt")) | |||
7911 | return false; | |||
7912 | ||||
7913 | // Do not tail call opt if the stack is used to pass parameters. | |||
7914 | if (CCInfo.getNextStackOffset() != 0) | |||
7915 | return false; | |||
7916 | ||||
7917 | // Do not tail call opt if any parameters need to be passed indirectly. | |||
7918 | // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are | |||
7919 | // passed indirectly: the address of the value is passed in a register, or, | |||
7920 | // if no register is available, the address is put on the stack. Passing | |||
7921 | // indirectly often requires allocating stack space to store the value, so | |||
7922 | // the CCInfo.getNextStackOffset() != 0 check alone is not enough; we must | |||
7923 | // also check whether any of the CCValAssign ArgLocs are passed | |||
7924 | // CCValAssign::Indirect. | |||
7925 | for (auto &VA : ArgLocs) | |||
7926 | if (VA.getLocInfo() == CCValAssign::Indirect) | |||
7927 | return false; | |||
7928 | ||||
7929 | // Do not tail call opt if either caller or callee uses struct return | |||
7930 | // semantics. | |||
7931 | auto IsCallerStructRet = Caller.hasStructRetAttr(); | |||
7932 | auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet(); | |||
7933 | if (IsCallerStructRet || IsCalleeStructRet) | |||
7934 | return false; | |||
7935 | ||||
7936 | // Externally-defined functions with weak linkage should not be | |||
7937 | // tail-called. The behaviour of branch instructions in this situation (as | |||
7938 | // used for tail calls) is implementation-defined, so we cannot rely on the | |||
7939 | // linker replacing the tail call with a return. | |||
7940 | if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { | |||
7941 | const GlobalValue *GV = G->getGlobal(); | |||
7942 | if (GV->hasExternalWeakLinkage()) | |||
7943 | return false; | |||
7944 | } | |||
7945 | ||||
7946 | // The callee has to preserve all registers the caller needs to preserve. | |||
7947 | const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo(); | |||
7948 | const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC); | |||
7949 | if (CalleeCC != CallerCC) { | |||
7950 | const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC); | |||
7951 | if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved)) | |||
7952 | return false; | |||
7953 | } | |||
7954 | ||||
7955 | // Byval parameters hand the function a pointer directly into the stack area | |||
7956 | // we want to reuse during a tail call. Working around this *is* possible | |||
7957 | // but less efficient and uglier in LowerCall. | |||
7958 | for (auto &Arg : Outs) | |||
7959 | if (Arg.Flags.isByVal()) | |||
7960 | return false; | |||
7961 | ||||
7962 | return true; | |||
7963 | } | |||
7964 | ||||
7965 | static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG) { | |||
7966 | return DAG.getDataLayout().getPrefTypeAlign( | |||
7967 | VT.getTypeForEVT(*DAG.getContext())); | |||
7968 | } | |||
7969 | ||||
7970 | // Lower a call to a callseq_start + CALL + callseq_end chain, and add input | |||
7971 | // and output parameter nodes. | |||
7972 | SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI, | |||
7973 | SmallVectorImpl<SDValue> &InVals) const { | |||
7974 | SelectionDAG &DAG = CLI.DAG; | |||
7975 | SDLoc &DL = CLI.DL; | |||
7976 | SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; | |||
7977 | SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; | |||
7978 | SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; | |||
7979 | SDValue Chain = CLI.Chain; | |||
7980 | SDValue Callee = CLI.Callee; | |||
7981 | bool &IsTailCall = CLI.IsTailCall; | |||
7982 | CallingConv::ID CallConv = CLI.CallConv; | |||
7983 | bool IsVarArg = CLI.IsVarArg; | |||
7984 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); | |||
7985 | MVT XLenVT = Subtarget.getXLenVT(); | |||
7986 | ||||
7987 | MachineFunction &MF = DAG.getMachineFunction(); | |||
7988 | ||||
7989 | // Analyze the operands of the call, assigning locations to each operand. | |||
7990 | SmallVector<CCValAssign, 16> ArgLocs; | |||
7991 | CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext()); | |||
7992 | ||||
7993 | if (CallConv == CallingConv::GHC) | |||
7994 | ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC); | |||
7995 | else | |||
7996 | analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI, | |||
7997 | CallConv == CallingConv::Fast ? CC_RISCV_FastCC | |||
7998 | : CC_RISCV); | |||
7999 | ||||
8000 | // Check if it's really possible to do a tail call. | |||
8001 | if (IsTailCall) | |||
8002 | IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs); | |||
8003 | ||||
8004 | if (IsTailCall) | |||
8005 | ++NumTailCalls; | |||
8006 | else if (CLI.CB && CLI.CB->isMustTailCall()) | |||
8007 | report_fatal_error("failed to perform tail call elimination on a call " | |||
8008 | "site marked musttail"); | |||
8009 | ||||
8010 | // Get a count of how many bytes are to be pushed on the stack. | |||
8011 | unsigned NumBytes = ArgCCInfo.getNextStackOffset(); | |||
8012 | ||||
8013 | // Create local copies for byval args | |||
8014 | SmallVector<SDValue, 8> ByValArgs; | |||
8015 | for (unsigned i = 0, e = Outs.size(); i != e; ++i) { | |||
8016 | ISD::ArgFlagsTy Flags = Outs[i].Flags; | |||
8017 | if (!Flags.isByVal()) | |||
8018 | continue; | |||
8019 | ||||
8020 | SDValue Arg = OutVals[i]; | |||
8021 | unsigned Size = Flags.getByValSize(); | |||
8022 | Align Alignment = Flags.getNonZeroByValAlign(); | |||
8023 | ||||
8024 | int FI = | |||
8025 | MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false); | |||
8026 | SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout())); | |||
8027 | SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT); | |||
8028 | ||||
8029 | Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment, | |||
8030 | /*IsVolatile=*/false, | |||
8031 | /*AlwaysInline=*/false, IsTailCall, | |||
8032 | MachinePointerInfo(), MachinePointerInfo()); | |||
8033 | ByValArgs.push_back(FIPtr); | |||
8034 | } | |||
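| // A hedged example: for a 32-byte byval struct, the loop above creates a | |||
| // caller-local stack object and memcpys the argument into it; the callee | |||
| // later receives a pointer to that copy rather than the original. | |||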
8035 | ||||
8036 | if (!IsTailCall) | |||
8037 | Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL); | |||
8038 | ||||
8039 | // Copy argument values to their designated locations. | |||
8040 | SmallVector<std::pair<Register, SDValue>, 8> RegsToPass; | |||
8041 | SmallVector<SDValue, 8> MemOpChains; | |||
8042 | SDValue StackPtr; | |||
8043 | for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) { | |||
8044 | CCValAssign &VA = ArgLocs[i]; | |||
8045 | SDValue ArgValue = OutVals[i]; | |||
8046 | ISD::ArgFlagsTy Flags = Outs[i].Flags; | |||
8047 | ||||
8048 | // Handle passing f64 on RV32D with a soft float ABI as a special case. | |||
8049 | bool IsF64OnRV32DSoftABI = | |||
8050 | VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64; | |||
8051 | if (IsF64OnRV32DSoftABI && VA.isRegLoc()) { | |||
8052 | SDValue SplitF64 = DAG.getNode( | |||
8053 | RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue); | |||
8054 | SDValue Lo = SplitF64.getValue(0); | |||
8055 | SDValue Hi = SplitF64.getValue(1); | |||
8056 | ||||
8057 | Register RegLo = VA.getLocReg(); | |||
8058 | RegsToPass.push_back(std::make_pair(RegLo, Lo)); | |||
8059 | ||||
8060 | if (RegLo == RISCV::X17) { | |||
8061 | // Second half of f64 is passed on the stack. | |||
8062 | // Work out the address of the stack slot. | |||
8063 | if (!StackPtr.getNode()) | |||
8064 | StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT); | |||
8065 | // Emit the store. | |||
8066 | MemOpChains.push_back( | |||
8067 | DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo())); | |||
8068 | } else { | |||
8069 | // Second half of f64 is passed in another GPR. | |||
8070 | assert(RegLo < RISCV::X31 && "Invalid register pair"); | |||
8071 | Register RegHigh = RegLo + 1; | |||
8072 | RegsToPass.push_back(std::make_pair(RegHigh, Hi)); | |||
8073 | } | |||
8074 | continue; | |||
8075 | } | |||
8076 | ||||
8077 | // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way | |||
8078 | // as any other MemLoc. | |||
8079 | ||||
8080 | // Promote the value if needed. | |||
8081 | // For now, only handle fully promoted and indirect arguments. | |||
8082 | if (VA.getLocInfo() == CCValAssign::Indirect) { | |||
8083 | // Store the argument in a stack slot and pass its address. | |||
8084 | Align StackAlign = | |||
8085 | std::max(getPrefTypeAlign(Outs[i].ArgVT, DAG), | |||
8086 | getPrefTypeAlign(ArgValue.getValueType(), DAG)); | |||
8087 | TypeSize StoredSize = ArgValue.getValueType().getStoreSize(); | |||
8088 | // If the original argument was split (e.g. i128), we need | |||
8089 | // to store the required parts of it here (and pass just one address). | |||
8090 | // Vectors may be partly split to registers and partly to the stack, in | |||
8091 | // which case the base address is partly offset and subsequent stores are | |||
8092 | // relative to that. | |||
8093 | unsigned ArgIndex = Outs[i].OrigArgIndex; | |||
8094 | unsigned ArgPartOffset = Outs[i].PartOffset; | |||
8095 | assert(VA.getValVT().isVector() || ArgPartOffset == 0); | |||
8096 | // Calculate the total size to store. We don't know what we're actually | |||
8097 | // storing without performing the loop below and collecting the | |||
8098 | // per-part info. | |||
8099 | SmallVector<std::pair<SDValue, SDValue>> Parts; | |||
8100 | while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) { | |||
8101 | SDValue PartValue = OutVals[i + 1]; | |||
8102 | unsigned PartOffset = Outs[i + 1].PartOffset - ArgPartOffset; | |||
8103 | SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL); | |||
8104 | EVT PartVT = PartValue.getValueType(); | |||
8105 | if (PartVT.isScalableVector()) | |||
8106 | Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset); | |||
8107 | StoredSize += PartVT.getStoreSize(); | |||
8108 | StackAlign = std::max(StackAlign, getPrefTypeAlign(PartVT, DAG)); | |||
8109 | Parts.push_back(std::make_pair(PartValue, Offset)); | |||
8110 | ++i; | |||
8111 | } | |||
8112 | SDValue SpillSlot = DAG.CreateStackTemporary(StoredSize, StackAlign); | |||
8113 | int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex(); | |||
8114 | MemOpChains.push_back( | |||
8115 | DAG.getStore(Chain, DL, ArgValue, SpillSlot, | |||
8116 | MachinePointerInfo::getFixedStack(MF, FI))); | |||
8117 | for (const auto &Part : Parts) { | |||
8118 | SDValue PartValue = Part.first; | |||
8119 | SDValue PartOffset = Part.second; | |||
8120 | SDValue Address = | |||
8121 | DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot, PartOffset); | |||
8122 | MemOpChains.push_back( | |||
8123 | DAG.getStore(Chain, DL, PartValue, Address, | |||
8124 | MachinePointerInfo::getFixedStack(MF, FI))); | |||
8125 | } | |||
8126 | ArgValue = SpillSlot; | |||
8127 | } else { | |||
8128 | ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL, Subtarget); | |||
8129 | } | |||
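| // A hedged example: an i128 argument on RV32 is CCValAssign::Indirect; its | |||
| // four i32 parts are stored into one 16-byte spill slot above, and only the | |||
| // slot's address is passed to the callee. | |||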
8130 | ||||
8131 | // Use local copy if it is a byval arg. | |||
8132 | if (Flags.isByVal()) | |||
8133 | ArgValue = ByValArgs[j++]; | |||
8134 | ||||
8135 | if (VA.isRegLoc()) { | |||
8136 | // Queue up the argument copies and emit them at the end. | |||
8137 | RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue)); | |||
8138 | } else { | |||
8139 | assert(VA.isMemLoc() && "Argument not register or memory"); | |||
8140 | assert(!IsTailCall && "Tail call not allowed if stack is used " | |||
8141 | "for passing parameters"); | |||
8142 | ||||
8143 | // Work out the address of the stack slot. | |||
8144 | if (!StackPtr.getNode()) | |||
8145 | StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT); | |||
8146 | SDValue Address = | |||
8147 | DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, | |||
8148 | DAG.getIntPtrConstant(VA.getLocMemOffset(), DL)); | |||
8149 | ||||
8150 | // Emit the store. | |||
8151 | MemOpChains.push_back( | |||
8152 | DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo())); | |||
8153 | } | |||
8154 | } | |||
8155 | ||||
8156 | // Join the stores, which are independent of one another. | |||
8157 | if (!MemOpChains.empty()) | |||
8158 | Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains); | |||
8159 | ||||
8160 | SDValue Glue; | |||
8161 | ||||
8162 | // Build a sequence of copy-to-reg nodes, chained and glued together. | |||
8163 | for (auto &Reg : RegsToPass) { | |||
8164 | Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue); | |||
8165 | Glue = Chain.getValue(1); | |||
8166 | } | |||
8167 | ||||
8168 | // Validate that none of the argument registers have been marked as | |||
8169 | // reserved; if any have been, report an error. Do the same for the return | |||
8170 | // address if this is not a tail call. | |||
8171 | validateCCReservedRegs(RegsToPass, MF); | |||
8172 | if (!IsTailCall && | |||
8173 | MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1)) | |||
8174 | MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{ | |||
8175 | MF.getFunction(), | |||
8176 | "Return address register required, but has been reserved."}); | |||
8177 | ||||
8178 | // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a | |||
8179 | // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't | |||
8180 | // split it, and so that a direct call can be matched by PseudoCALL. | |||
8181 | if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) { | |||
8182 | const GlobalValue *GV = S->getGlobal(); | |||
8183 | ||||
8184 | unsigned OpFlags = RISCVII::MO_CALL; | |||
8185 | if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV)) | |||
8186 | OpFlags = RISCVII::MO_PLT; | |||
8187 | ||||
8188 | Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags); | |||
8189 | } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { | |||
8190 | unsigned OpFlags = RISCVII::MO_CALL; | |||
8191 | ||||
8192 | if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(), | |||
8193 | nullptr)) | |||
8194 | OpFlags = RISCVII::MO_PLT; | |||
8195 | ||||
8196 | Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags); | |||
8197 | } | |||
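| // A hedged example: a call to a preemptible symbol in a PIC link (where | |||
| // shouldAssumeDSOLocal is false) is tagged MO_PLT and later emitted as | |||
| // "call foo@plt"; DSO-local callees keep the plain MO_CALL form. | |||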
8198 | ||||
8199 | // The first call operand is the chain and the second is the target address. | |||
8200 | SmallVector<SDValue, 8> Ops; | |||
8201 | Ops.push_back(Chain); | |||
8202 | Ops.push_back(Callee); | |||
8203 | ||||
8204 | // Add argument registers to the end of the list so that they are | |||
8205 | // known live into the call. | |||
8206 | for (auto &Reg : RegsToPass) | |||
8207 | Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType())); | |||
8208 | ||||
8209 | if (!IsTailCall) { | |||
8210 | // Add a register mask operand representing the call-preserved registers. | |||
8211 | const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); | |||
8212 | const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv); | |||
8213 | assert(Mask && "Missing call preserved mask for calling convention"); | |||
8214 | Ops.push_back(DAG.getRegisterMask(Mask)); | |||
8215 | } | |||
8216 | ||||
8217 | // Glue the call to the argument copies, if any. | |||
8218 | if (Glue.getNode()) | |||
8219 | Ops.push_back(Glue); | |||
8220 | ||||
8221 | // Emit the call. | |||
8222 | SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); | |||
8223 | ||||
8224 | if (IsTailCall) { | |||
8225 | MF.getFrameInfo().setHasTailCall(); | |||
8226 | return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops); | |||
8227 | } | |||
8228 | ||||
8229 | Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops); | |||
8230 | DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge); | |||
8231 | Glue = Chain.getValue(1); | |||
8232 | ||||
8233 | // Mark the end of the call, which is glued to the call itself. | |||
8234 | Chain = DAG.getCALLSEQ_END(Chain, | |||
8235 | DAG.getConstant(NumBytes, DL, PtrVT, true), | |||
8236 | DAG.getConstant(0, DL, PtrVT, true), | |||
8237 | Glue, DL); | |||
8238 | Glue = Chain.getValue(1); | |||
8239 | ||||
8240 | // Assign locations to each value returned by this call. | |||
8241 | SmallVector<CCValAssign, 16> RVLocs; | |||
8242 | CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext()); | |||
8243 | analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true, CC_RISCV); | |||
8244 | ||||
8245 | // Copy all of the result registers out of their specified physreg. | |||
8246 | for (auto &VA : RVLocs) { | |||
8247 | // Copy the value out | |||
8248 | SDValue RetValue = | |||
8249 | DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue); | |||
8250 | // Glue the RetValue to the end of the call sequence | |||
8251 | Chain = RetValue.getValue(1); | |||
8252 | Glue = RetValue.getValue(2); | |||
8253 | ||||
8254 | if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) { | |||
8255 | assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment"); | |||
8256 | SDValue RetValue2 = | |||
8257 | DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue); | |||
8258 | Chain = RetValue2.getValue(1); | |||
8259 | Glue = RetValue2.getValue(2); | |||
8260 | RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue, | |||
8261 | RetValue2); | |||
8262 | } | |||
8263 | ||||
8264 | RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL, Subtarget); | |||
8265 | ||||
8266 | InVals.push_back(RetValue); | |||
8267 | } | |||
8268 | ||||
8269 | return Chain; | |||
8270 | } | |||
8271 | ||||
8272 | bool RISCVTargetLowering::CanLowerReturn( | |||
8273 | CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg, | |||
8274 | const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const { | |||
8275 | SmallVector<CCValAssign, 16> RVLocs; | |||
8276 | CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context); | |||
8277 | ||||
8278 | Optional<unsigned> FirstMaskArgument; | |||
8279 | if (Subtarget.hasStdExtV()) | |||
8280 | FirstMaskArgument = preAssignMask(Outs); | |||
8281 | ||||
8282 | for (unsigned i = 0, e = Outs.size(); i != e; ++i) { | |||
8283 | MVT VT = Outs[i].VT; | |||
8284 | ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; | |||
8285 | RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI(); | |||
8286 | if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full, | |||
8287 | ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr, | |||
8288 | *this, FirstMaskArgument)) | |||
8289 | return false; | |||
8290 | } | |||
8291 | return true; | |||
8292 | } | |||
8293 | ||||
8294 | SDValue | |||
8295 | RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, | |||
8296 | bool IsVarArg, | |||
8297 | const SmallVectorImpl<ISD::OutputArg> &Outs, | |||
8298 | const SmallVectorImpl<SDValue> &OutVals, | |||
8299 | const SDLoc &DL, SelectionDAG &DAG) const { | |||
8300 | const MachineFunction &MF = DAG.getMachineFunction(); | |||
8301 | const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>(); | |||
8302 | ||||
8303 | // Stores the assignment of the return value to a location. | |||
8304 | SmallVector<CCValAssign, 16> RVLocs; | |||
8305 | ||||
8306 | // Info about the registers and stack slot. | |||
8307 | CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs, | |||
8308 | *DAG.getContext()); | |||
8309 | ||||
8310 | analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true, | |||
8311 | nullptr, CC_RISCV); | |||
8312 | ||||
8313 | if (CallConv == CallingConv::GHC && !RVLocs.empty()) | |||
8314 | report_fatal_error("GHC functions return void only"); | |||
8315 | ||||
8316 | SDValue Glue; | |||
8317 | SmallVector<SDValue, 4> RetOps(1, Chain); | |||
8318 | ||||
8319 | // Copy the result values into the output registers. | |||
8320 | for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) { | |||
8321 | SDValue Val = OutVals[i]; | |||
8322 | CCValAssign &VA = RVLocs[i]; | |||
8323 | assert(VA.isRegLoc() && "Can only return in registers!"); | |||
8324 | ||||
8325 | if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) { | |||
8326 | // Handle returning f64 on RV32D with a soft float ABI. | |||
8327 | assert(VA.isRegLoc() && "Expected return via registers"); | |||
8328 | SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL, | |||
8329 | DAG.getVTList(MVT::i32, MVT::i32), Val); | |||
8330 | SDValue Lo = SplitF64.getValue(0); | |||
8331 | SDValue Hi = SplitF64.getValue(1); | |||
8332 | Register RegLo = VA.getLocReg(); | |||
8333 | assert(RegLo < RISCV::X31 && "Invalid register pair"); | |||
8334 | Register RegHi = RegLo + 1; | |||
8335 | ||||
8336 | if (STI.isRegisterReservedByUser(RegLo) || | |||
8337 | STI.isRegisterReservedByUser(RegHi)) | |||
8338 | MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{ | |||
8339 | MF.getFunction(), | |||
8340 | "Return value register required, but has been reserved."}); | |||
8341 | ||||
8342 | Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue); | |||
8343 | Glue = Chain.getValue(1); | |||
8344 | RetOps.push_back(DAG.getRegister(RegLo, MVT::i32)); | |||
8345 | Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue); | |||
8346 | Glue = Chain.getValue(1); | |||
8347 | RetOps.push_back(DAG.getRegister(RegHi, MVT::i32)); | |||
8348 | } else { | |||
8349 | // Handle a 'normal' return. | |||
8350 | Val = convertValVTToLocVT(DAG, Val, VA, DL, Subtarget); | |||
8351 | Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue); | |||
8352 | ||||
8353 | if (STI.isRegisterReservedByUser(VA.getLocReg())) | |||
8354 | MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{ | |||
8355 | MF.getFunction(), | |||
8356 | "Return value register required, but has been reserved."}); | |||
8357 | ||||
8358 | // Guarantee that all emitted copies are stuck together. | |||
8359 | Glue = Chain.getValue(1); | |||
8360 | RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); | |||
8361 | } | |||
8362 | } | |||
8363 | ||||
8364 | RetOps[0] = Chain; // Update chain. | |||
8365 | ||||
8366 | // Add the glue node if we have it. | |||
8367 | if (Glue.getNode()) { | |||
8368 | RetOps.push_back(Glue); | |||
8369 | } | |||
8370 | ||||
8371 | unsigned RetOpc = RISCVISD::RET_FLAG; | |||
8372 | // Interrupt service routines use different return instructions. | |||
8373 | const Function &Func = DAG.getMachineFunction().getFunction(); | |||
8374 | if (Func.hasFnAttribute("interrupt")) { | |||
8375 | if (!Func.getReturnType()->isVoidTy()) | |||
8376 | report_fatal_error( | |||
8377 | "Functions with the interrupt attribute must have void return type!"); | |||
8378 | ||||
8379 | MachineFunction &MF = DAG.getMachineFunction(); | |||
8380 | StringRef Kind = | |||
8381 | MF.getFunction().getFnAttribute("interrupt").getValueAsString(); | |||
8382 | ||||
8383 | if (Kind == "user") | |||
8384 | RetOpc = RISCVISD::URET_FLAG; | |||
8385 | else if (Kind == "supervisor") | |||
8386 | RetOpc = RISCVISD::SRET_FLAG; | |||
8387 | else | |||
8388 | RetOpc = RISCVISD::MRET_FLAG; | |||
8389 | } | |||
8390 | ||||
8391 | return DAG.getNode(RetOpc, DL, MVT::Other, RetOps); | |||
8392 | } | |||
8393 | ||||
8394 | void RISCVTargetLowering::validateCCReservedRegs( | |||
8395 | const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs, | |||
8396 | MachineFunction &MF) const { | |||
8397 | const Function &F = MF.getFunction(); | |||
8398 | const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>(); | |||
8399 | ||||
8400 | if (llvm::any_of(Regs, [&STI](auto Reg) { | |||
8401 | return STI.isRegisterReservedByUser(Reg.first); | |||
8402 | })) | |||
8403 | F.getContext().diagnose(DiagnosticInfoUnsupported{ | |||
8404 | F, "Argument register required, but has been reserved."}); | |||
8405 | } | |||
8406 | ||||
8407 | bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { | |||
8408 | return CI->isTailCall(); | |||
8409 | } | |||
8410 | ||||
8411 | const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const { | |||
8412 | #define NODE_NAME_CASE(NODE) \ | |||
8413 | case RISCVISD::NODE: \ | |||
8414 | return "RISCVISD::" #NODE; | |||
8415 | // clang-format off | |||
8416 | switch ((RISCVISD::NodeType)Opcode) { | |||
8417 | case RISCVISD::FIRST_NUMBER: | |||
8418 | break; | |||
8419 | NODE_NAME_CASE(RET_FLAG) | |||
8420 | NODE_NAME_CASE(URET_FLAG) | |||
8421 | NODE_NAME_CASE(SRET_FLAG) | |||
8422 | NODE_NAME_CASE(MRET_FLAG) | |||
8423 | NODE_NAME_CASE(CALL) | |||
8424 | NODE_NAME_CASE(SELECT_CC) | |||
8425 | NODE_NAME_CASE(BR_CC) | |||
8426 | NODE_NAME_CASE(BuildPairF64) | |||
8427 | NODE_NAME_CASE(SplitF64) | |||
8428 | NODE_NAME_CASE(TAIL) | |||
8429 | NODE_NAME_CASE(MULHSU) | |||
8430 | NODE_NAME_CASE(SLLW) | |||
8431 | NODE_NAME_CASE(SRAW) | |||
8432 | NODE_NAME_CASE(SRLW) | |||
8433 | NODE_NAME_CASE(DIVW) | |||
8434 | NODE_NAME_CASE(DIVUW) | |||
8435 | NODE_NAME_CASE(REMUW) | |||
8436 | NODE_NAME_CASE(ROLW) | |||
8437 | NODE_NAME_CASE(RORW) | |||
8438 | NODE_NAME_CASE(CLZW) | |||
8439 | NODE_NAME_CASE(CTZW) | |||
8440 | NODE_NAME_CASE(FSLW) | |||
8441 | NODE_NAME_CASE(FSRW) | |||
8442 | NODE_NAME_CASE(FSL) | |||
8443 | NODE_NAME_CASE(FSR) | |||
8444 | NODE_NAME_CASE(FMV_H_X) | |||
8445 | NODE_NAME_CASE(FMV_X_ANYEXTH) | |||
8446 | NODE_NAME_CASE(FMV_W_X_RV64) | |||
8447 | NODE_NAME_CASE(FMV_X_ANYEXTW_RV64) | |||
8448 | NODE_NAME_CASE(FCVT_X_RTZ) | |||
8449 | NODE_NAME_CASE(FCVT_XU_RTZ) | |||
8450 | NODE_NAME_CASE(FCVT_W_RTZ_RV64) | |||
8451 | NODE_NAME_CASE(FCVT_WU_RTZ_RV64) | |||
8452 | NODE_NAME_CASE(READ_CYCLE_WIDE) | |||
8453 | NODE_NAME_CASE(GREV) | |||
8454 | NODE_NAME_CASE(GREVW) | |||
8455 | NODE_NAME_CASE(GORC) | |||
8456 | NODE_NAME_CASE(GORCW) | |||
8457 | NODE_NAME_CASE(SHFL) | |||
8458 | NODE_NAME_CASE(SHFLW) | |||
8459 | NODE_NAME_CASE(UNSHFL) | |||
8460 | NODE_NAME_CASE(UNSHFLW) | |||
8461 | NODE_NAME_CASE(BCOMPRESS) | |||
8462 | NODE_NAME_CASE(BCOMPRESSW) | |||
8463 | NODE_NAME_CASE(BDECOMPRESS) | |||
8464 | NODE_NAME_CASE(BDECOMPRESSW) | |||
8465 | NODE_NAME_CASE(VMV_V_X_VL) | |||
8466 | NODE_NAME_CASE(VFMV_V_F_VL) | |||
8467 | NODE_NAME_CASE(VMV_X_S) | |||
8468 | NODE_NAME_CASE(VMV_S_X_VL) | |||
8469 | NODE_NAME_CASE(VFMV_S_F_VL) | |||
8470 | NODE_NAME_CASE(SPLAT_VECTOR_I64) | |||
8471 | NODE_NAME_CASE(SPLAT_VECTOR_SPLIT_I64_VL) | |||
8472 | NODE_NAME_CASE(READ_VLENB) | |||
8473 | NODE_NAME_CASE(TRUNCATE_VECTOR_VL) | |||
8474 | NODE_NAME_CASE(VSLIDEUP_VL) | |||
8475 | NODE_NAME_CASE(VSLIDE1UP_VL) | |||
8476 | NODE_NAME_CASE(VSLIDEDOWN_VL) | |||
8477 | NODE_NAME_CASE(VSLIDE1DOWN_VL) | |||
8478 | NODE_NAME_CASE(VID_VL) | |||
8479 | NODE_NAME_CASE(VFNCVT_ROD_VL) | |||
8480 | NODE_NAME_CASE(VECREDUCE_ADD_VL) | |||
8481 | NODE_NAME_CASE(VECREDUCE_UMAX_VL) | |||
8482 | NODE_NAME_CASE(VECREDUCE_SMAX_VL) | |||
8483 | NODE_NAME_CASE(VECREDUCE_UMIN_VL) | |||
8484 | NODE_NAME_CASE(VECREDUCE_SMIN_VL) | |||
8485 | NODE_NAME_CASE(VECREDUCE_AND_VL) | |||
8486 | NODE_NAME_CASE(VECREDUCE_OR_VL) | |||
8487 | NODE_NAME_CASE(VECREDUCE_XOR_VL) | |||
8488 | NODE_NAME_CASE(VECREDUCE_FADD_VL) | |||
8489 | NODE_NAME_CASE(VECREDUCE_SEQ_FADD_VL) | |||
8490 | NODE_NAME_CASE(VECREDUCE_FMIN_VL) | |||
8491 | NODE_NAME_CASE(VECREDUCE_FMAX_VL) | |||
8492 | NODE_NAME_CASE(ADD_VL) | |||
8493 | NODE_NAME_CASE(AND_VL) | |||
8494 | NODE_NAME_CASE(MUL_VL) | |||
8495 | NODE_NAME_CASE(OR_VL) | |||
8496 | NODE_NAME_CASE(SDIV_VL) | |||
8497 | NODE_NAME_CASE(SHL_VL) | |||
8498 | NODE_NAME_CASE(SREM_VL) | |||
8499 | NODE_NAME_CASE(SRA_VL) | |||
8500 | NODE_NAME_CASE(SRL_VL) | |||
8501 | NODE_NAME_CASE(SUB_VL) | |||
8502 | NODE_NAME_CASE(UDIV_VL) | |||
8503 | NODE_NAME_CASE(UREM_VL) | |||
8504 | NODE_NAME_CASE(XOR_VL) | |||
8505 | NODE_NAME_CASE(SADDSAT_VL) | |||
8506 | NODE_NAME_CASE(UADDSAT_VL) | |||
8507 | NODE_NAME_CASE(SSUBSAT_VL) | |||
8508 | NODE_NAME_CASE(USUBSAT_VL) | |||
8509 | NODE_NAME_CASE(FADD_VL) | |||
8510 | NODE_NAME_CASE(FSUB_VL) | |||
8511 | NODE_NAME_CASE(FMUL_VL) | |||
8512 | NODE_NAME_CASE(FDIV_VL) | |||
8513 | NODE_NAME_CASE(FNEG_VL) | |||
8514 | NODE_NAME_CASE(FABS_VL) | |||
8515 | NODE_NAME_CASE(FSQRT_VL) | |||
8516 | NODE_NAME_CASE(FMA_VL) | |||
8517 | NODE_NAME_CASE(FCOPYSIGN_VL) | |||
8518 | NODE_NAME_CASE(SMIN_VL) | |||
8519 | NODE_NAME_CASE(SMAX_VL) | |||
8520 | NODE_NAME_CASE(UMIN_VL) | |||
8521 | NODE_NAME_CASE(UMAX_VL) | |||
8522 | NODE_NAME_CASE(FMINNUM_VL) | |||
8523 | NODE_NAME_CASE(FMAXNUM_VL) | |||
8524 | NODE_NAME_CASE(MULHS_VL) | |||
8525 | NODE_NAME_CASE(MULHU_VL) | |||
8526 | NODE_NAME_CASE(FP_TO_SINT_VL) | |||
8527 | NODE_NAME_CASE(FP_TO_UINT_VL) | |||
8528 | NODE_NAME_CASE(SINT_TO_FP_VL) | |||
8529 | NODE_NAME_CASE(UINT_TO_FP_VL) | |||
8530 | NODE_NAME_CASE(FP_EXTEND_VL) | |||
8531 | NODE_NAME_CASE(FP_ROUND_VL) | |||
8532 | NODE_NAME_CASE(VWMUL_VL) | |||
8533 | NODE_NAME_CASE(VWMULU_VL) | |||
8534 | NODE_NAME_CASE(SETCC_VL) | |||
8535 | NODE_NAME_CASE(VSELECT_VL) | |||
8536 | NODE_NAME_CASE(VMAND_VL) | |||
8537 | NODE_NAME_CASE(VMOR_VL) | |||
8538 | NODE_NAME_CASE(VMXOR_VL) | |||
8539 | NODE_NAME_CASE(VMCLR_VL) | |||
8540 | NODE_NAME_CASE(VMSET_VL) | |||
8541 | NODE_NAME_CASE(VRGATHER_VX_VL) | |||
8542 | NODE_NAME_CASE(VRGATHER_VV_VL) | |||
8543 | NODE_NAME_CASE(VRGATHEREI16_VV_VL) | |||
8544 | NODE_NAME_CASE(VSEXT_VL) | |||
8545 | NODE_NAME_CASE(VZEXT_VL) | |||
8546 | NODE_NAME_CASE(VPOPC_VL) | |||
8547 | NODE_NAME_CASE(VLE_VL) | |||
8548 | NODE_NAME_CASE(VSE_VL) | |||
8549 | NODE_NAME_CASE(READ_CSR) | |||
8550 | NODE_NAME_CASE(WRITE_CSR) | |||
8551 | NODE_NAME_CASE(SWAP_CSR) | |||
8552 | } | |||
8553 | // clang-format on | |||
8554 | return nullptr; | |||
8555 | #undef NODE_NAME_CASE | |||
8556 | } | |||
8557 | ||||
8558 | /// getConstraintType - Given a constraint letter, return the type of | |||
8559 | /// constraint it is for this target. | |||
8560 | RISCVTargetLowering::ConstraintType | |||
8561 | RISCVTargetLowering::getConstraintType(StringRef Constraint) const { | |||
8562 | if (Constraint.size() == 1) { | |||
8563 | switch (Constraint[0]) { | |||
8564 | default: | |||
8565 | break; | |||
8566 | case 'f': | |||
8567 | return C_RegisterClass; | |||
8568 | case 'I': | |||
8569 | case 'J': | |||
8570 | case 'K': | |||
8571 | return C_Immediate; | |||
8572 | case 'A': | |||
8573 | return C_Memory; | |||
8574 | case 'S': // A symbolic address | |||
8575 | return C_Other; | |||
8576 | } | |||
8577 | } else { | |||
8578 | if (Constraint == "vr" || Constraint == "vm") | |||
8579 | return C_RegisterClass; | |||
8580 | } | |||
8581 | return TargetLowering::getConstraintType(Constraint); | |||
8582 | } | |||
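| // A hedged usage example for the 'A' memory constraint above: | |||
| //   asm volatile("amoswap.w %0, %2, %1" : "=r"(Old), "+A"(*Ptr) : "r"(New)); | |||
| // where the address operand must live in a general-purpose register. | |||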
8583 | ||||
8584 | std::pair<unsigned, const TargetRegisterClass *> | |||
8585 | RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, | |||
8586 | StringRef Constraint, | |||
8587 | MVT VT) const { | |||
8588 | // First, see if this is a constraint that directly corresponds to a | |||
8589 | // RISCV register class. | |||
8590 | if (Constraint.size() == 1) { | |||
8591 | switch (Constraint[0]) { | |||
8592 | case 'r': | |||
8593 | return std::make_pair(0U, &RISCV::GPRRegClass); | |||
8594 | case 'f': | |||
8595 | if (Subtarget.hasStdExtZfh() && VT == MVT::f16) | |||
8596 | return std::make_pair(0U, &RISCV::FPR16RegClass); | |||
8597 | if (Subtarget.hasStdExtF() && VT == MVT::f32) | |||
8598 | return std::make_pair(0U, &RISCV::FPR32RegClass); | |||
8599 | if (Subtarget.hasStdExtD() && VT == MVT::f64) | |||
8600 | return std::make_pair(0U, &RISCV::FPR64RegClass); | |||
8601 | break; | |||
8602 | default: | |||
8603 | break; | |||
8604 | } | |||
8605 | } else { | |||
8606 | if (Constraint == "vr") { | |||
8607 | for (const auto *RC : {&RISCV::VRRegClass, &RISCV::VRM2RegClass, | |||
8608 | &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) { | |||
8609 | if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) | |||
8610 | return std::make_pair(0U, RC); | |||
8611 | } | |||
8612 | } else if (Constraint == "vm") { | |||
8613 | if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy)) | |||
8614 | return std::make_pair(0U, &RISCV::VMRegClass); | |||
8615 | } | |||
8616 | } | |||
8617 | ||||
8618 | // Clang will correctly decode the usage of register name aliases into their | |||
8619 | // official names. However, other frontends like `rustc` do not. This allows | |||
8620 | // users of these frontends to use the ABI names for registers in LLVM-style | |||
8621 | // register constraints. | |||
8622 | unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower()) | |||
8623 | .Case("{zero}", RISCV::X0) | |||
8624 | .Case("{ra}", RISCV::X1) | |||
8625 | .Case("{sp}", RISCV::X2) | |||
8626 | .Case("{gp}", RISCV::X3) | |||
8627 | .Case("{tp}", RISCV::X4) | |||
8628 | .Case("{t0}", RISCV::X5) | |||
8629 | .Case("{t1}", RISCV::X6) | |||
8630 | .Case("{t2}", RISCV::X7) | |||
8631 | .Cases("{s0}", "{fp}", RISCV::X8) | |||
8632 | .Case("{s1}", RISCV::X9) | |||
8633 | .Case("{a0}", RISCV::X10) | |||
8634 | .Case("{a1}", RISCV::X11) | |||
8635 | .Case("{a2}", RISCV::X12) | |||
8636 | .Case("{a3}", RISCV::X13) | |||
8637 | .Case("{a4}", RISCV::X14) | |||
8638 | .Case("{a5}", RISCV::X15) | |||
8639 | .Case("{a6}", RISCV::X16) | |||
8640 | .Case("{a7}", RISCV::X17) | |||
8641 | .Case("{s2}", RISCV::X18) | |||
8642 | .Case("{s3}", RISCV::X19) | |||
8643 | .Case("{s4}", RISCV::X20) | |||
8644 | .Case("{s5}", RISCV::X21) | |||
8645 | .Case("{s6}", RISCV::X22) | |||
8646 | .Case("{s7}", RISCV::X23) | |||
8647 | .Case("{s8}", RISCV::X24) | |||
8648 | .Case("{s9}", RISCV::X25) | |||
8649 | .Case("{s10}", RISCV::X26) | |||
8650 | .Case("{s11}", RISCV::X27) | |||
8651 | .Case("{t3}", RISCV::X28) | |||
8652 | .Case("{t4}", RISCV::X29) | |||
8653 | .Case("{t5}", RISCV::X30) | |||
8654 | .Case("{t6}", RISCV::X31) | |||
8655 | .Default(RISCV::NoRegister); | |||
8656 | if (XRegFromAlias != RISCV::NoRegister) | |||
8657 | return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass); | |||
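| // e.g. a constraint written as "{a0}" or "{fp}" (as rustc emits) resolves | |||
| // here to X10 or X8 respectively, each in GPRRegClass. | |||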
8658 | ||||
8659 | // TargetLowering::getRegForInlineAsmConstraint uses the name of the | |||
8660 | // TableGen record rather than the AsmName to choose registers for InlineAsm | |||
8661 | // constraints, and we want to match those names to the widest floating-point | |||
8662 | // register type available, so manually select floating-point registers here. | |||
8663 | // | |||
8664 | // The second case in each pair is the ABI name of the register, so that | |||
8665 | // frontends can also use the ABI names in register constraint lists. | |||
8666 | if (Subtarget.hasStdExtF()) { | |||
8667 | unsigned FReg = StringSwitch<unsigned>(Constraint.lower()) | |||
8668 | .Cases("{f0}", "{ft0}", RISCV::F0_F) | |||
8669 | .Cases("{f1}", "{ft1}", RISCV::F1_F) | |||
8670 | .Cases("{f2}", "{ft2}", RISCV::F2_F) | |||
8671 | .Cases("{f3}", "{ft3}", RISCV::F3_F) | |||
8672 | .Cases("{f4}", "{ft4}", RISCV::F4_F) | |||
8673 | .Cases("{f5}", "{ft5}", RISCV::F5_F) | |||
8674 | .Cases("{f6}", "{ft6}", RISCV::F6_F) | |||
8675 | .Cases("{f7}", "{ft7}", RISCV::F7_F) | |||
8676 | .Cases("{f8}", "{fs0}", RISCV::F8_F) | |||
8677 | .Cases("{f9}", "{fs1}", RISCV::F9_F) | |||
8678 | .Cases("{f10}", "{fa0}", RISCV::F10_F) | |||
8679 | .Cases("{f11}", "{fa1}", RISCV::F11_F) | |||
8680 | .Cases("{f12}", "{fa2}", RISCV::F12_F) | |||
8681 | .Cases("{f13}", "{fa3}", RISCV::F13_F) | |||
8682 | .Cases("{f14}", "{fa4}", RISCV::F14_F) | |||
8683 | .Cases("{f15}", "{fa5}", RISCV::F15_F) | |||
8684 | .Cases("{f16}", "{fa6}", RISCV::F16_F) | |||
8685 | .Cases("{f17}", "{fa7}", RISCV::F17_F) | |||
8686 | .Cases("{f18}", "{fs2}", RISCV::F18_F) | |||
8687 | .Cases("{f19}", "{fs3}", RISCV::F19_F) | |||
8688 | .Cases("{f20}", "{fs4}", RISCV::F20_F) | |||
8689 | .Cases("{f21}", "{fs5}", RISCV::F21_F) | |||
8690 | .Cases("{f22}", "{fs6}", RISCV::F22_F) | |||
8691 | .Cases("{f23}", "{fs7}", RISCV::F23_F) | |||
8692 | .Cases("{f24}", "{fs8}", RISCV::F24_F) | |||
8693 | .Cases("{f25}", "{fs9}", RISCV::F25_F) | |||
8694 | .Cases("{f26}", "{fs10}", RISCV::F26_F) | |||
8695 | .Cases("{f27}", "{fs11}", RISCV::F27_F) | |||
8696 | .Cases("{f28}", "{ft8}", RISCV::F28_F) | |||
8697 | .Cases("{f29}", "{ft9}", RISCV::F29_F) | |||
8698 | .Cases("{f30}", "{ft10}", RISCV::F30_F) | |||
8699 | .Cases("{f31}", "{ft11}", RISCV::F31_F) | |||
8700 | .Default(RISCV::NoRegister); | |||
8701 | if (FReg != RISCV::NoRegister) { | |||
8702 | assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg"); | |||
8703 | if (Subtarget.hasStdExtD()) { | |||
8704 | unsigned RegNo = FReg - RISCV::F0_F; | |||
8705 | unsigned DReg = RISCV::F0_D + RegNo; | |||
8706 | return std::make_pair(DReg, &RISCV::FPR64RegClass); | |||
8707 | } | |||
8708 | return std::make_pair(FReg, &RISCV::FPR32RegClass); | |||
8709 | } | |||
8710 | } | |||
8711 | ||||
8712 | if (Subtarget.hasStdExtV()) { | |||
8713 | Register VReg = StringSwitch<Register>(Constraint.lower()) | |||
8714 | .Case("{v0}", RISCV::V0) | |||
8715 | .Case("{v1}", RISCV::V1) | |||
8716 | .Case("{v2}", RISCV::V2) | |||
8717 | .Case("{v3}", RISCV::V3) | |||
8718 | .Case("{v4}", RISCV::V4) | |||
8719 | .Case("{v5}", RISCV::V5) | |||
8720 | .Case("{v6}", RISCV::V6) | |||
8721 | .Case("{v7}", RISCV::V7) | |||
8722 | .Case("{v8}", RISCV::V8) | |||
8723 | .Case("{v9}", RISCV::V9) | |||
8724 | .Case("{v10}", RISCV::V10) | |||
8725 | .Case("{v11}", RISCV::V11) | |||
8726 | .Case("{v12}", RISCV::V12) | |||
8727 | .Case("{v13}", RISCV::V13) | |||
8728 | .Case("{v14}", RISCV::V14) | |||
8729 | .Case("{v15}", RISCV::V15) | |||
8730 | .Case("{v16}", RISCV::V16) | |||
8731 | .Case("{v17}", RISCV::V17) | |||
8732 | .Case("{v18}", RISCV::V18) | |||
8733 | .Case("{v19}", RISCV::V19) | |||
8734 | .Case("{v20}", RISCV::V20) | |||
8735 | .Case("{v21}", RISCV::V21) | |||
8736 | .Case("{v22}", RISCV::V22) | |||
8737 | .Case("{v23}", RISCV::V23) | |||
8738 | .Case("{v24}", RISCV::V24) | |||
8739 | .Case("{v25}", RISCV::V25) | |||
8740 | .Case("{v26}", RISCV::V26) | |||
8741 | .Case("{v27}", RISCV::V27) | |||
8742 | .Case("{v28}", RISCV::V28) | |||
8743 | .Case("{v29}", RISCV::V29) | |||
8744 | .Case("{v30}", RISCV::V30) | |||
8745 | .Case("{v31}", RISCV::V31) | |||
8746 | .Default(RISCV::NoRegister); | |||
8747 | if (VReg != RISCV::NoRegister) { | |||
8748 | if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy)) | |||
8749 | return std::make_pair(VReg, &RISCV::VMRegClass); | |||
8750 | if (TRI->isTypeLegalForClass(RISCV::VRRegClass, VT.SimpleTy)) | |||
8751 | return std::make_pair(VReg, &RISCV::VRRegClass); | |||
8752 | for (const auto *RC : | |||
8753 | {&RISCV::VRM2RegClass, &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) { | |||
8754 | if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) { | |||
8755 | VReg = TRI->getMatchingSuperReg(VReg, RISCV::sub_vrm1_0, RC); | |||
8756 | return std::make_pair(VReg, RC); | |||
8757 | } | |||
8758 | } | |||
8759 | } | |||
8760 | } | |||
8761 | ||||
8762 | return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); | |||
8763 | } | |||
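// Example (illustrative, not from the original file): both constraint
// spellings below resolve to X10/X11. The ABI names go through the
// StringSwitch table above, while "{x10}"/"{x11}" are matched by the generic
// TableGen-record-name path in the base class:
//
//   %r = call i64 asm "addi $0, $1, 1", "={a0},{a1}"(i64 %v)
//   %s = call i64 asm "addi $0, $1, 1", "={x10},{x11}"(i64 %v)
//
// For vector constraints such as "{v8}", the loop above additionally walks
// VRM2/VRM4/VRM8 to find the register group legal for the requested VT.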
8764 | ||||
8765 | unsigned | |||
8766 | RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const { | |||
8767 | // Currently we only support single-character constraints. | |||
8768 | if (ConstraintCode.size() == 1) { | |||
8769 | switch (ConstraintCode[0]) { | |||
8770 | case 'A': | |||
8771 | return InlineAsm::Constraint_A; | |||
8772 | default: | |||
8773 | break; | |||
8774 | } | |||
8775 | } | |||
8776 | ||||
8777 | return TargetLowering::getInlineAsmMemConstraint(ConstraintCode); | |||
8778 | } | |||
8779 | ||||
8780 | void RISCVTargetLowering::LowerAsmOperandForConstraint( | |||
8781 | SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops, | |||
8782 | SelectionDAG &DAG) const { | |||
8783 | // Currently we only support single-character constraints. | |||
8784 | if (Constraint.length() == 1) { | |||
8785 | switch (Constraint[0]) { | |||
8786 | case 'I': | |||
8787 | // Validate & create a 12-bit signed immediate operand. | |||
8788 | if (auto *C = dyn_cast<ConstantSDNode>(Op)) { | |||
8789 | uint64_t CVal = C->getSExtValue(); | |||
8790 | if (isInt<12>(CVal)) | |||
8791 | Ops.push_back( | |||
8792 | DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT())); | |||
8793 | } | |||
8794 | return; | |||
8795 | case 'J': | |||
8796 | // Validate & create an integer zero operand. | |||
8797 | if (auto *C = dyn_cast<ConstantSDNode>(Op)) | |||
8798 | if (C->getZExtValue() == 0) | |||
8799 | Ops.push_back( | |||
8800 | DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT())); | |||
8801 | return; | |||
8802 | case 'K': | |||
8803 | // Validate & create a 5-bit unsigned immediate operand. | |||
8804 | if (auto *C = dyn_cast<ConstantSDNode>(Op)) { | |||
8805 | uint64_t CVal = C->getZExtValue(); | |||
8806 | if (isUInt<5>(CVal)) | |||
8807 | Ops.push_back( | |||
8808 | DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT())); | |||
8809 | } | |||
8810 | return; | |||
8811 | case 'S': | |||
8812 | if (const auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) { | |||
8813 | Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op), | |||
8814 | GA->getValueType(0))); | |||
8815 | } else if (const auto *BA = dyn_cast<BlockAddressSDNode>(Op)) { | |||
8816 | Ops.push_back(DAG.getTargetBlockAddress(BA->getBlockAddress(), | |||
8817 | BA->getValueType(0))); | |||
8818 | } | |||
8819 | return; | |||
8820 | default: | |||
8821 | break; | |||
8822 | } | |||
8823 | } | |||
8824 | TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); | |||
8825 | } | |||
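// Example (illustrative): the immediate constraints above as they appear in
// C-level inline asm. "I" accepts a signed 12-bit immediate, "J" only the
// constant zero, and "K" an unsigned 5-bit immediate:
//
//   asm volatile("addi %0, %1, %2" : "=r"(r) : "r"(x), "I"(-2048));
//   asm volatile("csrrwi zero, fcsr, %0" : : "K"(31));
//
// An out-of-range constant leaves Ops empty, and the caller then reports an
// invalid-operand diagnostic rather than silently truncating the value.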
8826 | ||||
8827 | Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilderBase &Builder, | |||
8828 | Instruction *Inst, | |||
8829 | AtomicOrdering Ord) const { | |||
8830 | if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent) | |||
8831 | return Builder.CreateFence(Ord); | |||
8832 | if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord)) | |||
8833 | return Builder.CreateFence(AtomicOrdering::Release); | |||
8834 | return nullptr; | |||
8835 | } | |||
8836 | ||||
8837 | Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilderBase &Builder, | |||
8838 | Instruction *Inst, | |||
8839 | AtomicOrdering Ord) const { | |||
8840 | if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord)) | |||
8841 | return Builder.CreateFence(AtomicOrdering::Acquire); | |||
8842 | return nullptr; | |||
8843 | } | |||
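// Taken together, the two fence hooks implement a leading-fence mapping for
// atomic loads and stores. A sketch of the sequences they produce (assembly
// shown for illustration only):
//
//   load seq_cst   ->  fence rw,rw ; l{b|h|w|d} ; fence r,rw
//   load acquire   ->                l{b|h|w|d} ; fence r,rw
//   store release  ->  fence rw,w  ; s{b|h|w|d}
//   store seq_cst  ->  fence rw,w  ; s{b|h|w|d}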
8844 | ||||
8845 | TargetLowering::AtomicExpansionKind | |||
8846 | RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const { | |||
8847 | // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating | |||
8848 | // point operations can't be used in an lr/sc sequence without breaking the | |||
8849 | // forward-progress guarantee. | |||
8850 | if (AI->isFloatingPointOperation()) | |||
8851 | return AtomicExpansionKind::CmpXChg; | |||
8852 | ||||
8853 | unsigned Size = AI->getType()->getPrimitiveSizeInBits(); | |||
8854 | if (Size == 8 || Size == 16) | |||
8855 | return AtomicExpansionKind::MaskedIntrinsic; | |||
8856 | return AtomicExpansionKind::None; | |||
8857 | } | |||
8858 | ||||
8859 | static Intrinsic::ID | |||
8860 | getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) { | |||
8861 | if (XLen == 32) { | |||
8862 | switch (BinOp) { | |||
8863 | default: | |||
8864 | llvm_unreachable("Unexpected AtomicRMW BinOp"); | |||
8865 | case AtomicRMWInst::Xchg: | |||
8866 | return Intrinsic::riscv_masked_atomicrmw_xchg_i32; | |||
8867 | case AtomicRMWInst::Add: | |||
8868 | return Intrinsic::riscv_masked_atomicrmw_add_i32; | |||
8869 | case AtomicRMWInst::Sub: | |||
8870 | return Intrinsic::riscv_masked_atomicrmw_sub_i32; | |||
8871 | case AtomicRMWInst::Nand: | |||
8872 | return Intrinsic::riscv_masked_atomicrmw_nand_i32; | |||
8873 | case AtomicRMWInst::Max: | |||
8874 | return Intrinsic::riscv_masked_atomicrmw_max_i32; | |||
8875 | case AtomicRMWInst::Min: | |||
8876 | return Intrinsic::riscv_masked_atomicrmw_min_i32; | |||
8877 | case AtomicRMWInst::UMax: | |||
8878 | return Intrinsic::riscv_masked_atomicrmw_umax_i32; | |||
8879 | case AtomicRMWInst::UMin: | |||
8880 | return Intrinsic::riscv_masked_atomicrmw_umin_i32; | |||
8881 | } | |||
8882 | } | |||
8883 | ||||
8884 | if (XLen == 64) { | |||
8885 | switch (BinOp) { | |||
8886 | default: | |||
8887 | llvm_unreachable("Unexpected AtomicRMW BinOp"); | |||
8888 | case AtomicRMWInst::Xchg: | |||
8889 | return Intrinsic::riscv_masked_atomicrmw_xchg_i64; | |||
8890 | case AtomicRMWInst::Add: | |||
8891 | return Intrinsic::riscv_masked_atomicrmw_add_i64; | |||
8892 | case AtomicRMWInst::Sub: | |||
8893 | return Intrinsic::riscv_masked_atomicrmw_sub_i64; | |||
8894 | case AtomicRMWInst::Nand: | |||
8895 | return Intrinsic::riscv_masked_atomicrmw_nand_i64; | |||
8896 | case AtomicRMWInst::Max: | |||
8897 | return Intrinsic::riscv_masked_atomicrmw_max_i64; | |||
8898 | case AtomicRMWInst::Min: | |||
8899 | return Intrinsic::riscv_masked_atomicrmw_min_i64; | |||
8900 | case AtomicRMWInst::UMax: | |||
8901 | return Intrinsic::riscv_masked_atomicrmw_umax_i64; | |||
8902 | case AtomicRMWInst::UMin: | |||
8903 | return Intrinsic::riscv_masked_atomicrmw_umin_i64; | |||
8904 | } | |||
8905 | } | |||
8906 | ||||
8907 | llvm_unreachable("Unexpected XLen"); | |||
8908 | } | |||
8909 | ||||
8910 | Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic( | |||
8911 | IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr, | |||
8912 | Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const { | |||
8913 | unsigned XLen = Subtarget.getXLen(); | |||
8914 | Value *Ordering = | |||
8915 | Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering())); | |||
8916 | Type *Tys[] = {AlignedAddr->getType()}; | |||
8917 | Function *LrwOpScwLoop = Intrinsic::getDeclaration( | |||
8918 | AI->getModule(), | |||
8919 | getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys); | |||
8920 | ||||
8921 | if (XLen == 64) { | |||
8922 | Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty()); | |||
8923 | Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty()); | |||
8924 | ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty()); | |||
8925 | } | |||
8926 | ||||
8927 | Value *Result; | |||
8928 | ||||
8929 | // Must pass the shift amount needed to sign extend the loaded value prior | |||
8930 | // to performing a signed comparison for min/max. ShiftAmt is the number of | |||
8931 | // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which | |||
8932 | // is the number of bits to left+right shift the value in order to | |||
8933 | // sign-extend. | |||
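  // Worked example (illustrative): on RV32, an i8 field at bits [23:16] has
  // ShiftAmt = 16 and ValWidth = 8, so SextShamt = 32 - 16 - 8 = 8. Shifting
  // the loaded word left by 8 moves the field's sign bit to bit 31, and an
  // arithmetic right shift by 8 then sign-extends the field in place.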
8934 | if (AI->getOperation() == AtomicRMWInst::Min || | |||
8935 | AI->getOperation() == AtomicRMWInst::Max) { | |||
8936 | const DataLayout &DL = AI->getModule()->getDataLayout(); | |||
8937 | unsigned ValWidth = | |||
8938 | DL.getTypeStoreSizeInBits(AI->getValOperand()->getType()); | |||
8939 | Value *SextShamt = | |||
8940 | Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt); | |||
8941 | Result = Builder.CreateCall(LrwOpScwLoop, | |||
8942 | {AlignedAddr, Incr, Mask, SextShamt, Ordering}); | |||
8943 | } else { | |||
8944 | Result = | |||
8945 | Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering}); | |||
8946 | } | |||
8947 | ||||
8948 | if (XLen == 64) | |||
8949 | Result = Builder.CreateTrunc(Result, Builder.getInt32Ty()); | |||
8950 | return Result; | |||
8951 | } | |||
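// The call built above looks roughly as follows for an i8 atomicrmw add on
// RV32 (IR sketch; value names are illustrative). The i64 variants differ
// only in the sign-extension and truncation around the call:
//
//   %res = call i32 @llvm.riscv.masked.atomicrmw.add.i32.p0i32(
//              i32* %alignedaddr, i32 %incr, i32 %mask, i32 %ordering)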
8952 | ||||
8953 | TargetLowering::AtomicExpansionKind | |||
8954 | RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR( | |||
8955 | AtomicCmpXchgInst *CI) const { | |||
8956 | unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits(); | |||
8957 | if (Size == 8 || Size == 16) | |||
8958 | return AtomicExpansionKind::MaskedIntrinsic; | |||
8959 | return AtomicExpansionKind::None; | |||
8960 | } | |||
8961 | ||||
8962 | Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic( | |||
8963 | IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr, | |||
8964 | Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const { | |||
8965 | unsigned XLen = Subtarget.getXLen(); | |||
8966 | Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord)); | |||
8967 | Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32; | |||
8968 | if (XLen == 64) { | |||
8969 | CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty()); | |||
8970 | NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty()); | |||
8971 | Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty()); | |||
8972 | CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64; | |||
8973 | } | |||
8974 | Type *Tys[] = {AlignedAddr->getType()}; | |||
8975 | Function *MaskedCmpXchg = | |||
8976 | Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys); | |||
8977 | Value *Result = Builder.CreateCall( | |||
8978 | MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering}); | |||
8979 | if (XLen == 64) | |||
8980 | Result = Builder.CreateTrunc(Result, Builder.getInt32Ty()); | |||
8981 | return Result; | |||
8982 | } | |||
8983 | ||||
8984 | bool RISCVTargetLowering::shouldRemoveExtendFromGSIndex(EVT VT) const { | |||
8985 | return false; | |||
8986 | } | |||
8987 | ||||
8988 | bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, | |||
8989 | EVT VT) const { | |||
8990 | VT = VT.getScalarType(); | |||
8991 | ||||
8992 | if (!VT.isSimple()) | |||
8993 | return false; | |||
8994 | ||||
8995 | switch (VT.getSimpleVT().SimpleTy) { | |||
8996 | case MVT::f16: | |||
8997 | return Subtarget.hasStdExtZfh(); | |||
8998 | case MVT::f32: | |||
8999 | return Subtarget.hasStdExtF(); | |||
9000 | case MVT::f64: | |||
9001 | return Subtarget.hasStdExtD(); | |||
9002 | default: | |||
9003 | break; | |||
9004 | } | |||
9005 | ||||
9006 | return false; | |||
9007 | } | |||
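// Example (illustrative): on rv64gc (F and D present, Zfh absent), the DAG
// combiner may contract (fadd (fmul a, b), c) into a single fmadd.s or
// fmadd.d for f32 and f64, while f16 operations are not contracted because
// the hook returns false without Zfh.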
9008 | ||||
9009 | Register RISCVTargetLowering::getExceptionPointerRegister( | |||
9010 | const Constant *PersonalityFn) const { | |||
9011 | return RISCV::X10; | |||
9012 | } | |||
9013 | ||||
9014 | Register RISCVTargetLowering::getExceptionSelectorRegister( | |||
9015 | const Constant *PersonalityFn) const { | |||
9016 | return RISCV::X11; | |||
9017 | } | |||
9018 | ||||
9019 | bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const { | |||
9020 | // Return false to suppress unnecessary extensions when a libcall argument | |||
9021 | // or return value has f32 type under the LP64 ABI. | |||
9022 | RISCVABI::ABI ABI = Subtarget.getTargetABI(); | |||
9023 | if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32)) | |||
9024 | return false; | |||
9025 | ||||
9026 | return true; | |||
9027 | } | |||
9028 | ||||
9029 | bool RISCVTargetLowering::shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const { | |||
9030 | if (Subtarget.is64Bit() && Type == MVT::i32) | |||
9031 | return true; | |||
9032 | ||||
9033 | return IsSigned; | |||
9034 | } | |||
9035 | ||||
9036 | bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT, | |||
9037 | SDValue C) const { | |||
9038 | // Check integral scalar types. | |||
9039 | if (VT.isScalarInteger()) { | |||
9040 | // Omit the optimization if the subtarget has the M extension and the data | |||
9041 | // size exceeds XLen. | |||
9042 | if (Subtarget.hasStdExtM() && VT.getSizeInBits() > Subtarget.getXLen()) | |||
9043 | return false; | |||
9044 | if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) { | |||
9045 | // Break the MUL to a SLLI and an ADD/SUB. | |||
9046 | const APInt &Imm = ConstNode->getAPIntValue(); | |||
9047 | if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() || | |||
9048 | (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2()) | |||
9049 | return true; | |||
9050 | // Optimize the MUL to (SH*ADD x, (SLLI x, bits)) if Imm is not simm12. | |||
9051 | if (Subtarget.hasStdExtZba() && !Imm.isSignedIntN(12) && | |||
9052 | ((Imm - 2).isPowerOf2() || (Imm - 4).isPowerOf2() || | |||
9053 | (Imm - 8).isPowerOf2())) | |||
9054 | return true; | |||
9055 | // Omit the following optimization if the subtarget has the M extension | |||
9056 | // and the data size >= XLen. | |||
9057 | if (Subtarget.hasStdExtM() && VT.getSizeInBits() >= Subtarget.getXLen()) | |||
9058 | return false; | |||
9059 | // Break the MUL to two SLLI instructions and an ADD/SUB, if Imm needs | |||
9060 | // a pair of LUI/ADDI. | |||
9061 | if (!Imm.isSignedIntN(12) && Imm.countTrailingZeros() < 12) { | |||
9062 | APInt ImmS = Imm.ashr(Imm.countTrailingZeros()); | |||
9063 | if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() || | |||
9064 | (1 - ImmS).isPowerOf2()) | |||
9065 | return true; | |||
9066 | } | |||
9067 | } | |||
9068 | } | |||
9069 | ||||
9070 | return false; | |||
9071 | } | |||
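// Worked examples (illustrative, subject to the M-extension checks above):
//   x * 17   : (17 - 1) is a power of 2, so
//              slli t, x, 4 ; add d, t, x
//   x * 4100 : not simm12 and (4100 - 4) == 1 << 12, so with Zba
//              slli t, x, 12 ; sh2add d, x, t    ; (x << 2) + (x << 12)
//   x * 4352 : ImmS == 17 after stripping 8 trailing zeros, so
//              slli t, x, 4 ; add t, t, x ; slli d, t, 8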
9072 | ||||
9073 | bool RISCVTargetLowering::isMulAddWithConstProfitable( | |||
9074 | const SDValue &AddNode, const SDValue &ConstNode) const { | |||
9075 | // Let the DAGCombiner decide for vectors. | |||
9076 | EVT VT = AddNode.getValueType(); | |||
9077 | if (VT.isVector()) | |||
9078 | return true; | |||
9079 | ||||
9080 | // Let the DAGCombiner decide for larger types. | |||
9081 | if (VT.getScalarSizeInBits() > Subtarget.getXLen()) | |||
9082 | return true; | |||
9083 | ||||
9084 | // The transform is unprofitable if c1 fits in simm12 while c1*c2 does not. | |||
9085 | ConstantSDNode *C1Node = cast<ConstantSDNode>(AddNode.getOperand(1)); | |||
9086 | ConstantSDNode *C2Node = cast<ConstantSDNode>(ConstNode); | |||
9087 | const APInt &C1 = C1Node->getAPIntValue(); | |||
9088 | const APInt &C2 = C2Node->getAPIntValue(); | |||
9089 | if (C1.isSignedIntN(12) && !(C1 * C2).isSignedIntN(12)) | |||
9090 | return false; | |||
9091 | ||||
9092 | // Default to true and let the DAGCombiner decide. | |||
9093 | return true; | |||
9094 | } | |||
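// Example (illustrative): for (mul (add x, 3), 100), distributing gives
// (add (mul x, 100), 300) and 300 still fits in simm12, so the hook returns
// true. For (mul (add x, 1), 3000), c1 = 1 is simm12 but c1 * c2 = 3000 is
// not, so returning false keeps the cheaper addi + mul sequence.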
9095 | ||||
9096 | bool RISCVTargetLowering::allowsMisalignedMemoryAccesses( | |||
9097 | EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags, | |||
9098 | bool *Fast) const { | |||
9099 | if (!VT.isVector()) | |||
9100 | return false; | |||
9101 | ||||
9102 | EVT ElemVT = VT.getVectorElementType(); | |||
9103 | if (Alignment >= ElemVT.getStoreSize()) { | |||
9104 | if (Fast) | |||
9105 | *Fast = true; | |||
9106 | return true; | |||
9107 | } | |||
9108 | ||||
9109 | return false; | |||
9110 | } | |||
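// Example (illustrative): a v4i32 access with Align(4) is element-aligned
// and reported fast, while the same access with Align(2) is rejected and
// must be legalized another way; scalar misaligned accesses always return
// false here.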
9111 | ||||
9112 | bool RISCVTargetLowering::splitValueIntoRegisterParts( | |||
9113 | SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, | |||
9114 | unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const { | |||
9115 | bool IsABIRegCopy = CC.hasValue(); | |||
9116 | EVT ValueVT = Val.getValueType(); | |||
9117 | if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) { | |||
9118 | // Cast the f16 to i16, extend to i32, pad with ones to make a float NaN, | |||
9119 | // and cast to f32. | |||
9120 | Val = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Val); | |||
9121 | Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Val); | |||
9122 | Val = DAG.getNode(ISD::OR, DL, MVT::i32, Val, | |||
9123 | DAG.getConstant(0xFFFF0000, DL, MVT::i32)); | |||
9124 | Val = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val); | |||
9125 | Parts[0] = Val; | |||
9126 | return true; | |||
9127 | } | |||
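  // Worked example (illustrative): half 1.0 has bits 0x3C00; the block above
  // produces the i32 0xFFFF3C00, an f32 NaN whose low 16 bits carry the f16
  // payload, i.e. the NaN-boxed form the hard-float ABI expects in an FPR.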
9128 | ||||
9129 | if (ValueVT.isScalableVector() && PartVT.isScalableVector()) { | |||
9130 | LLVMContext &Context = *DAG.getContext(); | |||
9131 | EVT ValueEltVT = ValueVT.getVectorElementType(); | |||
9132 | EVT PartEltVT = PartVT.getVectorElementType(); | |||
9133 | unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize(); | |||
9134 | unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize(); | |||
9135 | if (PartVTBitSize % ValueVTBitSize == 0) { | |||
9136 | // If the element types are different, bitcast to a vector with the | |||
9137 | // element type of PartVT first. | |||
9138 | if (ValueEltVT != PartEltVT) { | |||
9139 | unsigned Count = ValueVTBitSize / PartEltVT.getSizeInBits(); | |||
9140 | assert(Count != 0 && "The number of elements should not be zero."); | |||
9141 | EVT SameEltTypeVT = | |||
9142 | EVT::getVectorVT(Context, PartEltVT, Count, /*IsScalable=*/true); | |||
9143 | Val = DAG.getNode(ISD::BITCAST, DL, SameEltTypeVT, Val); | |||
9144 | } | |||
9145 | Val = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT), | |||
9146 | Val, DAG.getConstant(0, DL, Subtarget.getXLenVT())); | |||
9147 | Parts[0] = Val; | |||
9148 | return true; | |||
9149 | } | |||
9150 | } | |||
9151 | return false; | |||
9152 | } | |||
9153 | ||||
9154 | SDValue RISCVTargetLowering::joinRegisterPartsIntoValue( | |||
9155 | SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, | |||
9156 | MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const { | |||
9157 | bool IsABIRegCopy = CC.hasValue(); | |||
9158 | if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) { | |||
9159 | SDValue Val = Parts[0]; | |||
9160 | ||||
9161 | // Cast the f32 to i32, truncate to i16, and cast back to f16. | |||
9162 | Val = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Val); | |||
9163 | Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Val); | |||
9164 | Val = DAG.getNode(ISD::BITCAST, DL, MVT::f16, Val); | |||
9165 | return Val; | |||
9166 | } | |||
9167 | ||||
9168 | if (ValueVT.isScalableVector() && PartVT.isScalableVector()) { | |||
9169 | LLVMContext &Context = *DAG.getContext(); | |||
9170 | SDValue Val = Parts[0]; | |||
9171 | EVT ValueEltVT = ValueVT.getVectorElementType(); | |||
9172 | EVT PartEltVT = PartVT.getVectorElementType(); | |||
9173 | unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize(); | |||
9174 | unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize(); | |||
9175 | if (PartVTBitSize % ValueVTBitSize == 0) { | |||
9176 | EVT SameEltTypeVT = ValueVT; | |||
9177 | // If the element types are different, convert to a vector with the | |||
9178 | // element type of PartVT. | |||
9179 | if (ValueEltVT != PartEltVT) { | |||
9180 | unsigned Count = ValueVTBitSize / PartEltVT.getSizeInBits(); | |||
9181 | assert(Count != 0 && "The number of elements should not be zero."); | |||
9182 | SameEltTypeVT = | |||
9183 | EVT::getVectorVT(Context, PartEltVT, Count, /*IsScalable=*/true); | |||
9184 | } | |||
9185 | Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SameEltTypeVT, Val, | |||
9186 | DAG.getConstant(0, DL, Subtarget.getXLenVT())); | |||
9187 | if (ValueEltVT != PartEltVT) | |||
9188 | Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val); | |||
9189 | return Val; | |||
9190 | } | |||
9191 | } | |||
9192 | return SDValue(); | |||
9193 | } | |||
9194 | ||||
9195 | #define GET_REGISTER_MATCHER | |||
9196 | #include "RISCVGenAsmMatcher.inc" | |||
9197 | ||||
9198 | Register | |||
9199 | RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT, | |||
9200 | const MachineFunction &MF) const { | |||
9201 | Register Reg = MatchRegisterAltName(RegName); | |||
9202 | if (Reg == RISCV::NoRegister) | |||
9203 | Reg = MatchRegisterName(RegName); | |||
9204 | if (Reg == RISCV::NoRegister) | |||
9205 | report_fatal_error( | |||
9206 | Twine("Invalid register name \"" + StringRef(RegName) + "\".")); | |||
9207 | BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF); | |||
9208 | if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg)) | |||
9209 | report_fatal_error(Twine("Trying to obtain non-reserved register \"" + | |||
9210 | StringRef(RegName) + "\".")); | |||
9211 | return Reg; | |||
9212 | } | |||
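// Example (illustrative): reading a named register from IR only succeeds for
// registers that are reserved, either always (e.g. gp, tp) or via the
// -ffixed-<reg> mechanism; any other name trips the fatal errors above:
//
//   %gp = call i64 @llvm.read_register.i64(metadata !0)
//   !0 = !{!"gp"}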
9213 | ||||
9214 | namespace llvm { | |||
9215 | namespace RISCVVIntrinsicsTable { | |||
9216 | ||||
9217 | #define GET_RISCVVIntrinsicsTable_IMPL | |||
9218 | #include "RISCVGenSearchableTables.inc" | |||
9219 | ||||
9220 | } // namespace RISCVVIntrinsicsTable | |||
9221 | ||||
9222 | } // namespace llvm |
1 | //===- llvm/CodeGen/TargetLowering.h - Target Lowering Info -----*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | /// |
9 | /// \file |
10 | /// This file describes how to lower LLVM code to machine code. This has three
11 | /// main components: |
12 | /// |
13 | /// 1. Which ValueTypes are natively supported by the target. |
14 | /// 2. Which operations are supported for supported ValueTypes. |
15 | /// 3. Cost thresholds for alternative implementations of certain operations. |
16 | /// |
17 | /// In addition it has a few other components, like information about FP |
18 | /// immediates. |
19 | /// |
20 | //===----------------------------------------------------------------------===// |
21 | |
22 | #ifndef LLVM_CODEGEN_TARGETLOWERING_H |
23 | #define LLVM_CODEGEN_TARGETLOWERING_H |
24 | |
25 | #include "llvm/ADT/APInt.h" |
26 | #include "llvm/ADT/ArrayRef.h" |
27 | #include "llvm/ADT/DenseMap.h" |
28 | #include "llvm/ADT/STLExtras.h" |
29 | #include "llvm/ADT/SmallVector.h" |
30 | #include "llvm/ADT/StringRef.h" |
31 | #include "llvm/CodeGen/DAGCombine.h" |
32 | #include "llvm/CodeGen/ISDOpcodes.h" |
33 | #include "llvm/CodeGen/LowLevelType.h" |
34 | #include "llvm/CodeGen/RuntimeLibcalls.h" |
35 | #include "llvm/CodeGen/SelectionDAG.h" |
36 | #include "llvm/CodeGen/SelectionDAGNodes.h" |
37 | #include "llvm/CodeGen/TargetCallingConv.h" |
38 | #include "llvm/CodeGen/ValueTypes.h" |
39 | #include "llvm/IR/Attributes.h" |
40 | #include "llvm/IR/CallingConv.h" |
41 | #include "llvm/IR/DataLayout.h" |
42 | #include "llvm/IR/DerivedTypes.h" |
43 | #include "llvm/IR/Function.h" |
44 | #include "llvm/IR/InlineAsm.h" |
45 | #include "llvm/IR/Instruction.h" |
46 | #include "llvm/IR/Instructions.h" |
47 | #include "llvm/IR/Type.h" |
48 | #include "llvm/Support/Alignment.h" |
49 | #include "llvm/Support/AtomicOrdering.h" |
50 | #include "llvm/Support/Casting.h" |
51 | #include "llvm/Support/ErrorHandling.h" |
52 | #include "llvm/Support/InstructionCost.h" |
53 | #include "llvm/Support/MachineValueType.h" |
54 | #include <algorithm> |
55 | #include <cassert> |
56 | #include <climits> |
57 | #include <cstdint> |
58 | #include <iterator> |
59 | #include <map> |
60 | #include <string> |
61 | #include <utility> |
62 | #include <vector> |
63 | |
64 | namespace llvm { |
65 | |
66 | class BranchProbability; |
67 | class CCState; |
68 | class CCValAssign; |
69 | class Constant; |
70 | class FastISel; |
71 | class FunctionLoweringInfo; |
72 | class GlobalValue; |
73 | class GISelKnownBits; |
74 | class IntrinsicInst; |
75 | class IRBuilderBase; |
76 | struct KnownBits; |
77 | class LegacyDivergenceAnalysis; |
78 | class LLVMContext; |
79 | class MachineBasicBlock; |
80 | class MachineFunction; |
81 | class MachineInstr; |
82 | class MachineJumpTableInfo; |
83 | class MachineLoop; |
84 | class MachineRegisterInfo; |
85 | class MCContext; |
86 | class MCExpr; |
87 | class Module; |
88 | class ProfileSummaryInfo; |
89 | class TargetLibraryInfo; |
90 | class TargetMachine; |
91 | class TargetRegisterClass; |
92 | class TargetRegisterInfo; |
93 | class TargetTransformInfo; |
94 | class Value; |
95 | |
96 | namespace Sched { |
97 | |
98 | enum Preference { |
99 | None, // No preference |
100 | Source, // Follow source order. |
101 | RegPressure, // Scheduling for lowest register pressure. |
102 | Hybrid, // Scheduling for both latency and register pressure. |
103 | ILP, // Scheduling for ILP in low register pressure mode. |
104 | VLIW, // Scheduling for VLIW targets. |
105 | Fast, // Fast suboptimal list scheduling |
106 | Linearize // Linearize DAG, no scheduling |
107 | }; |
108 | |
109 | } // end namespace Sched |
110 | |
111 | // MemOp models a memory operation, either memset or memcpy/memmove. |
112 | struct MemOp { |
113 | private: |
114 | // Shared |
115 | uint64_t Size; |
116 | bool DstAlignCanChange; // true if destination alignment can satisfy any |
117 | // constraint. |
118 | Align DstAlign; // Specified alignment of the memory operation. |
119 | |
120 | bool AllowOverlap; |
121 | // memset only |
122 | bool IsMemset; // If set, this memory operation is a memset.
123 | bool ZeroMemset; // If set, clears out memory with zeros.
124 | // memcpy only |
125 | bool MemcpyStrSrc; // Indicates whether the memcpy source is an in-register |
126 | // constant so it does not need to be loaded. |
127 | Align SrcAlign; // Inferred alignment of the source or default value if the |
128 | // memory operation does not need to load the value. |
129 | public: |
130 | static MemOp Copy(uint64_t Size, bool DstAlignCanChange, Align DstAlign, |
131 | Align SrcAlign, bool IsVolatile, |
132 | bool MemcpyStrSrc = false) { |
133 | MemOp Op; |
134 | Op.Size = Size; |
135 | Op.DstAlignCanChange = DstAlignCanChange; |
136 | Op.DstAlign = DstAlign; |
137 | Op.AllowOverlap = !IsVolatile; |
138 | Op.IsMemset = false; |
139 | Op.ZeroMemset = false; |
140 | Op.MemcpyStrSrc = MemcpyStrSrc; |
141 | Op.SrcAlign = SrcAlign; |
142 | return Op; |
143 | } |
144 | |
145 | static MemOp Set(uint64_t Size, bool DstAlignCanChange, Align DstAlign, |
146 | bool IsZeroMemset, bool IsVolatile) { |
147 | MemOp Op; |
148 | Op.Size = Size; |
149 | Op.DstAlignCanChange = DstAlignCanChange; |
150 | Op.DstAlign = DstAlign; |
151 | Op.AllowOverlap = !IsVolatile; |
152 | Op.IsMemset = true; |
153 | Op.ZeroMemset = IsZeroMemset; |
154 | Op.MemcpyStrSrc = false; |
155 | return Op; |
156 | } |
157 | |
158 | uint64_t size() const { return Size; } |
159 | Align getDstAlign() const { |
160 | assert(!DstAlignCanChange);
161 | return DstAlign; |
162 | } |
163 | bool isFixedDstAlign() const { return !DstAlignCanChange; } |
164 | bool allowOverlap() const { return AllowOverlap; } |
165 | bool isMemset() const { return IsMemset; } |
166 | bool isMemcpy() const { return !IsMemset; } |
167 | bool isMemcpyWithFixedDstAlign() const { |
168 | return isMemcpy() && !DstAlignCanChange; |
169 | } |
170 | bool isZeroMemset() const { return isMemset() && ZeroMemset; } |
171 | bool isMemcpyStrSrc() const { |
172 | assert(isMemcpy() && "Must be a memcpy");
173 | return MemcpyStrSrc; |
174 | } |
175 | Align getSrcAlign() const { |
176 | assert(isMemcpy() && "Must be a memcpy");
177 | return SrcAlign; |
178 | } |
179 | bool isSrcAligned(Align AlignCheck) const { |
180 | return isMemset() || llvm::isAligned(AlignCheck, SrcAlign.value()); |
181 | } |
182 | bool isDstAligned(Align AlignCheck) const { |
183 | return DstAlignCanChange || llvm::isAligned(AlignCheck, DstAlign.value()); |
184 | } |
185 | bool isAligned(Align AlignCheck) const { |
186 | return isSrcAligned(AlignCheck) && isDstAligned(AlignCheck); |
187 | } |
188 | }; |
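// A minimal usage sketch (illustrative): describing a 32-byte memcpy with a
// fixed 8-aligned destination and a 4-aligned source.
//
//   MemOp Op = MemOp::Copy(/*Size=*/32, /*DstAlignCanChange=*/false,
//                          /*DstAlign=*/Align(8), /*SrcAlign=*/Align(4),
//                          /*IsVolatile=*/false);
//   assert(Op.isMemcpyWithFixedDstAlign() && Op.isDstAligned(Align(8)));
//   assert(Op.isSrcAligned(Align(4)) && !Op.isAligned(Align(8)));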
189 | |
190 | /// This base class for TargetLowering contains the SelectionDAG-independent |
191 | /// parts that can be used from the rest of CodeGen. |
192 | class TargetLoweringBase { |
193 | public: |
194 | /// This enum indicates whether operations are valid for a target, and if not, |
195 | /// what action should be used to make them valid. |
196 | enum LegalizeAction : uint8_t { |
197 | Legal, // The target natively supports this operation. |
198 | Promote, // This operation should be executed in a larger type. |
199 | Expand, // Try to expand this to other ops, otherwise use a libcall. |
200 | LibCall, // Don't try to expand this to other ops, always use a libcall. |
201 | Custom // Use the LowerOperation hook to implement custom lowering. |
202 | }; |
203 | |
204 | /// This enum indicates whether types are legal for a target, and if not,
205 | /// what action should be used to make them valid. |
206 | enum LegalizeTypeAction : uint8_t { |
207 | TypeLegal, // The target natively supports this type. |
208 | TypePromoteInteger, // Replace this integer with a larger one. |
209 | TypeExpandInteger, // Split this integer into two of half the size. |
210 | TypeSoftenFloat, // Convert this float to a same size integer type. |
211 | TypeExpandFloat, // Split this float into two of half the size. |
212 | TypeScalarizeVector, // Replace this one-element vector with its element. |
213 | TypeSplitVector, // Split this vector into two of half the size. |
214 | TypeWidenVector, // This vector should be widened into a larger vector. |
215 | TypePromoteFloat, // Replace this float with a larger one. |
216 | TypeSoftPromoteHalf, // Soften half to i16 and use float to do arithmetic. |
217 | TypeScalarizeScalableVector, // This action is explicitly left unimplemented. |
218 | // While it is theoretically possible to |
219 | // legalize operations on scalable types with a |
220 | // loop that handles the vscale * #lanes of the |
221 | // vector, this is non-trivial at SelectionDAG |
222 | // level and these types are better to be |
223 | // widened or promoted. |
224 | }; |
225 | |
226 | /// LegalizeKind holds the legalization kind that needs to happen to an EVT
227 | /// in order to type-legalize it. |
228 | using LegalizeKind = std::pair<LegalizeTypeAction, EVT>; |
229 | |
230 | /// Enum that describes how the target represents true/false values. |
231 | enum BooleanContent { |
232 | UndefinedBooleanContent, // Only bit 0 counts, the rest can hold garbage. |
233 | ZeroOrOneBooleanContent, // All bits zero except for bit 0. |
234 | ZeroOrNegativeOneBooleanContent // All bits equal to bit 0. |
235 | }; |
236 | |
237 | /// Enum that describes what type of support for selects the target has. |
238 | enum SelectSupportKind { |
239 | ScalarValSelect, // The target supports scalar selects (ex: cmov). |
240 | ScalarCondVectorVal, // The target supports selects with a scalar condition |
241 | // and vector values (ex: cmov). |
242 | VectorMaskSelect // The target supports vector selects with a vector |
243 | // mask (ex: x86 blends). |
244 | }; |
245 | |
246 | /// Enum that specifies what an atomic load/AtomicRMWInst is expanded |
247 | /// to, if at all. Exists because different targets have different levels of |
248 | /// support for these atomic instructions, and also have different options |
249 | /// w.r.t. what they should expand to. |
250 | enum class AtomicExpansionKind { |
251 | None, // Don't expand the instruction. |
252 | LLSC, // Expand the instruction into load-linked/store-conditional; used
253 | // by ARM/AArch64. |
254 | LLOnly, // Expand the (load) instruction into just a load-linked, which has |
255 | // greater atomic guarantees than a normal load. |
256 | CmpXChg, // Expand the instruction into cmpxchg; used by at least X86. |
257 | MaskedIntrinsic, // Use a target-specific intrinsic for the LL/SC loop. |
258 | }; |
259 | |
260 | /// Enum that specifies when a multiplication should be expanded. |
261 | enum class MulExpansionKind { |
262 | Always, // Always expand the instruction. |
263 | OnlyLegalOrCustom, // Only expand when the resulting instructions are legal |
264 | // or custom. |
265 | }; |
266 | |
267 | /// Enum that specifies when a float negation is beneficial. |
268 | enum class NegatibleCost { |
269 | Cheaper = 0, // Negated expression is cheaper. |
270 | Neutral = 1, // Negated expression has the same cost. |
271 | Expensive = 2 // Negated expression is more expensive. |
272 | }; |
273 | |
274 | class ArgListEntry { |
275 | public: |
276 | Value *Val = nullptr; |
277 | SDValue Node = SDValue(); |
278 | Type *Ty = nullptr; |
279 | bool IsSExt : 1; |
280 | bool IsZExt : 1; |
281 | bool IsInReg : 1; |
282 | bool IsSRet : 1; |
283 | bool IsNest : 1; |
284 | bool IsByVal : 1; |
285 | bool IsByRef : 1; |
286 | bool IsInAlloca : 1; |
287 | bool IsPreallocated : 1; |
288 | bool IsReturned : 1; |
289 | bool IsSwiftSelf : 1; |
290 | bool IsSwiftAsync : 1; |
291 | bool IsSwiftError : 1; |
292 | bool IsCFGuardTarget : 1; |
293 | MaybeAlign Alignment = None; |
294 | Type *IndirectType = nullptr; |
295 | |
296 | ArgListEntry() |
297 | : IsSExt(false), IsZExt(false), IsInReg(false), IsSRet(false), |
298 | IsNest(false), IsByVal(false), IsByRef(false), IsInAlloca(false), |
299 | IsPreallocated(false), IsReturned(false), IsSwiftSelf(false), |
300 | IsSwiftAsync(false), IsSwiftError(false), IsCFGuardTarget(false) {} |
301 | |
302 | void setAttributes(const CallBase *Call, unsigned ArgIdx); |
303 | }; |
304 | using ArgListTy = std::vector<ArgListEntry>; |
305 | |
306 | virtual void markLibCallAttributes(MachineFunction *MF, unsigned CC, |
307 | ArgListTy &Args) const {}; |
308 | |
309 | static ISD::NodeType getExtendForContent(BooleanContent Content) { |
310 | switch (Content) { |
311 | case UndefinedBooleanContent: |
312 | // Extend by adding rubbish bits. |
313 | return ISD::ANY_EXTEND; |
314 | case ZeroOrOneBooleanContent: |
315 | // Extend by adding zero bits. |
316 | return ISD::ZERO_EXTEND; |
317 | case ZeroOrNegativeOneBooleanContent: |
318 | // Extend by copying the sign bit. |
319 | return ISD::SIGN_EXTEND; |
320 | } |
321 | llvm_unreachable("Invalid content kind");
322 | } |
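// Example (illustrative): a target reporting ZeroOrNegativeOneBooleanContent
// receives ISD::SIGN_EXTEND here, so an i1 'true' widens to all ones (-1),
// the form vector compares usually produce, whereas ZeroOrOneBooleanContent
// targets widen 'true' to 1 via ISD::ZERO_EXTEND.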
323 | |
324 | explicit TargetLoweringBase(const TargetMachine &TM); |
325 | TargetLoweringBase(const TargetLoweringBase &) = delete; |
326 | TargetLoweringBase &operator=(const TargetLoweringBase &) = delete; |
327 | virtual ~TargetLoweringBase() = default; |
328 | |
329 | /// Return true if the target support strict float operation |
330 | bool isStrictFPEnabled() const { |
331 | return IsStrictFPEnabled; |
332 | } |
333 | |
334 | protected: |
335 | /// Initialize all of the actions to default values. |
336 | void initActions(); |
337 | |
338 | public: |
339 | const TargetMachine &getTargetMachine() const { return TM; } |
340 | |
341 | virtual bool useSoftFloat() const { return false; } |
342 | |
343 | /// Return the pointer type for the given address space, defaults to |
344 | /// the pointer type from the data layout. |
345 | /// FIXME: The default needs to be removed once all the code is updated. |
346 | virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const { |
347 | return MVT::getIntegerVT(DL.getPointerSizeInBits(AS)); |
348 | } |
349 | |
350 | /// Return the in-memory pointer type for the given address space, defaults to |
351 | /// the pointer type from the data layout. FIXME: The default needs to be |
352 | /// removed once all the code is updated. |
353 | virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS = 0) const { |
354 | return MVT::getIntegerVT(DL.getPointerSizeInBits(AS)); |
355 | } |
356 | |
357 | /// Return the type for frame index, which is determined by |
358 | /// the alloca address space specified through the data layout. |
359 | MVT getFrameIndexTy(const DataLayout &DL) const { |
360 | return getPointerTy(DL, DL.getAllocaAddrSpace()); |
361 | } |
362 | |
363 | /// Return the type for code pointers, which is determined by the program |
364 | /// address space specified through the data layout. |
365 | MVT getProgramPointerTy(const DataLayout &DL) const { |
366 | return getPointerTy(DL, DL.getProgramAddressSpace()); |
367 | } |
368 | |
369 | /// Return the type for operands of fence. |
370 | /// TODO: Let fence operands be of i32 type and remove this. |
371 | virtual MVT getFenceOperandTy(const DataLayout &DL) const { |
372 | return getPointerTy(DL); |
373 | } |
374 | |
375 | /// EVT is not used in-tree, but is used by out-of-tree targets.
376 | /// Documentation for this function would be nice...
377 | virtual MVT getScalarShiftAmountTy(const DataLayout &, EVT) const; |
378 | |
379 | EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL, |
380 | bool LegalTypes = true) const; |
381 | |
382 | /// Return the preferred type to use for a shift opcode, given the shifted |
383 | /// amount type is \p ShiftValueTy. |
384 | LLVM_READONLY
385 | virtual LLT getPreferredShiftAmountTy(LLT ShiftValueTy) const { |
386 | return ShiftValueTy; |
387 | } |
388 | |
389 | /// Returns the type to be used for the index operand of: |
390 | /// ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT, |
391 | /// ISD::INSERT_SUBVECTOR, and ISD::EXTRACT_SUBVECTOR |
392 | virtual MVT getVectorIdxTy(const DataLayout &DL) const { |
393 | return getPointerTy(DL); |
394 | } |
395 | |
396 | /// Returns the type to be used for the EVL/AVL operand of VP nodes: |
397 | /// ISD::VP_ADD, ISD::VP_SUB, etc. It must be a legal scalar integer type, |
398 | /// and must be at least as large as i32. The EVL is implicitly zero-extended |
399 | /// to any larger type. |
400 | virtual MVT getVPExplicitVectorLengthTy() const { return MVT::i32; } |
401 | |
402 | /// This callback is used to inspect load/store instructions and add |
403 | /// target-specific MachineMemOperand flags to them. The default |
404 | /// implementation does nothing. |
405 | virtual MachineMemOperand::Flags getTargetMMOFlags(const Instruction &I) const { |
406 | return MachineMemOperand::MONone; |
407 | } |
408 | |
409 | MachineMemOperand::Flags getLoadMemOperandFlags(const LoadInst &LI, |
410 | const DataLayout &DL) const; |
411 | MachineMemOperand::Flags getStoreMemOperandFlags(const StoreInst &SI, |
412 | const DataLayout &DL) const; |
413 | MachineMemOperand::Flags getAtomicMemOperandFlags(const Instruction &AI, |
414 | const DataLayout &DL) const; |
415 | |
416 | virtual bool isSelectSupported(SelectSupportKind /*kind*/) const { |
417 | return true; |
418 | } |
419 | |
420 | /// Return true if it is profitable to convert a select of FP constants into |
421 | /// a constant pool load whose address depends on the select condition. The |
422 | /// parameter may be used to differentiate a select with FP compare from |
423 | /// integer compare. |
424 | virtual bool reduceSelectOfFPConstantLoads(EVT CmpOpVT) const { |
425 | return true; |
426 | } |
427 | |
428 | /// Return true if multiple condition registers are available. |
429 | bool hasMultipleConditionRegisters() const { |
430 | return HasMultipleConditionRegisters; |
431 | } |
432 | |
433 | /// Return true if the target has BitExtract instructions. |
434 | bool hasExtractBitsInsn() const { return HasExtractBitsInsn; } |
435 | |
436 | /// Return the preferred vector type legalization action. |
437 | virtual TargetLoweringBase::LegalizeTypeAction |
438 | getPreferredVectorAction(MVT VT) const { |
439 | // The default action for one element vectors is to scalarize |
440 | if (VT.getVectorElementCount().isScalar()) |
441 | return TypeScalarizeVector; |
442 | // The default action for an odd-width vector is to widen. |
443 | if (!VT.isPow2VectorType()) |
444 | return TypeWidenVector; |
445 | // The default action for other vectors is to promote |
446 | return TypePromoteInteger; |
447 | } |
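// Example (illustrative): under these defaults, v1f32 is scalarized to f32,
// v3i32 is widened to v4i32, and an unsupported v4i16 is promoted to a
// vector with wider integer elements such as v4i32.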
448 | |
449 | // Return true if the half type should be passed around as i16, but promoted |
450 | // to float around arithmetic. The default behavior is to pass around as |
451 | // float and convert around loads/stores/bitcasts and other places where |
452 | // the size matters. |
453 | virtual bool softPromoteHalfType() const { return false; } |
454 | |
455 | // There are two general methods for expanding a BUILD_VECTOR node: |
456 | // 1. Use SCALAR_TO_VECTOR on the defined scalar values and then shuffle |
457 | // them together. |
458 | // 2. Build the vector on the stack and then load it. |
459 | // If this function returns true, then method (1) will be used, subject to |
460 | // the constraint that all of the necessary shuffles are legal (as determined |
461 | // by isShuffleMaskLegal). If this function returns false, then method (2) is |
462 | // always used. The vector type, and the number of defined values, are |
463 | // provided. |
464 | virtual bool |
465 | shouldExpandBuildVectorWithShuffles(EVT /* VT */, |
466 | unsigned DefinedValues) const { |
467 | return DefinedValues < 3; |
468 | } |
469 | |
470 | /// Return true if integer divide is usually cheaper than a sequence of |
471 | /// several shifts, adds, and multiplies for this target. |
472 | /// The definition of "cheaper" may depend on whether we're optimizing |
473 | /// for speed or for size. |
474 | virtual bool isIntDivCheap(EVT VT, AttributeList Attr) const { return false; } |
475 | |
476 | /// Return true if the target can handle a standalone remainder operation. |
477 | virtual bool hasStandaloneRem(EVT VT) const { |
478 | return true; |
479 | } |
480 | |
481 | /// Return true if SQRT(X) shouldn't be replaced with X*RSQRT(X). |
482 | virtual bool isFsqrtCheap(SDValue X, SelectionDAG &DAG) const { |
483 | // Default behavior is to replace SQRT(X) with X*RSQRT(X). |
484 | return false; |
485 | } |
486 | |
487 | /// Reciprocal estimate status values used by the functions below. |
488 | enum ReciprocalEstimate : int { |
489 | Unspecified = -1, |
490 | Disabled = 0, |
491 | Enabled = 1 |
492 | }; |
493 | |
494 | /// Return a ReciprocalEstimate enum value for a square root of the given type |
495 | /// based on the function's attributes. If the operation is not overridden by |
496 | /// the function's attributes, "Unspecified" is returned and target defaults |
497 | /// are expected to be used for instruction selection. |
498 | int getRecipEstimateSqrtEnabled(EVT VT, MachineFunction &MF) const; |
499 | |
500 | /// Return a ReciprocalEstimate enum value for a division of the given type |
501 | /// based on the function's attributes. If the operation is not overridden by |
502 | /// the function's attributes, "Unspecified" is returned and target defaults |
503 | /// are expected to be used for instruction selection. |
504 | int getRecipEstimateDivEnabled(EVT VT, MachineFunction &MF) const; |
505 | |
506 | /// Return the refinement step count for a square root of the given type based |
507 | /// on the function's attributes. If the operation is not overridden by |
508 | /// the function's attributes, "Unspecified" is returned and target defaults |
509 | /// are expected to be used for instruction selection. |
510 | int getSqrtRefinementSteps(EVT VT, MachineFunction &MF) const; |
511 | |
512 | /// Return the refinement step count for a division of the given type based |
513 | /// on the function's attributes. If the operation is not overridden by |
514 | /// the function's attributes, "Unspecified" is returned and target defaults |
515 | /// are expected to be used for instruction selection. |
516 | int getDivRefinementSteps(EVT VT, MachineFunction &MF) const; |
517 | |
518 | /// Returns true if target has indicated at least one type should be bypassed. |
519 | bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); } |
520 | |
521 | /// Returns map of slow types for division or remainder with corresponding |
522 | /// fast types |
523 | const DenseMap<unsigned int, unsigned int> &getBypassSlowDivWidths() const { |
524 | return BypassSlowDivWidths; |
525 | } |
526 | |
527 | /// Return true if Flow Control is an expensive operation that should be |
528 | /// avoided. |
529 | bool isJumpExpensive() const { return JumpIsExpensive; } |
530 | |
531 | /// Return true if selects are only cheaper than branches if the branch is |
532 | /// unlikely to be predicted right. |
533 | bool isPredictableSelectExpensive() const { |
534 | return PredictableSelectIsExpensive; |
535 | } |
536 | |
537 | virtual bool fallBackToDAGISel(const Instruction &Inst) const { |
538 | return false; |
539 | } |
540 | |
541 | /// Return true if the following transform is beneficial: |
542 | /// fold (conv (load x)) -> (load (conv*)x) |
543 | /// On architectures that don't natively support some vector loads |
544 | /// efficiently, casting the load to a smaller vector of larger types and |
545 | /// loading is more efficient, however, this can be undone by optimizations in |
546 | /// dag combiner. |
547 | virtual bool isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT, |
548 | const SelectionDAG &DAG, |
549 | const MachineMemOperand &MMO) const { |
550 | // Don't do if we could do an indexed load on the original type, but not on |
551 | // the new one. |
552 | if (!LoadVT.isSimple() || !BitcastVT.isSimple()) |
553 | return true; |
554 | |
555 | MVT LoadMVT = LoadVT.getSimpleVT(); |
556 | |
557 | // Don't bother doing this if it's just going to be promoted again later, as |
558 | // doing so might interfere with other combines. |
559 | if (getOperationAction(ISD::LOAD, LoadMVT) == Promote && |
560 | getTypeToPromoteTo(ISD::LOAD, LoadMVT) == BitcastVT.getSimpleVT()) |
561 | return false; |
562 | |
563 | bool Fast = false; |
564 | return allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), BitcastVT, |
565 | MMO, &Fast) && Fast; |
566 | } |
567 | |
568 | /// Return true if the following transform is beneficial: |
569 | /// (store (y (conv x)), y*)) -> (store x, (x*)) |
570 | virtual bool isStoreBitCastBeneficial(EVT StoreVT, EVT BitcastVT, |
571 | const SelectionDAG &DAG, |
572 | const MachineMemOperand &MMO) const { |
573 | // Default to the same logic as loads. |
574 | return isLoadBitCastBeneficial(StoreVT, BitcastVT, DAG, MMO); |
575 | } |
576 | |
577 | /// Return true if it is expected to be cheaper to do a store of a non-zero |
578 | /// vector constant with the given size and type for the address space than to |
579 | /// store the individual scalar element constants. |
580 | virtual bool storeOfVectorConstantIsCheap(EVT MemVT, |
581 | unsigned NumElem, |
582 | unsigned AddrSpace) const { |
583 | return false; |
584 | } |
585 | |
586 | /// Allow store merging for the specified type after legalization in addition |
587 | /// to before legalization. This may transform stores that do not exist |
588 | /// earlier (for example, stores created from intrinsics). |
589 | virtual bool mergeStoresAfterLegalization(EVT MemVT) const { |
590 | return true; |
591 | } |
592 | |
593 | /// Returns true if it's reasonable to merge stores of MemVT size.
594 | virtual bool canMergeStoresTo(unsigned AS, EVT MemVT, |
595 | const MachineFunction &MF) const { |
596 | return true; |
597 | } |
598 | |
599 | /// Return true if it is cheap to speculate a call to intrinsic cttz. |
600 | virtual bool isCheapToSpeculateCttz() const { |
601 | return false; |
602 | } |
603 | |
604 | /// Return true if it is cheap to speculate a call to intrinsic ctlz. |
605 | virtual bool isCheapToSpeculateCtlz() const { |
606 | return false; |
607 | } |
608 | |
609 | /// Return true if ctlz instruction is fast. |
610 | virtual bool isCtlzFast() const { |
611 | return false; |
612 | } |
613 | |
614 | /// Return the maximum number of "x & (x - 1)" operations that can be done |
615 | /// instead of deferring to a custom CTPOP. |
616 | virtual unsigned getCustomCtpopCost(EVT VT, ISD::CondCode Cond) const { |
617 | return 1; |
618 | } |
619 | |
620 | /// Return true if instruction generated for equality comparison is folded |
621 | /// with instruction generated for signed comparison. |
622 | virtual bool isEqualityCmpFoldedWithSignedCmp() const { return true; } |
623 | |
624 | /// Return true if the heuristic to prefer icmp eq zero should be used in code |
625 | /// gen prepare. |
626 | virtual bool preferZeroCompareBranch() const { return false; } |
627 | |
628 | /// Return true if it is safe to transform an integer-domain bitwise operation |
629 | /// into the equivalent floating-point operation. This should be set to true |
630 | /// if the target has IEEE-754-compliant fabs/fneg operations for the input |
631 | /// type. |
632 | virtual bool hasBitPreservingFPLogic(EVT VT) const { |
633 | return false; |
634 | } |
635 | |
636 | /// Return true if it is cheaper to split the store of a merged int val |
637 | /// from a pair of smaller values into multiple stores. |
638 | virtual bool isMultiStoresCheaperThanBitsMerge(EVT LTy, EVT HTy) const { |
639 | return false; |
640 | } |
641 | |
642 | /// Return true if the target supports combining a
643 | /// chain like: |
644 | /// \code |
645 | /// %andResult = and %val1, #mask |
646 | /// %icmpResult = icmp %andResult, 0 |
647 | /// \endcode |
648 | /// into a single machine instruction of a form like: |
649 | /// \code |
650 | /// cc = test %register, #mask |
651 | /// \endcode |
652 | virtual bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const { |
653 | return false; |
654 | } |
655 | |
656 | /// Use bitwise logic to make pairs of compares more efficient. For example: |
657 | /// and (seteq A, B), (seteq C, D) --> seteq (or (xor A, B), (xor C, D)), 0 |
658 | /// This should be true when it takes more than one instruction to lower |
659 | /// setcc (cmp+set on x86 scalar), when bitwise ops are faster than logic on |
660 | /// condition bits (crand on PowerPC), and/or when reducing cmp+br is a win. |
661 | virtual bool convertSetCCLogicToBitwiseLogic(EVT VT) const { |
662 | return false; |
663 | } |
664 | |
665 | /// Return the preferred operand type if the target has a quick way to compare |
666 | /// integer values of the given size. Assume that any legal integer type can |
667 | /// be compared efficiently. Targets may override this to allow illegal wide |
668 | /// types to return a vector type if there is support to compare that type. |
669 | virtual MVT hasFastEqualityCompare(unsigned NumBits) const { |
670 | MVT VT = MVT::getIntegerVT(NumBits); |
671 | return isTypeLegal(VT) ? VT : MVT::INVALID_SIMPLE_VALUE_TYPE; |
672 | } |
673 | |
674 | /// Return true if the target should transform: |
675 | /// (X & Y) == Y ---> (~X & Y) == 0 |
676 | /// (X & Y) != Y ---> (~X & Y) != 0 |
677 | /// |
678 | /// This may be profitable if the target has a bitwise and-not operation that |
679 | /// sets comparison flags. A target may want to limit the transformation based |
680 | /// on the type of Y or if Y is a constant. |
681 | /// |
682 | /// Note that the transform will not occur if Y is known to be a power-of-2 |
683 | /// because a mask and compare of a single bit can be handled by inverting the |
684 | /// predicate, for example: |
685 | /// (X & 8) == 8 ---> (X & 8) != 0 |
686 | virtual bool hasAndNotCompare(SDValue Y) const { |
687 | return false; |
688 | } |
689 | |
690 | /// Return true if the target has a bitwise and-not operation: |
691 | /// X = ~A & B |
692 | /// This can be used to simplify select or other instructions. |
693 | virtual bool hasAndNot(SDValue X) const { |
694 | // If the target has the more complex version of this operation, assume that |
695 | // it has this operation too. |
696 | return hasAndNotCompare(X); |
697 | } |
698 | |
699 | /// Return true if the target has a bit-test instruction: |
700 | /// (X & (1 << Y)) ==/!= 0 |
701 | /// This knowledge can be used to prevent breaking the pattern, |
702 | /// or creating it if it could be recognized. |
703 | virtual bool hasBitTest(SDValue X, SDValue Y) const { return false; } |
704 | |
705 | /// There are two ways to clear extreme bits (either low or high): |
706 | /// Mask: x & (-1 << y) (the instcombine canonical form) |
707 | /// Shifts: x >> y << y |
708 | /// Return true if the variant with 2 variable shifts is preferred. |
709 | /// Return false if there is no preference. |
710 | virtual bool shouldFoldMaskToVariableShiftPair(SDValue X) const { |
711 | // By default, let's assume that no one prefers shifts. |
712 | return false; |
713 | } |
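// ---- Editor's note (illustration, not part of the original header) ----
// The two equivalent ways of clearing the y low bits of x, for 0 <= y < 32:
#include <cstdint>
static uint32_t clearLowViaMask(uint32_t x, unsigned y) {
  return x & (~0u << y);    // instcombine canonical form: x & (-1 << y)
}
static uint32_t clearLowViaShifts(uint32_t x, unsigned y) {
  return (x >> y) << y;     // the two-variable-shift variant
}
// ------------------------------------------------------------------------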
714 | |
715 | /// Return true if it is profitable to fold a pair of shifts into a mask. |
716 | /// This is usually true on most targets. But some targets, like Thumb1, |
717 | /// have immediate shift instructions, but no immediate "and" instruction; |
718 | /// this makes the fold unprofitable. |
719 | virtual bool shouldFoldConstantShiftPairToMask(const SDNode *N, |
720 | CombineLevel Level) const { |
721 | return true; |
722 | } |
723 | |
724 | /// Should we transform the IR-optimal check for whether given truncation |
725 | /// down into KeptBits would be truncating or not: |
726 | /// (add %x, (1 << (KeptBits-1))) srccond (1 << KeptBits) |
727 | /// Into its more traditional form: |
728 | /// ((%x << C) a>> C) dstcond %x |
729 | /// Return true if we should transform. |
730 | /// Return false if there is no preference. |
731 | virtual bool shouldTransformSignedTruncationCheck(EVT XVT, |
732 | unsigned KeptBits) const { |
733 | // By default, let's assume that no one prefers shifts. |
734 | return false; |
735 | } |
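// ---- Editor's note (illustration, not part of the original header) ----
// Both forms of the check "does %x survive truncation to KeptBits and
// sign-extension back?", written for i32 with 0 < KeptBits < 32. The
// arithmetic shift right on int32_t is guaranteed by the standard since
// C++20 and by all mainstream compilers before that.
#include <cstdint>
static bool fitsViaAdd(uint32_t x, unsigned KeptBits) {
  // (add %x, (1 << (KeptBits-1))) u< (1 << KeptBits)
  return (x + (1u << (KeptBits - 1))) < (1u << KeptBits);
}
static bool fitsViaShifts(uint32_t x, unsigned KeptBits) {
  unsigned C = 32 - KeptBits;
  // ((%x << C) a>> C) == %x
  return (uint32_t)((int32_t)(x << C) >> C) == x;
}
// ------------------------------------------------------------------------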
736 | |
737 | /// Given the pattern |
738 | /// (X & (C l>>/<< Y)) ==/!= 0 |
739 | /// return true if it should be transformed into: |
740 | /// ((X <</l>> Y) & C) ==/!= 0 |
741 | /// WARNING: if 'X' is a constant, the fold may deadlock! |
742 | /// FIXME: we could avoid passing XC, but we can't use isConstOrConstSplat() |
743 | /// here because it can end up being not linked in. |
744 | virtual bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd( |
745 | SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y, |
746 | unsigned OldShiftOpcode, unsigned NewShiftOpcode, |
747 | SelectionDAG &DAG) const { |
748 | if (hasBitTest(X, Y)) { |
749 | // One interesting pattern that we'd want to form is 'bit test': |
750 | // ((1 << Y) & C) ==/!= 0 |
751 | // But we also need to be careful not to try to reverse that fold. |
752 | |
753 | // Is this '1 << Y' ? |
754 | if (OldShiftOpcode == ISD::SHL && CC->isOne()) |
755 | return false; // Keep the 'bit test' pattern. |
756 | |
757 | // Will it be '1 << Y' after the transform ? |
758 | if (XC && NewShiftOpcode == ISD::SHL && XC->isOne()) |
759 | return true; // Do form the 'bit test' pattern. |
760 | } |
761 | |
762 | // If 'X' is a constant, and we transform, then we will immediately |
763 | // try to undo the fold, thus causing endless combine loop. |
764 | // So by default, let's assume everyone prefers the fold |
765 | // iff 'X' is not a constant. |
766 | return !XC; |
767 | } |
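// ---- Editor's note (illustration, not part of the original header) ----
// The equivalence behind this hook on uint32_t, for 0 <= Y < 32: bit i of
// (C >> Y) is bit i+Y of C, so testing X against the shifted constant is
// the same as testing the shifted X against the original constant.
#include <cstdint>
static bool maskShiftedConst(uint32_t X, uint32_t C, unsigned Y) {
  return (X & (C >> Y)) != 0;   // shift the constant
}
static bool maskShiftedValue(uint32_t X, uint32_t C, unsigned Y) {
  return ((X << Y) & C) != 0;   // hoist the constant, shift X instead
}
// The two functions agree for all inputs.
// ------------------------------------------------------------------------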
768 | |
769 | /// These two forms are equivalent: |
770 | /// sub %y, (xor %x, -1) |
771 | /// add (add %x, 1), %y |
772 | /// The variant with two add's is IR-canonical. |
773 | /// Some targets may prefer one to the other. |
774 | virtual bool preferIncOfAddToSubOfNot(EVT VT) const { |
775 | // By default, let's assume that everyone prefers the form with two add's. |
776 | return true; |
777 | } |
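// ---- Editor's note (illustration, not part of the original header) ----
// Why the two forms compute the same value: xor %x, -1 is ~x = -x - 1 in
// two's complement, so y - (~x) = y - (-x - 1) = (x + 1) + y.
#include <cstdint>
static uint32_t subOfNot(uint32_t x, uint32_t y) { return y - (x ^ ~0u); }
static uint32_t incOfAdd(uint32_t x, uint32_t y) { return (x + 1) + y; }
// The two functions agree for all inputs (unsigned wrap-around included).
// ------------------------------------------------------------------------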
778 | |
779 | /// Return true if the target wants to use the optimization that |
780 | /// turns ext(promotableInst1(...(promotableInstN(load)))) into |
781 | /// promotedInst1(...(promotedInstN(ext(load)))). |
782 | bool enableExtLdPromotion() const { return EnableExtLdPromotion; } |
783 | |
784 | /// Return true if the target can combine store(extractelement VectorTy, |
785 | /// Idx). |
786 | /// \p Cost[out] gives the cost of that transformation when this is true. |
787 | virtual bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx, |
788 | unsigned &Cost) const { |
789 | return false; |
790 | } |
791 | |
792 | /// Return true if inserting a scalar into a variable element of an undef |
793 | /// vector is more efficiently handled by splatting the scalar instead. |
794 | virtual bool shouldSplatInsEltVarIndex(EVT) const { |
795 | return false; |
796 | } |
797 | |
798 | /// Return true if target always benefits from combining into FMA for a |
799 | /// given value type. This must typically return false on targets where FMA |
800 | /// takes more cycles to execute than FADD. |
801 | virtual bool enableAggressiveFMAFusion(EVT VT) const { |
802 | return false; |
803 | } |
804 | |
805 | /// Return the ValueType of the result of SETCC operations. |
806 | virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, |
807 | EVT VT) const; |
808 | |
809 | /// Return the ValueType for comparison libcalls. Comparison libcalls include |
810 | /// floating point comparison calls, and Ordered/Unordered check calls on |
811 | /// floating point numbers. |
812 | virtual |
813 | MVT::SimpleValueType getCmpLibcallReturnType() const; |
814 | |
815 | /// For targets without i1 registers, this gives the nature of the high-bits |
816 | /// of boolean values held in types wider than i1. |
817 | /// |
818 | /// "Boolean values" are special true/false values produced by nodes like |
819 | /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND. |
820 | /// Not to be confused with general values promoted from i1. Some cpus |
821 | /// distinguish between vectors of boolean and scalars; the isVec parameter |
822 | /// selects between the two kinds. For example on X86 a scalar boolean should |
823 | /// be zero extended from i1, while the elements of a vector of booleans |
824 | /// should be sign extended from i1. |
825 | /// |
826 | /// Some cpus also treat floating point types the same way as they treat |
827 | /// vectors instead of the way they treat scalars. |
828 | BooleanContent getBooleanContents(bool isVec, bool isFloat) const { |
829 | if (isVec) |
830 | return BooleanVectorContents; |
831 | return isFloat ? BooleanFloatContents : BooleanContents; |
832 | } |
833 | |
834 | BooleanContent getBooleanContents(EVT Type) const { |
835 | return getBooleanContents(Type.isVector(), Type.isFloatingPoint()); |
836 | } |
837 | |
838 | /// Return target scheduling preference. |
839 | Sched::Preference getSchedulingPreference() const { |
840 | return SchedPreferenceInfo; |
841 | } |
842 | |
843 | /// Some schedulers, e.g. hybrid, can switch to different scheduling heuristics |
844 | /// for different nodes. This function returns the preference (or none) for |
845 | /// the given node. |
846 | virtual Sched::Preference getSchedulingPreference(SDNode *) const { |
847 | return Sched::None; |
848 | } |
849 | |
850 | /// Return the register class that should be used for the specified value |
851 | /// type. |
852 | virtual const TargetRegisterClass *getRegClassFor(MVT VT, bool isDivergent = false) const { |
853 | (void)isDivergent; |
854 | const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy]; |
855 | assert(RC && "This value type is not natively supported!"); |
856 | return RC; |
857 | } |
858 | |
859 | /// Allows target to decide about the register class of the |
860 | /// specific value that is live outside the defining block. |
861 | /// Returns true if the value needs uniform register class. |
862 | virtual bool requiresUniformRegister(MachineFunction &MF, |
863 | const Value *) const { |
864 | return false; |
865 | } |
866 | |
867 | /// Return the 'representative' register class for the specified value |
868 | /// type. |
869 | /// |
870 | /// The 'representative' register class is the largest legal super-reg |
871 | /// register class for the register class of the value type. For example, on |
872 | /// i386 the rep register class for i8, i16, and i32 is GR32; while the rep |
873 | /// register class is GR64 on x86_64. |
874 | virtual const TargetRegisterClass *getRepRegClassFor(MVT VT) const { |
875 | const TargetRegisterClass *RC = RepRegClassForVT[VT.SimpleTy]; |
876 | return RC; |
877 | } |
878 | |
879 | /// Return the cost of the 'representative' register class for the specified |
880 | /// value type. |
881 | virtual uint8_t getRepRegClassCostFor(MVT VT) const { |
882 | return RepRegClassCostForVT[VT.SimpleTy]; |
883 | } |
884 | |
885 | /// Return true if SHIFT instructions should be expanded to SHIFT_PARTS |
886 | /// instructions, and false if a library call is preferred (e.g. for code-size |
887 | /// reasons). |
888 | virtual bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const { |
889 | return true; |
890 | } |
891 | |
892 | /// Return true if the target has native support for the specified value type. |
893 | /// This means that it has a register that directly holds it without |
894 | /// promotions or expansions. |
895 | bool isTypeLegal(EVT VT) const { |
896 | assert(!VT.isSimple() || |
897 |        (unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT)); |
898 | return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != nullptr; |
899 | } |
900 | |
901 | class ValueTypeActionImpl { |
902 | /// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum |
903 | /// that indicates how instruction selection should deal with the type. |
904 | LegalizeTypeAction ValueTypeActions[MVT::VALUETYPE_SIZE]; |
905 | |
906 | public: |
907 | ValueTypeActionImpl() { |
908 | std::fill(std::begin(ValueTypeActions), std::end(ValueTypeActions), |
909 | TypeLegal); |
910 | } |
911 | |
912 | LegalizeTypeAction getTypeAction(MVT VT) const { |
913 | return ValueTypeActions[VT.SimpleTy]; |
914 | } |
915 | |
916 | void setTypeAction(MVT VT, LegalizeTypeAction Action) { |
917 | ValueTypeActions[VT.SimpleTy] = Action; |
918 | } |
919 | }; |
920 | |
921 | const ValueTypeActionImpl &getValueTypeActions() const { |
922 | return ValueTypeActions; |
923 | } |
924 | |
925 | /// Return how we should legalize values of this type, either it is already |
926 | /// legal (return 'Legal') or we need to promote it to a larger type (return |
927 | /// 'Promote'), or we need to expand it into multiple registers of smaller |
928 | /// integer type (return 'Expand'). 'Custom' is not an option. |
929 | LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const { |
930 | return getTypeConversion(Context, VT).first; |
931 | } |
932 | LegalizeTypeAction getTypeAction(MVT VT) const { |
933 | return ValueTypeActions.getTypeAction(VT); |
934 | } |
935 | |
936 | /// For types supported by the target, this is an identity function. For |
937 | /// types that must be promoted to larger types, this returns the larger type |
938 | /// to promote to. For integer types that are larger than the largest integer |
939 | /// register, this contains one step in the expansion to get to the smaller |
940 | /// register. For illegal floating point types, this returns the integer type |
941 | /// to transform to. |
942 | EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const { |
943 | return getTypeConversion(Context, VT).second; |
944 | } |
945 | |
946 | /// For types supported by the target, this is an identity function. For |
947 | /// types that must be expanded (i.e. integer types that are larger than the |
948 | /// largest integer register or illegal floating point types), this returns |
949 | /// the largest legal type it will be expanded to. |
950 | EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const { |
951 | assert(!VT.isVector()); |
952 | while (true) { |
953 | switch (getTypeAction(Context, VT)) { |
954 | case TypeLegal: |
955 | return VT; |
956 | case TypeExpandInteger: |
957 | VT = getTypeToTransformTo(Context, VT); |
958 | break; |
959 | default: |
960 | llvm_unreachable("Type is not legal nor is it to be expanded!"); |
961 | } |
962 | } |
963 | } |
964 | |
965 | /// Vector types are broken down into some number of legal first class types. |
966 | /// For example, EVT::v8f32 maps to 2 EVT::v4f32 with Altivec or SSE1, or 8 |
967 | /// promoted EVT::f64 values with the X86 FP stack. Similarly, EVT::v2i64 |
968 | /// turns into 4 EVT::i32 values with both PPC and X86. |
969 | /// |
970 | /// This method returns the number of registers needed, and the VT for each |
971 | /// register. It also returns the VT and quantity of the intermediate values |
972 | /// before they are promoted/expanded. |
973 | unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT, |
974 | EVT &IntermediateVT, |
975 | unsigned &NumIntermediates, |
976 | MVT &RegisterVT) const; |
977 | |
978 | /// Certain targets such as MIPS require that some types such as vectors are |
979 | /// always broken down into scalars in some contexts. This occurs even if the |
980 | /// vector type is legal. |
981 | virtual unsigned getVectorTypeBreakdownForCallingConv( |
982 | LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, |
983 | unsigned &NumIntermediates, MVT &RegisterVT) const { |
984 | return getVectorTypeBreakdown(Context, VT, IntermediateVT, NumIntermediates, |
985 | RegisterVT); |
986 | } |
987 | |
988 | struct IntrinsicInfo { |
989 | unsigned opc = 0; // target opcode |
990 | EVT memVT; // memory VT |
991 | |
992 | // value representing memory location |
993 | PointerUnion<const Value *, const PseudoSourceValue *> ptrVal; |
994 | |
995 | int offset = 0; // offset off of ptrVal |
996 | uint64_t size = 0; // the size of the memory location |
997 | // (taken from memVT if zero) |
998 | MaybeAlign align = Align(1); // alignment |
999 | |
1000 | MachineMemOperand::Flags flags = MachineMemOperand::MONone; |
1001 | IntrinsicInfo() = default; |
1002 | }; |
1003 | |
1004 | /// Given an intrinsic, checks if on the target the intrinsic will need to map |
1005 | /// to a MemIntrinsicNode (touches memory). If this is the case, it returns |
1006 | /// true and stores the intrinsic information into the IntrinsicInfo that was |
1007 | /// passed to the function. |
1008 | virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &, |
1009 | MachineFunction &, |
1010 | unsigned /*Intrinsic*/) const { |
1011 | return false; |
1012 | } |
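// ---- Editor's note (illustration, not part of the original header) ----
// A hedged sketch of how a target might implement this hook for a
// load-like intrinsic. "MyTargetLowering" and the intrinsic name are
// hypothetical; the IntrinsicInfo fields are the ones declared above.
bool MyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                          const CallInst &I,
                                          MachineFunction &MF,
                                          unsigned Intrinsic) const {
  switch (Intrinsic) {
  default:
    return false;                        // not a memory intrinsic here
  case Intrinsic::mytarget_vload:        // hypothetical intrinsic
    Info.opc = ISD::INTRINSIC_W_CHAIN;   // produces a value and a chain
    Info.memVT = MVT::v4i32;             // what is actually read
    Info.ptrVal = I.getArgOperand(0);    // the memory location
    Info.offset = 0;
    Info.align = Align(4);
    Info.flags = MachineMemOperand::MOLoad;
    return true;
  }
}
// ------------------------------------------------------------------------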
1013 | |
1014 | /// Returns true if the target can instruction select the specified FP |
1015 | /// immediate natively. If false, the legalizer will materialize the FP |
1016 | /// immediate as a load from a constant pool. |
1017 | virtual bool isFPImmLegal(const APFloat & /*Imm*/, EVT /*VT*/, |
1018 | bool ForCodeSize = false) const { |
1019 | return false; |
1020 | } |
1021 | |
1022 | /// Targets can use this to indicate that they only support *some* |
1023 | /// VECTOR_SHUFFLE operations, those with specific masks. By default, if a |
1024 | /// target supports the VECTOR_SHUFFLE node, all mask values are assumed to be |
1025 | /// legal. |
1026 | virtual bool isShuffleMaskLegal(ArrayRef<int> /*Mask*/, EVT /*VT*/) const { |
1027 | return true; |
1028 | } |
1029 | |
1030 | /// Returns true if the operation can trap for the value type. |
1031 | /// |
1032 | /// VT must be a legal type. By default, we optimistically assume most |
1033 | /// operations don't trap except for integer divide and remainder. |
1034 | virtual bool canOpTrap(unsigned Op, EVT VT) const; |
1035 | |
1036 | /// Similar to isShuffleMaskLegal. Targets can use this to indicate if there |
1037 | /// is a suitable VECTOR_SHUFFLE that can be used to replace a VAND with a |
1038 | /// constant pool entry. |
1039 | virtual bool isVectorClearMaskLegal(ArrayRef<int> /*Mask*/, |
1040 | EVT /*VT*/) const { |
1041 | return false; |
1042 | } |
1043 | |
1044 | /// Return how this operation should be treated: either it is legal, needs to |
1045 | /// be promoted to a larger size, needs to be expanded to some other code |
1046 | /// sequence, or the target has a custom expander for it. |
1047 | LegalizeAction getOperationAction(unsigned Op, EVT VT) const { |
1048 | if (VT.isExtended()) return Expand; |
1049 | // If a target-specific SDNode requires legalization, require the target |
1050 | // to provide custom legalization for it. |
1051 | if (Op >= array_lengthof(OpActions[0])) return Custom; |
1052 | return OpActions[(unsigned)VT.getSimpleVT().SimpleTy][Op]; |
1053 | } |
1054 | |
1055 | /// Custom method defined by each target to indicate if an operation which |
1056 | /// may require a scale is supported natively by the target. |
1057 | /// If not, the operation is illegal. |
1058 | virtual bool isSupportedFixedPointOperation(unsigned Op, EVT VT, |
1059 | unsigned Scale) const { |
1060 | return false; |
1061 | } |
1062 | |
1063 | /// Some fixed point operations may be natively supported by the target but |
1064 | /// only for specific scales. This method allows for checking |
1065 | /// if the width is supported by the target for a given operation that may |
1066 | /// depend on scale. |
1067 | LegalizeAction getFixedPointOperationAction(unsigned Op, EVT VT, |
1068 | unsigned Scale) const { |
1069 | auto Action = getOperationAction(Op, VT); |
1070 | if (Action != Legal) |
1071 | return Action; |
1072 | |
1073 | // This operation is supported in this type but may only work on specific |
1074 | // scales. |
1075 | bool Supported; |
1076 | switch (Op) { |
1077 | default: |
1078 | llvm_unreachable("Unexpected fixed point operation."); |
1079 | case ISD::SMULFIX: |
1080 | case ISD::SMULFIXSAT: |
1081 | case ISD::UMULFIX: |
1082 | case ISD::UMULFIXSAT: |
1083 | case ISD::SDIVFIX: |
1084 | case ISD::SDIVFIXSAT: |
1085 | case ISD::UDIVFIX: |
1086 | case ISD::UDIVFIXSAT: |
1087 | Supported = isSupportedFixedPointOperation(Op, VT, Scale); |
1088 | break; |
1089 | } |
1090 | |
1091 | return Supported ? Action : Expand; |
1092 | } |
1093 | |
1094 | // If Op is a strict floating-point operation, return the result |
1095 | // of getOperationAction for the equivalent non-strict operation. |
1096 | LegalizeAction getStrictFPOperationAction(unsigned Op, EVT VT) const { |
1097 | unsigned EqOpc; |
1098 | switch (Op) { |
1099 | default: llvm_unreachable("Unexpected FP pseudo-opcode"); |
1100 | #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \ |
1101 | case ISD::STRICT_##DAGN: EqOpc = ISD::DAGN; break; |
1102 | #define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \ |
1103 | case ISD::STRICT_##DAGN: EqOpc = ISD::SETCC; break; |
1104 | #include "llvm/IR/ConstrainedOps.def" |
1105 | } |
1106 | |
1107 | return getOperationAction(EqOpc, VT); |
1108 | } |
1109 | |
1110 | /// Return true if the specified operation is legal on this target or can be |
1111 | /// made legal with custom lowering. This is used to help guide high-level |
1112 | /// lowering decisions. LegalOnly is an optional convenience for code paths |
1113 | /// traversed pre and post legalisation. |
1114 | bool isOperationLegalOrCustom(unsigned Op, EVT VT, |
1115 | bool LegalOnly = false) const { |
1116 | if (LegalOnly) |
1117 | return isOperationLegal(Op, VT); |
1118 | |
1119 | return (VT == MVT::Other || isTypeLegal(VT)) && |
1120 | (getOperationAction(Op, VT) == Legal || |
1121 | getOperationAction(Op, VT) == Custom); |
1122 | } |
1123 | |
1124 | /// Return true if the specified operation is legal on this target or can be |
1125 | /// made legal using promotion. This is used to help guide high-level lowering |
1126 | /// decisions. LegalOnly is an optional convenience for code paths traversed |
1127 | /// pre and post legalisation. |
1128 | bool isOperationLegalOrPromote(unsigned Op, EVT VT, |
1129 | bool LegalOnly = false) const { |
1130 | if (LegalOnly) |
1131 | return isOperationLegal(Op, VT); |
1132 | |
1133 | return (VT == MVT::Other || isTypeLegal(VT)) && |
1134 | (getOperationAction(Op, VT) == Legal || |
1135 | getOperationAction(Op, VT) == Promote); |
1136 | } |
1137 | |
1138 | /// Return true if the specified operation is legal on this target or can be |
1139 | /// made legal with custom lowering or using promotion. This is used to help |
1140 | /// guide high-level lowering decisions. LegalOnly is an optional convenience |
1141 | /// for code paths traversed pre and post legalisation. |
1142 | bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, |
1143 | bool LegalOnly = false) const { |
1144 | if (LegalOnly) |
1145 | return isOperationLegal(Op, VT); |
1146 | |
1147 | return (VT == MVT::Other || isTypeLegal(VT)) && |
1148 | (getOperationAction(Op, VT) == Legal || |
1149 | getOperationAction(Op, VT) == Custom || |
1150 | getOperationAction(Op, VT) == Promote); |
1151 | } |
1152 | |
1153 | /// Return true if the operation uses custom lowering, regardless of whether |
1154 | /// the type is legal or not. |
1155 | bool isOperationCustom(unsigned Op, EVT VT) const { |
1156 | return getOperationAction(Op, VT) == Custom; |
1157 | } |
1158 | |
1159 | /// Return true if lowering to a jump table is allowed. |
1160 | virtual bool areJTsAllowed(const Function *Fn) const { |
1161 | if (Fn->getFnAttribute("no-jump-tables").getValueAsBool()) |
1162 | return false; |
1163 | |
1164 | return isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) || |
1165 | isOperationLegalOrCustom(ISD::BRIND, MVT::Other); |
1166 | } |
1167 | |
1168 | /// Check whether the range [Low,High] fits in a machine word. |
1169 | bool rangeFitsInWord(const APInt &Low, const APInt &High, |
1170 | const DataLayout &DL) const { |
1171 | // FIXME: Using the pointer type doesn't seem ideal. |
1172 | uint64_t BW = DL.getIndexSizeInBits(0u); |
1173 | uint64_t Range = (High - Low).getLimitedValue(UINT64_MAX - 1) + 1; |
1174 | return Range <= BW; |
1175 | } |
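// ---- Editor's note (worked example, not part of the original header) ----
// With a 64-bit index type (BW == 64):
//   Low = 10, High = 70  ->  Range = (70 - 10) + 1 = 61 <= 64  -> fits
//   Low = 0,  High = 100 ->  Range = 101 > 64                  -> does not
// The getLimitedValue(UINT64_MAX - 1) clamp keeps the "+ 1" from wrapping
// when High - Low itself needs all 64 bits.
// ------------------------------------------------------------------------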
1176 | |
1177 | /// Return true if lowering to a jump table is suitable for a set of case |
1178 | /// clusters which may contain \p NumCases cases, \p Range range of values. |
1179 | virtual bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases, |
1180 | uint64_t Range, ProfileSummaryInfo *PSI, |
1181 | BlockFrequencyInfo *BFI) const; |
1182 | |
1183 | /// Return true if lowering to a bit test is suitable for a set of case |
1184 | /// clusters which contains \p NumDests unique destinations, \p Low and |
1185 | /// \p High as its lowest and highest case values, and expects \p NumCmps |
1186 | /// case value comparisons. Check if the number of destinations, comparison |
1187 | /// metric, and range are all suitable. |
1188 | bool isSuitableForBitTests(unsigned NumDests, unsigned NumCmps, |
1189 | const APInt &Low, const APInt &High, |
1190 | const DataLayout &DL) const { |
1191 | // FIXME: I don't think NumCmps is the correct metric: a single case and a |
1192 | // range of cases both require only one branch to lower. Just looking at the |
1193 | // number of clusters and destinations should be enough to decide whether to |
1194 | // build bit tests. |
1195 | |
1196 | // To lower a range with bit tests, the range must fit the bitwidth of a |
1197 | // machine word. |
1198 | if (!rangeFitsInWord(Low, High, DL)) |
1199 | return false; |
1200 | |
1201 | // Decide whether it's profitable to lower this range with bit tests. Each |
1202 | // destination requires a bit test and branch, and there is an overall range |
1203 | // check branch. For a small number of clusters, separate comparisons might |
1204 | // be cheaper, and for many destinations, splitting the range might be |
1205 | // better. |
1206 | return (NumDests == 1 && NumCmps >= 3) || (NumDests == 2 && NumCmps >= 5) || |
1207 | (NumDests == 3 && NumCmps >= 6); |
1208 | } |
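// ---- Editor's note (illustration, not part of the original header) ----
// The shape of the lowering this heuristic gates, on a 64-bit word: one
// destination with cases {1, 3, 5, 9} (NumDests == 1, NumCmps == 4 >= 3)
// becomes one range check plus one bit test instead of four compares.
#include <cstdint>
static bool matchesCluster(uint64_t X) {
  const uint64_t Low = 1, High = 9;
  const uint64_t Mask = (1ull << (1 - Low)) | (1ull << (3 - Low)) |
                        (1ull << (5 - Low)) | (1ull << (9 - Low));
  if (X - Low > High - Low)          // overall range check (unsigned wrap)
    return false;
  return (Mask >> (X - Low)) & 1;    // single bit test
}
// ------------------------------------------------------------------------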
1209 | |
1210 | /// Return true if the specified operation is illegal on this target or |
1211 | /// unlikely to be made legal with custom lowering. This is used to help guide |
1212 | /// high-level lowering decisions. |
1213 | bool isOperationExpand(unsigned Op, EVT VT) const { |
1214 | return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand); |
1215 | } |
1216 | |
1217 | /// Return true if the specified operation is legal on this target. |
1218 | bool isOperationLegal(unsigned Op, EVT VT) const { |
1219 | return (VT == MVT::Other || isTypeLegal(VT)) && |
1220 | getOperationAction(Op, VT) == Legal; |
1221 | } |
1222 | |
1223 | /// Return how this load with extension should be treated: either it is legal, |
1224 | /// needs to be promoted to a larger size, needs to be expanded to some other |
1225 | /// code sequence, or the target has a custom expander for it. |
1226 | LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT, |
1227 | EVT MemVT) const { |
1228 | if (ValVT.isExtended() || MemVT.isExtended()) return Expand; |
1229 | unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy; |
1230 | unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy; |
1231 | assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValI < MVT::VALUETYPE_SIZE && |
1232 |        MemI < MVT::VALUETYPE_SIZE && "Table isn't big enough!"); |
1233 | unsigned Shift = 4 * ExtType; |
1234 | return (LegalizeAction)((LoadExtActions[ValI][MemI] >> Shift) & 0xf); |
1235 | } |
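// ---- Editor's note (illustration, not part of the original header) ----
// The nibble packing read above: one 4-bit LegalizeAction per
// ISD::LoadExtType, packed into a single table entry (a 16-bit integer
// here, matching the shift/mask arithmetic in getLoadExtAction):
#include <cstdint>
static unsigned decodeAction(uint16_t Entry, unsigned ExtType) {
  return (Entry >> (4 * ExtType)) & 0xf;          // extract one nibble
}
static uint16_t encodeAction(uint16_t Entry, unsigned ExtType,
                             unsigned Action) {
  Entry &= (uint16_t)~(0xfu << (4 * ExtType));    // clear the old nibble
  return Entry | (uint16_t)(Action << (4 * ExtType));
}
// ------------------------------------------------------------------------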
1236 | |
1237 | /// Return true if the specified load with extension is legal on this target. |
1238 | bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const { |
1239 | return getLoadExtAction(ExtType, ValVT, MemVT) == Legal; |
1240 | } |
1241 | |
1242 | /// Return true if the specified load with extension is legal or custom |
1243 | /// on this target. |
1244 | bool isLoadExtLegalOrCustom(unsigned ExtType, EVT ValVT, EVT MemVT) const { |
1245 | return getLoadExtAction(ExtType, ValVT, MemVT) == Legal || |
1246 | getLoadExtAction(ExtType, ValVT, MemVT) == Custom; |
1247 | } |
1248 | |
1249 | /// Return how this store with truncation should be treated: either it is |
1250 | /// legal, needs to be promoted to a larger size, needs to be expanded to some |
1251 | /// other code sequence, or the target has a custom expander for it. |
1252 | LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const { |
1253 | if (ValVT.isExtended() || MemVT.isExtended()) return Expand; |
1254 | unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy; |
1255 | unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy; |
1256 | assert(ValI < MVT::VALUETYPE_SIZE && MemI < MVT::VALUETYPE_SIZE && |
1257 |        "Table isn't big enough!"); |
1258 | return TruncStoreActions[ValI][MemI]; |
1259 | } |
1260 | |
1261 | /// Return true if the specified store with truncation is legal on this |
1262 | /// target. |
1263 | bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const { |
1264 | return isTypeLegal(ValVT) && getTruncStoreAction(ValVT, MemVT) == Legal; |
1265 | } |
1266 | |
1267 | /// Return true if the specified store with truncation has a solution on this |
1268 | /// target. |
1269 | bool isTruncStoreLegalOrCustom(EVT ValVT, EVT MemVT) const { |
1270 | return isTypeLegal(ValVT) && |
1271 | (getTruncStoreAction(ValVT, MemVT) == Legal || |
1272 | getTruncStoreAction(ValVT, MemVT) == Custom); |
1273 | } |
1274 | |
1275 | virtual bool canCombineTruncStore(EVT ValVT, EVT MemVT, |
1276 | bool LegalOnly) const { |
1277 | if (LegalOnly) |
1278 | return isTruncStoreLegal(ValVT, MemVT); |
1279 | |
1280 | return isTruncStoreLegalOrCustom(ValVT, MemVT); |
1281 | } |
1282 | |
1283 | /// Return how the indexed load should be treated: either it is legal, needs |
1284 | /// to be promoted to a larger size, needs to be expanded to some other code |
1285 | /// sequence, or the target has a custom expander for it. |
1286 | LegalizeAction getIndexedLoadAction(unsigned IdxMode, MVT VT) const { |
1287 | return getIndexedModeAction(IdxMode, VT, IMAB_Load); |
1288 | } |
1289 | |
1290 | /// Return true if the specified indexed load is legal on this target. |
1291 | bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const { |
1292 | return VT.isSimple() && |
1293 | (getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Legal || |
1294 | getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Custom); |
1295 | } |
1296 | |
1297 | /// Return how the indexed store should be treated: either it is legal, needs |
1298 | /// to be promoted to a larger size, needs to be expanded to some other code |
1299 | /// sequence, or the target has a custom expander for it. |
1300 | LegalizeAction getIndexedStoreAction(unsigned IdxMode, MVT VT) const { |
1301 | return getIndexedModeAction(IdxMode, VT, IMAB_Store); |
1302 | } |
1303 | |
1304 | /// Return true if the specified indexed store is legal on this target. |
1305 | bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const { |
1306 | return VT.isSimple() && |
1307 | (getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Legal || |
1308 | getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Custom); |
1309 | } |
1310 | |
1311 | /// Return how the indexed masked load should be treated: either it is legal, |
1312 | /// to be promoted to a larger size, needs to be expanded to some other code |
1313 | /// sequence, or the target has a custom expander for it. |
1314 | LegalizeAction getIndexedMaskedLoadAction(unsigned IdxMode, MVT VT) const { |
1315 | return getIndexedModeAction(IdxMode, VT, IMAB_MaskedLoad); |
1316 | } |
1317 | |
1318 | /// Return true if the specified indexed masked load is legal on this target. |
1319 | bool isIndexedMaskedLoadLegal(unsigned IdxMode, EVT VT) const { |
1320 | return VT.isSimple() && |
1321 | (getIndexedMaskedLoadAction(IdxMode, VT.getSimpleVT()) == Legal || |
1322 | getIndexedMaskedLoadAction(IdxMode, VT.getSimpleVT()) == Custom); |
1323 | } |
1324 | |
1325 | /// Return how the indexed masked store should be treated: either it is legal, |
1326 | /// to be promoted to a larger size, needs to be expanded to some other code |
1327 | /// sequence, or the target has a custom expander for it. |
1328 | LegalizeAction getIndexedMaskedStoreAction(unsigned IdxMode, MVT VT) const { |
1329 | return getIndexedModeAction(IdxMode, VT, IMAB_MaskedStore); |
1330 | } |
1331 | |
1332 | /// Return true if the specified indexed masked store is legal on this target. |
1333 | bool isIndexedMaskedStoreLegal(unsigned IdxMode, EVT VT) const { |
1334 | return VT.isSimple() && |
1335 | (getIndexedMaskedStoreAction(IdxMode, VT.getSimpleVT()) == Legal || |
1336 | getIndexedMaskedStoreAction(IdxMode, VT.getSimpleVT()) == Custom); |
1337 | } |
1338 | |
1339 | /// Returns true if the index type for a masked gather/scatter requires |
1340 | /// extending. |
1341 | virtual bool shouldExtendGSIndex(EVT VT, EVT &EltTy) const { return false; } |
1342 | |
1343 | /// Returns true if VT is a legal index type for masked gathers/scatters |
1344 | /// on this target. |
1345 | virtual bool shouldRemoveExtendFromGSIndex(EVT VT) const { return false; } |
1346 | |
1347 | /// Return how the condition code should be treated: either it is legal, needs |
1348 | /// to be expanded to some other code sequence, or the target has a custom |
1349 | /// expander for it. |
1350 | LegalizeAction |
1351 | getCondCodeAction(ISD::CondCode CC, MVT VT) const { |
1352 | assert((unsigned)CC < array_lengthof(CondCodeActions) && |
1353 |        ((unsigned)VT.SimpleTy >> 3) < array_lengthof(CondCodeActions[0]) && |
1354 |        "Table isn't big enough!"); |
1355 | // See setCondCodeAction for how this is encoded. |
1356 | uint32_t Shift = 4 * (VT.SimpleTy & 0x7); |
1357 | uint32_t Value = CondCodeActions[CC][VT.SimpleTy >> 3]; |
1358 | LegalizeAction Action = (LegalizeAction) ((Value >> Shift) & 0xF); |
1359 | assert(Action != Promote && "Can't promote condition code!"); |
1360 | return Action; |
1361 | } |
1362 | |
1363 | /// Return true if the specified condition code is legal on this target. |
1364 | bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const { |
1365 | return getCondCodeAction(CC, VT) == Legal; |
1366 | } |
1367 | |
1368 | /// Return true if the specified condition code is legal or custom on this |
1369 | /// target. |
1370 | bool isCondCodeLegalOrCustom(ISD::CondCode CC, MVT VT) const { |
1371 | return getCondCodeAction(CC, VT) == Legal || |
1372 | getCondCodeAction(CC, VT) == Custom; |
1373 | } |
1374 | |
1375 | /// If the action for this operation is to promote, this method returns the |
1376 | /// ValueType to promote to. |
1377 | MVT getTypeToPromoteTo(unsigned Op, MVT VT) const { |
1378 | assert(getOperationAction(Op, VT) == Promote && |
1379 |        "This operation isn't promoted!"); |
1380 | |
1381 | // See if this has an explicit type specified. |
1382 | std::map<std::pair<unsigned, MVT::SimpleValueType>, |
1383 | MVT::SimpleValueType>::const_iterator PTTI = |
1384 | PromoteToType.find(std::make_pair(Op, VT.SimpleTy)); |
1385 | if (PTTI != PromoteToType.end()) return PTTI->second; |
1386 | |
1387 | assert((VT.isInteger() || VT.isFloatingPoint()) && |
1388 |        "Cannot autopromote this type, add it with AddPromotedToType."); |
1389 | |
1390 | MVT NVT = VT; |
1391 | do { |
1392 | NVT = (MVT::SimpleValueType)(NVT.SimpleTy+1); |
1393 | assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid && |
1394 |        "Didn't find type to promote to!"); |
1395 | } while (!isTypeLegal(NVT) || |
1396 | getOperationAction(Op, NVT) == Promote); |
1397 | return NVT; |
1398 | } |
1399 | |
1400 | virtual EVT getAsmOperandValueType(const DataLayout &DL, Type *Ty, |
1401 | bool AllowUnknown = false) const { |
1402 | return getValueType(DL, Ty, AllowUnknown); |
1403 | } |
1404 | |
1405 | /// Return the EVT corresponding to this LLVM type. This is fixed by the LLVM |
1406 | /// operations except for the pointer size. If AllowUnknown is true, this |
1407 | /// will return MVT::Other for types with no EVT counterpart (e.g. structs), |
1408 | /// otherwise it will assert. |
1409 | EVT getValueType(const DataLayout &DL, Type *Ty, |
1410 | bool AllowUnknown = false) const { |
1411 | // Lower scalar pointers to native pointer types. |
1412 | if (auto *PTy = dyn_cast<PointerType>(Ty)) |
1413 | return getPointerTy(DL, PTy->getAddressSpace()); |
1414 | |
1415 | if (auto *VTy = dyn_cast<VectorType>(Ty)) { |
1416 | Type *EltTy = VTy->getElementType(); |
1417 | // Lower vectors of pointers to native pointer types. |
1418 | if (auto *PTy = dyn_cast<PointerType>(EltTy)) { |
1419 | EVT PointerTy(getPointerTy(DL, PTy->getAddressSpace())); |
1420 | EltTy = PointerTy.getTypeForEVT(Ty->getContext()); |
1421 | } |
1422 | return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(EltTy, false), |
1423 | VTy->getElementCount()); |
1424 | } |
1425 | |
1426 | return EVT::getEVT(Ty, AllowUnknown); |
1427 | } |
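// ---- Editor's note (usage sketch, not part of the original header) ----
// What the mapping above yields, assuming a DataLayout with 64-bit
// pointers in address space 0 (Ctx is an LLVMContext, i32Ty is
// Type::getInt32Ty(Ctx)):
//   getValueType(DL, i32Ty)                               -> MVT::i32
//   getValueType(DL, Type::getInt8PtrTy(Ctx))             -> MVT::i64
//   getValueType(DL, FixedVectorType::get(i32Ty, 4))      -> MVT::v4i32
//   getValueType(DL, someStructTy, /*AllowUnknown=*/true) -> MVT::Other
// ------------------------------------------------------------------------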
1428 | |
1429 | EVT getMemValueType(const DataLayout &DL, Type *Ty, |
1430 | bool AllowUnknown = false) const { |
1431 | // Lower scalar pointers to native pointer types. |
1432 | if (PointerType *PTy = dyn_cast<PointerType>(Ty)) |
1433 | return getPointerMemTy(DL, PTy->getAddressSpace()); |
1434 | else if (VectorType *VTy = dyn_cast<VectorType>(Ty)) { |
1435 | Type *Elm = VTy->getElementType(); |
1436 | if (PointerType *PT = dyn_cast<PointerType>(Elm)) { |
1437 | EVT PointerTy(getPointerMemTy(DL, PT->getAddressSpace())); |
1438 | Elm = PointerTy.getTypeForEVT(Ty->getContext()); |
1439 | } |
1440 | return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(Elm, false), |
1441 | VTy->getElementCount()); |
1442 | } |
1443 | |
1444 | return getValueType(DL, Ty, AllowUnknown); |
1445 | } |
1446 | |
1447 | |
1448 | /// Return the MVT corresponding to this LLVM type. See getValueType. |
1449 | MVT getSimpleValueType(const DataLayout &DL, Type *Ty, |
1450 | bool AllowUnknown = false) const { |
1451 | return getValueType(DL, Ty, AllowUnknown).getSimpleVT(); |
1452 | } |
1453 | |
1454 | /// Return the desired alignment for ByVal or InAlloca aggregate function |
1455 | /// arguments in the caller parameter area. This is the actual alignment, not |
1456 | /// its logarithm. |
1457 | virtual unsigned getByValTypeAlignment(Type *Ty, const DataLayout &DL) const; |
1458 | |
1459 | /// Return the type of registers that this ValueType will eventually require. |
1460 | MVT getRegisterType(MVT VT) const { |
1461 | assert((unsigned)VT.SimpleTy < array_lengthof(RegisterTypeForVT)); |
1462 | return RegisterTypeForVT[VT.SimpleTy]; |
1463 | } |
1464 | |
1465 | /// Return the type of registers that this ValueType will eventually require. |
1466 | MVT getRegisterType(LLVMContext &Context, EVT VT) const { |
1467 | if (VT.isSimple()) { |
1468 | assert((unsigned)VT.getSimpleVT().SimpleTy < |
1469 |        array_lengthof(RegisterTypeForVT)); |
1470 | return RegisterTypeForVT[VT.getSimpleVT().SimpleTy]; |
1471 | } |
1472 | if (VT.isVector()) { |
1473 | EVT VT1; |
1474 | MVT RegisterVT; |
1475 | unsigned NumIntermediates; |
1476 | (void)getVectorTypeBreakdown(Context, VT, VT1, |
1477 | NumIntermediates, RegisterVT); |
1478 | return RegisterVT; |
1479 | } |
1480 | if (VT.isInteger()) { |
1481 | return getRegisterType(Context, getTypeToTransformTo(Context, VT)); |
1482 | } |
1483 | llvm_unreachable("Unsupported extended type!"); |
1484 | } |
1485 | |
1486 | /// Return the number of registers that this ValueType will eventually |
1487 | /// require. |
1488 | /// |
1489 | /// This is one for any types promoted to live in larger registers, but may be |
1490 | /// more than one for types (like i64) that are split into pieces. For types |
1491 | /// like i140, which are first promoted then expanded, it is the number of |
1492 | /// registers needed to hold all the bits of the original type. For an i140 |
1493 | /// on a 32 bit machine this means 5 registers. |
1494 | /// |
1495 | /// RegisterVT may be passed as a way to override the default settings, for |
1496 | /// instance with i128 inline assembly operands on SystemZ. |
1497 | virtual unsigned |
1498 | getNumRegisters(LLVMContext &Context, EVT VT, |
1499 | Optional<MVT> RegisterVT = None) const { |
1500 | if (VT.isSimple()) { |
1501 | assert((unsigned)VT.getSimpleVT().SimpleTy < |
1502 |        array_lengthof(NumRegistersForVT)); |
1503 | return NumRegistersForVT[VT.getSimpleVT().SimpleTy]; |
1504 | } |
1505 | if (VT.isVector()) { |
1506 | EVT VT1; |
1507 | MVT VT2; |
1508 | unsigned NumIntermediates; |
1509 | return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2); |
1510 | } |
1511 | if (VT.isInteger()) { |
1512 | unsigned BitWidth = VT.getSizeInBits(); |
1513 | unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits(); |
1514 | return (BitWidth + RegWidth - 1) / RegWidth; |
1515 | } |
1516 | llvm_unreachable("Unsupported extended type!"); |
1517 | } |
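// ---- Editor's note (worked example, not part of the original header) ----
// The i140-on-a-32-bit-machine case from the comment above, through the
// integer branch: RegWidth = 32, so
//   NumRegs = (140 + 32 - 1) / 32 = 171 / 32 = 5 registers.
// ------------------------------------------------------------------------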
1518 | |
1519 | /// Certain combinations of ABIs, Targets and features require that types |
1520 | /// are legal for some operations and not for other operations. |
1521 | /// For MIPS all vector types must be passed through the integer register set. |
1522 | virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, |
1523 | CallingConv::ID CC, EVT VT) const { |
1524 | return getRegisterType(Context, VT); |
1525 | } |
1526 | |
1527 | /// Certain targets require unusual breakdowns of certain types. For MIPS, |
1528 | /// this occurs when a vector type is used, as vectors are passed through the |
1529 | /// integer register set. |
1530 | virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, |
1531 | CallingConv::ID CC, |
1532 | EVT VT) const { |
1533 | return getNumRegisters(Context, VT); |
1534 | } |
1535 | |
1536 | /// Certain targets have context sensitive alignment requirements, where one |
1537 | /// type has the alignment requirement of another type. |
1538 | virtual Align getABIAlignmentForCallingConv(Type *ArgTy, |
1539 | const DataLayout &DL) const { |
1540 | return DL.getABITypeAlign(ArgTy); |
1541 | } |
1542 | |
1543 | /// If true, then instruction selection should seek to shrink the FP constant |
1544 | /// of the specified type to a smaller type in order to save space and/or |
1545 | /// reduce runtime. |
1546 | virtual bool ShouldShrinkFPConstant(EVT) const { return true; } |
1547 | |
1548 | /// Return true if it is profitable to reduce a load to a smaller type. |
1549 | /// Example: (i16 (trunc (i32 (load x)))) -> i16 load x |
1550 | virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, |
1551 | EVT NewVT) const { |
1552 | // By default, assume that it is cheaper to extract a subvector from a wide |
1553 | // vector load rather than creating multiple narrow vector loads. |
1554 | if (NewVT.isVector() && !Load->hasOneUse()) |
1555 | return false; |
1556 | |
1557 | return true; |
1558 | } |
1559 | |
1560 | /// When splitting a value of the specified type into parts, does the Lo |
1561 | /// or Hi part come first? This usually follows the endianness, except |
1562 | /// for ppcf128, where the Hi part always comes first. |
1563 | bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const { |
1564 | return DL.isBigEndian() || VT == MVT::ppcf128; |
1565 | } |
1566 | |
1567 | /// If true, the target has custom DAG combine transformations that it can |
1568 | /// perform for the specified node. |
1569 | bool hasTargetDAGCombine(ISD::NodeType NT) const { |
1570 | assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray)); |
1571 | return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7)); |
1572 | } |
1573 | |
1574 | unsigned getGatherAllAliasesMaxDepth() const { |
1575 | return GatherAllAliasesMaxDepth; |
1576 | } |
1577 | |
1578 | /// Returns the size of the platform's va_list object. |
1579 | virtual unsigned getVaListSizeInBits(const DataLayout &DL) const { |
1580 | return getPointerTy(DL).getSizeInBits(); |
1581 | } |
1582 | |
1583 | /// Get maximum # of store operations permitted for llvm.memset |
1584 | /// |
1585 | /// This function returns the maximum number of store operations permitted |
1586 | /// to replace a call to llvm.memset. The value is set by the target at the |
1587 | /// performance threshold for such a replacement. If OptSize is true, |
1588 | /// return the limit for functions that have OptSize attribute. |
1589 | unsigned getMaxStoresPerMemset(bool OptSize) const { |
1590 | return OptSize ? MaxStoresPerMemsetOptSize : MaxStoresPerMemset; |
1591 | } |
1592 | |
1593 | /// Get maximum # of store operations permitted for llvm.memcpy |
1594 | /// |
1595 | /// This function returns the maximum number of store operations permitted |
1596 | /// to replace a call to llvm.memcpy. The value is set by the target at the |
1597 | /// performance threshold for such a replacement. If OptSize is true, |
1598 | /// return the limit for functions that have OptSize attribute. |
1599 | unsigned getMaxStoresPerMemcpy(bool OptSize) const { |
1600 | return OptSize ? MaxStoresPerMemcpyOptSize : MaxStoresPerMemcpy; |
1601 | } |
1602 | |
1603 | /// \brief Get maximum # of store operations to be glued together |
1604 | /// |
1605 | /// This function returns the maximum number of store operations permitted |
1606 | /// to glue together during lowering of llvm.memcpy. The value is set by |
1607 | /// the target at the performance threshold for such a replacement. |
1608 | virtual unsigned getMaxGluedStoresPerMemcpy() const { |
1609 | return MaxGluedStoresPerMemcpy; |
1610 | } |
1611 | |
1612 | /// Get maximum # of load operations permitted for memcmp |
1613 | /// |
1614 | /// This function returns the maximum number of load operations permitted |
1615 | /// to replace a call to memcmp. The value is set by the target at the |
1616 | /// performance threshold for such a replacement. If OptSize is true, |
1617 | /// return the limit for functions that have OptSize attribute. |
1618 | unsigned getMaxExpandSizeMemcmp(bool OptSize) const { |
1619 | return OptSize ? MaxLoadsPerMemcmpOptSize : MaxLoadsPerMemcmp; |
1620 | } |
1621 | |
1622 | /// Get maximum # of store operations permitted for llvm.memmove |
1623 | /// |
1624 | /// This function returns the maximum number of store operations permitted |
1625 | /// to replace a call to llvm.memmove. The value is set by the target at the |
1626 | /// performance threshold for such a replacement. If OptSize is true, |
1627 | /// return the limit for functions that have OptSize attribute. |
1628 | unsigned getMaxStoresPerMemmove(bool OptSize) const { |
1629 | return OptSize ? MaxStoresPerMemmoveOptSize : MaxStoresPerMemmove; |
1630 | } |
1631 | |
1632 | /// Determine if the target supports unaligned memory accesses. |
1633 | /// |
1634 | /// This function returns true if the target allows unaligned memory accesses |
1635 | /// of the specified type in the given address space. If true, it also returns |
1636 | /// whether the unaligned memory access is "fast" in the last argument by |
1637 | /// reference. This is used, for example, in situations where an array |
1638 | /// copy/move/set is converted to a sequence of store operations. Its use |
1639 | /// helps to ensure that such replacements don't generate code that causes an |
1640 | /// alignment error (trap) on the target machine. |
1641 | virtual bool allowsMisalignedMemoryAccesses( |
1642 | EVT, unsigned AddrSpace = 0, Align Alignment = Align(1), |
1643 | MachineMemOperand::Flags Flags = MachineMemOperand::MONone, |
1644 | bool * /*Fast*/ = nullptr) const { |
1645 | return false; |
1646 | } |
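// ---- Editor's note (illustration, not part of the original header) ----
// A hedged sketch of a target override: a hypothetical core on which
// unaligned scalar accesses up to 32 bits are supported and fast, while
// anything wider (or any vector) must stay aligned.
bool MyTargetLowering::allowsMisalignedMemoryAccesses(
    EVT VT, unsigned AddrSpace, Align Alignment,
    MachineMemOperand::Flags Flags, bool *Fast) const {
  if (!VT.isScalarInteger() || VT.getFixedSizeInBits() > 32)
    return false;                 // wide or vector unaligned access traps
  if (Fast)
    *Fast = true;                 // hardware handles these at full speed
  return true;
}
// ------------------------------------------------------------------------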
1647 | |
1648 | /// LLT handling variant. |
1649 | virtual bool allowsMisalignedMemoryAccesses( |
1650 | LLT, unsigned AddrSpace = 0, Align Alignment = Align(1), |
1651 | MachineMemOperand::Flags Flags = MachineMemOperand::MONone, |
1652 | bool * /*Fast*/ = nullptr) const { |
1653 | return false; |
1654 | } |
1655 | |
1656 | /// This function returns true if the memory access is aligned or if the |
1657 | /// target allows this specific unaligned memory access. If the access is |
1658 | /// allowed, the optional final parameter returns if the access is also fast |
1659 | /// (as defined by the target). |
1660 | bool allowsMemoryAccessForAlignment( |
1661 | LLVMContext &Context, const DataLayout &DL, EVT VT, |
1662 | unsigned AddrSpace = 0, Align Alignment = Align(1), |
1663 | MachineMemOperand::Flags Flags = MachineMemOperand::MONone, |
1664 | bool *Fast = nullptr) const; |
1665 | |
1666 | /// Return true if the memory access of this type is aligned or if the target |
1667 | /// allows this specific unaligned access for the given MachineMemOperand. |
1668 | /// If the access is allowed, the optional final parameter returns if the |
1669 | /// access is also fast (as defined by the target). |
1670 | bool allowsMemoryAccessForAlignment(LLVMContext &Context, |
1671 | const DataLayout &DL, EVT VT, |
1672 | const MachineMemOperand &MMO, |
1673 | bool *Fast = nullptr) const; |
1674 | |
1675 | /// Return true if the target supports a memory access of this type for the |
1676 | /// given address space and alignment. If the access is allowed, the optional |
1677 | /// final parameter returns if the access is also fast (as defined by the |
1678 | /// target). |
1679 | virtual bool |
1680 | allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT, |
1681 | unsigned AddrSpace = 0, Align Alignment = Align(1), |
1682 | MachineMemOperand::Flags Flags = MachineMemOperand::MONone, |
1683 | bool *Fast = nullptr) const; |
1684 | |
1685 | /// Return true if the target supports a memory access of this type for the |
1686 | /// given MachineMemOperand. If the access is allowed, the optional |
1687 | /// final parameter returns if the access is also fast (as defined by the |
1688 | /// target). |
1689 | bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT, |
1690 | const MachineMemOperand &MMO, |
1691 | bool *Fast = nullptr) const; |
1692 | |
1693 | /// LLT handling variant. |
1694 | bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, LLT Ty, |
1695 | const MachineMemOperand &MMO, |
1696 | bool *Fast = nullptr) const; |
1697 | |
1698 | /// Returns the target specific optimal type for load and store operations as |
1699 | /// a result of memset, memcpy, and memmove lowering. |
1700 | /// It returns EVT::Other if the type should be determined using generic |
1701 | /// target-independent logic. |
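| /// For example (a sketch; the size threshold is illustrative), a 64-bit |
| /// target might prefer i64 chunks for sufficiently large operations: |
| ///   EVT getOptimalMemOpType(const MemOp &Op, |
| ///                           const AttributeList &) const override { |
| ///     if (Op.size() >= 8) |
| ///       return MVT::i64; |
| ///     return MVT::Other; // Defer to generic logic. |
| ///   } |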
1702 | virtual EVT |
1703 | getOptimalMemOpType(const MemOp &Op, |
1704 | const AttributeList & /*FuncAttributes*/) const { |
1705 | return MVT::Other; |
1706 | } |
1707 | |
1708 | /// LLT returning variant. |
1709 | virtual LLT |
1710 | getOptimalMemOpLLT(const MemOp &Op, |
1711 | const AttributeList & /*FuncAttributes*/) const { |
1712 | return LLT(); |
1713 | } |
1714 | |
1715 | /// Returns true if it's safe to use load / store of the specified type to |
1716 | /// expand memcpy / memset inline. |
1717 | /// |
1718 | /// This is mostly true for all types except for some special cases. For |
1719 | /// example, on X86 targets without SSE2 f64 load / store are done with fldl / |
1720 | /// fstpl which also does type conversion. Note the specified type doesn't |
1721 | /// have to be legal as the hook is used before type legalization. |
1722 | virtual bool isSafeMemOpType(MVT /*VT*/) const { return true; } |
1723 | |
1724 | /// Return the lower limit for the number of entries in a jump table. |
1725 | virtual unsigned getMinimumJumpTableEntries() const; |
1726 | |
1727 | /// Return lower limit of the density in a jump table. |
1728 | unsigned getMinimumJumpTableDensity(bool OptForSize) const; |
1729 | |
1730 | /// Return upper limit for number of entries in a jump table. |
1731 | /// Zero if no limit. |
1732 | unsigned getMaximumJumpTableSize() const; |
1733 | |
1734 | virtual bool isJumpTableRelative() const; |
1735 | |
1736 | /// If a physical register, this specifies the register that |
1737 | /// llvm.savestack/llvm.restorestack should save and restore. |
1738 | Register getStackPointerRegisterToSaveRestore() const { |
1739 | return StackPointerRegisterToSaveRestore; |
1740 | } |
1741 | |
1742 | /// If a physical register, this returns the register that receives the |
1743 | /// exception address on entry to an EH pad. |
1744 | virtual Register |
1745 | getExceptionPointerRegister(const Constant *PersonalityFn) const { |
1746 | return Register(); |
1747 | } |
1748 | |
1749 | /// If a physical register, this returns the register that receives the |
1750 | /// exception typeid on entry to a landing pad. |
1751 | virtual Register |
1752 | getExceptionSelectorRegister(const Constant *PersonalityFn) const { |
1753 | return Register(); |
1754 | } |
1755 | |
1756 | virtual bool needsFixedCatchObjects() const { |
1757 | report_fatal_error("Funclet EH is not implemented for this target"); |
1758 | } |
1759 | |
1760 | /// Return the minimum stack alignment of an argument. |
1761 | Align getMinStackArgumentAlignment() const { |
1762 | return MinStackArgumentAlignment; |
1763 | } |
1764 | |
1765 | /// Return the minimum function alignment. |
1766 | Align getMinFunctionAlignment() const { return MinFunctionAlignment; } |
1767 | |
1768 | /// Return the preferred function alignment. |
1769 | Align getPrefFunctionAlignment() const { return PrefFunctionAlignment; } |
1770 | |
1771 | /// Return the preferred loop alignment. |
1772 | virtual Align getPrefLoopAlignment(MachineLoop *ML = nullptr) const; |
1773 | |
1774 | /// Should loops be aligned even when the function is marked OptSize (but not |
1775 | /// MinSize). |
1776 | virtual bool alignLoopsWithOptSize() const { |
1777 | return false; |
1778 | } |
1779 | |
1780 | /// If the target has a standard location for the stack protector guard, |
1781 | /// returns the address of that location. Otherwise, returns nullptr. |
1782 | /// DEPRECATED: please override useLoadStackGuardNode and customize |
1783 | /// LOAD_STACK_GUARD, or customize \@llvm.stackguard(). |
1784 | virtual Value *getIRStackGuard(IRBuilderBase &IRB) const; |
1785 | |
1786 | /// Inserts necessary declarations for SSP (stack protection) purpose. |
1787 | /// Should be used only when getIRStackGuard returns nullptr. |
1788 | virtual void insertSSPDeclarations(Module &M) const; |
1789 | |
1790 | /// Return the variable that's previously inserted by insertSSPDeclarations, |
1791 | /// if any, otherwise return nullptr. Should be used only when |
1792 | /// getIRStackGuard returns nullptr. |
1793 | virtual Value *getSDagStackGuard(const Module &M) const; |
1794 | |
1795 | /// If this function returns true, stack protection checks should XOR the |
1796 | /// frame pointer (or whichever pointer is used to address locals) into the |
1797 | /// stack guard value before checking it. getIRStackGuard must return nullptr |
1798 | /// if this returns true. |
1799 | virtual bool useStackGuardXorFP() const { return false; } |
1800 | |
1801 | /// If the target has a standard stack protection check function that |
1802 | /// performs validation and error handling, returns the function. Otherwise, |
1803 | /// returns nullptr. Must be previously inserted by insertSSPDeclarations. |
1804 | /// Should be used only when getIRStackGuard returns nullptr. |
1805 | virtual Function *getSSPStackGuardCheck(const Module &M) const; |
1806 | |
1807 | /// \returns true if a constant G_UBFX is legal on the target. |
1808 | virtual bool isConstantUnsignedBitfieldExtactLegal(unsigned Opc, LLT Ty1, |
1809 | LLT Ty2) const { |
1810 | return false; |
1811 | } |
1812 | |
1813 | protected: |
1814 | Value *getDefaultSafeStackPointerLocation(IRBuilderBase &IRB, |
1815 | bool UseTLS) const; |
1816 | |
1817 | public: |
1818 | /// Returns the target-specific address of the unsafe stack pointer. |
1819 | virtual Value *getSafeStackPointerLocation(IRBuilderBase &IRB) const; |
1820 | |
1821 | /// Returns true if the target provides a symbol for emitting stack probes; |
1822 | /// the symbol itself is returned by getStackProbeSymbolName. |
1823 | virtual bool hasStackProbeSymbol(MachineFunction &MF) const { return false; } |
1824 | |
1825 | virtual bool hasInlineStackProbe(MachineFunction &MF) const { return false; } |
1826 | |
1827 | virtual StringRef getStackProbeSymbolName(MachineFunction &MF) const { |
1828 | return ""; |
1829 | } |
1830 | |
1831 | /// Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g. we |
1832 | /// are happy to sink it into basic blocks. A cast may be free but not |
1833 | /// necessarily a no-op, e.g. a free truncate from a 64-bit to a 32-bit pointer. |
1834 | virtual bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const; |
1835 | |
1836 | /// Return true if the pointer arguments to CI should be aligned by aligning |
1837 | /// the object whose address is being passed. If so then MinSize is set to the |
1838 | /// minimum size the object must be to be aligned and PrefAlign is set to the |
1839 | /// preferred alignment. |
1840 | virtual bool shouldAlignPointerArgs(CallInst * /*CI*/, unsigned & /*MinSize*/, |
1841 | unsigned & /*PrefAlign*/) const { |
1842 | return false; |
1843 | } |
1844 | |
1845 | //===--------------------------------------------------------------------===// |
1846 | /// \name Helpers for TargetTransformInfo implementations |
1847 | /// @{ |
1848 | |
1849 | /// Get the ISD node that corresponds to the Instruction class opcode. |
1850 | int InstructionOpcodeToISD(unsigned Opcode) const; |
1851 | |
1852 | /// Estimate the cost of type-legalization and the legalized type. |
1853 | std::pair<InstructionCost, MVT> getTypeLegalizationCost(const DataLayout &DL, |
1854 | Type *Ty) const; |
1855 | |
1856 | /// @} |
1857 | |
1858 | //===--------------------------------------------------------------------===// |
1859 | /// \name Helpers for atomic expansion. |
1860 | /// @{ |
1861 | |
1862 | /// Returns the maximum atomic operation size (in bits) supported by |
1863 | /// the backend. Atomic operations greater than this size (as well |
1864 | /// as ones that are not naturally aligned), will be expanded by |
1865 | /// AtomicExpandPass into an __atomic_* library call. |
1866 | unsigned getMaxAtomicSizeInBitsSupported() const { |
1867 | return MaxAtomicSizeInBitsSupported; |
1868 | } |
1869 | |
1870 | /// Returns the size of the smallest cmpxchg or ll/sc instruction |
1871 | /// the backend supports. Any smaller operations are widened in |
1872 | /// AtomicExpandPass. |
1873 | /// |
1874 | /// Note that *unlike* operations above the maximum size, atomic ops |
1875 | /// are still natively supported below the minimum; they just |
1876 | /// require a more complex expansion. |
1877 | unsigned getMinCmpXchgSizeInBits() const { return MinCmpXchgSizeInBits; } |
1878 | |
1879 | /// Whether the target supports unaligned atomic operations. |
1880 | bool supportsUnalignedAtomics() const { return SupportsUnalignedAtomics; } |
1881 | |
1882 | /// Whether AtomicExpandPass should automatically insert fences and reduce |
1883 | /// ordering for this atomic. This should be true for most architectures with |
1884 | /// weak memory ordering. Defaults to false. |
1885 | virtual bool shouldInsertFencesForAtomic(const Instruction *I) const { |
1886 | return false; |
1887 | } |
1888 | |
1889 | /// Perform a load-linked operation on Addr, returning a "Value *" with the |
1890 | /// corresponding pointee type. This may entail some non-trivial operations to |
1891 | /// truncate or reconstruct types that will be illegal in the backend. See |
1892 | /// ARMISelLowering for an example implementation. |
1893 | virtual Value *emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, |
1894 | Value *Addr, AtomicOrdering Ord) const { |
1895 | llvm_unreachable("Load linked unimplemented on this target"); |
1896 | } |
1897 | |
1898 | /// Perform a store-conditional operation to Addr. Return the status of the |
1899 | /// store. This should be 0 if the store succeeded, non-zero otherwise. |
1900 | virtual Value *emitStoreConditional(IRBuilderBase &Builder, Value *Val, |
1901 | Value *Addr, AtomicOrdering Ord) const { |
1902 | llvm_unreachable("Store conditional unimplemented on this target"); |
1903 | } |
1904 | |
1905 | /// Perform a masked atomicrmw using a target-specific intrinsic. This |
1906 | /// represents the core LL/SC loop which will be lowered at a late stage by |
1907 | /// the backend. |
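| /// (RISC-V, for instance, implements this hook by emitting its |
| /// llvm.riscv.masked.atomicrmw.* intrinsics.) |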
1908 | virtual Value *emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder, |
1909 | AtomicRMWInst *AI, |
1910 | Value *AlignedAddr, Value *Incr, |
1911 | Value *Mask, Value *ShiftAmt, |
1912 | AtomicOrdering Ord) const { |
1913 | llvm_unreachable("Masked atomicrmw expansion unimplemented on this target"); |
1914 | } |
1915 | |
1916 | /// Perform a masked cmpxchg using a target-specific intrinsic. This |
1917 | /// represents the core LL/SC loop which will be lowered at a late stage by |
1918 | /// the backend. |
1919 | virtual Value *emitMaskedAtomicCmpXchgIntrinsic( |
1920 | IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr, |
1921 | Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const { |
1922 | llvm_unreachable("Masked cmpxchg expansion unimplemented on this target"); |
1923 | } |
1924 | |
1925 | /// Inserts in the IR a target-specific intrinsic specifying a fence. |
1926 | /// It is called by AtomicExpandPass before expanding an |
1927 | /// AtomicRMW/AtomicCmpXchg/AtomicStore/AtomicLoad |
1928 | /// if shouldInsertFencesForAtomic returns true. |
1929 | /// |
1930 | /// Inst is the original atomic instruction, prior to other expansions that |
1931 | /// may be performed. |
1932 | /// |
1933 | /// This function should return either nullptr or a pointer to an IR-level |
1934 | /// Instruction. Even complex fence sequences can be represented by a |
1935 | /// single Instruction through an intrinsic to be lowered later. |
1936 | /// Backends should override this method to produce a target-specific |
1937 | /// intrinsic for their fences. |
1938 | /// FIXME: Please note that the default implementation here in terms of |
1939 | /// IR-level fences exists for historical/compatibility reasons and is |
1940 | /// *unsound*! Fences cannot, in general, be used to restore sequential |
1941 | /// consistency. Consider the following example: |
1942 | /// atomic<int> x = y = 0; |
1943 | /// int r1, r2, r3, r4; |
1944 | /// Thread 0: |
1945 | /// x.store(1); |
1946 | /// Thread 1: |
1947 | /// y.store(1); |
1948 | /// Thread 2: |
1949 | /// r1 = x.load(); |
1950 | /// r2 = y.load(); |
1951 | /// Thread 3: |
1952 | /// r3 = y.load(); |
1953 | /// r4 = x.load(); |
1954 | /// r1 = r3 = 1 and r2 = r4 = 0 is impossible as long as the accesses are all |
1955 | /// seq_cst. But if they are lowered to monotonic accesses, no amount of |
1956 | /// IR-level fences can prevent it. |
1957 | /// @{ |
1958 | virtual Instruction *emitLeadingFence(IRBuilderBase &Builder, |
1959 | Instruction *Inst, |
1960 | AtomicOrdering Ord) const; |
1961 | |
1962 | virtual Instruction *emitTrailingFence(IRBuilderBase &Builder, |
1963 | Instruction *Inst, |
1964 | AtomicOrdering Ord) const; |
1965 | /// @} |
1966 | |
1967 | // Emits code that executes when the comparison result in the ll/sc |
1968 | // expansion of a cmpxchg instruction is such that the store-conditional will |
1969 | // not execute. This makes it possible to balance out the load-linked with |
1970 | // a dedicated instruction, if desired. |
1971 | // E.g., on ARM, if ldrex isn't followed by strex, the exclusive monitor would |
1972 | // be unnecessarily held, except if clrex, inserted by this hook, is executed. |
1973 | virtual void emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const {} |
1974 | |
1975 | /// Returns true if the given (atomic) store should be expanded by the |
1976 | /// IR-level AtomicExpand pass into an "atomic xchg" which ignores its input. |
1977 | virtual bool shouldExpandAtomicStoreInIR(StoreInst *SI) const { |
1978 | return false; |
1979 | } |
1980 | |
1981 | /// Returns true if arguments should be sign-extended in lib calls. |
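| /// (RV64, for example, additionally sign-extends i32 arguments regardless |
| /// of \p IsSigned, matching its calling convention.) |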
1982 | virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const { |
1983 | return IsSigned; |
1984 | } |
1985 | |
1986 | /// Returns true if arguments should be extended in lib calls. |
1987 | virtual bool shouldExtendTypeInLibCall(EVT Type) const { |
1988 | return true; |
1989 | } |
1990 | |
1991 | /// Returns how the given (atomic) load should be expanded by the |
1992 | /// IR-level AtomicExpand pass. |
1993 | virtual AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const { |
1994 | return AtomicExpansionKind::None; |
1995 | } |
1996 | |
1997 | /// Returns how the given atomic cmpxchg should be expanded by the IR-level |
1998 | /// AtomicExpand pass. |
1999 | virtual AtomicExpansionKind |
2000 | shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const { |
2001 | return AtomicExpansionKind::None; |
2002 | } |
2003 | |
2004 | /// Returns how the IR-level AtomicExpand pass should expand the given |
2005 | /// AtomicRMW, if at all. Default is to never expand. |
2006 | virtual AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const { |
2007 | return RMW->isFloatingPointOperation() ? |
2008 | AtomicExpansionKind::CmpXChg : AtomicExpansionKind::None; |
2009 | } |
2010 | |
2011 | /// On some platforms, an AtomicRMW that never actually modifies the value |
2012 | /// (such as fetch_add of 0) can be turned into a fence followed by an |
2013 | /// atomic load. This may sound useless, but it makes it possible for the |
2014 | /// processor to keep the cacheline shared, dramatically improving |
2015 | /// performance. And such idempotent RMWs are useful for implementing some |
2016 | /// kinds of locks, see for example (justification + benchmarks): |
2017 | /// http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf |
2018 | /// This method tries doing that transformation, returning the atomic load if |
2019 | /// it succeeds, and nullptr otherwise. |
2020 | /// If shouldExpandAtomicLoadInIR returns true on that load, it will undergo |
2021 | /// another round of expansion. |
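| /// For example (illustrative IR; the exact lowering is target-dependent): |
| ///   %old = atomicrmw add i32* %p, i32 0 seq_cst |
| /// may become: |
| ///   fence seq_cst |
| ///   %old = load atomic i32, i32* %p monotonic, align 4 |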
2022 | virtual LoadInst * |
2023 | lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const { |
2024 | return nullptr; |
2025 | } |
2026 | |
2027 | /// Returns how the platform's atomic operations are extended (ZERO_EXTEND, |
2028 | /// SIGN_EXTEND, or ANY_EXTEND). |
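| /// (RV64, for instance, returns ISD::SIGN_EXTEND because its 32-bit |
| /// operations naturally produce sign-extended results.) |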
2029 | virtual ISD::NodeType getExtendForAtomicOps() const { |
2030 | return ISD::ZERO_EXTEND; |
2031 | } |
2032 | |
2033 | /// Returns how the platform's atomic compare and swap expects its comparison |
2034 | /// value to be extended (ZERO_EXTEND, SIGN_EXTEND, or ANY_EXTEND). This is |
2035 | /// separate from getExtendForAtomicOps, which is concerned with the |
2036 | /// sign-extension of the instruction's output, whereas here we are concerned |
2037 | /// with the sign-extension of the input. For targets with compare-and-swap |
2038 | /// instructions (or sub-word comparisons in their LL/SC loop expansions), |
2039 | /// the input can be ANY_EXTEND, but the output will still have a specific |
2040 | /// extension. |
2041 | virtual ISD::NodeType getExtendForAtomicCmpSwapArg() const { |
2042 | return ISD::ANY_EXTEND; |
2043 | } |
2044 | |
2045 | /// @} |
2046 | |
2047 | /// Returns true if we should normalize |
2048 | /// select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and |
2049 | /// select(N0|N1, X, Y) => select(N0, X, select(N1, X, Y)) if it is likely |
2050 | /// that it saves us from materializing N0 and N1 in an integer register. |
2051 | /// Targets that are able to perform and/or on flags should return false here. |
2052 | virtual bool shouldNormalizeToSelectSequence(LLVMContext &Context, |
2053 | EVT VT) const { |
2054 | // If a target has multiple condition registers, then it likely has logical |
2055 | // operations on those registers. |
2056 | if (hasMultipleConditionRegisters()) |
2057 | return false; |
2058 | // Only do the transform if the value won't be split into multiple |
2059 | // registers. |
2060 | LegalizeTypeAction Action = getTypeAction(Context, VT); |
2061 | return Action != TypeExpandInteger && Action != TypeExpandFloat && |
2062 | Action != TypeSplitVector; |
2063 | } |
2064 | |
2065 | virtual bool isProfitableToCombineMinNumMaxNum(EVT VT) const { return true; } |
2066 | |
2067 | /// Return true if a select of constants (select Cond, C1, C2) should be |
2068 | /// transformed into simple math ops with the condition value. For example: |
2069 | /// select Cond, C1, C1-1 --> add (zext Cond), C1-1 |
2070 | virtual bool convertSelectOfConstantsToMath(EVT VT) const { |
2071 | return false; |
2072 | } |
2073 | |
2074 | /// Return true if it is profitable to transform an integer |
2075 | /// multiplication-by-constant into simpler operations like shifts and adds. |
2076 | /// This may be true if the target does not directly support the |
2077 | /// multiplication operation for the specified type or the sequence of simpler |
2078 | /// ops is faster than the multiply. |
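| /// For example, (mul x, 9) can be rewritten as (add (shl x, 3), x) when a |
| /// shift-and-add sequence is cheaper than the multiply. |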
2079 | virtual bool decomposeMulByConstant(LLVMContext &Context, |
2080 | EVT VT, SDValue C) const { |
2081 | return false; |
2082 | } |
2083 | |
2084 | /// Return true if it may be profitable to transform |
2085 | /// (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2). |
2086 | /// This may not be true if c1 and c2 can be represented as immediates but |
2087 | /// c1*c2 cannot, for example. |
2088 | /// The target should check if c1, c2 and c1*c2 can be represented as |
2089 | /// immediates, or have to be materialized into registers. If the target is |
2090 | /// unsure about some cases, it can return the default of true to let the |
2091 | /// DAGCombiner decide. |
2092 | /// AddNode is (add x, c1), and ConstNode is c2. |
2093 | virtual bool isMulAddWithConstProfitable(const SDValue &AddNode, |
2094 | const SDValue &ConstNode) const { |
2095 | return true; |
2096 | } |
2097 | |
2098 | /// Return true if it is more correct/profitable to use strict FP_TO_INT |
2099 | /// conversion operations - canonicalizing the FP source value instead of |
2100 | /// converting all cases and then selecting based on value. |
2101 | /// This may be true if the target throws exceptions for out of bounds |
2102 | /// conversions or has fast FP CMOV. |
2103 | virtual bool shouldUseStrictFP_TO_INT(EVT FpVT, EVT IntVT, |
2104 | bool IsSigned) const { |
2105 | return false; |
2106 | } |
2107 | |
2108 | //===--------------------------------------------------------------------===// |
2109 | // TargetLowering Configuration Methods - These methods should be invoked by |
2110 | // the derived class constructor to configure this object for the target. |
2111 | // |
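| // For example, a hypothetical target's constructor might configure itself |
| // along these lines (all target names below are placeholders): |
| //   MyTargetLowering::MyTargetLowering(const TargetMachine &TM, |
| //                                      const MySubtarget &STI) |
| //       : TargetLowering(TM) { |
| //     setBooleanContents(ZeroOrOneBooleanContent); |
| //     addRegisterClass(MVT::i32, &MyTarget::GPRRegClass); |
| //     setOperationAction(ISD::SDIV, MVT::i32, Expand); |
| //     setStackPointerRegisterToSaveRestore(MyTarget::SP); |
| //     computeRegisterProperties(STI.getRegisterInfo()); |
| //   } |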
2112 | protected: |
2113 | /// Specify how the target extends the result of integer and floating point |
2114 | /// boolean values from i1 to a wider type. See getBooleanContents. |
2115 | void setBooleanContents(BooleanContent Ty) { |
2116 | BooleanContents = Ty; |
2117 | BooleanFloatContents = Ty; |
2118 | } |
2119 | |
2120 | /// Specify how the target extends the result of integer and floating point |
2121 | /// boolean values from i1 to a wider type. See getBooleanContents. |
2122 | void setBooleanContents(BooleanContent IntTy, BooleanContent FloatTy) { |
2123 | BooleanContents = IntTy; |
2124 | BooleanFloatContents = FloatTy; |
2125 | } |
2126 | |
2127 | /// Specify how the target extends the result of a vector boolean value from a |
2128 | /// vector of i1 to a wider type. See getBooleanContents. |
2129 | void setBooleanVectorContents(BooleanContent Ty) { |
2130 | BooleanVectorContents = Ty; |
2131 | } |
2132 | |
2133 | /// Specify the target scheduling preference. |
2134 | void setSchedulingPreference(Sched::Preference Pref) { |
2135 | SchedPreferenceInfo = Pref; |
2136 | } |
2137 | |
2138 | /// Indicate the minimum number of entries required to generate a jump table. |
2139 | void setMinimumJumpTableEntries(unsigned Val); |
2140 | |
2141 | /// Indicate the maximum number of entries in jump tables. |
2142 | /// Set to zero to generate unlimited jump tables. |
2143 | void setMaximumJumpTableSize(unsigned); |
2144 | |
2145 | /// If set to a physical register, this specifies the register that |
2146 | /// llvm.savestack/llvm.restorestack should save and restore. |
2147 | void setStackPointerRegisterToSaveRestore(Register R) { |
2148 | StackPointerRegisterToSaveRestore = R; |
2149 | } |
2150 | |
2151 | /// Tells the code generator that the target has multiple (allocatable) |
2152 | /// condition registers that can be used to store the results of comparisons |
2153 | /// for use by selects and conditional branches. With multiple condition |
2154 | /// registers, the code generator will not aggressively sink comparisons into |
2155 | /// the blocks of their users. |
2156 | void setHasMultipleConditionRegisters(bool hasManyRegs = true) { |
2157 | HasMultipleConditionRegisters = hasManyRegs; |
2158 | } |
2159 | |
2160 | /// Tells the code generator that the target has BitExtract instructions. |
2161 | /// The code generator will aggressively sink "shift"s into the blocks of |
2162 | /// their users if the users will generate "and" instructions which can be |
2163 | /// combined with "shift" to BitExtract instructions. |
2164 | void setHasExtractBitsInsn(bool hasExtractInsn = true) { |
2165 | HasExtractBitsInsn = hasExtractInsn; |
2166 | } |
2167 | |
2168 | /// Tells the code generator not to expand logic operations on comparison |
2169 | /// predicates into separate sequences that increase the amount of flow |
2170 | /// control. |
2171 | void setJumpIsExpensive(bool isExpensive = true); |
2172 | |
2173 | /// Tells the code generator which bitwidths to bypass. |
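| /// For example, addBypassSlowDiv(64, 32) asks CodeGenPrepare to guard a slow |
| /// 64-bit division with a runtime check and use a 32-bit division whenever |
| /// both operands fit in 32 bits (X86 does this on CPUs with slow 64-bit |
| /// dividers). |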
2174 | void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) { |
2175 | BypassSlowDivWidths[SlowBitWidth] = FastBitWidth; |
2176 | } |
2177 | |
2178 | /// Add the specified register class as an available regclass for the |
2179 | /// specified value type. This indicates the selector can handle values of |
2180 | /// that class natively. |
2181 | void addRegisterClass(MVT VT, const TargetRegisterClass *RC) { |
2182 | assert((unsigned)VT.SimpleTy < array_lengthof(RegClassForVT)); |
2183 | RegClassForVT[VT.SimpleTy] = RC; |
2184 | } |
2185 | |
2186 | /// Return the largest legal super-reg register class of the register class |
2187 | /// for the specified type and its associated "cost". |
2188 | virtual std::pair<const TargetRegisterClass *, uint8_t> |
2189 | findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const; |
2190 | |
2191 | /// Once all of the register classes are added, this allows us to compute |
2192 | /// derived properties we expose. |
2193 | void computeRegisterProperties(const TargetRegisterInfo *TRI); |
2194 | |
2195 | /// Indicate that the specified operation does not work with the specified |
2196 | /// type and indicate what to do about it. Note that VT may refer to either |
2197 | /// the type of a result or that of an operand of Op. |
2198 | void setOperationAction(unsigned Op, MVT VT, |
2199 | LegalizeAction Action) { |
2200 | assert(Op < array_lengthof(OpActions[0]) && "Table isn't big enough!"); |
2201 | OpActions[(unsigned)VT.SimpleTy][Op] = Action; |
2202 | } |
2203 | |
2204 | /// Indicate that the specified load with extension does not work with the |
2205 | /// specified type and indicate what to do about it. |
2206 | void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, |
2207 | LegalizeAction Action) { |
2208 | assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValVT.isValid() && |
2209 | MemVT.isValid() && "Table isn't big enough!"); |
2210 | assert((unsigned)Action < 0x10 && "too many bits for bitfield array"); |
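| // Each extension type owns a 4-bit nibble of the packed uint16_t entry; |
| // clear the old action before OR-ing in the new one. |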
2211 | unsigned Shift = 4 * ExtType; |
2212 | LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] &= ~((uint16_t)0xF << Shift); |
2213 | LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] |= (uint16_t)Action << Shift; |
2214 | } |
2215 | |
2216 | /// Indicate that the specified truncating store does not work with the |
2217 | /// specified type and indicate what to do about it. |
2218 | void setTruncStoreAction(MVT ValVT, MVT MemVT, |
2219 | LegalizeAction Action) { |
2220 | assert(ValVT.isValid() && MemVT.isValid() && "Table isn't big enough!"); |
2221 | TruncStoreActions[(unsigned)ValVT.SimpleTy][MemVT.SimpleTy] = Action; |
2222 | } |
2223 | |
2224 | /// Indicate that the specified indexed load does or does not work with the |
2225 | /// specified type and indicate what to do about it. |
2226 | /// |
2227 | /// NOTE: All indexed mode loads are initialized to Expand in |
2228 | /// TargetLowering.cpp |
2229 | void setIndexedLoadAction(unsigned IdxMode, MVT VT, LegalizeAction Action) { |
2230 | setIndexedModeAction(IdxMode, VT, IMAB_Load, Action); |
2231 | } |
2232 | |
2233 | /// Indicate that the specified indexed store does or does not work with the |
2234 | /// specified type and indicate what to do about it. |
2235 | /// |
2236 | /// NOTE: All indexed mode stores are initialized to Expand in |
2237 | /// TargetLowering.cpp |
2238 | void setIndexedStoreAction(unsigned IdxMode, MVT VT, LegalizeAction Action) { |
2239 | setIndexedModeAction(IdxMode, VT, IMAB_Store, Action); |
2240 | } |
2241 | |
2242 | /// Indicate that the specified indexed masked load does or does not work with |
2243 | /// the specified type and indicate what to do about it. |
2244 | /// |
2245 | /// NOTE: All indexed mode masked loads are initialized to Expand in |
2246 | /// TargetLowering.cpp |
2247 | void setIndexedMaskedLoadAction(unsigned IdxMode, MVT VT, |
2248 | LegalizeAction Action) { |
2249 | setIndexedModeAction(IdxMode, VT, IMAB_MaskedLoad, Action); |
2250 | } |
2251 | |
2252 | /// Indicate that the specified indexed masked store does or does not work |
2253 | /// with the specified type and indicate what to do about it. |
2254 | /// |
2255 | /// NOTE: All indexed mode masked stores are initialized to Expand in |
2256 | /// TargetLowering.cpp |
2257 | void setIndexedMaskedStoreAction(unsigned IdxMode, MVT VT, |
2258 | LegalizeAction Action) { |
2259 | setIndexedModeAction(IdxMode, VT, IMAB_MaskedStore, Action); |
2260 | } |
2261 | |
2262 | /// Indicate that the specified condition code is or isn't supported on the |
2263 | /// target and indicate what to do about it. |
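| /// For example, setCondCodeAction(ISD::SETUGT, MVT::i32, Expand) requests |
| /// that unsigned-greater-than compares on i32 be rewritten in terms of |
| /// condition codes the target does support. |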
2264 | void setCondCodeAction(ISD::CondCode CC, MVT VT, |
2265 | LegalizeAction Action) { |
2266 | assert(VT.isValid() && (unsigned)CC < array_lengthof(CondCodeActions) && |
2267 | "Table isn't big enough!"); |
2268 | assert((unsigned)Action < 0x10 && "too many bits for bitfield array"); |
2269 | /// The lower 3 bits of the SimpleTy select the Nth 4-bit slice within a |
2270 | /// 32-bit value, and the remaining upper bits index into the second |
2271 | /// dimension of the array to select which 32-bit value to use. |
2272 | uint32_t Shift = 4 * (VT.SimpleTy & 0x7); |
2273 | CondCodeActions[CC][VT.SimpleTy >> 3] &= ~((uint32_t)0xF << Shift); |
2274 | CondCodeActions[CC][VT.SimpleTy >> 3] |= (uint32_t)Action << Shift; |
2275 | } |
2276 | |
2277 | /// If Opc/OrigVT is specified as being promoted, the promotion code defaults |
2278 | /// to trying a larger integer/fp until it can find one that works. If that |
2279 | /// default is insufficient, this method can be used by the target to override |
2280 | /// the default. |
2281 | void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) { |
2282 | PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy; |
2283 | } |
2284 | |
2285 | /// Convenience method to set an operation to Promote and specify the type |
2286 | /// in a single call. |
2287 | void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) { |
2288 | setOperationAction(Opc, OrigVT, Promote); |
2289 | AddPromotedToType(Opc, OrigVT, DestVT); |
2290 | } |
2291 | |
2292 | /// Targets should invoke this method for each target independent node that |
2293 | /// they want to provide a custom DAG combiner for by implementing the |
2294 | /// PerformDAGCombine virtual method. |
2295 | void setTargetDAGCombine(ISD::NodeType NT) { |
2296 | assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray)); |
2297 | TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7); |
2298 | } |
2299 | |
2300 | /// Set the target's minimum function alignment. |
2301 | void setMinFunctionAlignment(Align Alignment) { |
2302 | MinFunctionAlignment = Alignment; |
2303 | } |
2304 | |
2305 | /// Set the target's preferred function alignment. This should be set if |
2306 | /// there is a performance benefit to higher-than-minimum alignment. |
2307 | void setPrefFunctionAlignment(Align Alignment) { |
2308 | PrefFunctionAlignment = Alignment; |
2309 | } |
2310 | |
2311 | /// Set the target's preferred loop alignment. The default alignment of one |
2312 | /// means the target does not care about loop alignment. The target may also |
2313 | /// override getPrefLoopAlignment to provide per-loop values. |
2314 | void setPrefLoopAlignment(Align Alignment) { PrefLoopAlignment = Alignment; } |
2315 | |
2316 | /// Set the minimum stack alignment of an argument. |
2317 | void setMinStackArgumentAlignment(Align Alignment) { |
2318 | MinStackArgumentAlignment = Alignment; |
2319 | } |
2320 | |
2321 | /// Set the maximum atomic operation size supported by the |
2322 | /// backend. Atomic operations greater than this size (as well as |
2323 | /// ones that are not naturally aligned), will be expanded by |
2324 | /// AtomicExpandPass into an __atomic_* library call. |
2325 | void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits) { |
2326 | MaxAtomicSizeInBitsSupported = SizeInBits; |
2327 | } |
2328 | |
2329 | /// Sets the minimum cmpxchg or ll/sc size supported by the backend. |
2330 | void setMinCmpXchgSizeInBits(unsigned SizeInBits) { |
2331 | MinCmpXchgSizeInBits = SizeInBits; |
2332 | } |
2333 | |
2334 | /// Sets whether unaligned atomic operations are supported. |
2335 | void setSupportsUnalignedAtomics(bool UnalignedSupported) { |
2336 | SupportsUnalignedAtomics = UnalignedSupported; |
2337 | } |
2338 | |
2339 | public: |
2340 | //===--------------------------------------------------------------------===// |
2341 | // Addressing mode description hooks (used by LSR etc). |
2342 | // |
2343 | |
2344 | /// CodeGenPrepare sinks address calculations into the same BB as Load/Store |
2345 | /// instructions reading the address. This allows as much computation as |
2346 | /// possible to be done in the address mode for that operand. This hook lets |
2347 | /// targets also pass back when this should be done on intrinsics which |
2348 | /// load/store. |
2349 | virtual bool getAddrModeArguments(IntrinsicInst * /*I*/, |
2350 | SmallVectorImpl<Value*> &/*Ops*/, |
2351 | Type *&/*AccessTy*/) const { |
2352 | return false; |
2353 | } |
2354 | |
2355 | /// This represents an addressing mode of: |
2356 | /// BaseGV + BaseOffs + BaseReg + Scale*ScaleReg |
2357 | /// If BaseGV is null, there is no BaseGV. |
2358 | /// If BaseOffs is zero, there is no base offset. |
2359 | /// If HasBaseReg is false, there is no base register. |
2360 | /// If Scale is zero, there is no ScaleReg. Scale of 1 indicates a reg with |
2361 | /// no scale. |
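| /// For example, an address of the form p + 4*i + 16 corresponds to |
| /// BaseGV = nullptr, BaseOffs = 16, HasBaseReg = true (the register |
| /// holding p), and Scale = 4 (with the scaled register holding i). |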
2362 | struct AddrMode { |
2363 | GlobalValue *BaseGV = nullptr; |
2364 | int64_t BaseOffs = 0; |
2365 | bool HasBaseReg = false; |
2366 | int64_t Scale = 0; |
2367 | AddrMode() = default; |
2368 | }; |
2369 | |
2370 | /// Return true if the addressing mode represented by AM is legal for this |
2371 | /// target, for a load/store of the specified type. |
2372 | /// |
2373 | /// The type may be VoidTy, in which case only return true if the addressing |
2374 | /// mode is legal for a load/store of any legal type. TODO: Handle |
2375 | /// pre/postinc as well. |
2376 | /// |
2377 | /// If the address space cannot be determined, it will be -1. |
2378 | /// |
2379 | /// TODO: Remove default argument |
2380 | virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, |
2381 | Type *Ty, unsigned AddrSpace, |
2382 | Instruction *I = nullptr) const; |
2383 | |
2384 | /// Return the cost of the scaling factor used in the addressing mode |
2385 | /// represented by AM for this target, for a load/store of the specified type. |
2386 | /// |
2387 | /// If the AM is supported, the return value must be >= 0. |
2388 | /// If the AM is not supported, it returns a negative value. |
2389 | /// TODO: Handle pre/postinc as well. |
2390 | /// TODO: Remove default argument |
2391 | virtual InstructionCost getScalingFactorCost(const DataLayout &DL, |
2392 | const AddrMode &AM, Type *Ty, |
2393 | unsigned AS = 0) const { |
2394 | // Default: assume that any scaling factor used in a legal AM is free. |
2395 | if (isLegalAddressingMode(DL, AM, Ty, AS)) |
2396 | return 0; |
2397 | return -1; |
2398 | } |
2399 | |
2400 | /// Return true if the specified immediate is legal icmp immediate, that is |
2401 | /// the target has icmp instructions which can compare a register against the |
2402 | /// immediate without having to materialize the immediate into a register. |
2403 | virtual bool isLegalICmpImmediate(int64_t) const { |
2404 | return true; |
2405 | } |
2406 | |
2407 | /// Return true if the specified immediate is legal add immediate, that is the |
2408 | /// target has add instructions which can add a register with the immediate |
2409 | /// without having to materialize the immediate into a register. |
2410 | virtual bool isLegalAddImmediate(int64_t) const { |
2411 | return true; |
2412 | } |
2413 | |
2414 | /// Return true if the specified immediate is legal for the value input of a |
2415 | /// store instruction. |
2416 | virtual bool isLegalStoreImmediate(int64_t Value) const { |
2417 | // Default implementation assumes that at least 0 works since it is likely |
2418 | // that a zero register exists or a zero immediate is allowed. |
2419 | return Value == 0; |
2420 | } |
2421 | |
2422 | /// Return true if it's significantly cheaper to shift a vector by a uniform |
2423 | /// scalar than by an amount which will vary across each lane. On x86 before |
2424 | /// AVX2 for example, there is a "psllw" instruction for the former case, but |
2425 | /// no simple instruction for a general "a << b" operation on vectors. |
2426 | /// This should also apply to lowering for vector funnel shifts (rotates). |
2427 | virtual bool isVectorShiftByScalarCheap(Type *Ty) const { |
2428 | return false; |
2429 | } |
2430 | |
2431 | /// Given a shuffle vector SVI representing a vector splat, return a new |
2432 | /// scalar type of size equal to SVI's scalar type if the new type is more |
2433 | /// profitable. Returns nullptr otherwise. For example under MVE float splats |
2434 | /// are converted to integer to prevent the need to move from SPR to GPR |
2435 | /// registers. |
2436 | virtual Type* shouldConvertSplatType(ShuffleVectorInst* SVI) const { |
2437 | return nullptr; |
2438 | } |
2439 | |
2440 | /// Given a set in interconnected phis of type 'From' that are loaded/stored |
2441 | /// or bitcast to type 'To', return true if the set should be converted to |
2442 | /// 'To'. |
2443 | virtual bool shouldConvertPhiType(Type *From, Type *To) const { |
2444 | return (From->isIntegerTy() || From->isFloatingPointTy()) && |
2445 | (To->isIntegerTy() || To->isFloatingPointTy()); |
2446 | } |
2447 | |
2448 | /// Returns true if the opcode is a commutative binary operation. |
2449 | virtual bool isCommutativeBinOp(unsigned Opcode) const { |
2450 | // FIXME: This should get its info from the td file. |
2451 | switch (Opcode) { |
2452 | case ISD::ADD: |
2453 | case ISD::SMIN: |
2454 | case ISD::SMAX: |
2455 | case ISD::UMIN: |
2456 | case ISD::UMAX: |
2457 | case ISD::MUL: |
2458 | case ISD::MULHU: |
2459 | case ISD::MULHS: |
2460 | case ISD::SMUL_LOHI: |
2461 | case ISD::UMUL_LOHI: |
2462 | case ISD::FADD: |
2463 | case ISD::FMUL: |
2464 | case ISD::AND: |
2465 | case ISD::OR: |
2466 | case ISD::XOR: |
2467 | case ISD::SADDO: |
2468 | case ISD::UADDO: |
2469 | case ISD::ADDC: |
2470 | case ISD::ADDE: |
2471 | case ISD::SADDSAT: |
2472 | case ISD::UADDSAT: |
2473 | case ISD::FMINNUM: |
2474 | case ISD::FMAXNUM: |
2475 | case ISD::FMINNUM_IEEE: |
2476 | case ISD::FMAXNUM_IEEE: |
2477 | case ISD::FMINIMUM: |
2478 | case ISD::FMAXIMUM: |
2479 | return true; |
2480 | default: return false; |
2481 | } |
2482 | } |
2483 | |
2484 | /// Return true if the node is a math/logic binary operator. |
2485 | virtual bool isBinOp(unsigned Opcode) const { |
2486 | // A commutative binop must be a binop. |
2487 | if (isCommutativeBinOp(Opcode)) |
2488 | return true; |
2489 | // These are non-commutative binops. |
2490 | switch (Opcode) { |
2491 | case ISD::SUB: |
2492 | case ISD::SHL: |
2493 | case ISD::SRL: |
2494 | case ISD::SRA: |
2495 | case ISD::SDIV: |
2496 | case ISD::UDIV: |
2497 | case ISD::SREM: |
2498 | case ISD::UREM: |
2499 | case ISD::SSUBSAT: |
2500 | case ISD::USUBSAT: |
2501 | case ISD::FSUB: |
2502 | case ISD::FDIV: |
2503 | case ISD::FREM: |
2504 | return true; |
2505 | default: |
2506 | return false; |
2507 | } |
2508 | } |
2509 | |
2510 | /// Return true if it's free to truncate a value of type FromTy to type |
2511 | /// ToTy. e.g. On x86 it's free to truncate a i32 value in register EAX to i16 |
2512 | /// by referencing its sub-register AX. |
2513 | /// Targets must return false when FromTy <= ToTy. |
2514 | virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const { |
2515 | return false; |
2516 | } |
2517 | |
2518 | /// Return true if a truncation from FromTy to ToTy is permitted when deciding |
2519 | /// whether a call is in tail position. Typically this means that both results |
2520 | /// would be assigned to the same register or stack slot, but it could mean |
2521 | /// the target performs adequate checks of its own before proceeding with the |
2522 | /// tail call. Targets must return false when FromTy <= ToTy. |
2523 | virtual bool allowTruncateForTailCall(Type *FromTy, Type *ToTy) const { |
2524 | return false; |
2525 | } |
2526 | |
2527 | virtual bool isTruncateFree(EVT FromVT, EVT ToVT) const { return false; } |
2528 | virtual bool isTruncateFree(LLT FromTy, LLT ToTy, const DataLayout &DL, |
2529 | LLVMContext &Ctx) const { |
2530 | return isTruncateFree(getApproximateEVTForLLT(FromTy, DL, Ctx), |
2531 | getApproximateEVTForLLT(ToTy, DL, Ctx)); |
2532 | } |
2533 | |
2534 | virtual bool isProfitableToHoist(Instruction *I) const { return true; } |
2535 | |
2536 | /// Return true if the extension represented by \p I is free. |
2537 | /// Unlike the is[Z|FP]ExtFree family, which is based on types, |
2538 | /// this method can use the context provided by \p I to decide |
2539 | /// whether or not \p I is free. |
2540 | /// This method extends the behavior of the is[Z|FP]ExtFree family. |
2541 | /// In other words, if is[Z|FP]ExtFree returns true, then this method |
2542 | /// returns true as well. The converse is not true. |
2543 | /// The target can perform the adequate checks by overriding isExtFreeImpl. |
2544 | /// \pre \p I must be a sign, zero, or fp extension. |
2545 | bool isExtFree(const Instruction *I) const { |
2546 | switch (I->getOpcode()) { |
2547 | case Instruction::FPExt: |
2548 | if (isFPExtFree(EVT::getEVT(I->getType()), |
2549 | EVT::getEVT(I->getOperand(0)->getType()))) |
2550 | return true; |
2551 | break; |
2552 | case Instruction::ZExt: |
2553 | if (isZExtFree(I->getOperand(0)->getType(), I->getType())) |
2554 | return true; |
2555 | break; |
2556 | case Instruction::SExt: |
2557 | break; |
2558 | default: |
2559 | llvm_unreachable("Instruction is not an extension")::llvm::llvm_unreachable_internal("Instruction is not an extension" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/TargetLowering.h" , 2559); |
2560 | } |
2561 | return isExtFreeImpl(I); |
2562 | } |
2563 | |
2564 | /// Return true if \p Load and \p Ext can form an ExtLoad. |
2565 | /// For example, in AArch64 |
2566 | /// %L = load i8, i8* %ptr |
2567 | /// %E = zext i8 %L to i32 |
2568 | /// can be lowered into one load instruction |
2569 | /// ldrb w0, [x0] |
2570 | bool isExtLoad(const LoadInst *Load, const Instruction *Ext, |
2571 | const DataLayout &DL) const { |
2572 | EVT VT = getValueType(DL, Ext->getType()); |
2573 | EVT LoadVT = getValueType(DL, Load->getType()); |
2574 | |
2575 | // If the load has other users and the truncate is not free, the ext |
2576 | // probably isn't free. |
2577 | if (!Load->hasOneUse() && (isTypeLegal(LoadVT) || !isTypeLegal(VT)) && |
2578 | !isTruncateFree(Ext->getType(), Load->getType())) |
2579 | return false; |
2580 | |
2581 | // Check whether the target supports casts folded into loads. |
2582 | unsigned LType; |
2583 | if (isa<ZExtInst>(Ext)) |
2584 | LType = ISD::ZEXTLOAD; |
2585 | else { |
2586 | assert(isa<SExtInst>(Ext) && "Unexpected ext type!"); |
2587 | LType = ISD::SEXTLOAD; |
2588 | } |
2589 | |
2590 | return isLoadExtLegal(LType, VT, LoadVT); |
2591 | } |
2592 | |
2593 | /// Return true if any actual instruction that defines a value of type FromTy |
2594 | /// implicitly zero-extends the value to ToTy in the result register. |
2595 | /// |
2596 | /// The function should return true when it is likely that the truncate can |
2597 | /// be freely folded with an instruction defining a value of FromTy. If |
2598 | /// the defining instruction is unknown (because you're looking at a |
2599 | /// function argument, PHI, etc.) then the target may require an |
2600 | /// explicit truncate, which is not necessarily free, but this function |
2601 | /// does not deal with those cases. |
2602 | /// Targets must return false when FromTy >= ToTy. |
2603 | virtual bool isZExtFree(Type *FromTy, Type *ToTy) const { |
2604 | return false; |
2605 | } |
2606 | |
2607 | virtual bool isZExtFree(EVT FromTy, EVT ToTy) const { return false; } |
2608 | virtual bool isZExtFree(LLT FromTy, LLT ToTy, const DataLayout &DL, |
2609 | LLVMContext &Ctx) const { |
2610 | return isZExtFree(getApproximateEVTForLLT(FromTy, DL, Ctx), |
2611 | getApproximateEVTForLLT(ToTy, DL, Ctx)); |
2612 | } |
2613 | |
2614 | /// Return true if sign-extension from FromTy to ToTy is cheaper than |
2615 | /// zero-extension. |
2616 | virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const { |
2617 | return false; |
2618 | } |
2619 | |
2620 | /// Return true if sinking I's operands to the same basic block as I is |
2621 | /// profitable, e.g. because the operands can be folded into a target |
2622 | /// instruction during instruction selection. After calling the function |
2623 | /// \p Ops contains the Uses to sink ordered by dominance (dominating users |
2624 | /// come first). |
2625 | virtual bool shouldSinkOperands(Instruction *I, |
2626 | SmallVectorImpl<Use *> &Ops) const { |
2627 | return false; |
2628 | } |
2629 | |
2630 | /// Return true if the target supplies and combines to a paired load |
2631 | /// two loaded values of type LoadedType next to each other in memory. |
2632 | /// RequiredAlignment gives the minimal alignment constraints that must be met |
2633 | /// to be able to select this paired load. |
2634 | /// |
2635 | /// This information is *not* used to generate actual paired loads, but it is |
2636 | /// used to generate a sequence of loads that is easier to combine into a |
2637 | /// paired load. |
2638 | /// For instance, something like this: |
2639 | /// a = load i64* addr |
2640 | /// b = trunc i64 a to i32 |
2641 | /// c = lshr i64 a, 32 |
2642 | /// d = trunc i64 c to i32 |
2643 | /// will be optimized into: |
2644 | /// b = load i32* addr1 |
2645 | /// d = load i32* addr2 |
2646 | /// Where addr1 = addr2 +/- sizeof(i32). |
2647 | /// |
2648 | /// In other words, unless the target performs a post-isel load combining, |
2649 | /// this information should not be provided because it will generate more |
2650 | /// loads. |
2651 | virtual bool hasPairedLoad(EVT /*LoadedType*/, |
2652 | Align & /*RequiredAlignment*/) const { |
2653 | return false; |
2654 | } |
2655 | |
2656 | /// Return true if the target has a vector blend instruction. |
2657 | virtual bool hasVectorBlend() const { return false; } |
2658 | |
2659 | /// Get the maximum supported factor for interleaved memory accesses. |
2660 | /// Defaults to the minimum interleave factor: 2. |
2661 | virtual unsigned getMaxSupportedInterleaveFactor() const { return 2; } |
2662 | |
2663 | /// Lower an interleaved load to target specific intrinsics. Return |
2664 | /// true on success. |
2665 | /// |
2666 | /// \p LI is the vector load instruction. |
2667 | /// \p Shuffles is the shufflevector list to DE-interleave the loaded vector. |
2668 | /// \p Indices is the corresponding indices for each shufflevector. |
2669 | /// \p Factor is the interleave factor. |
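| /// For example, a factor-2 de-interleave looks like (illustrative IR): |
| ///   %wide = load <8 x i32>, <8 x i32>* %ptr |
| ///   %even = shufflevector <8 x i32> %wide, <8 x i32> undef, |
| ///                         <4 x i32> <i32 0, i32 2, i32 4, i32 6> |
| ///   %odd = shufflevector <8 x i32> %wide, <8 x i32> undef, |
| ///                        <4 x i32> <i32 1, i32 3, i32 5, i32 7> |
| /// where \p Shuffles is {%even, %odd}, \p Indices is {0, 1}, and \p Factor |
| /// is 2. |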
2670 | virtual bool lowerInterleavedLoad(LoadInst *LI, |
2671 | ArrayRef<ShuffleVectorInst *> Shuffles, |
2672 | ArrayRef<unsigned> Indices, |
2673 | unsigned Factor) const { |
2674 | return false; |
2675 | } |
2676 | |
2677 | /// Lower an interleaved store to target specific intrinsics. Return |
2678 | /// true on success. |
2679 | /// |
2680 | /// \p SI is the vector store instruction. |
2681 | /// \p SVI is the shufflevector to RE-interleave the stored vector. |
2682 | /// \p Factor is the interleave factor. |
2683 | virtual bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI, |
2684 | unsigned Factor) const { |
2685 | return false; |
2686 | } |
2687 | |
2688 | /// Return true if zero-extending the specific node Val to type VT2 is free |
2689 | /// (either because it's implicitly zero-extended such as ARM ldrb / ldrh or |
2690 | /// because it's folded such as X86 zero-extending loads). |
2691 | virtual bool isZExtFree(SDValue Val, EVT VT2) const { |
2692 | return isZExtFree(Val.getValueType(), VT2); |
2693 | } |
2694 | |
2695 | /// Return true if an fpext operation is free (for instance, because |
2696 | /// single-precision floating-point numbers are implicitly extended to |
2697 | /// double-precision). |
2698 | virtual bool isFPExtFree(EVT DestVT, EVT SrcVT) const { |
2699 | assert(SrcVT.isFloatingPoint() && DestVT.isFloatingPoint() && |
2700 | "invalid fpext types"); |
2701 | return false; |
2702 | } |
2703 | |
2704 | /// Return true if an fpext operation input to an \p Opcode operation is free |
2705 | /// (for instance, because half-precision floating-point numbers are |
2706 | /// implicitly extended to single precision) for an FMA instruction. |
2707 | virtual bool isFPExtFoldable(const SelectionDAG &DAG, unsigned Opcode, |
2708 | EVT DestVT, EVT SrcVT) const { |
2709 | assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() && |
2710 | "invalid fpext types"); |
2711 | return isFPExtFree(DestVT, SrcVT); |
2712 | } |
2713 | |
2714 | /// Return true if folding a vector load into ExtVal (a sign, zero, or any |
2715 | /// extend node) is profitable. |
2716 | virtual bool isVectorLoadExtDesirable(SDValue ExtVal) const { return false; } |
2717 | |
2718 | /// Return true if an fneg operation is free to the point where it is never |
2719 | /// worthwhile to replace it with a bitwise operation. |
2720 | virtual bool isFNegFree(EVT VT) const { |
2721 | assert(VT.isFloatingPoint()); |
2722 | return false; |
2723 | } |
2724 | |
2725 | /// Return true if an fabs operation is free to the point where it is never |
2726 | /// worthwhile to replace it with a bitwise operation. |
2727 | virtual bool isFAbsFree(EVT VT) const { |
2728 | assert(VT.isFloatingPoint()); |
2729 | return false; |
2730 | } |
2731 | |
2732 | /// Return true if an FMA operation is faster than a pair of fmul and fadd |
2733 | /// instructions. fmuladd intrinsics will be expanded to FMAs when this method |
2734 | /// returns true, otherwise fmuladd is expanded to fmul + fadd. |
2735 | /// |
2736 | /// NOTE: This may be called before legalization on types for which FMAs are |
2737 | /// not legal, but should return true if those types will eventually legalize |
2738 | /// to types that support FMAs. After legalization, it will only be called on |
2739 | /// types that support FMAs (via Legal or Custom actions) |
2740 | virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, |
2741 | EVT) const { |
2742 | return false; |
2743 | } |
2744 | |
2745 | /// IR version |
2746 | virtual bool isFMAFasterThanFMulAndFAdd(const Function &F, Type *) const { |
2747 | return false; |
2748 | } |
2749 | |
2750 | /// Returns true if \p N can be combined with other nodes to form an |
2751 | /// ISD::FMAD. \p N may be an ISD::FADD, ISD::FSUB, or an ISD::FMUL which |
2752 | /// will be distributed into an fadd/fsub. |
2753 | virtual bool isFMADLegal(const SelectionDAG &DAG, const SDNode *N) const { |
2754 | assert((N->getOpcode() == ISD::FADD || N->getOpcode() == ISD::FSUB || |
2755 | N->getOpcode() == ISD::FMUL) && |
2756 | "unexpected node in FMAD forming combine"); |
2757 | return isOperationLegal(ISD::FMAD, N->getValueType(0)); |
2758 | } |
2759 | |
2760 | // Return true when the decision to generate FMA's (or FMS, FMLA etc) rather |
2761 | // than FMUL and ADD is delegated to the machine combiner. |
2762 | virtual bool generateFMAsInMachineCombiner(EVT VT, |
2763 | CodeGenOpt::Level OptLevel) const { |
2764 | return false; |
2765 | } |
2766 | |
2767 | /// Return true if it's profitable to narrow operations of type VT1 to |
2768 | /// VT2. e.g. on x86, it's profitable to narrow from i32 to i8 but not from |
2769 | /// i32 to i16. |
2770 | virtual bool isNarrowingProfitable(EVT /*VT1*/, EVT /*VT2*/) const { |
2771 | return false; |
2772 | } |
2773 | |
2774 | /// Return true if it is beneficial to convert a load of a constant to |
2775 | /// just the constant itself. |
2776 | /// On some targets it might be more efficient to use a combination of |
2777 | /// arithmetic instructions to materialize the constant instead of loading it |
2778 | /// from a constant pool. |
2779 | virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm, |
2780 | Type *Ty) const { |
2781 | return false; |
2782 | } |
2783 | |
2784 | /// Return true if EXTRACT_SUBVECTOR is cheap for extracting this result type |
2785 | /// from this source type with this index. This is needed because |
2786 | /// EXTRACT_SUBVECTOR usually has custom lowering that depends on the index of |
2787 | /// the first element, and only the target knows which lowering is cheap. |
2788 | virtual bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, |
2789 | unsigned Index) const { |
2790 | return false; |
2791 | } |
2792 | |
2793 | /// Try to convert an extract element of a vector binary operation into an |
2794 | /// extract element followed by a scalar operation. |
2795 | virtual bool shouldScalarizeBinop(SDValue VecOp) const { |
2796 | return false; |
2797 | } |
2798 | |
2799 | /// Return true if extraction of a scalar element from the given vector type |
2800 | /// at the given index is cheap. For example, if scalar operations occur on |
2801 | /// the same register file as vector operations, then an extract element may |
2802 | /// be a sub-register rename rather than an actual instruction. |
2803 | virtual bool isExtractVecEltCheap(EVT VT, unsigned Index) const { |
2804 | return false; |
2805 | } |
2806 | |
2807 | /// Try to convert math with an overflow comparison into the corresponding DAG |
2808 | /// node operation. Targets may want to override this independently of whether |
2809 | /// the operation is legal/custom for the given type because it may obscure |
2810 | /// matching of other patterns. |
2811 | virtual bool shouldFormOverflowOp(unsigned Opcode, EVT VT, |
2812 | bool MathUsed) const { |
2813 | // TODO: The default logic is inherited from code in CodeGenPrepare. |
2814 | // The opcode should not make a difference by default? |
2815 | if (Opcode != ISD::UADDO) |
2816 | return false; |
2817 | |
2818 | // Allow the transform as long as we have an integer type that is not |
2819 | // obviously illegal and unsupported and if the math result is used |
2820 | // besides the overflow check. On some targets (e.g. SPARC), it is |
2821 | // not profitable to form an overflow op if the math result has no |
2822 | // concrete users. |
2823 | if (VT.isVector()) |
2824 | return false; |
2825 | return MathUsed && (VT.isSimple() || !isOperationExpand(Opcode, VT)); |
2826 | } |
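| |
| // Example (illustrative): the kind of source-level pattern that becomes an |
| // ISD::UADDO when this hook returns true; here the math result (Sum) has a |
| // user besides the compare, so MathUsed would be true: |
| //   unsigned AddChecked(unsigned X, unsigned Y, bool &Overflow) { |
| //     unsigned Sum = X + Y; // the math result |
| //     Overflow = Sum < X;   // the overflow check folded into the same node |
| //     return Sum; |
| //   } |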
2827 | |
2828 | // Return true if it is profitable to use a scalar input to a BUILD_VECTOR |
2829 | // even if the vector itself has multiple uses. |
2830 | virtual bool aggressivelyPreferBuildVectorSources(EVT VecVT) const { |
2831 | return false; |
2832 | } |
2833 | |
2834 | // Return true if CodeGenPrepare should consider splitting the large offset |
2835 | // of a GEP so that the GEP fits into the addressing mode and can be sunk |
2836 | // into the same blocks as its users. |
2837 | virtual bool shouldConsiderGEPOffsetSplit() const { return false; } |
2838 | |
2839 | /// Return true if creating a shift of the type by the given |
2840 | /// amount is not profitable. |
2841 | virtual bool shouldAvoidTransformToShift(EVT VT, unsigned Amount) const { |
2842 | return false; |
2843 | } |
2844 | |
2845 | /// Does this target require the clearing of high-order bits in a register |
2846 | /// passed to the fp16 to fp conversion library function? |
2847 | virtual bool shouldKeepZExtForFP16Conv() const { return false; } |
2848 | |
2849 | //===--------------------------------------------------------------------===// |
2850 | // Runtime Library hooks |
2851 | // |
2852 | |
2853 | /// Rename the default libcall routine name for the specified libcall. |
2854 | void setLibcallName(RTLIB::Libcall Call, const char *Name) { |
2855 | LibcallRoutineNames[Call] = Name; |
2856 | } |
2857 | |
2858 | /// Get the libcall routine name for the specified libcall. |
2859 | const char *getLibcallName(RTLIB::Libcall Call) const { |
2860 | return LibcallRoutineNames[Call]; |
2861 | } |
2862 | |
2863 | /// Override the default CondCode to be used to test the result of the |
2864 | /// comparison libcall against zero. |
2865 | void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) { |
2866 | CmpLibcallCCs[Call] = CC; |
2867 | } |
2868 | |
2869 | /// Get the CondCode that's to be used to test the result of the comparison |
2870 | /// libcall against zero. |
2871 | ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const { |
2872 | return CmpLibcallCCs[Call]; |
2873 | } |
2874 | |
2875 | /// Set the CallingConv that should be used for the specified libcall. |
2876 | void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) { |
2877 | LibcallCallingConvs[Call] = CC; |
2878 | } |
2879 | |
2880 | /// Get the CallingConv that should be used for the specified libcall. |
2881 | CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const { |
2882 | return LibcallCallingConvs[Call]; |
2883 | } |
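| |
| // Example (illustrative; the "__my_memcpy" routine name and chosen |
| // enumerators are placeholders): a target's constructor can customize a |
| // libcall with these hooks, e.g.: |
| //   setLibcallName(RTLIB::MEMCPY, "__my_memcpy"); |
| //   setLibcallCallingConv(RTLIB::MEMCPY, CallingConv::Fast); |
| //   setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETEQ); |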
2884 | |
2885 | /// Execute target specific actions to finalize target lowering. |
2886 | /// This is used to set extra flags in MachineFrameInformation and freezing |
2887 | /// the set of reserved registers. |
2888 | /// The default implementation just freezes the set of reserved registers. |
2889 | virtual void finalizeLowering(MachineFunction &MF) const; |
2890 | |
2891 | //===----------------------------------------------------------------------===// |
2892 | // GlobalISel Hooks |
2893 | //===----------------------------------------------------------------------===// |
2894 | /// Check whether or not \p MI needs to be moved close to its uses. |
2895 | virtual bool shouldLocalize(const MachineInstr &MI, const TargetTransformInfo *TTI) const; |
2896 | |
2897 | |
2898 | private: |
2899 | const TargetMachine &TM; |
2900 | |
2901 | /// Tells the code generator that the target has multiple (allocatable) |
2902 | /// condition registers that can be used to store the results of comparisons |
2903 | /// for use by selects and conditional branches. With multiple condition |
2904 | /// registers, the code generator will not aggressively sink comparisons into |
2905 | /// the blocks of their users. |
2906 | bool HasMultipleConditionRegisters; |
2907 | |
2908 | /// Tells the code generator that the target has BitExtract instructions. |
2909 | /// The code generator will aggressively sink "shift"s into the blocks of |
2910 | /// their users if the users will generate "and" instructions which can be |
2911 | /// combined with "shift" to BitExtract instructions. |
2912 | bool HasExtractBitsInsn; |
2913 | |
2914 | /// Tells the code generator to bypass slow divide or remainder |
2915 | /// instructions. For example, BypassSlowDivWidths[32,8] tells the code |
2916 | /// generator to bypass 32-bit integer div/rem with an 8-bit unsigned integer |
2917 | /// div/rem when the operands are positive and less than 256. |
2918 | DenseMap <unsigned int, unsigned int> BypassSlowDivWidths; |
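| |
| // Example (illustrative values): a target with a slow 64-bit divider can |
| // populate this from its constructor via the TargetLoweringBase helper: |
| //   addBypassSlowDiv(64, 32); // i.e. BypassSlowDivWidths[64] = 32 |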
2919 | |
2920 | /// Tells the code generator that it shouldn't generate extra flow control |
2921 | /// instructions and should attempt to combine flow control instructions via |
2922 | /// predication. |
2923 | bool JumpIsExpensive; |
2924 | |
2925 | /// Information about the contents of the high-bits in boolean values held in |
2926 | /// a type wider than i1. See getBooleanContents. |
2927 | BooleanContent BooleanContents; |
2928 | |
2929 | /// Information about the contents of the high-bits in boolean values held in |
2930 | /// a type wider than i1. See getBooleanContents. |
2931 | BooleanContent BooleanFloatContents; |
2932 | |
2933 | /// Information about the contents of the high-bits in boolean vector values |
2934 | /// when the element type is wider than i1. See getBooleanContents. |
2935 | BooleanContent BooleanVectorContents; |
2936 | |
2937 | /// The target scheduling preference: shortest possible total cycles or lowest |
2938 | /// register usage. |
2939 | Sched::Preference SchedPreferenceInfo; |
2940 | |
2941 | /// The minimum alignment that any argument on the stack needs to have. |
2942 | Align MinStackArgumentAlignment; |
2943 | |
2944 | /// The minimum function alignment (used when optimizing for size, and to |
2945 | /// prevent explicitly provided alignment from leading to incorrect code). |
2946 | Align MinFunctionAlignment; |
2947 | |
2948 | /// The preferred function alignment (used when alignment unspecified and |
2949 | /// optimizing for speed). |
2950 | Align PrefFunctionAlignment; |
2951 | |
2952 | /// The preferred loop alignment (in log2, not in bytes). |
2953 | Align PrefLoopAlignment; |
2954 | |
2955 | /// Size in bits of the largest atomic operation the backend supports. |
2956 | /// Accesses larger than this will be expanded by AtomicExpandPass. |
2957 | unsigned MaxAtomicSizeInBitsSupported; |
2958 | |
2959 | /// Size in bits of the minimum cmpxchg or ll/sc operation the |
2960 | /// backend supports. |
2961 | unsigned MinCmpXchgSizeInBits; |
2962 | |
2963 | /// This indicates if the target supports unaligned atomic operations. |
2964 | bool SupportsUnalignedAtomics; |
2965 | |
2966 | /// If set to a physical register, this specifies the register that |
2968 | /// llvm.stacksave/llvm.stackrestore should save and restore. |
2968 | Register StackPointerRegisterToSaveRestore; |
2969 | |
2970 | /// This indicates the default register class to use for each ValueType the |
2971 | /// target supports natively. |
2972 | const TargetRegisterClass *RegClassForVT[MVT::VALUETYPE_SIZE]; |
2973 | uint16_t NumRegistersForVT[MVT::VALUETYPE_SIZE]; |
2974 | MVT RegisterTypeForVT[MVT::VALUETYPE_SIZE]; |
2975 | |
2976 | /// This indicates the "representative" register class to use for each |
2977 | /// ValueType the target supports natively. This information is used by the |
2978 | /// scheduler to track register pressure. By default, the representative |
2979 | /// register class is the largest legal super-reg register class of the |
2980 | /// register class of the specified type. e.g. On x86, i8, i16, and i32's |
2981 | /// representative class would be GR32. |
2982 | const TargetRegisterClass *RepRegClassForVT[MVT::VALUETYPE_SIZE]; |
2983 | |
2984 | /// This indicates the "cost" of the "representative" register class for each |
2985 | /// ValueType. The cost is used by the scheduler to approximate register |
2986 | /// pressure. |
2987 | uint8_t RepRegClassCostForVT[MVT::VALUETYPE_SIZE]; |
2988 | |
2989 | /// For any value types we are promoting or expanding, this contains the value |
2990 | /// type that we are changing to. For Expanded types, this contains one step |
2991 | /// of the expand (e.g. i64 -> i32), even if there are multiple steps required |
2992 | /// (e.g. i64 -> i16). For types natively supported by the system, this holds |
2993 | /// the same type (e.g. i32 -> i32). |
2994 | MVT TransformToType[MVT::VALUETYPE_SIZE]; |
2995 | |
2996 | /// For each operation and each value type, keep a LegalizeAction that |
2997 | /// indicates how instruction selection should deal with the operation. Most |
2998 | /// operations are Legal (aka, supported natively by the target), but |
2999 | /// operations that are not should be described. Note that operations on |
3000 | /// non-legal value types are not described here. |
3001 | LegalizeAction OpActions[MVT::VALUETYPE_SIZE][ISD::BUILTIN_OP_END]; |
3002 | |
3003 | /// For each load extension type and each value type, keep a LegalizeAction |
3004 | /// that indicates how instruction selection should deal with a load of a |
3005 | /// specific value type and extension type. Uses 4 bits to store the action |
3006 | /// for each of the 4 load ext types. |
3007 | uint16_t LoadExtActions[MVT::VALUETYPE_SIZE][MVT::VALUETYPE_SIZE]; |
3008 | |
3009 | /// For each value type pair keep a LegalizeAction that indicates whether a |
3010 | /// truncating store of a specific value type and truncating type is legal. |
3011 | LegalizeAction TruncStoreActions[MVT::VALUETYPE_SIZE][MVT::VALUETYPE_SIZE]; |
3012 | |
3013 | /// For each indexed mode and each value type, keep a quad of LegalizeAction |
3014 | /// that indicates how instruction selection should deal with the load / |
3015 | /// store / maskedload / maskedstore. |
3016 | /// |
3017 | /// The first dimension is the value_type for the reference. The second |
3018 | /// dimension represents the various modes for load / store. |
3019 | uint16_t IndexedModeActions[MVT::VALUETYPE_SIZE][ISD::LAST_INDEXED_MODE]; |
3020 | |
3021 | /// For each condition code (ISD::CondCode) keep a LegalizeAction that |
3022 | /// indicates how instruction selection should deal with the condition code. |
3023 | /// |
3024 | /// Because each CC action takes up 4 bits, we need to have the array size be |
3025 | /// large enough to fit all of the value types. This can be done by rounding |
3026 | /// up the MVT::VALUETYPE_SIZE value to the next multiple of 8. |
3027 | uint32_t CondCodeActions[ISD::SETCC_INVALID][(MVT::VALUETYPE_SIZE + 7) / 8]; |
3028 | |
3029 | ValueTypeActionImpl ValueTypeActions; |
3030 | |
3031 | private: |
3032 | LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const; |
3033 | |
3034 | /// Targets can specify ISD nodes that they would like PerformDAGCombine |
3035 | /// callbacks for by calling setTargetDAGCombine(), which sets a bit in this |
3036 | /// array. |
3037 | unsigned char |
3038 | TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT]; |
3039 | |
3040 | /// For operations that must be promoted to a specific type, this holds the |
3041 | /// destination type. This map should be sparse, so don't hold it as an |
3042 | /// array. |
3043 | /// |
3044 | /// Targets add entries to this map with AddPromotedToType(..), clients access |
3045 | /// this with getTypeToPromoteTo(..). |
3046 | std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType> |
3047 | PromoteToType; |
3048 | |
3049 | /// Stores the name of each libcall. |
3050 | const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL + 1]; |
3051 | |
3052 | /// The ISD::CondCode that should be used to test the result of each of the |
3053 | /// comparison libcall against zero. |
3054 | ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL]; |
3055 | |
3056 | /// Stores the CallingConv that should be used for each libcall. |
3057 | CallingConv::ID LibcallCallingConvs[RTLIB::UNKNOWN_LIBCALL]; |
3058 | |
3059 | /// Set default libcall names and calling conventions. |
3060 | void InitLibcalls(const Triple &TT); |
3061 | |
3062 | /// The bits of IndexedModeActions used to store the legalisation actions. |
3063 | /// We store the data as | ML | MS | L | S | with each field taking 4 bits. |
3064 | enum IndexedModeActionsBits { |
3065 | IMAB_Store = 0, |
3066 | IMAB_Load = 4, |
3067 | IMAB_MaskedStore = 8, |
3068 | IMAB_MaskedLoad = 12 |
3069 | }; |
3070 | |
3071 | void setIndexedModeAction(unsigned IdxMode, MVT VT, unsigned Shift, |
3072 | LegalizeAction Action) { |
3073 | assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE && |
3074 | (unsigned)Action < 0xf && "Table isn't big enough!"); |
3075 | unsigned Ty = (unsigned)VT.SimpleTy; |
3076 | IndexedModeActions[Ty][IdxMode] &= ~(0xf << Shift); |
3077 | IndexedModeActions[Ty][IdxMode] |= ((uint16_t)Action) << Shift; |
3078 | } |
3079 | |
3080 | LegalizeAction getIndexedModeAction(unsigned IdxMode, MVT VT, |
3081 | unsigned Shift) const { |
3082 | assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() && |
3083 | "Table isn't big enough!"); |
3084 | unsigned Ty = (unsigned)VT.SimpleTy; |
3085 | return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] >> Shift) & 0xf); |
3086 | } |
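| |
| // Worked example (illustrative): with the | ML | MS | L | S | layout above, |
| // the pre-incremented *load* action for a type occupies bits [7:4], so both |
| // helpers are called with Shift = IMAB_Load: |
| //   setIndexedModeAction(ISD::PRE_INC, MVT::i32, IMAB_Load, Legal); |
| //   LegalizeAction A = |
| //       getIndexedModeAction(ISD::PRE_INC, MVT::i32, IMAB_Load); |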
3087 | |
3088 | protected: |
3089 | /// Return true if the extension represented by \p I is free. |
3090 | /// \pre \p I is a sign, zero, or fp extension and |
3091 | /// is[Z|FP]ExtFree of the related types is not true. |
3092 | virtual bool isExtFreeImpl(const Instruction *I) const { return false; } |
3093 | |
3094 | /// Depth that GatherAllAliases should continue looking for chain |
3095 | /// dependencies when trying to find a more preferable chain. As an |
3096 | /// approximation, this should be more than the number of consecutive stores |
3097 | /// expected to be merged. |
3098 | unsigned GatherAllAliasesMaxDepth; |
3099 | |
3100 | /// \brief Specify maximum number of store instructions per memset call. |
3101 | /// |
3102 | /// When lowering \@llvm.memset this field specifies the maximum number of |
3103 | /// store operations that may be substituted for the call to memset. Targets |
3104 | /// must set this value based on the cost threshold for that target. Targets |
3105 | /// should assume that the memset will be done using as many of the largest |
3106 | /// store operations first, followed by smaller ones, if necessary, per |
3107 | /// alignment restrictions. For example, storing 9 bytes on a 32-bit machine |
3108 | /// with 16-bit alignment would result in four 2-byte stores and one 1-byte |
3109 | /// store. This only applies to setting a constant array of a constant size. |
3110 | unsigned MaxStoresPerMemset; |
3111 | /// Likewise for functions with the OptSize attribute. |
3112 | unsigned MaxStoresPerMemsetOptSize; |
3113 | |
3114 | /// \brief Specify maximum number of store instructions per memcpy call. |
3115 | /// |
3116 | /// When lowering \@llvm.memcpy this field specifies the maximum number of |
3117 | /// store operations that may be substituted for a call to memcpy. Targets |
3118 | /// must set this value based on the cost threshold for that target. Targets |
3119 | /// should assume that the memcpy will be done using as many of the largest |
3120 | /// store operations first, followed by smaller ones, if necessary, per |
3121 | /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine |
3122 | /// with 32-bit alignment would result in one 4-byte store, one 2-byte store, |
3123 | /// and one 1-byte store. This only applies to copying a constant array of |
3124 | /// constant size. |
3125 | unsigned MaxStoresPerMemcpy; |
3126 | /// Likewise for functions with the OptSize attribute. |
3127 | unsigned MaxStoresPerMemcpyOptSize; |
3128 | /// \brief Specify max number of store instructions to glue in inlined memcpy. |
3129 | /// |
3130 | /// When memcpy is inlined based on MaxStoresPerMemcpy, specify maximum number |
3131 | /// of store instructions to keep together. This helps in pairing and |
3132 | /// vectorization later on. |
3133 | unsigned MaxGluedStoresPerMemcpy = 0; |
3134 | |
3135 | /// \brief Specify maximum number of load instructions per memcmp call. |
3136 | /// |
3137 | /// When lowering \@llvm.memcmp this field specifies the maximum number of |
3138 | /// pairs of load operations that may be substituted for a call to memcmp. |
3139 | /// Targets must set this value based on the cost threshold for that target. |
3140 | /// Targets should assume that the memcmp will be done using as many of the |
3141 | /// largest load operations first, followed by smaller ones, if necessary, per |
3142 | /// alignment restrictions. For example, loading 7 bytes on a 32-bit machine |
3143 | /// with 32-bit alignment would result in one 4-byte load, one 2-byte load, |
3144 | /// and one 1-byte load. This only applies to comparing a constant array of |
3145 | /// constant size. |
3146 | unsigned MaxLoadsPerMemcmp; |
3147 | /// Likewise for functions with the OptSize attribute. |
3148 | unsigned MaxLoadsPerMemcmpOptSize; |
3149 | |
3150 | /// \brief Specify maximum number of store instructions per memmove call. |
3151 | /// |
3152 | /// When lowering \@llvm.memmove this field specifies the maximum number of |
3153 | /// store instructions that may be substituted for a call to memmove. Targets |
3154 | /// must set this value based on the cost threshold for that target. Targets |
3155 | /// should assume that the memmove will be done using as many of the largest |
3156 | /// store operations first, followed by smaller ones, if necessary, per |
3157 | /// alignment restrictions. For example, moving 9 bytes on a 32-bit machine |
3158 | /// with 8-bit alignment would result in nine 1-byte stores. This only |
3159 | /// applies to copying a constant array of constant size. |
3160 | unsigned MaxStoresPerMemmove; |
3161 | /// Likewise for functions with the OptSize attribute. |
3162 | unsigned MaxStoresPerMemmoveOptSize; |
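| |
| // Example (illustrative values): a target constructor tunes these |
| // thresholds to its own store/load costs, e.g.: |
| //   MaxStoresPerMemset = 8;  MaxStoresPerMemsetOptSize = 4; |
| //   MaxStoresPerMemcpy = 8;  MaxStoresPerMemcpyOptSize = 4; |
| //   MaxStoresPerMemmove = 8; MaxStoresPerMemmoveOptSize = 4; |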
3163 | |
3164 | /// Tells the code generator that select is more expensive than a branch if |
3165 | /// the branch is usually predicted right. |
3166 | bool PredictableSelectIsExpensive; |
3167 | |
3168 | /// \see enableExtLdPromotion. |
3169 | bool EnableExtLdPromotion; |
3170 | |
3171 | /// Return true if the value types that can be represented by the specified |
3172 | /// register class are all legal. |
3173 | bool isLegalRC(const TargetRegisterInfo &TRI, |
3174 | const TargetRegisterClass &RC) const; |
3175 | |
3176 | /// Replace/modify any TargetFrameIndex operands with a target-dependent |
3177 | /// sequence of memory operands that is recognized by PrologEpilogInserter. |
3178 | MachineBasicBlock *emitPatchPoint(MachineInstr &MI, |
3179 | MachineBasicBlock *MBB) const; |
3180 | |
3181 | bool IsStrictFPEnabled; |
3182 | }; |
3183 | |
3184 | /// This class defines information used to lower LLVM code to legal SelectionDAG |
3185 | /// operators that the target instruction selector can accept natively. |
3186 | /// |
3187 | /// This class also defines callbacks that targets must implement to lower |
3188 | /// target-specific constructs to SelectionDAG operators. |
3189 | class TargetLowering : public TargetLoweringBase { |
3190 | public: |
3191 | struct DAGCombinerInfo; |
3192 | struct MakeLibCallOptions; |
3193 | |
3194 | TargetLowering(const TargetLowering &) = delete; |
3195 | TargetLowering &operator=(const TargetLowering &) = delete; |
3196 | |
3197 | explicit TargetLowering(const TargetMachine &TM); |
3198 | |
3199 | bool isPositionIndependent() const; |
3200 | |
3201 | virtual bool isSDNodeSourceOfDivergence(const SDNode *N, |
3202 | FunctionLoweringInfo *FLI, |
3203 | LegacyDivergenceAnalysis *DA) const { |
3204 | return false; |
3205 | } |
3206 | |
3207 | virtual bool isSDNodeAlwaysUniform(const SDNode * N) const { |
3208 | return false; |
3209 | } |
3210 | |
3211 | /// Returns true, and sets the base pointer, offset pointer, and addressing |
3212 | /// mode by reference, if the node's address can be legally represented as a |
3213 | /// pre-indexed load / store address. |
3214 | virtual bool getPreIndexedAddressParts(SDNode * /*N*/, SDValue &/*Base*/, |
3215 | SDValue &/*Offset*/, |
3216 | ISD::MemIndexedMode &/*AM*/, |
3217 | SelectionDAG &/*DAG*/) const { |
3218 | return false; |
3219 | } |
3220 | |
3221 | /// Returns true, and sets the base pointer, offset pointer, and addressing |
3222 | /// mode by reference, if this node can be combined with a load / store to |
3223 | /// form a post-indexed load / store. |
3224 | virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/, |
3225 | SDValue &/*Base*/, |
3226 | SDValue &/*Offset*/, |
3227 | ISD::MemIndexedMode &/*AM*/, |
3228 | SelectionDAG &/*DAG*/) const { |
3229 | return false; |
3230 | } |
3231 | |
3232 | /// Returns true if the specified base+offset is a legal indexed addressing |
3233 | /// mode for this target. \p MI is the load or store instruction that is being |
3234 | /// considered for transformation. |
3235 | virtual bool isIndexingLegal(MachineInstr &MI, Register Base, Register Offset, |
3236 | bool IsPre, MachineRegisterInfo &MRI) const { |
3237 | return false; |
3238 | } |
3239 | |
3240 | /// Return the entry encoding for a jump table in the current function. The |
3241 | /// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum. |
3242 | virtual unsigned getJumpTableEncoding() const; |
3243 | |
3244 | virtual const MCExpr * |
3245 | LowerCustomJumpTableEntry(const MachineJumpTableInfo * /*MJTI*/, |
3246 | const MachineBasicBlock * /*MBB*/, unsigned /*uid*/, |
3247 | MCContext &/*Ctx*/) const { |
3248 | llvm_unreachable("Need to implement this hook if target has custom JTIs")::llvm::llvm_unreachable_internal("Need to implement this hook if target has custom JTIs" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/TargetLowering.h" , 3248); |
3249 | } |
3250 | |
3251 | /// Returns relocation base for the given PIC jumptable. |
3252 | virtual SDValue getPICJumpTableRelocBase(SDValue Table, |
3253 | SelectionDAG &DAG) const; |
3254 | |
3255 | /// This returns the relocation base for the given PIC jumptable, the same as |
3256 | /// getPICJumpTableRelocBase, but as an MCExpr. |
3257 | virtual const MCExpr * |
3258 | getPICJumpTableRelocBaseExpr(const MachineFunction *MF, |
3259 | unsigned JTI, MCContext &Ctx) const; |
3260 | |
3261 | /// Return true if folding a constant offset with the given GlobalAddress is |
3262 | /// legal. It is frequently not legal in PIC relocation models. |
3263 | virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const; |
3264 | |
3265 | bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node, |
3266 | SDValue &Chain) const; |
3267 | |
3268 | void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS, |
3269 | SDValue &NewRHS, ISD::CondCode &CCCode, |
3270 | const SDLoc &DL, const SDValue OldLHS, |
3271 | const SDValue OldRHS) const; |
3272 | |
3273 | void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS, |
3274 | SDValue &NewRHS, ISD::CondCode &CCCode, |
3275 | const SDLoc &DL, const SDValue OldLHS, |
3276 | const SDValue OldRHS, SDValue &Chain, |
3277 | bool IsSignaling = false) const; |
3278 | |
3279 | /// Returns a pair of (return value, chain). |
3280 | /// It is an error to pass RTLIB::UNKNOWN_LIBCALL as \p LC. |
3281 | std::pair<SDValue, SDValue> makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, |
3282 | EVT RetVT, ArrayRef<SDValue> Ops, |
3283 | MakeLibCallOptions CallOptions, |
3284 | const SDLoc &dl, |
3285 | SDValue Chain = SDValue()) const; |
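| |
| // Example (illustrative; LHS, RHS, and DL are placeholders): expanding an |
| // f64 addition through the runtime library: |
| //   TargetLowering::MakeLibCallOptions CallOptions; |
| //   SDValue Ops[2] = {LHS, RHS}; |
| //   std::pair<SDValue, SDValue> Res = |
| //       makeLibCall(DAG, RTLIB::ADD_F64, MVT::f64, Ops, CallOptions, DL); |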
3286 | |
3287 | /// Check whether parameters to a call that are passed in callee saved |
3288 | /// registers are the same as from the calling function. This needs to be |
3289 | /// checked for tail call eligibility. |
3290 | bool parametersInCSRMatch(const MachineRegisterInfo &MRI, |
3291 | const uint32_t *CallerPreservedMask, |
3292 | const SmallVectorImpl<CCValAssign> &ArgLocs, |
3293 | const SmallVectorImpl<SDValue> &OutVals) const; |
3294 | |
3295 | //===--------------------------------------------------------------------===// |
3296 | // TargetLowering Optimization Methods |
3297 | // |
3298 | |
3299 | /// A convenience struct that encapsulates a DAG, and two SDValues for |
3300 | /// returning information from TargetLowering to its clients that want to |
3301 | /// combine. |
3302 | struct TargetLoweringOpt { |
3303 | SelectionDAG &DAG; |
3304 | bool LegalTys; |
3305 | bool LegalOps; |
3306 | SDValue Old; |
3307 | SDValue New; |
3308 | |
3309 | explicit TargetLoweringOpt(SelectionDAG &InDAG, |
3310 | bool LT, bool LO) : |
3311 | DAG(InDAG), LegalTys(LT), LegalOps(LO) {} |
3312 | |
3313 | bool LegalTypes() const { return LegalTys; } |
3314 | bool LegalOperations() const { return LegalOps; } |
3315 | |
3316 | bool CombineTo(SDValue O, SDValue N) { |
3317 | Old = O; |
3318 | New = N; |
3319 | return true; |
3320 | } |
3321 | }; |
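| |
| // Example (illustrative; NewC, DL, and VT are placeholders): a hook that |
| // receives a TargetLoweringOpt reports a simplification by recording the |
| // old/new pair, e.g. inside a targetShrinkDemandedConstant override: |
| //   SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), DL, VT, |
| //                                   Op.getOperand(0), NewC); |
| //   return TLO.CombineTo(Op, NewOp); // the caller commits Old -> New |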
3322 | |
3323 | /// Determines the optimal series of memory ops to replace the memset / memcpy. |
3324 | /// Returns true if the number of memory ops is below the threshold (Limit), |
3325 | /// and returns, by reference in MemOps, the types of the sequence of memory |
3326 | /// ops that perform the memset / memcpy. |
3327 | bool findOptimalMemOpLowering(std::vector<EVT> &MemOps, unsigned Limit, |
3328 | const MemOp &Op, unsigned DstAS, unsigned SrcAS, |
3329 | const AttributeList &FuncAttributes) const; |
3330 | |
3331 | /// Check to see if the specified operand of the specified instruction is a |
3332 | /// constant integer. If so, check to see if there are any bits set in the |
3333 | /// constant that are not demanded. If so, shrink the constant and return |
3334 | /// true. |
3335 | bool ShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits, |
3336 | const APInt &DemandedElts, |
3337 | TargetLoweringOpt &TLO) const; |
3338 | |
3339 | /// Helper wrapper around ShrinkDemandedConstant, demanding all elements. |
3340 | bool ShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits, |
3341 | TargetLoweringOpt &TLO) const; |
3342 | |
3343 | // Target hook to do target-specific const optimization, which is called by |
3344 | // ShrinkDemandedConstant. This function should return true if the target |
3345 | // doesn't want ShrinkDemandedConstant to further optimize the constant. |
3346 | virtual bool targetShrinkDemandedConstant(SDValue Op, |
3347 | const APInt &DemandedBits, |
3348 | const APInt &DemandedElts, |
3349 | TargetLoweringOpt &TLO) const { |
3350 | return false; |
3351 | } |
3352 | |
3353 | /// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free. This |
3354 | /// uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be |
3355 | /// generalized for targets with other types of implicit widening casts. |
3356 | bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded, |
3357 | TargetLoweringOpt &TLO) const; |
3358 | |
3359 | /// Look at Op. At this point, we know that only the DemandedBits bits of the |
3360 | /// result of Op are ever used downstream. If we can use this information to |
3361 | /// simplify Op, create a new simplified DAG node and return true, returning |
3362 | /// the original and new nodes in Old and New. Otherwise, analyze the |
3363 | /// expression and return a mask of KnownOne and KnownZero bits for the |
3364 | /// expression (used to simplify the caller). The KnownZero/One bits may only |
3365 | /// be accurate for those bits in the Demanded masks. |
3366 | /// \p AssumeSingleUse When this parameter is true, this function will |
3367 | /// attempt to simplify \p Op even if there are multiple uses. |
3368 | /// Callers are responsible for correctly updating the DAG based on the |
3369 | /// results of this function, because simply replacing TLO.Old |
3370 | /// with TLO.New will be incorrect when this parameter is true and TLO.Old |
3371 | /// has multiple uses. |
3372 | bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits, |
3373 | const APInt &DemandedElts, KnownBits &Known, |
3374 | TargetLoweringOpt &TLO, unsigned Depth = 0, |
3375 | bool AssumeSingleUse = false) const; |
3376 | |
3377 | /// Helper wrapper around SimplifyDemandedBits, demanding all elements. |
3378 | /// Adds Op back to the worklist upon success. |
3379 | bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits, |
3380 | KnownBits &Known, TargetLoweringOpt &TLO, |
3381 | unsigned Depth = 0, |
3382 | bool AssumeSingleUse = false) const; |
3383 | |
3384 | /// Helper wrapper around SimplifyDemandedBits. |
3385 | /// Adds Op back to the worklist upon success. |
3386 | bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits, |
3387 | DAGCombinerInfo &DCI) const; |
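| |
| // Typical use (illustrative) from a target combine, demanding only the low |
| // 16 bits of a 32-bit operand: |
| //   APInt DemandedBits = APInt::getLowBitsSet(32, 16); |
| //   if (SimplifyDemandedBits(N->getOperand(0), DemandedBits, DCI)) |
| //     return SDValue(N, 0); // the DAG was updated in place |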
3388 | |
3389 | /// More limited version of SimplifyDemandedBits that can be used to "look |
3390 | /// through" ops that don't contribute to the DemandedBits/DemandedElts - |
3391 | /// bitwise ops etc. |
3392 | SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits, |
3393 | const APInt &DemandedElts, |
3394 | SelectionDAG &DAG, |
3395 | unsigned Depth) const; |
3396 | |
3397 | /// Helper wrapper around SimplifyMultipleUseDemandedBits, demanding all |
3398 | /// elements. |
3399 | SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits, |
3400 | SelectionDAG &DAG, |
3401 | unsigned Depth = 0) const; |
3402 | |
3403 | /// Helper wrapper around SimplifyMultipleUseDemandedBits, demanding all |
3404 | /// bits from only some vector elements. |
3405 | SDValue SimplifyMultipleUseDemandedVectorElts(SDValue Op, |
3406 | const APInt &DemandedElts, |
3407 | SelectionDAG &DAG, |
3408 | unsigned Depth = 0) const; |
3409 | |
3410 | /// Look at Vector Op. At this point, we know that only the DemandedElts |
3411 | /// elements of the result of Op are ever used downstream. If we can use |
3412 | /// this information to simplify Op, create a new simplified DAG node and |
3413 | /// return true, storing the original and new nodes in TLO. |
3414 | /// Otherwise, analyze the expression and return a mask of KnownUndef and |
3415 | /// KnownZero elements for the expression (used to simplify the caller). |
3416 | /// The KnownUndef/Zero elements may only be accurate for those bits |
3417 | /// in the DemandedMask. |
3418 | /// \p AssumeSingleUse When this parameter is true, this function will |
3419 | /// attempt to simplify \p Op even if there are multiple uses. |
3420 | /// Callers are responsible for correctly updating the DAG based on the |
3421 | /// results of this function, because simply replacing TLO.Old |
3422 | /// with TLO.New will be incorrect when this parameter is true and TLO.Old |
3423 | /// has multiple uses. |
3424 | bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedEltMask, |
3425 | APInt &KnownUndef, APInt &KnownZero, |
3426 | TargetLoweringOpt &TLO, unsigned Depth = 0, |
3427 | bool AssumeSingleUse = false) const; |
3428 | |
3429 | /// Helper wrapper around SimplifyDemandedVectorElts. |
3430 | /// Adds Op back to the worklist upon success. |
3431 | bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedElts, |
3432 | APInt &KnownUndef, APInt &KnownZero, |
3433 | DAGCombinerInfo &DCI) const; |
3434 | |
3435 | /// Determine which of the bits specified in Mask are known to be either zero |
3436 | /// or one and return them in the KnownZero/KnownOne bitsets. The DemandedElts |
3437 | /// argument allows us to only collect the known bits that are shared by the |
3438 | /// requested vector elements. |
3439 | virtual void computeKnownBitsForTargetNode(const SDValue Op, |
3440 | KnownBits &Known, |
3441 | const APInt &DemandedElts, |
3442 | const SelectionDAG &DAG, |
3443 | unsigned Depth = 0) const; |
3444 | |
3445 | /// Determine which of the bits specified in Mask are known to be either zero |
3446 | /// or one and return them in the KnownZero/KnownOne bitsets. The DemandedElts |
3447 | /// argument allows us to only collect the known bits that are shared by the |
3448 | /// requested vector elements. This is for GISel. |
3449 | virtual void computeKnownBitsForTargetInstr(GISelKnownBits &Analysis, |
3450 | Register R, KnownBits &Known, |
3451 | const APInt &DemandedElts, |
3452 | const MachineRegisterInfo &MRI, |
3453 | unsigned Depth = 0) const; |
3454 | |
3455 | /// Determine the known alignment for the pointer value \p R. This can |
3456 | /// typically be inferred from the number of low known 0 bits. However, for a |
3457 | /// pointer with a non-integral address space, the alignment value may be |
3458 | /// independent from the known low bits. |
3459 | virtual Align computeKnownAlignForTargetInstr(GISelKnownBits &Analysis, |
3460 | Register R, |
3461 | const MachineRegisterInfo &MRI, |
3462 | unsigned Depth = 0) const; |
3463 | |
3464 | /// Determine which of the bits of FrameIndex \p FIOp are known to be 0. |
3465 | /// Default implementation computes low bits based on alignment |
3466 | /// information. This should preserve known bits passed into it. |
3467 | virtual void computeKnownBitsForFrameIndex(int FIOp, |
3468 | KnownBits &Known, |
3469 | const MachineFunction &MF) const; |
3470 | |
3471 | /// This method can be implemented by targets that want to expose additional |
3472 | /// information about sign bits to the DAG Combiner. The DemandedElts |
3473 | /// argument allows us to only collect the minimum sign bits that are shared |
3474 | /// by the requested vector elements. |
3475 | virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op, |
3476 | const APInt &DemandedElts, |
3477 | const SelectionDAG &DAG, |
3478 | unsigned Depth = 0) const; |
3479 | |
3480 | /// This method can be implemented by targets that want to expose additional |
3481 | /// information about sign bits to GlobalISel combiners. The DemandedElts |
3482 | /// argument allows us to only collect the minimum sign bits that are shared |
3483 | /// by the requested vector elements. |
3484 | virtual unsigned computeNumSignBitsForTargetInstr(GISelKnownBits &Analysis, |
3485 | Register R, |
3486 | const APInt &DemandedElts, |
3487 | const MachineRegisterInfo &MRI, |
3488 | unsigned Depth = 0) const; |
3489 | |
3490 | /// Attempt to simplify any target nodes based on the demanded vector |
3491 | /// elements, returning true on success. Otherwise, analyze the expression and |
3492 | /// return a mask of KnownUndef and KnownZero elements for the expression |
3493 | /// (used to simplify the caller). The KnownUndef/Zero elements may only be |
3494 | /// accurate for those bits in the DemandedMask. |
3495 | virtual bool SimplifyDemandedVectorEltsForTargetNode( |
3496 | SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, |
3497 | APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth = 0) const; |
3498 | |
3499 | /// Attempt to simplify any target nodes based on the demanded bits/elts, |
3500 | /// returning true on success. Otherwise, analyze the |
3501 | /// expression and return a mask of KnownOne and KnownZero bits for the |
3502 | /// expression (used to simplify the caller). The KnownZero/One bits may only |
3503 | /// be accurate for those bits in the Demanded masks. |
3504 | virtual bool SimplifyDemandedBitsForTargetNode(SDValue Op, |
3505 | const APInt &DemandedBits, |
3506 | const APInt &DemandedElts, |
3507 | KnownBits &Known, |
3508 | TargetLoweringOpt &TLO, |
3509 | unsigned Depth = 0) const; |
3510 | |
3511 | /// More limited version of SimplifyDemandedBits that can be used to "look |
3512 | /// through" ops that don't contribute to the DemandedBits/DemandedElts - |
3513 | /// bitwise ops etc. |
3514 | virtual SDValue SimplifyMultipleUseDemandedBitsForTargetNode( |
3515 | SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, |
3516 | SelectionDAG &DAG, unsigned Depth) const; |
3517 | |
3518 | /// Return true if this function can prove that \p Op is never poison |
3519 | /// and, if \p PoisonOnly is false, does not have undef bits. The DemandedElts |
3520 | /// argument limits the check to the requested vector elements. |
3521 | virtual bool isGuaranteedNotToBeUndefOrPoisonForTargetNode( |
3522 | SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, |
3523 | bool PoisonOnly, unsigned Depth) const; |
3524 | |
3525 | /// Tries to build a legal vector shuffle using the provided parameters |
3526 | /// or equivalent variations. The Mask argument may be modified as the |
3527 | /// function tries different variations. |
3528 | /// Returns an empty SDValue if the operation fails. |
3529 | SDValue buildLegalVectorShuffle(EVT VT, const SDLoc &DL, SDValue N0, |
3530 | SDValue N1, MutableArrayRef<int> Mask, |
3531 | SelectionDAG &DAG) const; |
3532 | |
3533 | /// This method returns the constant pool value that will be loaded by LD. |
3534 | /// NOTE: You must check for implicit extensions of the constant by LD. |
3535 | virtual const Constant *getTargetConstantFromLoad(LoadSDNode *LD) const; |
3536 | |
3537 | /// If \p SNaN is false, \returns true if \p Op is known to never be any |
3538 | /// NaN. If \p SNaN is true, returns true if \p Op is known to never be a signaling |
3539 | /// NaN. |
3540 | virtual bool isKnownNeverNaNForTargetNode(SDValue Op, |
3541 | const SelectionDAG &DAG, |
3542 | bool SNaN = false, |
3543 | unsigned Depth = 0) const; |
3544 | struct DAGCombinerInfo { |
3545 | void *DC; // The DAG Combiner object. |
3546 | CombineLevel Level; |
3547 | bool CalledByLegalizer; |
3548 | |
3549 | public: |
3550 | SelectionDAG &DAG; |
3551 | |
3552 | DAGCombinerInfo(SelectionDAG &dag, CombineLevel level, bool cl, void *dc) |
3553 | : DC(dc), Level(level), CalledByLegalizer(cl), DAG(dag) {} |
3554 | |
3555 | bool isBeforeLegalize() const { return Level == BeforeLegalizeTypes; } |
3556 | bool isBeforeLegalizeOps() const { return Level < AfterLegalizeVectorOps; } |
3557 | bool isAfterLegalizeDAG() const { return Level >= AfterLegalizeDAG; } |
3558 | CombineLevel getDAGCombineLevel() { return Level; } |
3559 | bool isCalledByLegalizer() const { return CalledByLegalizer; } |
3560 | |
3561 | void AddToWorklist(SDNode *N); |
3562 | SDValue CombineTo(SDNode *N, ArrayRef<SDValue> To, bool AddTo = true); |
3563 | SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true); |
3564 | SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true); |
3565 | |
3566 | bool recursivelyDeleteUnusedNodes(SDNode *N); |
3567 | |
3568 | void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO); |
3569 | }; |
3570 | |
3571 | /// Return true if \p N is a constant or constant vector equal to the true |
3572 | /// value from getBooleanContents(). |
3573 | bool isConstTrueVal(const SDNode *N) const; |
3574 | |
3575 | /// Return true if \p N is a constant or constant vector equal to the false |
3576 | /// value from getBooleanContents(). |
3577 | bool isConstFalseVal(const SDNode *N) const; |
3578 | |
3579 | /// Return true if \p N is a True value when extended to \p VT. |
3580 | bool isExtendedTrueVal(const ConstantSDNode *N, EVT VT, bool SExt) const; |
3581 | |
3582 | /// Try to simplify a setcc built with the specified operands and cc. If it is |
3583 | /// unable to simplify it, return a null SDValue. |
3584 | SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond, |
3585 | bool foldBooleans, DAGCombinerInfo &DCI, |
3586 | const SDLoc &dl) const; |
3587 | |
3588 | // For targets which wrap addresses, unwrap for analysis. |
3589 | virtual SDValue unwrapAddress(SDValue N) const { return N; } |
3590 | |
3591 | /// Returns true (and the GlobalValue and the offset) if the node is a |
3592 | /// GlobalAddress + offset. |
3593 | virtual bool |
3594 | isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const; |
3595 | |
3596 | /// This method will be invoked for all target nodes and for any |
3597 | /// target-independent nodes that the target has registered to be invoked |
3598 | /// for. |
3599 | /// |
3600 | /// The semantics are as follows: |
3601 | /// Return Value: |
3602 | /// SDValue.Val == 0 - No change was made |
3603 | /// SDValue.Val == N - N was replaced, is dead, and is already handled. |
3604 | /// otherwise - N should be replaced by the returned Operand. |
3605 | /// |
3606 | /// In addition, methods provided by DAGCombinerInfo may be used to perform |
3607 | /// more complex transformations. |
3608 | /// |
3609 | virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const; |
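| |
| // Skeleton (illustrative; combineMyAdd and the target class name are |
| // hypothetical) of an override honoring the return-value contract above: |
| //   SDValue MyTargetLowering::PerformDAGCombine(SDNode *N, |
| //                                               DAGCombinerInfo &DCI) const { |
| //     switch (N->getOpcode()) { |
| //     case ISD::ADD: |
| //       if (SDValue V = combineMyAdd(N, DCI.DAG)) |
| //         return V;       // N should be replaced by V |
| //       break; |
| //     } |
| //     return SDValue();   // no change was made |
| //   } |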
3610 | |
3611 | /// Return true if it is profitable to move this shift by a constant amount |
3612 | /// through its operand, adjusting any immediate operands as necessary to |
3613 | /// preserve semantics. This transformation may not be desirable if it |
3614 | /// disrupts a particularly auspicious target-specific tree (e.g. bitfield |
3615 | /// extraction in AArch64). By default, it returns true. |
3616 | /// |
3617 | /// @param N the shift node |
3618 | /// @param Level the current DAGCombine legalization level. |
3619 | virtual bool isDesirableToCommuteWithShift(const SDNode *N, |
3620 | CombineLevel Level) const { |
3621 | return true; |
3622 | } |
3623 | |
3624 | /// Return true if the target has native support for the specified value type |
3625 | /// and it is 'desirable' to use the type for the given node type. e.g. On x86 |
3626 | /// i16 is legal, but undesirable since i16 instruction encodings are longer |
3627 | /// and some i16 instructions are slow. |
3628 | virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const { |
3629 | // By default, assume all legal types are desirable. |
3630 | return isTypeLegal(VT); |
3631 | } |
3632 | |
3633 | /// Return true if it is profitable for dag combiner to transform a floating |
3634 | /// point op of specified opcode to an equivalent op of an integer |
3635 | /// type. e.g. f32 load -> i32 load can be profitable on ARM. |
3636 | virtual bool isDesirableToTransformToIntegerOp(unsigned /*Opc*/, |
3637 | EVT /*VT*/) const { |
3638 | return false; |
3639 | } |
3640 | |
3641 | /// This method queries the target whether it is beneficial for dag combiner to |
3642 | /// promote the specified node. If true, it should return the desired |
3643 | /// promotion type by reference. |
3644 | virtual bool IsDesirableToPromoteOp(SDValue /*Op*/, EVT &/*PVT*/) const { |
3645 | return false; |
3646 | } |
3647 | |
3648 | /// Return true if the target supports swifterror attribute. It optimizes |
3649 | /// loads and stores to reading and writing a specific register. |
3650 | virtual bool supportSwiftError() const { |
3651 | return false; |
3652 | } |
3653 | |
3654 | /// Return true if the target supports that a subset of CSRs for the given |
3655 | /// machine function is handled explicitly via copies. |
3656 | virtual bool supportSplitCSR(MachineFunction *MF) const { |
3657 | return false; |
3658 | } |
3659 | |
3660 | /// Perform necessary initialization to handle a subset of CSRs explicitly |
3661 | /// via copies. This function is called at the beginning of instruction |
3662 | /// selection. |
3663 | virtual void initializeSplitCSR(MachineBasicBlock *Entry) const { |
3664 | llvm_unreachable("Not Implemented")::llvm::llvm_unreachable_internal("Not Implemented", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/TargetLowering.h" , 3664); |
3665 | } |
3666 | |
3667 | /// Insert explicit copies in entry and exit blocks. We copy a subset of |
3668 | /// CSRs to virtual registers in the entry block, and copy them back to |
3669 | /// physical registers in the exit blocks. This function is called at the end |
3670 | /// of instruction selection. |
3671 | virtual void insertCopiesSplitCSR( |
3672 | MachineBasicBlock *Entry, |
3673 | const SmallVectorImpl<MachineBasicBlock *> &Exits) const { |
3674 | llvm_unreachable("Not Implemented")::llvm::llvm_unreachable_internal("Not Implemented", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/TargetLowering.h" , 3674); |
3675 | } |
3676 | |
3677 | /// Return the newly negated expression if the cost is not expensive, and |
3678 | /// set \p Cost to indicate whether it is cheaper or neutral to do the |
3679 | /// negation. |
3680 | virtual SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, |
3681 | bool LegalOps, bool OptForSize, |
3682 | NegatibleCost &Cost, |
3683 | unsigned Depth = 0) const; |
3684 | |
3685 | /// This is the helper function to return the newly negated expression only |
3686 | /// when the cost is cheaper. |
3687 | SDValue getCheaperNegatedExpression(SDValue Op, SelectionDAG &DAG, |
3688 | bool LegalOps, bool OptForSize, |
3689 | unsigned Depth = 0) const { |
3690 | NegatibleCost Cost = NegatibleCost::Expensive; |
3691 | SDValue Neg = |
3692 | getNegatedExpression(Op, DAG, LegalOps, OptForSize, Cost, Depth); |
3693 | if (Neg && Cost == NegatibleCost::Cheaper) |
3694 | return Neg; |
3695 | // Remove the newly created node to avoid the side effect on the DAG. |
3696 | if (Neg && Neg.getNode()->use_empty()) |
3697 | DAG.RemoveDeadNode(Neg.getNode()); |
3698 | return SDValue(); |
3699 | } |
3700 | |
3701 | /// This is the helper function to return the newly negated expression if |
3702 | /// the cost is not expensive. |
3703 | SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps, |
3704 | bool OptForSize, unsigned Depth = 0) const { |
3705 | NegatibleCost Cost = NegatibleCost::Expensive; |
3706 | return getNegatedExpression(Op, DAG, LegalOps, OptForSize, Cost, Depth); |
3707 | } |
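| |
| // Example (illustrative; X, Y, DL, and VT are placeholders): rewriting |
| // fadd(Y, X) as fsub(Y, -X) only when the negated form of X is strictly |
| // cheaper: |
| //   if (SDValue NegX = |
| //           getCheaperNegatedExpression(X, DAG, LegalOps, OptForSize)) |
| //     return DAG.getNode(ISD::FSUB, DL, VT, Y, NegX); |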
3708 | |
3709 | //===--------------------------------------------------------------------===// |
3710 | // Lowering methods - These methods must be implemented by targets so that |
3711 | // the SelectionDAGBuilder code knows how to lower these. |
3712 | // |
3713 | |
3714 | /// Target-specific splitting of values into parts that fit a register |
3715 | /// storing a legal type. |
3716 | virtual bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, |
3717 | SDValue Val, SDValue *Parts, |
3718 | unsigned NumParts, MVT PartVT, |
3719 | Optional<CallingConv::ID> CC) const { |
3720 | return false; |
3721 | } |
3722 | |
3723 | /// Target-specific combining of register parts into its original value. |
3724 | virtual SDValue |
3725 | joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL, |
3726 | const SDValue *Parts, unsigned NumParts, |
3727 | MVT PartVT, EVT ValueVT, |
3728 | Optional<CallingConv::ID> CC) const { |
3729 | return SDValue(); |
3730 | } |
3731 | |
3732 | /// This hook must be implemented to lower the incoming (formal) arguments, |
3733 | /// described by the Ins array, into the specified DAG. The implementation |
3734 | /// should fill in the InVals array with legal-type argument values, and |
3735 | /// return the resulting token chain value. |
3736 | virtual SDValue LowerFormalArguments( |
3737 | SDValue /*Chain*/, CallingConv::ID /*CallConv*/, bool /*isVarArg*/, |
3738 | const SmallVectorImpl<ISD::InputArg> & /*Ins*/, const SDLoc & /*dl*/, |
3739 | SelectionDAG & /*DAG*/, SmallVectorImpl<SDValue> & /*InVals*/) const { |
3740 | llvm_unreachable("Not Implemented")::llvm::llvm_unreachable_internal("Not Implemented", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/TargetLowering.h" , 3740); |
3741 | } |
3742 | |
3743 | /// This structure contains all information that is necessary for lowering |
3744 | /// calls. It is passed to TLI::LowerCallTo when the SelectionDAG builder |
3745 | /// needs to lower a call, and targets will see this struct in their LowerCall |
3746 | /// implementation. |
3747 | struct CallLoweringInfo { |
3748 | SDValue Chain; |
3749 | Type *RetTy = nullptr; |
3750 | bool RetSExt : 1; |
3751 | bool RetZExt : 1; |
3752 | bool IsVarArg : 1; |
3753 | bool IsInReg : 1; |
3754 | bool DoesNotReturn : 1; |
3755 | bool IsReturnValueUsed : 1; |
3756 | bool IsConvergent : 1; |
3757 | bool IsPatchPoint : 1; |
3758 | bool IsPreallocated : 1; |
3759 | bool NoMerge : 1; |
3760 | |
3761 | // IsTailCall should be modified by implementations of |
3762 | // TargetLowering::LowerCall that perform tail call conversions. |
3763 | bool IsTailCall = false; |
3764 | |
3765 | // Is Call lowering done post SelectionDAG type legalization. |
3766 | bool IsPostTypeLegalization = false; |
3767 | |
3768 | unsigned NumFixedArgs = -1; |
3769 | CallingConv::ID CallConv = CallingConv::C; |
3770 | SDValue Callee; |
3771 | ArgListTy Args; |
3772 | SelectionDAG &DAG; |
3773 | SDLoc DL; |
3774 | const CallBase *CB = nullptr; |
3775 | SmallVector<ISD::OutputArg, 32> Outs; |
3776 | SmallVector<SDValue, 32> OutVals; |
3777 | SmallVector<ISD::InputArg, 32> Ins; |
3778 | SmallVector<SDValue, 4> InVals; |
3779 | |
3780 | CallLoweringInfo(SelectionDAG &DAG) |
3781 | : RetSExt(false), RetZExt(false), IsVarArg(false), IsInReg(false), |
3782 | DoesNotReturn(false), IsReturnValueUsed(true), IsConvergent(false), |
3783 | IsPatchPoint(false), IsPreallocated(false), NoMerge(false), |
3784 | DAG(DAG) {} |
3785 | |
3786 | CallLoweringInfo &setDebugLoc(const SDLoc &dl) { |
3787 | DL = dl; |
3788 | return *this; |
3789 | } |
3790 | |
3791 | CallLoweringInfo &setChain(SDValue InChain) { |
3792 | Chain = InChain; |
3793 | return *this; |
3794 | } |
3795 | |
3796 | // setCallee with target/module-specific attributes |
3797 | CallLoweringInfo &setLibCallee(CallingConv::ID CC, Type *ResultType, |
3798 | SDValue Target, ArgListTy &&ArgsList) { |
3799 | RetTy = ResultType; |
3800 | Callee = Target; |
3801 | CallConv = CC; |
3802 | NumFixedArgs = ArgsList.size(); |
3803 | Args = std::move(ArgsList); |
3804 | |
3805 | DAG.getTargetLoweringInfo().markLibCallAttributes( |
3806 | &(DAG.getMachineFunction()), CC, Args); |
3807 | return *this; |
3808 | } |
3809 | |
3810 | CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultType, |
3811 | SDValue Target, ArgListTy &&ArgsList) { |
3812 | RetTy = ResultType; |
3813 | Callee = Target; |
3814 | CallConv = CC; |
3815 | NumFixedArgs = ArgsList.size(); |
3816 | Args = std::move(ArgsList); |
3817 | return *this; |
3818 | } |
3819 | |
3820 | CallLoweringInfo &setCallee(Type *ResultType, FunctionType *FTy, |
3821 | SDValue Target, ArgListTy &&ArgsList, |
3822 | const CallBase &Call) { |
3823 | RetTy = ResultType; |
3824 | |
3825 | IsInReg = Call.hasRetAttr(Attribute::InReg); |
3826 | DoesNotReturn = |
3827 | Call.doesNotReturn() || |
3828 | (!isa<InvokeInst>(Call) && isa<UnreachableInst>(Call.getNextNode())); |
3829 | IsVarArg = FTy->isVarArg(); |
3830 | IsReturnValueUsed = !Call.use_empty(); |
3831 | RetSExt = Call.hasRetAttr(Attribute::SExt); |
3832 | RetZExt = Call.hasRetAttr(Attribute::ZExt); |
3833 | NoMerge = Call.hasFnAttr(Attribute::NoMerge); |
3834 | |
3835 | Callee = Target; |
3836 | |
3837 | CallConv = Call.getCallingConv(); |
3838 | NumFixedArgs = FTy->getNumParams(); |
3839 | Args = std::move(ArgsList); |
3840 | |
3841 | CB = &Call; |
3842 | |
3843 | return *this; |
3844 | } |
3845 | |
3846 | CallLoweringInfo &setInRegister(bool Value = true) { |
3847 | IsInReg = Value; |
3848 | return *this; |
3849 | } |
3850 | |
3851 | CallLoweringInfo &setNoReturn(bool Value = true) { |
3852 | DoesNotReturn = Value; |
3853 | return *this; |
3854 | } |
3855 | |
3856 | CallLoweringInfo &setVarArg(bool Value = true) { |
3857 | IsVarArg = Value; |
3858 | return *this; |
3859 | } |
3860 | |
3861 | CallLoweringInfo &setTailCall(bool Value = true) { |
3862 | IsTailCall = Value; |
3863 | return *this; |
3864 | } |
3865 | |
3866 | CallLoweringInfo &setDiscardResult(bool Value = true) { |
3867 | IsReturnValueUsed = !Value; |
3868 | return *this; |
3869 | } |
3870 | |
3871 | CallLoweringInfo &setConvergent(bool Value = true) { |
3872 | IsConvergent = Value; |
3873 | return *this; |
3874 | } |
3875 | |
3876 | CallLoweringInfo &setSExtResult(bool Value = true) { |
3877 | RetSExt = Value; |
3878 | return *this; |
3879 | } |
3880 | |
3881 | CallLoweringInfo &setZExtResult(bool Value = true) { |
3882 | RetZExt = Value; |
3883 | return *this; |
3884 | } |
3885 | |
3886 | CallLoweringInfo &setIsPatchPoint(bool Value = true) { |
3887 | IsPatchPoint = Value; |
3888 | return *this; |
3889 | } |
3890 | |
3891 | CallLoweringInfo &setIsPreallocated(bool Value = true) { |
3892 | IsPreallocated = Value; |
3893 | return *this; |
3894 | } |
3895 | |
3896 | CallLoweringInfo &setIsPostTypeLegalization(bool Value = true) {
3897 | IsPostTypeLegalization = Value; |
3898 | return *this; |
3899 | } |
3900 | |
3901 | ArgListTy &getArgs() { |
3902 | return Args; |
3903 | } |
3904 | }; |
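// --- Editor's example (illustrative sketch, not part of this header) ---
// Every setter above returns *this, so call sites configure a
// CallLoweringInfo in one fluent chain and hand it to LowerCallTo (declared
// below). DAG, TLI, DL, Chain, Callee, RetTy and ArgsList stand for values
// the surrounding lowering would already have; they are placeholders here,
// not taken from the LLVM sources.
TargetLowering::CallLoweringInfo CLI(DAG);
CLI.setDebugLoc(DL)
    .setChain(Chain)
    .setCallee(CallingConv::C, RetTy, Callee, std::move(ArgsList))
    .setTailCall(false)
    .setDiscardResult(RetTy->isVoidTy());
// LowerCallTo returns {call result value, outgoing token chain}.
std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
// --- end example ---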
3905 | |
3906 | /// This structure is used to pass arguments to the makeLibCall function.
3907 | struct MakeLibCallOptions { |
3908 | // By passing the type list before softening to makeLibCall, the target hook
3909 | // shouldExtendTypeInLibCall can get the original type before softening.
3910 | ArrayRef<EVT> OpsVTBeforeSoften; |
3911 | EVT RetVTBeforeSoften; |
3912 | bool IsSExt : 1; |
3913 | bool DoesNotReturn : 1; |
3914 | bool IsReturnValueUsed : 1; |
3915 | bool IsPostTypeLegalization : 1; |
3916 | bool IsSoften : 1; |
3917 | |
3918 | MakeLibCallOptions() |
3919 | : IsSExt(false), DoesNotReturn(false), IsReturnValueUsed(true), |
3920 | IsPostTypeLegalization(false), IsSoften(false) {} |
3921 | |
3922 | MakeLibCallOptions &setSExt(bool Value = true) { |
3923 | IsSExt = Value; |
3924 | return *this; |
3925 | } |
3926 | |
3927 | MakeLibCallOptions &setNoReturn(bool Value = true) { |
3928 | DoesNotReturn = Value; |
3929 | return *this; |
3930 | } |
3931 | |
3932 | MakeLibCallOptions &setDiscardResult(bool Value = true) { |
3933 | IsReturnValueUsed = !Value; |
3934 | return *this; |
3935 | } |
3936 | |
3937 | MakeLibCallOptions &setIsPostTypeLegalization(bool Value = true) { |
3938 | IsPostTypeLegalization = Value; |
3939 | return *this; |
3940 | } |
3941 | |
3942 | MakeLibCallOptions &setTypeListBeforeSoften(ArrayRef<EVT> OpsVT, EVT RetVT, |
3943 | bool Value = true) { |
3944 | OpsVTBeforeSoften = OpsVT; |
3945 | RetVTBeforeSoften = RetVT; |
3946 | IsSoften = Value; |
3947 | return *this; |
3948 | } |
3949 | }; |
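// --- Editor's example (illustrative sketch, not part of this header) ---
// MakeLibCallOptions is consumed by TargetLowering::makeLibCall. A hedged
// sketch of softening an f32 add into a libcall: N, DAG, TLI and Chain are
// assumed context, and the particular libcall and integer return type are
// illustrative only.
TargetLowering::MakeLibCallOptions CallOptions;
EVT OpVT = N->getOperand(0).getValueType();
// Record the pre-softening types so shouldExtendTypeInLibCall can see them.
CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0)).setSExt(true);
std::pair<SDValue, SDValue> Tmp =
    TLI.makeLibCall(DAG, RTLIB::ADD_F32, MVT::i32,
                    {N->getOperand(0), N->getOperand(1)}, CallOptions,
                    SDLoc(N), Chain);
// Tmp.first is the softened result, Tmp.second the updated chain.
// --- end example ---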
3950 | |
3951 | /// This function lowers an abstract call to a function into an actual call. |
3952 | /// This returns a pair of values. The first element is the return value
3953 | /// for the function (if RetTy is not VoidTy). The second element is the |
3954 | /// outgoing token chain. It calls LowerCall to do the actual lowering. |
3955 | std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI) const; |
3956 | |
3957 | /// This hook must be implemented to lower calls into the specified |
3958 | /// DAG. The outgoing arguments to the call are described by the Outs array, |
3959 | /// and the values to be returned by the call are described by the Ins |
3960 | /// array. The implementation should fill in the InVals array with legal-type |
3961 | /// return values from the call, and return the resulting token chain value. |
3962 | virtual SDValue |
3963 | LowerCall(CallLoweringInfo &/*CLI*/, |
3964 | SmallVectorImpl<SDValue> &/*InVals*/) const { |
3965 | llvm_unreachable("Not Implemented");
3966 | } |
3967 | |
3968 | /// Target-specific cleanup for formal ByVal parameters. |
3969 | virtual void HandleByVal(CCState *, unsigned &, Align) const {} |
3970 | |
3971 | /// This hook should be implemented to check whether the return values |
3972 | /// described by the Outs array can fit into the return registers. If false |
3973 | /// is returned, an sret-demotion is performed. |
3974 | virtual bool CanLowerReturn(CallingConv::ID /*CallConv*/, |
3975 | MachineFunction &/*MF*/, bool /*isVarArg*/, |
3976 | const SmallVectorImpl<ISD::OutputArg> &/*Outs*/, |
3977 | LLVMContext &/*Context*/) const |
3978 | { |
3979 | // Return true by default to get preexisting behavior. |
3980 | return true; |
3981 | } |
3982 | |
3983 | /// This hook must be implemented to lower outgoing return values, described |
3984 | /// by the Outs array, into the specified DAG. The implementation should |
3985 | /// return the resulting token chain value. |
3986 | virtual SDValue LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/, |
3987 | bool /*isVarArg*/, |
3988 | const SmallVectorImpl<ISD::OutputArg> & /*Outs*/, |
3989 | const SmallVectorImpl<SDValue> & /*OutVals*/, |
3990 | const SDLoc & /*dl*/, |
3991 | SelectionDAG & /*DAG*/) const { |
3992 | llvm_unreachable("Not Implemented");
3993 | } |
3994 | |
3995 | /// Return true if the result of the specified node is used by a return node
3996 | /// only. It also computes and returns the input chain for the tail call.
3997 | /// |
3998 | /// This is used to determine whether it is possible to codegen a libcall as |
3999 | /// tail call at legalization time. |
4000 | virtual bool isUsedByReturnOnly(SDNode *, SDValue &/*Chain*/) const { |
4001 | return false; |
4002 | } |
4003 | |
4004 | /// Return true if the target may be able to emit the call instruction as a tail
4005 | /// call. This is used by optimization passes to determine if it's profitable |
4006 | /// to duplicate return instructions to enable tailcall optimization. |
4007 | virtual bool mayBeEmittedAsTailCall(const CallInst *) const { |
4008 | return false; |
4009 | } |
4010 | |
4011 | /// Return the builtin name for the __builtin___clear_cache intrinsic.
4012 | /// Default is to invoke the clear cache library call.
4013 | virtual const char * getClearCacheBuiltinName() const { |
4014 | return "__clear_cache"; |
4015 | } |
4016 | |
4017 | /// Return the register ID of the name passed in. Used by the named register
4018 | /// global variables extension. There is no target-independent behaviour |
4019 | /// so the default action is to bail. |
4020 | virtual Register getRegisterByName(const char* RegName, LLT Ty, |
4021 | const MachineFunction &MF) const { |
4022 | report_fatal_error("Named registers not implemented for this target"); |
4023 | } |
4024 | |
4025 | /// Return the type that should be used to zero or sign extend a |
4026 | /// zeroext/signext integer return value. FIXME: Some C calling conventions |
4027 | /// require the return type to be promoted, but this is not true all the time, |
4028 | /// e.g. i1/i8/i16 on x86/x86_64. It is also not necessary for non-C calling |
4029 | /// conventions. The frontend should handle this and include all of the |
4030 | /// necessary information. |
4031 | virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT, |
4032 | ISD::NodeType /*ExtendKind*/) const { |
4033 | EVT MinVT = getRegisterType(Context, MVT::i32); |
4034 | return VT.bitsLT(MinVT) ? MinVT : VT; |
4035 | } |
4036 | |
4037 | /// For some targets, an LLVM struct type must be broken down into multiple |
4038 | /// simple types, but the calling convention specifies that the entire struct |
4039 | /// must be passed in a block of consecutive registers. |
4040 | virtual bool |
4041 | functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, |
4042 | bool isVarArg, |
4043 | const DataLayout &DL) const { |
4044 | return false; |
4045 | } |
4046 | |
4047 | /// For most targets, an LLVM type must be broken down into multiple |
4048 | /// smaller types. Usually the halves are ordered according to the endianness |
4049 | /// but for some platform that would break. So this method will default to |
4050 | /// matching the endianness but can be overridden. |
4051 | virtual bool |
4052 | shouldSplitFunctionArgumentsAsLittleEndian(const DataLayout &DL) const { |
4053 | return DL.isLittleEndian(); |
4054 | } |
4055 | |
4056 | /// Returns a 0-terminated array of registers that can be safely used as
4057 | /// scratch registers. |
4058 | virtual const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const { |
4059 | return nullptr; |
4060 | } |
4061 | |
4062 | /// This callback is used to prepare for a volatile or atomic load. |
4063 | /// It takes a chain node as input and returns the chain for the load itself. |
4064 | /// |
4065 | /// Having a callback like this is necessary for targets like SystemZ, |
4066 | /// which allows a CPU to reuse the result of a previous load indefinitely, |
4067 | /// even if a cache-coherent store is performed by another CPU. The default |
4068 | /// implementation does nothing. |
4069 | virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL, |
4070 | SelectionDAG &DAG) const { |
4071 | return Chain; |
4072 | } |
4073 | |
4074 | /// Should SelectionDAG lower an atomic store of the given kind as a normal |
4075 | /// StoreSDNode (as opposed to an AtomicSDNode)? NOTE: The intention is to |
4076 | /// eventually migrate all targets to using StoreSDNodes, but porting is
4077 | /// being done one target at a time.
4078 | virtual bool lowerAtomicStoreAsStoreSDNode(const StoreInst &SI) const { |
4079 | assert(SI.isAtomic() && "violated precondition");
4080 | return false; |
4081 | } |
4082 | |
4083 | /// Should SelectionDAG lower an atomic load of the given kind as a normal |
4084 | /// LoadSDNode (as opposed to an AtomicSDNode)? NOTE: The intention is to |
4085 | /// eventually migrate all targets to using LoadSDNodes, but porting is
4086 | /// being done one target at a time.
4087 | virtual bool lowerAtomicLoadAsLoadSDNode(const LoadInst &LI) const { |
4088 | assert(LI.isAtomic() && "violated precondition");
4089 | return false; |
4090 | } |
4091 | |
4092 | |
4093 | /// This callback is invoked by the type legalizer to legalize nodes with an |
4094 | /// illegal operand type but legal result types. It replaces the |
4095 | /// LowerOperation callback in the type Legalizer. The reason we cannot do
4096 | /// away with LowerOperation entirely is that LegalizeDAG isn't yet ready to |
4097 | /// use this callback. |
4098 | /// |
4099 | /// TODO: Consider merging with ReplaceNodeResults. |
4100 | /// |
4101 | /// The target places new result values for the node in Results (their number |
4102 | /// and types must exactly match those of the original return values of |
4103 | /// the node), or leaves Results empty, which indicates that the node is not |
4104 | /// to be custom lowered after all. |
4105 | /// The default implementation calls LowerOperation. |
4106 | virtual void LowerOperationWrapper(SDNode *N, |
4107 | SmallVectorImpl<SDValue> &Results, |
4108 | SelectionDAG &DAG) const; |
4109 | |
4110 | /// This callback is invoked for operations that are unsupported by the |
4111 | /// target, which are registered to use 'custom' lowering, and whose defined |
4112 | /// values are all legal. If the target has no operations that require custom |
4113 | /// lowering, it need not implement this. The default implementation of this |
4114 | /// aborts. |
4115 | virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const; |
4116 | |
4117 | /// This callback is invoked when a node result type is illegal for the |
4118 | /// target, and the operation was registered to use 'custom' lowering for that |
4119 | /// result type. The target places new result values for the node in Results |
4120 | /// (their number and types must exactly match those of the original return |
4121 | /// values of the node), or leaves Results empty, which indicates that the |
4122 | /// node is not to be custom lowered after all. |
4123 | /// |
4124 | /// If the target has no operations that require custom lowering, it need not |
4125 | /// implement this. The default implementation aborts. |
4126 | virtual void ReplaceNodeResults(SDNode * /*N*/, |
4127 | SmallVectorImpl<SDValue> &/*Results*/, |
4128 | SelectionDAG &/*DAG*/) const { |
4129 | llvm_unreachable("ReplaceNodeResults not implemented for this target!");
4130 | } |
4131 | |
4132 | /// This method returns the name of a target specific DAG node. |
4133 | virtual const char *getTargetNodeName(unsigned Opcode) const; |
4134 | |
4135 | /// This method returns a target specific FastISel object, or null if the |
4136 | /// target does not support "fast" ISel. |
4137 | virtual FastISel *createFastISel(FunctionLoweringInfo &, |
4138 | const TargetLibraryInfo *) const { |
4139 | return nullptr; |
4140 | } |
4141 | |
4142 | bool verifyReturnAddressArgumentIsConstant(SDValue Op, |
4143 | SelectionDAG &DAG) const; |
4144 | |
4145 | //===--------------------------------------------------------------------===// |
4146 | // Inline Asm Support hooks |
4147 | // |
4148 | |
4149 | /// This hook allows the target to expand an inline asm call to be explicit |
4150 | /// llvm code if it wants to. This is useful for turning simple inline asms |
4151 | /// into LLVM intrinsics, which gives the compiler more information about the |
4152 | /// behavior of the code. |
4153 | virtual bool ExpandInlineAsm(CallInst *) const { |
4154 | return false; |
4155 | } |
4156 | |
4157 | enum ConstraintType { |
4158 | C_Register, // Constraint represents specific register(s). |
4159 | C_RegisterClass, // Constraint represents any of register(s) in class. |
4160 | C_Memory, // Memory constraint. |
4161 | C_Immediate, // Requires an immediate. |
4162 | C_Other, // Something else. |
4163 | C_Unknown // Unsupported constraint. |
4164 | }; |
4165 | |
4166 | enum ConstraintWeight { |
4167 | // Generic weights. |
4168 | CW_Invalid = -1, // No match. |
4169 | CW_Okay = 0, // Acceptable. |
4170 | CW_Good = 1, // Good weight. |
4171 | CW_Better = 2, // Better weight. |
4172 | CW_Best = 3, // Best weight. |
4173 | |
4174 | // Well-known weights. |
4175 | CW_SpecificReg = CW_Okay, // Specific register operands. |
4176 | CW_Register = CW_Good, // Register operands. |
4177 | CW_Memory = CW_Better, // Memory operands. |
4178 | CW_Constant = CW_Best, // Constant operand. |
4179 | CW_Default = CW_Okay // Default or don't know type. |
4180 | }; |
4181 | |
4182 | /// This contains information for each constraint that we are lowering. |
4183 | struct AsmOperandInfo : public InlineAsm::ConstraintInfo { |
4184 | /// This contains the actual string for the code, like "m". TargetLowering |
4185 | /// picks the 'best' code from ConstraintInfo::Codes that most closely |
4186 | /// matches the operand. |
4187 | std::string ConstraintCode; |
4188 | |
4189 | /// Information about the constraint code, e.g. Register, RegisterClass, |
4190 | /// Memory, Other, Unknown. |
4191 | TargetLowering::ConstraintType ConstraintType = TargetLowering::C_Unknown; |
4192 | |
4193 | /// If this is the result output operand or a clobber, this is null, |
4194 | /// otherwise it is the incoming operand to the CallInst. This gets |
4195 | /// modified as the asm is processed. |
4196 | Value *CallOperandVal = nullptr; |
4197 | |
4198 | /// The ValueType for the operand value. |
4199 | MVT ConstraintVT = MVT::Other; |
4200 | |
4201 | /// Copy constructor for copying from a ConstraintInfo. |
4202 | AsmOperandInfo(InlineAsm::ConstraintInfo Info) |
4203 | : InlineAsm::ConstraintInfo(std::move(Info)) {} |
4204 | |
4205 | /// Return true if this is an input operand that is a matching constraint
4206 | /// like "4". |
4207 | bool isMatchingInputConstraint() const; |
4208 | |
4209 | /// If this is an input matching constraint, this method returns the output |
4210 | /// operand it matches. |
4211 | unsigned getMatchedOperand() const; |
4212 | }; |
4213 | |
4214 | using AsmOperandInfoVector = std::vector<AsmOperandInfo>; |
4215 | |
4216 | /// Split up the constraint string from the inline assembly value into the |
4217 | /// specific constraints and their prefixes, and also tie in the associated |
4218 | /// operand values. If this returns an empty vector, and if the constraint |
4219 | /// string itself isn't empty, there was an error parsing. |
4220 | virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL, |
4221 | const TargetRegisterInfo *TRI, |
4222 | const CallBase &Call) const; |
4223 | |
4224 | /// Examine constraint type and operand type and determine a weight value. |
4225 | /// The operand object must already have been set up with the operand type. |
4226 | virtual ConstraintWeight getMultipleConstraintMatchWeight( |
4227 | AsmOperandInfo &info, int maIndex) const; |
4228 | |
4229 | /// Examine constraint string and operand type and determine a weight value. |
4230 | /// The operand object must already have been set up with the operand type. |
4231 | virtual ConstraintWeight getSingleConstraintMatchWeight( |
4232 | AsmOperandInfo &info, const char *constraint) const; |
4233 | |
4234 | /// Determines the constraint code and constraint type to use for the specific |
4235 | /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType. |
4236 | /// If the actual operand being passed in is available, it can be passed in as |
4237 | /// Op, otherwise an empty SDValue can be passed. |
4238 | virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo, |
4239 | SDValue Op, |
4240 | SelectionDAG *DAG = nullptr) const; |
4241 | |
4242 | /// Given a constraint, return the type of constraint it is for this target. |
4243 | virtual ConstraintType getConstraintType(StringRef Constraint) const; |
4244 | |
4245 | /// Given a physical register constraint (e.g. {edx}), return the register |
4246 | /// number and the register class for the register. |
4247 | /// |
4248 | /// Given a register class constraint, like 'r', if this corresponds directly |
4249 | /// to an LLVM register class, return a register of 0 and the register class |
4250 | /// pointer. |
4251 | /// |
4252 | /// This should only be used for C_Register constraints. On error, this |
4253 | /// returns a register number of 0 and a null register class pointer. |
4254 | virtual std::pair<unsigned, const TargetRegisterClass *> |
4255 | getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, |
4256 | StringRef Constraint, MVT VT) const; |
4257 | |
4258 | virtual unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const { |
4259 | if (ConstraintCode == "m") |
4260 | return InlineAsm::Constraint_m; |
4261 | if (ConstraintCode == "o") |
4262 | return InlineAsm::Constraint_o; |
4263 | if (ConstraintCode == "X") |
4264 | return InlineAsm::Constraint_X; |
4265 | return InlineAsm::Constraint_Unknown; |
4266 | } |
4267 | |
4268 | /// Try to replace an X constraint, which matches anything, with another that |
4269 | /// has more specific requirements based on the type of the corresponding |
4270 | /// operand. This returns null if there is no replacement to make. |
4271 | virtual const char *LowerXConstraint(EVT ConstraintVT) const; |
4272 | |
4273 | /// Lower the specified operand into the Ops vector. If it is invalid, don't |
4274 | /// add anything to Ops. |
4275 | virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, |
4276 | std::vector<SDValue> &Ops, |
4277 | SelectionDAG &DAG) const; |
4278 | |
4279 | // Lower custom output constraints. If invalid, return SDValue(). |
4280 | virtual SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Flag, |
4281 | const SDLoc &DL, |
4282 | const AsmOperandInfo &OpInfo, |
4283 | SelectionDAG &DAG) const; |
4284 | |
4285 | //===--------------------------------------------------------------------===// |
4286 | // Div utility functions |
4287 | // |
4288 | SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization, |
4289 | SmallVectorImpl<SDNode *> &Created) const; |
4290 | SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization, |
4291 | SmallVectorImpl<SDNode *> &Created) const; |
4292 | |
4293 | /// Targets may override this function to provide custom SDIV lowering for |
4294 | /// power-of-2 denominators. If the target returns an empty SDValue, LLVM |
4295 | /// assumes SDIV is expensive and replaces it with a series of other integer |
4296 | /// operations. |
4297 | virtual SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, |
4298 | SelectionDAG &DAG, |
4299 | SmallVectorImpl<SDNode *> &Created) const; |
4300 | |
4301 | /// Indicate whether this target prefers to combine FDIVs with the same |
4302 | /// divisor. If the transform should never be done, return zero. If the |
4303 | /// transform should be done, return the minimum number of divisor uses |
4304 | /// that must exist. |
4305 | virtual unsigned combineRepeatedFPDivisors() const { |
4306 | return 0; |
4307 | } |
4308 | |
4309 | /// Hooks for building estimates in place of slower divisions and square |
4310 | /// roots. |
4311 | |
4312 | /// Return either a square root or its reciprocal estimate value for the input |
4313 | /// operand. |
4314 | /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified' or |
4315 | /// 'Enabled' as set by a potential default override attribute. |
4316 | /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson |
4317 | /// refinement iterations required to generate a sufficient (though not |
4318 | /// necessarily IEEE-754 compliant) estimate is returned in that parameter. |
4319 | /// The boolean UseOneConstNR output is used to select a Newton-Raphson |
4320 | /// algorithm implementation that uses either one or two constants. |
4321 | /// The boolean Reciprocal is used to select whether the estimate is for the |
4322 | /// square root of the input operand or the reciprocal of its square root. |
4323 | /// A target may choose to implement its own refinement within this function. |
4324 | /// If that's true, then return '0' as the number of RefinementSteps to avoid |
4325 | /// any further refinement of the estimate. |
4326 | /// An empty SDValue return means no estimate sequence can be created. |
4327 | virtual SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, |
4328 | int Enabled, int &RefinementSteps, |
4329 | bool &UseOneConstNR, bool Reciprocal) const { |
4330 | return SDValue(); |
4331 | } |
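// --- Editor's example (illustrative sketch, not part of this header) ---
// The refinement the comment above describes is plain Newton-Raphson. One
// step for a reciprocal-square-root estimate, in standalone scalar C++
// rather than the DAG node sequence a target would actually build:
#include <cstdio>

static float refineRsqrt(float A, float Est) {
  // x1 = x0 * (1.5 - 0.5 * A * x0 * x0); each step roughly doubles the
  // number of correct bits in the estimate.
  return Est * (1.5f - 0.5f * A * Est * Est);
}

int main() {
  float A = 2.0f;
  float Est = 0.7f;              // crude hardware-style seed for 1/sqrt(2)
  for (int I = 0; I < 3; ++I)    // the RefinementSteps loop
    Est = refineRsqrt(A, Est);
  std::printf("rsqrt(2) ~= %f\n", Est); // ~0.707107
  return 0;
}
// --- end example ---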
4332 | |
4333 | /// Return a reciprocal estimate value for the input operand. |
4334 | /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified' or |
4335 | /// 'Enabled' as set by a potential default override attribute. |
4336 | /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson |
4337 | /// refinement iterations required to generate a sufficient (though not |
4338 | /// necessarily IEEE-754 compliant) estimate is returned in that parameter. |
4339 | /// A target may choose to implement its own refinement within this function. |
4340 | /// If that's true, then return '0' as the number of RefinementSteps to avoid |
4341 | /// any further refinement of the estimate. |
4342 | /// An empty SDValue return means no estimate sequence can be created. |
4343 | virtual SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, |
4344 | int Enabled, int &RefinementSteps) const { |
4345 | return SDValue(); |
4346 | } |
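// --- Editor's example (illustrative sketch, not part of this header) ---
// The reciprocal case is analogous: one Newton-Raphson step for 1/A is
// x1 = x0 * (2 - A * x0), again roughly doubling the correct bits per step.
#include <cstdio>

static float refineRecip(float A, float Est) {
  return Est * (2.0f - A * Est);
}

int main() {
  float A = 3.0f, Est = 0.3f;    // crude seed for 1/3
  for (int I = 0; I < 3; ++I)
    Est = refineRecip(A, Est);
  std::printf("recip(3) ~= %f\n", Est); // ~0.333333
  return 0;
}
// --- end example ---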
4347 | |
4348 | /// Return a target-dependent comparison result if the input operand is |
4349 | /// suitable for use with a square root estimate calculation. For example, the |
4350 | /// comparison may check if the operand is NAN, INF, zero, normal, etc. The |
4351 | /// result should be used as the condition operand for a select or branch. |
4352 | virtual SDValue getSqrtInputTest(SDValue Operand, SelectionDAG &DAG, |
4353 | const DenormalMode &Mode) const; |
4354 | |
4355 | /// Return a target-dependent result if the input operand is not suitable for |
4356 | /// use with a square root estimate calculation. |
4357 | virtual SDValue getSqrtResultForDenormInput(SDValue Operand, |
4358 | SelectionDAG &DAG) const { |
4359 | return DAG.getConstantFP(0.0, SDLoc(Operand), Operand.getValueType()); |
4360 | } |
4361 | |
4362 | //===--------------------------------------------------------------------===// |
4363 | // Legalization utility functions |
4364 | // |
4365 | |
4366 | /// Expand a MUL or [US]MUL_LOHI of n-bit values into two or four nodes, |
4367 | /// respectively, each computing an n/2-bit part of the result. |
4368 | /// \param Result A vector that will be filled with the parts of the result |
4369 | /// in little-endian order. |
4370 | /// \param LL Low bits of the LHS of the MUL. You can use this parameter |
4371 | /// if you want to control how low bits are extracted from the LHS. |
4372 | /// \param LH High bits of the LHS of the MUL. See LL for meaning. |
4373 | /// \param RL Low bits of the RHS of the MUL. See LL for meaning.
4374 | /// \param RH High bits of the RHS of the MUL. See LL for meaning. |
4375 | /// \returns true if the node has been expanded, false if it has not |
4376 | bool expandMUL_LOHI(unsigned Opcode, EVT VT, const SDLoc &dl, SDValue LHS, |
4377 | SDValue RHS, SmallVectorImpl<SDValue> &Result, EVT HiLoVT, |
4378 | SelectionDAG &DAG, MulExpansionKind Kind, |
4379 | SDValue LL = SDValue(), SDValue LH = SDValue(), |
4380 | SDValue RL = SDValue(), SDValue RH = SDValue()) const; |
4381 | |
4382 | /// Expand a MUL into two nodes. One that computes the high bits of |
4383 | /// the result and one that computes the low bits. |
4384 | /// \param HiLoVT The value type to use for the Lo and Hi nodes. |
4385 | /// \param LL Low bits of the LHS of the MUL. You can use this parameter |
4386 | /// if you want to control how low bits are extracted from the LHS. |
4387 | /// \param LH High bits of the LHS of the MUL. See LL for meaning. |
4388 | /// \param RL Low bits of the RHS of the MUL. See LL for meaning.
4389 | /// \param RH High bits of the RHS of the MUL. See LL for meaning.
4390 | /// \returns true if the node has been expanded, false if it has not.
4391 | bool expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT, |
4392 | SelectionDAG &DAG, MulExpansionKind Kind, |
4393 | SDValue LL = SDValue(), SDValue LH = SDValue(), |
4394 | SDValue RL = SDValue(), SDValue RH = SDValue()) const; |
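// --- Editor's example (illustrative sketch, not part of this header) ---
// The LL/LH/RL/RH parameters feed the usual schoolbook decomposition. A
// standalone scalar version of a 64x64->128 multiply built from 32-bit
// parts, to make the bookkeeping concrete:
#include <cstdint>
#include <cstdio>

static void mul64To128(uint64_t L, uint64_t R, uint64_t &Lo, uint64_t &Hi) {
  uint64_t LL = L & 0xffffffffu, LH = L >> 32; // low/high halves of LHS
  uint64_t RL = R & 0xffffffffu, RH = R >> 32; // low/high halves of RHS
  uint64_t P0 = LL * RL;                       // low x low
  uint64_t P1 = LL * RH, P2 = LH * RL;         // cross terms
  uint64_t P3 = LH * RH;                       // high x high
  uint64_t Mid = (P0 >> 32) + (P1 & 0xffffffffu) + (P2 & 0xffffffffu);
  Lo = (Mid << 32) | (P0 & 0xffffffffu);
  Hi = P3 + (P1 >> 32) + (P2 >> 32) + (Mid >> 32);
}

int main() {
  uint64_t Lo, Hi;
  mul64To128(0xdeadbeefcafebabeULL, 0x0123456789abcdefULL, Lo, Hi);
  std::printf("hi=%016llx lo=%016llx\n",
              (unsigned long long)Hi, (unsigned long long)Lo);
  return 0;
}
// --- end example ---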
4395 | |
4396 | /// Expand funnel shift. |
4397 | /// \param N Node to expand |
4398 | /// \param Result output after conversion |
4399 | /// \returns True if the expansion was successful, false otherwise
4400 | bool expandFunnelShift(SDNode *N, SDValue &Result, SelectionDAG &DAG) const; |
4401 | |
4402 | /// Expand rotations. |
4403 | /// \param N Node to expand |
4404 | /// \param AllowVectorOps expand vector rotate, this should only be performed |
4405 | /// if the legalization is happening outside of LegalizeVectorOps |
4406 | /// \param Result output after conversion |
4407 | /// \returns True if the expansion was successful, false otherwise
4408 | bool expandROT(SDNode *N, bool AllowVectorOps, SDValue &Result, |
4409 | SelectionDAG &DAG) const; |
4410 | |
4411 | /// Expand shift-by-parts. |
4412 | /// \param N Node to expand |
4413 | /// \param Lo lower-output-part after conversion |
4414 | /// \param Hi upper-output-part after conversion |
4415 | void expandShiftParts(SDNode *N, SDValue &Lo, SDValue &Hi, |
4416 | SelectionDAG &DAG) const; |
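// --- Editor's example (illustrative sketch, not part of this header) ---
// The multi-word shift that expandShiftParts emits corresponds to the
// following scalar logic for a 128-bit left shift over two 64-bit halves
// (the DAG version must also handle variable amounts without branching):
#include <cstdint>
#include <cstdio>

static void shl128(uint64_t Lo, uint64_t Hi, unsigned Amt,
                   uint64_t &OutLo, uint64_t &OutHi) {
  if (Amt == 0) {
    OutLo = Lo;
    OutHi = Hi;
  } else if (Amt < 64) {
    OutHi = (Hi << Amt) | (Lo >> (64 - Amt)); // bits carried across halves
    OutLo = Lo << Amt;
  } else {                                    // the whole low part moves up
    OutHi = Lo << (Amt - 64);
    OutLo = 0;
  }
}

int main() {
  uint64_t Lo, Hi;
  shl128(0x8000000000000001ULL, 0, 1, Lo, Hi);
  std::printf("hi=%llx lo=%llx\n",
              (unsigned long long)Hi, (unsigned long long)Lo); // hi=1 lo=2
  return 0;
}
// --- end example ---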
4417 | |
4418 | /// Expand float(f32) to SINT(i64) conversion |
4419 | /// \param N Node to expand |
4420 | /// \param Result output after conversion |
4421 | /// \returns True if the expansion was successful, false otherwise
4422 | bool expandFP_TO_SINT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const; |
4423 | |
4424 | /// Expand float to UINT conversion |
4425 | /// \param N Node to expand |
4426 | /// \param Result output after conversion |
4427 | /// \param Chain output chain after conversion |
4428 | /// \returns True if the expansion was successful, false otherwise
4429 | bool expandFP_TO_UINT(SDNode *N, SDValue &Result, SDValue &Chain, |
4430 | SelectionDAG &DAG) const; |
4431 | |
4432 | /// Expand UINT(i64) to double(f64) conversion |
4433 | /// \param N Node to expand |
4434 | /// \param Result output after conversion |
4435 | /// \param Chain output chain after conversion |
4436 | /// \returns True if the expansion was successful, false otherwise
4437 | bool expandUINT_TO_FP(SDNode *N, SDValue &Result, SDValue &Chain, |
4438 | SelectionDAG &DAG) const; |
4439 | |
4440 | /// Expand fminnum/fmaxnum into fminnum_ieee/fmaxnum_ieee with quieted inputs. |
4441 | SDValue expandFMINNUM_FMAXNUM(SDNode *N, SelectionDAG &DAG) const; |
4442 | |
4443 | /// Expand FP_TO_[US]INT_SAT into FP_TO_[US]INT and selects or min/max. |
4444 | /// \param N Node to expand |
4445 | /// \returns The expansion result |
4446 | SDValue expandFP_TO_INT_SAT(SDNode *N, SelectionDAG &DAG) const; |
4447 | |
4448 | /// Expand isnan depending on function attributes. |
4449 | SDValue expandISNAN(EVT ResultVT, SDValue Op, SDNodeFlags Flags, |
4450 | const SDLoc &DL, SelectionDAG &DAG) const; |
4451 | |
4452 | /// Expand CTPOP nodes. Expands vector/scalar CTPOP nodes;
4453 | /// vector nodes can only succeed if all operations are legal/custom.
4454 | /// \param N Node to expand
4455 | /// \param Result output after conversion
4456 | /// \returns True if the expansion was successful, false otherwise
4457 | bool expandCTPOP(SDNode *N, SDValue &Result, SelectionDAG &DAG) const; |
4458 | |
4459 | /// Expand CTLZ/CTLZ_ZERO_UNDEF nodes. Expands vector/scalar CTLZ nodes;
4460 | /// vector nodes can only succeed if all operations are legal/custom.
4461 | /// \param N Node to expand
4462 | /// \param Result output after conversion
4463 | /// \returns True if the expansion was successful, false otherwise
4464 | bool expandCTLZ(SDNode *N, SDValue &Result, SelectionDAG &DAG) const; |
4465 | |
4466 | /// Expand CTTZ/CTTZ_ZERO_UNDEF nodes. Expands vector/scalar CTTZ nodes;
4467 | /// vector nodes can only succeed if all operations are legal/custom.
4468 | /// \param N Node to expand
4469 | /// \param Result output after conversion
4470 | /// \returns True if the expansion was successful, false otherwise
4471 | bool expandCTTZ(SDNode *N, SDValue &Result, SelectionDAG &DAG) const; |
4472 | |
4473 | /// Expand ABS nodes. Expands vector/scalar ABS nodes;
4474 | /// vector nodes can only succeed if all operations are legal/custom.
4475 | /// (ABS x) -> (XOR (ADD x, (SRA x, type_size - 1)), (SRA x, type_size - 1))
4476 | /// \param N Node to expand
4477 | /// \param Result output after conversion
4478 | /// \param IsNegative indicates negated abs
4479 | /// \returns True if the expansion was successful, false otherwise
4480 | bool expandABS(SDNode *N, SDValue &Result, SelectionDAG &DAG, |
4481 | bool IsNegative = false) const; |
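// --- Editor's example (illustrative sketch, not part of this header) ---
// The formula above is the classic branchless absolute value. In scalar
// form for i32, relying on arithmetic right shift (what SRA guarantees):
#include <cstdint>
#include <cstdio>

static int32_t absViaSra(int32_t X) {
  int32_t Sign = X >> 31;   // all ones if negative, zero otherwise
  return (X + Sign) ^ Sign; // conditional negate without a branch
}

int main() {
  std::printf("%d %d\n", absViaSra(-42), absViaSra(42)); // 42 42
  return 0;
}
// --- end example ---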
4482 | |
4483 | /// Expand BSWAP nodes. Expands scalar/vector BSWAP nodes with i16/i32/i64 |
4484 | /// scalar types. Returns SDValue() if the expansion fails.
4485 | /// \param N Node to expand |
4486 | /// \returns The expansion result or SDValue() if it fails. |
4487 | SDValue expandBSWAP(SDNode *N, SelectionDAG &DAG) const; |
4488 | |
4489 | /// Expand BITREVERSE nodes. Expands scalar/vector BITREVERSE nodes. |
4490 | /// Returns SDValue() if the expansion fails.
4491 | /// \param N Node to expand |
4492 | /// \returns The expansion result or SDValue() if it fails. |
4493 | SDValue expandBITREVERSE(SDNode *N, SelectionDAG &DAG) const; |
4494 | |
4495 | /// Turn a load of a vector type into loads of the individual elements.
4496 | /// \param LD load to expand |
4497 | /// \returns BUILD_VECTOR and TokenFactor nodes. |
4498 | std::pair<SDValue, SDValue> scalarizeVectorLoad(LoadSDNode *LD, |
4499 | SelectionDAG &DAG) const; |
4500 | |
4501 | /// Turn a store of a vector type into stores of the individual elements.
4502 | /// \param ST Store with a vector value type |
4503 | /// \returns TokenFactor of the individual store chains. |
4504 | SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const; |
4505 | |
4506 | /// Expands an unaligned load to 2 half-size loads for an integer, and |
4507 | /// possibly more for vectors. |
4508 | std::pair<SDValue, SDValue> expandUnalignedLoad(LoadSDNode *LD, |
4509 | SelectionDAG &DAG) const; |
4510 | |
4511 | /// Expands an unaligned store to 2 half-size stores for integer values, and |
4512 | /// possibly more for vectors. |
4513 | SDValue expandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG) const; |
4514 | |
4515 | /// Increments memory address \p Addr according to the type of the value |
4516 | /// \p DataVT that should be stored. If the data is stored in compressed |
4517 | /// form, the memory address should be incremented according to the number of |
4518 | /// the stored elements. This number is equal to the number of '1' bits
4519 | /// in the \p Mask. |
4520 | /// \p DataVT is a vector type. \p Mask is a vector value. |
4521 | /// \p DataVT and \p Mask have the same number of vector elements. |
4522 | SDValue IncrementMemoryAddress(SDValue Addr, SDValue Mask, const SDLoc &DL, |
4523 | EVT DataVT, SelectionDAG &DAG, |
4524 | bool IsCompressedMemory) const; |
4525 | |
4526 | /// Get a pointer to vector element \p Idx located in memory for a vector of |
4527 | /// type \p VecVT starting at a base address of \p VecPtr. If \p Idx is out of |
4528 | /// bounds the returned pointer is unspecified, but will be within the vector |
4529 | /// bounds. |
4530 | SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, |
4531 | SDValue Index) const; |
4532 | |
4533 | /// Get a pointer to a sub-vector of type \p SubVecVT at index \p Idx located |
4534 | /// in memory for a vector of type \p VecVT starting at a base address of |
4535 | /// \p VecPtr. If \p Idx plus the size of \p SubVecVT is out of bounds the |
4536 | /// returned pointer is unspecified, but the value returned will be such that |
4537 | /// the entire subvector would be within the vector bounds. |
4538 | SDValue getVectorSubVecPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, |
4539 | EVT SubVecVT, SDValue Index) const; |
4540 | |
4541 | /// Method for building the DAG expansion of ISD::[US][MIN|MAX]. This |
4542 | /// method accepts integers as its arguments. |
4543 | SDValue expandIntMINMAX(SDNode *Node, SelectionDAG &DAG) const; |
4544 | |
4545 | /// Method for building the DAG expansion of ISD::[US][ADD|SUB]SAT. This |
4546 | /// method accepts integers as its arguments. |
4547 | SDValue expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const; |
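// --- Editor's example (illustrative sketch, not part of this header) ---
// As a reference point for ISD::SADDSAT semantics: one scalar way to get
// the clamping behavior is to widen, add, then clamp. The real expansion
// works with overflow flags and selects, but the result is the same.
#include <cstdint>
#include <cstdio>
#include <limits>

static int32_t sAddSat(int32_t A, int32_t B) {
  int64_t Wide = (int64_t)A + B; // the addition cannot overflow in 64 bits
  if (Wide > std::numeric_limits<int32_t>::max())
    return std::numeric_limits<int32_t>::max();
  if (Wide < std::numeric_limits<int32_t>::min())
    return std::numeric_limits<int32_t>::min();
  return (int32_t)Wide;
}

int main() {
  std::printf("%d\n", sAddSat(INT32_MAX, 1)); // clamps to INT32_MAX
  return 0;
}
// --- end example ---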
4548 | |
4549 | /// Method for building the DAG expansion of ISD::[US]SHLSAT. This |
4550 | /// method accepts integers as its arguments. |
4551 | SDValue expandShlSat(SDNode *Node, SelectionDAG &DAG) const; |
4552 | |
4553 | /// Method for building the DAG expansion of ISD::[U|S]MULFIX[SAT]. This |
4554 | /// method accepts integers as its arguments. |
4555 | SDValue expandFixedPointMul(SDNode *Node, SelectionDAG &DAG) const; |
4556 | |
4557 | /// Method for building the DAG expansion of ISD::[US]DIVFIX[SAT]. This |
4558 | /// method accepts integers as its arguments. |
4559 | /// Note: This method may fail if the division could not be performed |
4560 | /// within the type. Clients must retry with a wider type if this happens. |
4561 | SDValue expandFixedPointDiv(unsigned Opcode, const SDLoc &dl, |
4562 | SDValue LHS, SDValue RHS, |
4563 | unsigned Scale, SelectionDAG &DAG) const; |
4564 | |
4565 | /// Method for building the DAG expansion of ISD::U(ADD|SUB)O. Expansion |
4566 | /// always succeeds and populates the Result and Overflow arguments.
4567 | void expandUADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow, |
4568 | SelectionDAG &DAG) const; |
4569 | |
4570 | /// Method for building the DAG expansion of ISD::S(ADD|SUB)O. Expansion |
4571 | /// always succeeds and populates the Result and Overflow arguments.
4572 | void expandSADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow, |
4573 | SelectionDAG &DAG) const; |
4574 | |
4575 | /// Method for building the DAG expansion of ISD::[US]MULO. Returns whether |
4576 | /// expansion was successful and populates the Result and Overflow arguments. |
4577 | bool expandMULO(SDNode *Node, SDValue &Result, SDValue &Overflow, |
4578 | SelectionDAG &DAG) const; |
4579 | |
4580 | /// Expand a VECREDUCE_* into an explicit calculation. If Count is specified, |
4581 | /// only the first Count elements of the vector are used. |
4582 | SDValue expandVecReduce(SDNode *Node, SelectionDAG &DAG) const; |
4583 | |
4584 | /// Expand a VECREDUCE_SEQ_* into an explicit ordered calculation. |
4585 | SDValue expandVecReduceSeq(SDNode *Node, SelectionDAG &DAG) const; |
4586 | |
4587 | /// Expand an SREM or UREM using SDIV/UDIV or SDIVREM/UDIVREM, if legal. |
4588 | /// Returns true if the expansion was successful. |
4589 | bool expandREM(SDNode *Node, SDValue &Result, SelectionDAG &DAG) const; |
4590 | |
4591 | /// Method for building the DAG expansion of ISD::VECTOR_SPLICE. This |
4592 | /// method accepts vectors as its arguments. |
4593 | SDValue expandVectorSplice(SDNode *Node, SelectionDAG &DAG) const; |
4594 | |
4595 | /// Legalize a SETCC with given LHS and RHS and condition code CC on the |
4596 | /// current target. |
4597 | /// |
4598 | /// If the SETCC has been legalized using AND / OR, then the legalized node |
4599 | /// will be stored in LHS. RHS and CC will be set to SDValue(). NeedInvert |
4600 | /// will be set to false. |
4601 | /// |
4602 | /// If the SETCC has been legalized by using getSetCCSwappedOperands(), |
4603 | /// then the values of LHS and RHS will be swapped, CC will be set to the |
4604 | /// new condition, and NeedInvert will be set to false. |
4605 | /// |
4606 | /// If the SETCC has been legalized using the inverse condcode, then LHS and |
4607 | /// RHS will be unchanged, CC will be set to the inverted condcode, and
4608 | /// NeedInvert will be set to true. The caller must invert the result of the |
4609 | /// SETCC with SelectionDAG::getLogicalNOT() or take equivalent action to swap |
4610 | /// the effect of a true/false result. |
4611 | /// |
4612 | /// \returns true if the SetCC has been legalized, false if it hasn't. |
4613 | bool LegalizeSetCCCondCode(SelectionDAG &DAG, EVT VT, SDValue &LHS, |
4614 | SDValue &RHS, SDValue &CC, bool &NeedInvert, |
4615 | const SDLoc &dl, SDValue &Chain, |
4616 | bool IsSignaling = false) const; |
4617 | |
4618 | //===--------------------------------------------------------------------===// |
4619 | // Instruction Emitting Hooks |
4620 | // |
4621 | |
4622 | /// This method should be implemented by targets that mark instructions with |
4623 | /// the 'usesCustomInserter' flag. These instructions are special in various |
4624 | /// ways, which require special support to insert. The specified MachineInstr |
4625 | /// is created but not inserted into any basic blocks, and this method is |
4626 | /// called to expand it into a sequence of instructions, potentially also |
4627 | /// creating new basic blocks and control flow. |
4628 | /// As long as the returned basic block is different (i.e., we created a new |
4629 | /// one), the custom inserter is free to modify the rest of \p MBB. |
4630 | virtual MachineBasicBlock * |
4631 | EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const; |
4632 | |
4633 | /// This method should be implemented by targets that mark instructions with |
4634 | /// the 'hasPostISelHook' flag. These instructions must be adjusted after |
4635 | /// instruction selection by target hooks, e.g., to fill in optional defs for
4636 | /// ARM 's' setting instructions.
4637 | virtual void AdjustInstrPostInstrSelection(MachineInstr &MI, |
4638 | SDNode *Node) const; |
4639 | |
4640 | /// If this function returns true, SelectionDAGBuilder emits a |
4641 | /// LOAD_STACK_GUARD node when it is lowering Intrinsic::stackprotector. |
4642 | virtual bool useLoadStackGuardNode() const { |
4643 | return false; |
4644 | } |
4645 | |
4646 | virtual SDValue emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val, |
4647 | const SDLoc &DL) const { |
4648 | llvm_unreachable("not implemented for this target");
4649 | } |
4650 | |
4651 | /// Lower TLS global address SDNode for target independent emulated TLS model. |
4652 | virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, |
4653 | SelectionDAG &DAG) const; |
4654 | |
4655 | /// Expands a target-specific indirect branch for the case of JumpTable
4656 | /// expansion.
4657 | virtual SDValue expandIndirectJTBranch(const SDLoc& dl, SDValue Value, SDValue Addr, |
4658 | SelectionDAG &DAG) const { |
4659 | return DAG.getNode(ISD::BRIND, dl, MVT::Other, Value, Addr); |
4660 | } |
4661 | |
4662 | // seteq(x, 0) -> truncate(srl(ctlz(zext(x)), log2(#bits))) |
4663 | // If we're comparing for equality to zero and isCtlzFast is true, expose the |
4664 | // fact that this can be implemented as a ctlz/srl pair, so that the dag |
4665 | // combiner can fold the new nodes. |
4666 | SDValue lowerCmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) const; |
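// --- Editor's example (illustrative sketch, not part of this header) ---
// The identity above relies on ctlz(x) equaling the bit width exactly when
// x == 0, so shifting right by log2(#bits) isolates that case. A scalar
// check for i32; __builtin_clz is undefined at zero, hence the guard that
// stands in for a well-defined ISD::CTLZ:
#include <cstdint>
#include <cstdio>

static uint32_t isZeroViaCtlz(uint32_t X) {
  unsigned Lz = X ? (unsigned)__builtin_clz(X) : 32; // ctlz with ctlz(0)==32
  return Lz >> 5;                                    // 1 iff X == 0
}

int main() {
  std::printf("%u %u\n", isZeroViaCtlz(0), isZeroViaCtlz(7)); // 1 0
  return 0;
}
// --- end example ---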
4667 | |
4668 | /// Give targets the chance to reduce the number of distinct addressing modes.
4669 | ISD::MemIndexType getCanonicalIndexType(ISD::MemIndexType IndexType, |
4670 | EVT MemVT, SDValue Offsets) const; |
4671 | |
4672 | private: |
4673 | SDValue foldSetCCWithAnd(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond, |
4674 | const SDLoc &DL, DAGCombinerInfo &DCI) const; |
4675 | SDValue foldSetCCWithBinOp(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond, |
4676 | const SDLoc &DL, DAGCombinerInfo &DCI) const; |
4677 | |
4678 | SDValue optimizeSetCCOfSignedTruncationCheck(EVT SCCVT, SDValue N0, |
4679 | SDValue N1, ISD::CondCode Cond, |
4680 | DAGCombinerInfo &DCI, |
4681 | const SDLoc &DL) const; |
4682 | |
4683 | // (X & (C l>>/<< Y)) ==/!= 0 --> ((X <</l>> Y) & C) ==/!= 0 |
4684 | SDValue optimizeSetCCByHoistingAndByConstFromLogicalShift( |
4685 | EVT SCCVT, SDValue N0, SDValue N1C, ISD::CondCode Cond, |
4686 | DAGCombinerInfo &DCI, const SDLoc &DL) const; |
4687 | |
4688 | SDValue prepareUREMEqFold(EVT SETCCVT, SDValue REMNode, |
4689 | SDValue CompTargetNode, ISD::CondCode Cond, |
4690 | DAGCombinerInfo &DCI, const SDLoc &DL, |
4691 | SmallVectorImpl<SDNode *> &Created) const; |
4692 | SDValue buildUREMEqFold(EVT SETCCVT, SDValue REMNode, SDValue CompTargetNode, |
4693 | ISD::CondCode Cond, DAGCombinerInfo &DCI, |
4694 | const SDLoc &DL) const; |
4695 | |
4696 | SDValue prepareSREMEqFold(EVT SETCCVT, SDValue REMNode, |
4697 | SDValue CompTargetNode, ISD::CondCode Cond, |
4698 | DAGCombinerInfo &DCI, const SDLoc &DL, |
4699 | SmallVectorImpl<SDNode *> &Created) const; |
4700 | SDValue buildSREMEqFold(EVT SETCCVT, SDValue REMNode, SDValue CompTargetNode, |
4701 | ISD::CondCode Cond, DAGCombinerInfo &DCI, |
4702 | const SDLoc &DL) const; |
4703 | }; |
4704 | |
4705 | /// Given an LLVM IR type and return type attributes, compute the return value |
4706 | /// EVTs and flags, and optionally also the offsets, if the return value is |
4707 | /// being lowered to memory. |
4708 | void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr, |
4709 | SmallVectorImpl<ISD::OutputArg> &Outs, |
4710 | const TargetLowering &TLI, const DataLayout &DL); |
4711 | |
4712 | } // end namespace llvm |
4713 | |
4714 | #endif // LLVM_CODEGEN_TARGETLOWERING_H |
1 | //===- llvm/CodeGen/SelectionDAGNodes.h - SelectionDAG Nodes ----*- C++ -*-===// | |||
2 | // | |||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
4 | // See https://llvm.org/LICENSE.txt for license information. | |||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
6 | // | |||
7 | //===----------------------------------------------------------------------===// | |||
8 | // | |||
9 | // This file declares the SDNode class and derived classes, which are used to | |||
10 | // represent the nodes and operations present in a SelectionDAG. These nodes | |||
11 | // and operations are machine code level operations, with some similarities to | |||
12 | // the GCC RTL representation. | |||
13 | // | |||
14 | // Clients should include the SelectionDAG.h file instead of this file directly. | |||
15 | // | |||
16 | //===----------------------------------------------------------------------===// | |||
17 | ||||
18 | #ifndef LLVM_CODEGEN_SELECTIONDAGNODES_H | |||
19 | #define LLVM_CODEGEN_SELECTIONDAGNODES_H | |||
20 | ||||
21 | #include "llvm/ADT/APFloat.h" | |||
22 | #include "llvm/ADT/ArrayRef.h" | |||
23 | #include "llvm/ADT/BitVector.h" | |||
24 | #include "llvm/ADT/FoldingSet.h" | |||
25 | #include "llvm/ADT/GraphTraits.h" | |||
26 | #include "llvm/ADT/SmallPtrSet.h" | |||
27 | #include "llvm/ADT/SmallVector.h" | |||
28 | #include "llvm/ADT/ilist_node.h" | |||
29 | #include "llvm/ADT/iterator.h" | |||
30 | #include "llvm/ADT/iterator_range.h" | |||
31 | #include "llvm/CodeGen/ISDOpcodes.h" | |||
32 | #include "llvm/CodeGen/MachineMemOperand.h" | |||
33 | #include "llvm/CodeGen/Register.h" | |||
34 | #include "llvm/CodeGen/ValueTypes.h" | |||
35 | #include "llvm/IR/Constants.h" | |||
36 | #include "llvm/IR/DebugLoc.h" | |||
37 | #include "llvm/IR/Instruction.h" | |||
38 | #include "llvm/IR/Instructions.h" | |||
39 | #include "llvm/IR/Metadata.h" | |||
40 | #include "llvm/IR/Operator.h" | |||
41 | #include "llvm/Support/AlignOf.h" | |||
42 | #include "llvm/Support/AtomicOrdering.h" | |||
43 | #include "llvm/Support/Casting.h" | |||
44 | #include "llvm/Support/ErrorHandling.h" | |||
45 | #include "llvm/Support/MachineValueType.h" | |||
46 | #include "llvm/Support/TypeSize.h" | |||
47 | #include <algorithm> | |||
48 | #include <cassert> | |||
49 | #include <climits> | |||
50 | #include <cstddef> | |||
51 | #include <cstdint> | |||
52 | #include <cstring> | |||
53 | #include <iterator> | |||
54 | #include <string> | |||
55 | #include <tuple> | |||
56 | ||||
57 | namespace llvm { | |||
58 | ||||
59 | class APInt; | |||
60 | class Constant; | |||
61 | template <typename T> struct DenseMapInfo; | |||
62 | class GlobalValue; | |||
63 | class MachineBasicBlock; | |||
64 | class MachineConstantPoolValue; | |||
65 | class MCSymbol; | |||
66 | class raw_ostream; | |||
67 | class SDNode; | |||
68 | class SelectionDAG; | |||
69 | class Type; | |||
70 | class Value; | |||
71 | ||||
72 | void checkForCycles(const SDNode *N, const SelectionDAG *DAG = nullptr, | |||
73 | bool force = false); | |||
74 | ||||
75 | /// This represents a list of ValueTypes that has been interned by
76 | /// a SelectionDAG. Instances of this simple value class are returned by
77 | /// SelectionDAG::getVTList(...). | |||
78 | /// | |||
79 | struct SDVTList { | |||
80 | const EVT *VTs; | |||
81 | unsigned int NumVTs; | |||
82 | }; | |||
83 | ||||
84 | namespace ISD { | |||
85 | ||||
86 | /// Node predicates | |||
87 | ||||
88 | /// If N is a BUILD_VECTOR or SPLAT_VECTOR node whose elements are all the | |||
89 | /// same constant or undefined, return true and return the constant value in | |||
90 | /// \p SplatValue. | |||
91 | bool isConstantSplatVector(const SDNode *N, APInt &SplatValue); | |||
92 | ||||
93 | /// Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where | |||
94 | /// all of the elements are ~0 or undef. If \p BuildVectorOnly is set to | |||
95 | /// true, it only checks BUILD_VECTOR. | |||
96 | bool isConstantSplatVectorAllOnes(const SDNode *N, | |||
97 | bool BuildVectorOnly = false); | |||
98 | ||||
99 | /// Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where | |||
100 | /// all of the elements are 0 or undef. If \p BuildVectorOnly is set to true, it | |||
101 | /// only checks BUILD_VECTOR. | |||
102 | bool isConstantSplatVectorAllZeros(const SDNode *N, | |||
103 | bool BuildVectorOnly = false); | |||
104 | ||||
105 | /// Return true if the specified node is a BUILD_VECTOR where all of the | |||
106 | /// elements are ~0 or undef. | |||
107 | bool isBuildVectorAllOnes(const SDNode *N); | |||
108 | ||||
109 | /// Return true if the specified node is a BUILD_VECTOR where all of the | |||
110 | /// elements are 0 or undef. | |||
111 | bool isBuildVectorAllZeros(const SDNode *N); | |||
112 | ||||
113 | /// Return true if the specified node is a BUILD_VECTOR node of all | |||
114 | /// ConstantSDNode or undef. | |||
115 | bool isBuildVectorOfConstantSDNodes(const SDNode *N); | |||
116 | ||||
117 | /// Return true if the specified node is a BUILD_VECTOR node of all | |||
118 | /// ConstantFPSDNode or undef. | |||
119 | bool isBuildVectorOfConstantFPSDNodes(const SDNode *N); | |||
120 | ||||
121 | /// Return true if the node has at least one operand and all operands of the | |||
122 | /// specified node are ISD::UNDEF. | |||
123 | bool allOperandsUndef(const SDNode *N); | |||
124 | ||||
125 | } // end namespace ISD | |||
126 | ||||
127 | //===----------------------------------------------------------------------===// | |||
128 | /// Unlike LLVM values, Selection DAG nodes may return multiple | |||
129 | /// values as the result of a computation. Many nodes return multiple values, | |||
130 | /// from loads (which define a token and a return value) to ADDC (which returns | |||
131 | /// a result and a carry value), to calls (which may return an arbitrary number | |||
132 | /// of values). | |||
133 | /// | |||
134 | /// As such, each use of a SelectionDAG computation must indicate the node that | |||
135 | /// computes it as well as which return value to use from that node. This pair | |||
136 | /// of information is represented with the SDValue value type. | |||
137 | /// | |||
138 | class SDValue { | |||
139 | friend struct DenseMapInfo<SDValue>; | |||
140 | ||||
141 | SDNode *Node = nullptr; // The node defining the value we are using. | |||
142 | unsigned ResNo = 0; // Which return value of the node we are using. | |||
143 | ||||
144 | public: | |||
145 | SDValue() = default; | |||
146 | SDValue(SDNode *node, unsigned resno); | |||
147 | ||||
148 | /// get the index which selects a specific result in the SDNode | |||
149 | unsigned getResNo() const { return ResNo; } | |||
150 | ||||
151 | /// get the SDNode which holds the desired result | |||
152 | SDNode *getNode() const { return Node; } | |||
153 | ||||
154 | /// set the SDNode | |||
155 | void setNode(SDNode *N) { Node = N; } | |||
156 | ||||
157 | inline SDNode *operator->() const { return Node; } | |||
158 | ||||
159 | bool operator==(const SDValue &O) const { | |||
160 | return Node == O.Node && ResNo == O.ResNo; | |||
161 | } | |||
162 | bool operator!=(const SDValue &O) const { | |||
163 | return !operator==(O); | |||
164 | } | |||
165 | bool operator<(const SDValue &O) const { | |||
166 | return std::tie(Node, ResNo) < std::tie(O.Node, O.ResNo); | |||
167 | } | |||
168 | explicit operator bool() const { | |||
169 | return Node != nullptr; | |||
170 | } | |||
171 | ||||
172 | SDValue getValue(unsigned R) const { | |||
173 | return SDValue(Node, R); | |||
174 | } | |||
175 | ||||
176 | /// Return true if this node is an operand of N. | |||
177 | bool isOperandOf(const SDNode *N) const; | |||
178 | ||||
179 | /// Return the ValueType of the referenced return value. | |||
180 | inline EVT getValueType() const; | |||
181 | ||||
182 | /// Return the simple ValueType of the referenced return value. | |||
183 | MVT getSimpleValueType() const { | |||
184 | return getValueType().getSimpleVT(); | |||
185 | } | |||
186 | ||||
187 | /// Returns the size of the value in bits. | |||
188 | /// | |||
189 | /// If the value type is a scalable vector type, the scalable property will | |||
190 | /// be set and the runtime size will be a positive integer multiple of the | |||
191 | /// base size. | |||
192 | TypeSize getValueSizeInBits() const { | |||
193 | return getValueType().getSizeInBits(); | |||
194 | } | |||
195 | ||||
196 | uint64_t getScalarValueSizeInBits() const { | |||
197 | return getValueType().getScalarType().getFixedSizeInBits(); | |||
198 | } | |||
199 | ||||
200 | // Forwarding methods - These forward to the corresponding methods in SDNode. | |||
201 | inline unsigned getOpcode() const; | |||
202 | inline unsigned getNumOperands() const; | |||
203 | inline const SDValue &getOperand(unsigned i) const; | |||
204 | inline uint64_t getConstantOperandVal(unsigned i) const; | |||
205 | inline const APInt &getConstantOperandAPInt(unsigned i) const; | |||
206 | inline bool isTargetMemoryOpcode() const; | |||
207 | inline bool isTargetOpcode() const; | |||
208 | inline bool isMachineOpcode() const; | |||
209 | inline bool isUndef() const; | |||
210 | inline unsigned getMachineOpcode() const; | |||
211 | inline const DebugLoc &getDebugLoc() const; | |||
212 | inline void dump() const; | |||
213 | inline void dump(const SelectionDAG *G) const; | |||
214 | inline void dumpr() const; | |||
215 | inline void dumpr(const SelectionDAG *G) const; | |||
216 | ||||
217 | /// Return true if this operand (which must be a chain) reaches the | |||
218 | /// specified operand without crossing any side-effecting instructions. | |||
219 | /// In practice, this looks through token factors and non-volatile loads. | |||
220 | /// In order to remain efficient, this only looks a couple of nodes in; | |||
221 | /// it does not do an exhaustive search. | |||
222 | bool reachesChainWithoutSideEffects(SDValue Dest, | |||
223 | unsigned Depth = 2) const; | |||
224 | ||||
225 | /// Return true if there are no nodes using value ResNo of Node. | |||
226 | inline bool use_empty() const; | |||
227 | ||||
228 | /// Return true if there is exactly one node using value ResNo of Node. | |||
229 | inline bool hasOneUse() const; | |||
230 | }; | |||
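// Illustrative usage sketch (not part of this header): SDValue is a cheap
// (SDNode*, unsigned) pair and is passed by value. The explicit bool
// conversion above tests only the Node pointer; the forwarding methods
// dereference Node without a null check, so guarded call sites test the
// value first.
inline bool hasNonNullType(SDValue V, EVT VT) {
  return V && V.getValueType() == VT; // Check Node before dereferencing it.
}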
231 | ||||
232 | template<> struct DenseMapInfo<SDValue> { | |||
233 | static inline SDValue getEmptyKey() { | |||
234 | SDValue V; | |||
235 | V.ResNo = -1U; | |||
236 | return V; | |||
237 | } | |||
238 | ||||
239 | static inline SDValue getTombstoneKey() { | |||
240 | SDValue V; | |||
241 | V.ResNo = -2U; | |||
242 | return V; | |||
243 | } | |||
244 | ||||
245 | static unsigned getHashValue(const SDValue &Val) { | |||
246 | return ((unsigned)((uintptr_t)Val.getNode() >> 4) ^ | |||
247 | (unsigned)((uintptr_t)Val.getNode() >> 9)) + Val.getResNo(); | |||
248 | } | |||
249 | ||||
250 | static bool isEqual(const SDValue &LHS, const SDValue &RHS) { | |||
251 | return LHS == RHS; | |||
252 | } | |||
253 | }; | |||
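// Illustrative sketch (assumes "llvm/ADT/DenseMap.h" is included): the
// specialization above makes SDValue usable as a DenseMap key; ResNo values
// -1U and -2U are reserved for the empty and tombstone keys.
inline void recordBitWidth(llvm::DenseMap<SDValue, uint64_t> &Widths,
                           SDValue V) {
  Widths[V] = V.getScalarValueSizeInBits(); // Hashes Node pointer and ResNo.
}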
254 | ||||
255 | /// Allow casting operators to work directly on | |||
256 | /// SDValues as if they were SDNode*'s. | |||
257 | template<> struct simplify_type<SDValue> { | |||
258 | using SimpleType = SDNode *; | |||
259 | ||||
260 | static SimpleType getSimplifiedValue(SDValue &Val) { | |||
261 | return Val.getNode(); | |||
262 | } | |||
263 | }; | |||
264 | template<> struct simplify_type<const SDValue> { | |||
265 | using SimpleType = /*const*/ SDNode *; | |||
266 | ||||
267 | static SimpleType getSimplifiedValue(const SDValue &Val) { | |||
268 | return Val.getNode(); | |||
269 | } | |||
270 | }; | |||
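// Example (illustrative): with these specializations, the cast machinery
// from "llvm/Support/Casting.h" applies to SDValue directly, operating on
// the unwrapped node:
//   if (auto *Ld = dyn_cast<LoadSDNode>(V)) // casts V.getNode()
//     process(Ld->getBasePtr());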
271 | ||||
272 | /// Represents a use of a SDNode. This class holds an SDValue, | |||
273 | /// which records the SDNode being used and the result number, a | |||
274 | /// pointer to the SDNode using the value, and Next and Prev pointers, | |||
275 | /// which link together all the uses of an SDNode. | |||
276 | /// | |||
277 | class SDUse { | |||
278 | /// Val - The value being used. | |||
279 | SDValue Val; | |||
280 | /// User - The user of this value. | |||
281 | SDNode *User = nullptr; | |||
282 | /// Prev, Next - Pointers to the uses list of the SDNode referred by | |||
283 | /// this operand. | |||
284 | SDUse **Prev = nullptr; | |||
285 | SDUse *Next = nullptr; | |||
286 | ||||
287 | public: | |||
288 | SDUse() = default; | |||
289 | SDUse(const SDUse &U) = delete; | |||
290 | SDUse &operator=(const SDUse &) = delete; | |||
291 | ||||
292 | /// Normally SDUse will just implicitly convert to an SDValue that it holds. | |||
293 | operator const SDValue&() const { return Val; } | |||
294 | ||||
295 | /// If implicit conversion to SDValue doesn't work, the get() method returns | |||
296 | /// the SDValue. | |||
297 | const SDValue &get() const { return Val; } | |||
298 | ||||
299 | /// This returns the SDNode that contains this Use. | |||
300 | SDNode *getUser() { return User; } | |||
301 | ||||
302 | /// Get the next SDUse in the use list. | |||
303 | SDUse *getNext() const { return Next; } | |||
304 | ||||
305 | /// Convenience function for get().getNode(). | |||
306 | SDNode *getNode() const { return Val.getNode(); } | |||
307 | /// Convenience function for get().getResNo(). | |||
308 | unsigned getResNo() const { return Val.getResNo(); } | |||
309 | /// Convenience function for get().getValueType(). | |||
310 | EVT getValueType() const { return Val.getValueType(); } | |||
311 | ||||
312 | /// Convenience function for get().operator== | |||
313 | bool operator==(const SDValue &V) const { | |||
314 | return Val == V; | |||
315 | } | |||
316 | ||||
317 | /// Convenience function for get().operator!= | |||
318 | bool operator!=(const SDValue &V) const { | |||
319 | return Val != V; | |||
320 | } | |||
321 | ||||
322 | /// Convenience function for get().operator< | |||
323 | bool operator<(const SDValue &V) const { | |||
324 | return Val < V; | |||
325 | } | |||
326 | ||||
327 | private: | |||
328 | friend class SelectionDAG; | |||
329 | friend class SDNode; | |||
330 | // TODO: unfriend HandleSDNode once we fix its operand handling. | |||
331 | friend class HandleSDNode; | |||
332 | ||||
333 | void setUser(SDNode *p) { User = p; } | |||
334 | ||||
335 | /// Remove this use from its existing use list, assign it the | |||
336 | /// given value, and add it to the new value's node's use list. | |||
337 | inline void set(const SDValue &V); | |||
338 | /// Like set, but only supports initializing a newly-allocated | |||
339 | /// SDUse with a non-null value. | |||
340 | inline void setInitial(const SDValue &V); | |||
341 | /// Like set, but only sets the Node portion of the value, | |||
342 | /// leaving the ResNo portion unmodified. | |||
343 | inline void setNode(SDNode *N); | |||
344 | ||||
345 | void addToList(SDUse **List) { | |||
346 | Next = *List; | |||
347 | if (Next) Next->Prev = &Next; | |||
348 | Prev = List; | |||
349 | *List = this; | |||
350 | } | |||
351 | ||||
352 | void removeFromList() { | |||
353 | *Prev = Next; | |||
354 | if (Next) Next->Prev = Prev; | |||
355 | } | |||
356 | }; | |||
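// Example (illustrative): SDUse converts implicitly to the SDValue it holds,
// so an operand list can be scanned as values:
//   for (const SDUse &U : N->ops())
//     if (U.getValueType() == MVT::Other)
//       ... // found the chain operand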
357 | ||||
358 | /// simplify_type specializations - Allow casting operators to work directly on | |||
359 | /// SDValues as if they were SDNode*'s. | |||
360 | template<> struct simplify_type<SDUse> { | |||
361 | using SimpleType = SDNode *; | |||
362 | ||||
363 | static SimpleType getSimplifiedValue(SDUse &Val) { | |||
364 | return Val.getNode(); | |||
365 | } | |||
366 | }; | |||
367 | ||||
368 | /// These are IR-level optimization flags that may be propagated to SDNodes. | |||
369 | /// TODO: This data structure should be shared by the IR optimizer and | |||
370 | /// the backend. | |||
371 | struct SDNodeFlags { | |||
372 | private: | |||
373 | bool NoUnsignedWrap : 1; | |||
374 | bool NoSignedWrap : 1; | |||
375 | bool Exact : 1; | |||
376 | bool NoNaNs : 1; | |||
377 | bool NoInfs : 1; | |||
378 | bool NoSignedZeros : 1; | |||
379 | bool AllowReciprocal : 1; | |||
380 | bool AllowContract : 1; | |||
381 | bool ApproximateFuncs : 1; | |||
382 | bool AllowReassociation : 1; | |||
383 | ||||
384 | // We assume instructions do not raise floating-point exceptions by default, | |||
385 | // and only those marked explicitly may do so. We could choose to represent | |||
386 | // this via a positive "FPExcept" flag like on the MI level, but having a | |||
387 | // negative "NoFPExcept" flag here (that defaults to true) makes the flag | |||
388 | // intersection logic more straightforward. | |||
389 | bool NoFPExcept : 1; | |||
390 | ||||
391 | public: | |||
392 | /// Default constructor turns off all optimization flags. | |||
393 | SDNodeFlags() | |||
394 | : NoUnsignedWrap(false), NoSignedWrap(false), Exact(false), NoNaNs(false), | |||
395 | NoInfs(false), NoSignedZeros(false), AllowReciprocal(false), | |||
396 | AllowContract(false), ApproximateFuncs(false), | |||
397 | AllowReassociation(false), NoFPExcept(false) {} | |||
398 | ||||
399 | /// Propagate the fast-math-flags from an IR FPMathOperator. | |||
400 | void copyFMF(const FPMathOperator &FPMO) { | |||
401 | setNoNaNs(FPMO.hasNoNaNs()); | |||
402 | setNoInfs(FPMO.hasNoInfs()); | |||
403 | setNoSignedZeros(FPMO.hasNoSignedZeros()); | |||
404 | setAllowReciprocal(FPMO.hasAllowReciprocal()); | |||
405 | setAllowContract(FPMO.hasAllowContract()); | |||
406 | setApproximateFuncs(FPMO.hasApproxFunc()); | |||
407 | setAllowReassociation(FPMO.hasAllowReassoc()); | |||
408 | } | |||
409 | ||||
410 | // These are mutators for each flag. | |||
411 | void setNoUnsignedWrap(bool b) { NoUnsignedWrap = b; } | |||
412 | void setNoSignedWrap(bool b) { NoSignedWrap = b; } | |||
413 | void setExact(bool b) { Exact = b; } | |||
414 | void setNoNaNs(bool b) { NoNaNs = b; } | |||
415 | void setNoInfs(bool b) { NoInfs = b; } | |||
416 | void setNoSignedZeros(bool b) { NoSignedZeros = b; } | |||
417 | void setAllowReciprocal(bool b) { AllowReciprocal = b; } | |||
418 | void setAllowContract(bool b) { AllowContract = b; } | |||
419 | void setApproximateFuncs(bool b) { ApproximateFuncs = b; } | |||
420 | void setAllowReassociation(bool b) { AllowReassociation = b; } | |||
421 | void setNoFPExcept(bool b) { NoFPExcept = b; } | |||
422 | ||||
423 | // These are accessors for each flag. | |||
424 | bool hasNoUnsignedWrap() const { return NoUnsignedWrap; } | |||
425 | bool hasNoSignedWrap() const { return NoSignedWrap; } | |||
426 | bool hasExact() const { return Exact; } | |||
427 | bool hasNoNaNs() const { return NoNaNs; } | |||
428 | bool hasNoInfs() const { return NoInfs; } | |||
429 | bool hasNoSignedZeros() const { return NoSignedZeros; } | |||
430 | bool hasAllowReciprocal() const { return AllowReciprocal; } | |||
431 | bool hasAllowContract() const { return AllowContract; } | |||
432 | bool hasApproximateFuncs() const { return ApproximateFuncs; } | |||
433 | bool hasAllowReassociation() const { return AllowReassociation; } | |||
434 | bool hasNoFPExcept() const { return NoFPExcept; } | |||
435 | ||||
436 | /// Clear any flags in this flag set that aren't also set in Flags. All | |||
437 | /// flags will be cleared if Flags are undefined. | |||
438 | void intersectWith(const SDNodeFlags Flags) { | |||
439 | NoUnsignedWrap &= Flags.NoUnsignedWrap; | |||
440 | NoSignedWrap &= Flags.NoSignedWrap; | |||
441 | Exact &= Flags.Exact; | |||
442 | NoNaNs &= Flags.NoNaNs; | |||
443 | NoInfs &= Flags.NoInfs; | |||
444 | NoSignedZeros &= Flags.NoSignedZeros; | |||
445 | AllowReciprocal &= Flags.AllowReciprocal; | |||
446 | AllowContract &= Flags.AllowContract; | |||
447 | ApproximateFuncs &= Flags.ApproximateFuncs; | |||
448 | AllowReassociation &= Flags.AllowReassociation; | |||
449 | NoFPExcept &= Flags.NoFPExcept; | |||
450 | } | |||
451 | }; | |||
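// Illustrative sketch (not part of this header): flags are value-semantic.
// A common pattern is to seed them from an IR FPMathOperator and intersect
// with another node's flags, so the result never claims more freedom than
// both sources allow.
inline SDNodeFlags conservativeFlags(const FPMathOperator &FPMO,
                                     SDNodeFlags Other) {
  SDNodeFlags Flags;
  Flags.copyFMF(FPMO);        // Pull nnan/ninf/nsz/... from the IR operator.
  Flags.intersectWith(Other); // Keep only the flags both sets agree on.
  return Flags;
}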
452 | ||||
453 | /// Represents one node in the SelectionDAG. | |||
454 | /// | |||
455 | class SDNode : public FoldingSetNode, public ilist_node<SDNode> { | |||
456 | private: | |||
457 | /// The operation that this node performs. | |||
458 | int16_t NodeType; | |||
459 | ||||
460 | protected: | |||
461 | // We define a set of mini-helper classes to help us interpret the bits in our | |||
462 | // SubclassData. These are designed to fit within a uint16_t so they pack | |||
463 | // with NodeType. | |||
464 | ||||
465 | #if defined(_AIX) && (!defined(__GNUC__) || defined(__clang__)) | |||
466 | // Except for GCC; by default, AIX compilers store bit-fields in 4-byte words | |||
467 | // and give the `pack` pragma push semantics. | |||
468 | #define BEGIN_TWO_BYTE_PACK() _Pragma("pack(2)") | |||
469 | #define END_TWO_BYTE_PACK() _Pragma("pack(pop)") | |||
470 | #else | |||
471 | #define BEGIN_TWO_BYTE_PACK() | |||
472 | #define END_TWO_BYTE_PACK() | |||
473 | #endif | |||
474 | ||||
475 | BEGIN_TWO_BYTE_PACK() | |||
476 | class SDNodeBitfields { | |||
477 | friend class SDNode; | |||
478 | friend class MemIntrinsicSDNode; | |||
479 | friend class MemSDNode; | |||
480 | friend class SelectionDAG; | |||
481 | ||||
482 | uint16_t HasDebugValue : 1; | |||
483 | uint16_t IsMemIntrinsic : 1; | |||
484 | uint16_t IsDivergent : 1; | |||
485 | }; | |||
486 | enum { NumSDNodeBits = 3 }; | |||
487 | ||||
488 | class ConstantSDNodeBitfields { | |||
489 | friend class ConstantSDNode; | |||
490 | ||||
491 | uint16_t : NumSDNodeBits; | |||
492 | ||||
493 | uint16_t IsOpaque : 1; | |||
494 | }; | |||
495 | ||||
496 | class MemSDNodeBitfields { | |||
497 | friend class MemSDNode; | |||
498 | friend class MemIntrinsicSDNode; | |||
499 | friend class AtomicSDNode; | |||
500 | ||||
501 | uint16_t : NumSDNodeBits; | |||
502 | ||||
503 | uint16_t IsVolatile : 1; | |||
504 | uint16_t IsNonTemporal : 1; | |||
505 | uint16_t IsDereferenceable : 1; | |||
506 | uint16_t IsInvariant : 1; | |||
507 | }; | |||
508 | enum { NumMemSDNodeBits = NumSDNodeBits + 4 }; | |||
509 | ||||
510 | class LSBaseSDNodeBitfields { | |||
511 | friend class LSBaseSDNode; | |||
512 | friend class MaskedLoadStoreSDNode; | |||
513 | friend class MaskedGatherScatterSDNode; | |||
514 | ||||
515 | uint16_t : NumMemSDNodeBits; | |||
516 | ||||
517 | // This storage is shared between disparate class hierarchies to hold an | |||
518 | // enumeration specific to the class hierarchy in use. | |||
519 | // LSBaseSDNode => enum ISD::MemIndexedMode | |||
520 | // MaskedLoadStoreBaseSDNode => enum ISD::MemIndexedMode | |||
521 | // MaskedGatherScatterSDNode => enum ISD::MemIndexType | |||
522 | uint16_t AddressingMode : 3; | |||
523 | }; | |||
524 | enum { NumLSBaseSDNodeBits = NumMemSDNodeBits + 3 }; | |||
525 | ||||
526 | class LoadSDNodeBitfields { | |||
527 | friend class LoadSDNode; | |||
528 | friend class MaskedLoadSDNode; | |||
529 | friend class MaskedGatherSDNode; | |||
530 | ||||
531 | uint16_t : NumLSBaseSDNodeBits; | |||
532 | ||||
533 | uint16_t ExtTy : 2; // enum ISD::LoadExtType | |||
534 | uint16_t IsExpanding : 1; | |||
535 | }; | |||
536 | ||||
537 | class StoreSDNodeBitfields { | |||
538 | friend class StoreSDNode; | |||
539 | friend class MaskedStoreSDNode; | |||
540 | friend class MaskedScatterSDNode; | |||
541 | ||||
542 | uint16_t : NumLSBaseSDNodeBits; | |||
543 | ||||
544 | uint16_t IsTruncating : 1; | |||
545 | uint16_t IsCompressing : 1; | |||
546 | }; | |||
547 | ||||
548 | union { | |||
549 | char RawSDNodeBits[sizeof(uint16_t)]; | |||
550 | SDNodeBitfields SDNodeBits; | |||
551 | ConstantSDNodeBitfields ConstantSDNodeBits; | |||
552 | MemSDNodeBitfields MemSDNodeBits; | |||
553 | LSBaseSDNodeBitfields LSBaseSDNodeBits; | |||
554 | LoadSDNodeBitfields LoadSDNodeBits; | |||
555 | StoreSDNodeBitfields StoreSDNodeBits; | |||
556 | }; | |||
557 | END_TWO_BYTE_PACK() | |||
558 | #undef BEGIN_TWO_BYTE_PACK | |||
559 | #undef END_TWO_BYTE_PACK | |||
560 | ||||
561 | // RawSDNodeBits must cover the entirety of the union. This means that all of | |||
562 | // the union's members must have size <= RawSDNodeBits. We write the RHS as | |||
563 | // "2" instead of sizeof(RawSDNodeBits) because MSVC can't handle the latter. | |||
564 | static_assert(sizeof(SDNodeBitfields) <= 2, "field too wide"); | |||
565 | static_assert(sizeof(ConstantSDNodeBitfields) <= 2, "field too wide"); | |||
566 | static_assert(sizeof(MemSDNodeBitfields) <= 2, "field too wide"); | |||
567 | static_assert(sizeof(LSBaseSDNodeBitfields) <= 2, "field too wide"); | |||
568 | static_assert(sizeof(LoadSDNodeBitfields) <= 2, "field too wide"); | |||
569 | static_assert(sizeof(StoreSDNodeBitfields) <= 2, "field too wide"); | |||
570 | ||||
571 | private: | |||
572 | friend class SelectionDAG; | |||
573 | // TODO: unfriend HandleSDNode once we fix its operand handling. | |||
574 | friend class HandleSDNode; | |||
575 | ||||
576 | /// Unique id per SDNode in the DAG. | |||
577 | int NodeId = -1; | |||
578 | ||||
579 | /// The values that are used by this operation. | |||
580 | SDUse *OperandList = nullptr; | |||
581 | ||||
582 | /// The types of the values this node defines. SDNode's may | |||
583 | /// define multiple values simultaneously. | |||
584 | const EVT *ValueList; | |||
585 | ||||
586 | /// List of uses for this SDNode. | |||
587 | SDUse *UseList = nullptr; | |||
588 | ||||
589 | /// The number of entries in the Operand/Value list. | |||
590 | unsigned short NumOperands = 0; | |||
591 | unsigned short NumValues; | |||
592 | ||||
593 | // The ordering of the SDNodes. It roughly corresponds to the ordering of the | |||
594 | // original LLVM instructions. | |||
595 | // This is used for turning off scheduling, because we'll forgo | |||
596 | // the normal scheduling algorithms and output the instructions according to | |||
597 | // this ordering. | |||
598 | unsigned IROrder; | |||
599 | ||||
600 | /// Source line information. | |||
601 | DebugLoc debugLoc; | |||
602 | ||||
603 | /// Return a pointer to the specified value type. | |||
604 | static const EVT *getValueTypeList(EVT VT); | |||
605 | ||||
606 | SDNodeFlags Flags; | |||
607 | ||||
608 | public: | |||
609 | /// Unique and persistent id per SDNode in the DAG. | |||
610 | /// Used for debug printing. | |||
611 | uint16_t PersistentId; | |||
612 | ||||
613 | //===--------------------------------------------------------------------===// | |||
614 | // Accessors | |||
615 | // | |||
616 | ||||
617 | /// Return the SelectionDAG opcode value for this node. For | |||
618 | /// pre-isel nodes (those for which isMachineOpcode returns false), these | |||
619 | /// are the opcode values in the ISD and <target>ISD namespaces. For | |||
620 | /// post-isel opcodes, see getMachineOpcode. | |||
621 | unsigned getOpcode() const { return (unsigned short)NodeType; } | |||
622 | ||||
623 | /// Test if this node has a target-specific opcode (in the | |||
624 | /// \<target\>ISD namespace). | |||
625 | bool isTargetOpcode() const { return NodeType >= ISD::BUILTIN_OP_END; } | |||
626 | ||||
627 | /// Test if this node has a target-specific opcode that may raise | |||
628 | /// FP exceptions (in the \<target\>ISD namespace and greater than | |||
629 | /// FIRST_TARGET_STRICTFP_OPCODE). Note that all target memory | |||
630 | /// opcodes are currently automatically considered to possibly raise | |||
631 | /// FP exceptions as well. | |||
632 | bool isTargetStrictFPOpcode() const { | |||
633 | return NodeType >= ISD::FIRST_TARGET_STRICTFP_OPCODE; | |||
634 | } | |||
635 | ||||
636 | /// Test if this node has a target-specific | |||
637 | /// memory-referencing opcode (in the \<target\>ISD namespace and | |||
638 | /// greater than FIRST_TARGET_MEMORY_OPCODE). | |||
639 | bool isTargetMemoryOpcode() const { | |||
640 | return NodeType >= ISD::FIRST_TARGET_MEMORY_OPCODE; | |||
641 | } | |||
642 | ||||
643 | /// Return true if the type of the node is undefined. | |||
644 | bool isUndef() const { return NodeType == ISD::UNDEF; } | |||
645 | ||||
646 | /// Test if this node is a memory intrinsic (with valid pointer information). | |||
647 | /// INTRINSIC_W_CHAIN and INTRINSIC_VOID nodes are sometimes created for | |||
648 | /// non-memory intrinsics (with chains) that are not really instances of | |||
649 | /// MemSDNode. For such nodes, we need some extra state to determine the | |||
650 | /// proper classof relationship. | |||
651 | bool isMemIntrinsic() const { | |||
652 | return (NodeType == ISD::INTRINSIC_W_CHAIN || | |||
653 | NodeType == ISD::INTRINSIC_VOID) && | |||
654 | SDNodeBits.IsMemIntrinsic; | |||
655 | } | |||
656 | ||||
657 | /// Test if this node is a strict floating point pseudo-op. | |||
658 | bool isStrictFPOpcode() { | |||
659 | switch (NodeType) { | |||
660 | default: | |||
661 | return false; | |||
662 | case ISD::STRICT_FP16_TO_FP: | |||
663 | case ISD::STRICT_FP_TO_FP16: | |||
664 | #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \ | |||
665 | case ISD::STRICT_##DAGN: | |||
666 | #include "llvm/IR/ConstrainedOps.def" | |||
667 | return true; | |||
668 | } | |||
669 | } | |||
670 | ||||
671 | /// Test if this node has a post-isel opcode, directly | |||
672 | /// corresponding to a MachineInstr opcode. | |||
673 | bool isMachineOpcode() const { return NodeType < 0; } | |||
674 | ||||
675 | /// This may only be called if isMachineOpcode returns | |||
676 | /// true. It returns the MachineInstr opcode value that the node's opcode | |||
677 | /// corresponds to. | |||
678 | unsigned getMachineOpcode() const { | |||
679 | assert(isMachineOpcode() && "Not a MachineInstr opcode!"); | |||
680 | return ~NodeType; | |||
681 | } | |||
682 | ||||
683 | bool getHasDebugValue() const { return SDNodeBits.HasDebugValue; } | |||
684 | void setHasDebugValue(bool b) { SDNodeBits.HasDebugValue = b; } | |||
685 | ||||
686 | bool isDivergent() const { return SDNodeBits.IsDivergent; } | |||
687 | ||||
688 | /// Return true if there are no uses of this node. | |||
689 | bool use_empty() const { return UseList == nullptr; } | |||
690 | ||||
691 | /// Return true if there is exactly one use of this node. | |||
692 | bool hasOneUse() const { return hasSingleElement(uses()); } | |||
693 | ||||
694 | /// Return the number of uses of this node. This method takes | |||
695 | /// time proportional to the number of uses. | |||
696 | size_t use_size() const { return std::distance(use_begin(), use_end()); } | |||
697 | ||||
698 | /// Return the unique node id. | |||
699 | int getNodeId() const { return NodeId; } | |||
700 | ||||
701 | /// Set unique node id. | |||
702 | void setNodeId(int Id) { NodeId = Id; } | |||
703 | ||||
704 | /// Return the node ordering. | |||
705 | unsigned getIROrder() const { return IROrder; } | |||
706 | ||||
707 | /// Set the node ordering. | |||
708 | void setIROrder(unsigned Order) { IROrder = Order; } | |||
709 | ||||
710 | /// Return the source location info. | |||
711 | const DebugLoc &getDebugLoc() const { return debugLoc; } | |||
712 | ||||
713 | /// Set source location info. Try to avoid this, putting | |||
714 | /// it in the constructor is preferable. | |||
715 | void setDebugLoc(DebugLoc dl) { debugLoc = std::move(dl); } | |||
716 | ||||
717 | /// This class provides iterator support for SDUse | |||
718 | /// operands that use a specific SDNode. | |||
719 | class use_iterator { | |||
720 | friend class SDNode; | |||
721 | ||||
722 | SDUse *Op = nullptr; | |||
723 | ||||
724 | explicit use_iterator(SDUse *op) : Op(op) {} | |||
725 | ||||
726 | public: | |||
727 | using iterator_category = std::forward_iterator_tag; | |||
728 | using value_type = SDUse; | |||
729 | using difference_type = std::ptrdiff_t; | |||
730 | using pointer = value_type *; | |||
731 | using reference = value_type &; | |||
732 | ||||
733 | use_iterator() = default; | |||
734 | use_iterator(const use_iterator &I) : Op(I.Op) {} | |||
735 | ||||
736 | bool operator==(const use_iterator &x) const { | |||
737 | return Op == x.Op; | |||
738 | } | |||
739 | bool operator!=(const use_iterator &x) const { | |||
740 | return !operator==(x); | |||
741 | } | |||
742 | ||||
743 | /// Return true if this iterator is at the end of uses list. | |||
744 | bool atEnd() const { return Op == nullptr; } | |||
745 | ||||
746 | // Iterator traversal: forward iteration only. | |||
747 | use_iterator &operator++() { // Preincrement | |||
748 | assert(Op && "Cannot increment end iterator!")(static_cast <bool> (Op && "Cannot increment end iterator!" ) ? void (0) : __assert_fail ("Op && \"Cannot increment end iterator!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/SelectionDAGNodes.h" , 748, __extension__ __PRETTY_FUNCTION__)); | |||
749 | Op = Op->getNext(); | |||
750 | return *this; | |||
751 | } | |||
752 | ||||
753 | use_iterator operator++(int) { // Postincrement | |||
754 | use_iterator tmp = *this; ++*this; return tmp; | |||
755 | } | |||
756 | ||||
757 | /// Retrieve a pointer to the current user node. | |||
758 | SDNode *operator*() const { | |||
759 | assert(Op && "Cannot dereference end iterator!")(static_cast <bool> (Op && "Cannot dereference end iterator!" ) ? void (0) : __assert_fail ("Op && \"Cannot dereference end iterator!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/SelectionDAGNodes.h" , 759, __extension__ __PRETTY_FUNCTION__)); | |||
760 | return Op->getUser(); | |||
761 | } | |||
762 | ||||
763 | SDNode *operator->() const { return operator*(); } | |||
764 | ||||
765 | SDUse &getUse() const { return *Op; } | |||
766 | ||||
767 | /// Retrieve the operand # of this use in its user. | |||
768 | unsigned getOperandNo() const { | |||
769 | assert(Op && "Cannot dereference end iterator!")(static_cast <bool> (Op && "Cannot dereference end iterator!" ) ? void (0) : __assert_fail ("Op && \"Cannot dereference end iterator!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/SelectionDAGNodes.h" , 769, __extension__ __PRETTY_FUNCTION__)); | |||
770 | return (unsigned)(Op - Op->getUser()->OperandList); | |||
771 | } | |||
772 | }; | |||
773 | ||||
774 | /// Provide iteration support to walk over all uses of an SDNode. | |||
775 | use_iterator use_begin() const { | |||
776 | return use_iterator(UseList); | |||
777 | } | |||
778 | ||||
779 | static use_iterator use_end() { return use_iterator(nullptr); } | |||
780 | ||||
781 | inline iterator_range<use_iterator> uses() { | |||
782 | return make_range(use_begin(), use_end()); | |||
783 | } | |||
784 | inline iterator_range<use_iterator> uses() const { | |||
785 | return make_range(use_begin(), use_end()); | |||
786 | } | |||
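  // Example (illustrative): uses() enables range-based iteration over the
  // users of this node; dereferencing the iterator yields the using SDNode:
  //   for (SDNode *User : N->uses())
  //     if (User->getOpcode() == ISD::CopyToReg)
  //       ... // N feeds a physical-register copy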
787 | ||||
788 | /// Return true if there are exactly NUSES uses of the indicated value. | |||
789 | /// This method ignores uses of other values defined by this operation. | |||
790 | bool hasNUsesOfValue(unsigned NUses, unsigned Value) const; | |||
791 | ||||
792 | /// Return true if there are any uses of the indicated value. | |||
793 | /// This method ignores uses of other values defined by this operation. | |||
794 | bool hasAnyUseOfValue(unsigned Value) const; | |||
795 | ||||
796 | /// Return true if this node is the only use of N. | |||
797 | bool isOnlyUserOf(const SDNode *N) const; | |||
798 | ||||
799 | /// Return true if this node is an operand of N. | |||
800 | bool isOperandOf(const SDNode *N) const; | |||
801 | ||||
802 | /// Return true if this node is a predecessor of N. | |||
803 | /// NOTE: Implemented on top of hasPredecessor and every bit as | |||
804 | /// expensive. Use carefully. | |||
805 | bool isPredecessorOf(const SDNode *N) const { | |||
806 | return N->hasPredecessor(this); | |||
807 | } | |||
808 | ||||
809 | /// Return true if N is a predecessor of this node. | |||
810 | /// N is either an operand of this node, or can be reached by recursively | |||
811 | /// traversing up the operands. | |||
812 | /// NOTE: This is an expensive method. Use it carefully. | |||
813 | bool hasPredecessor(const SDNode *N) const; | |||
814 | ||||
815 | /// Returns true if N is a predecessor of any node in Worklist. This | |||
816 | /// helper keeps the Visited and Worklist sets external so that unions of | |||
817 | /// searches can run in parallel, results can be cached across queries, | |||
818 | /// and nodes can be added to Worklist incrementally. It stops early once | |||
819 | /// N is found, but a subsequent call will resume the search. Remember to | |||
820 | /// clear Visited and Worklist if the DAG changes. MaxSteps gives a | |||
821 | /// maximum number of nodes to visit before giving up. The TopologicalPrune | |||
822 | /// flag signals that positive NodeIds are topologically ordered (operands | |||
823 | /// have strictly smaller node ids), so the search can be pruned. | |||
824 | static bool hasPredecessorHelper(const SDNode *N, | |||
825 | SmallPtrSetImpl<const SDNode *> &Visited, | |||
826 | SmallVectorImpl<const SDNode *> &Worklist, | |||
827 | unsigned int MaxSteps = 0, | |||
828 | bool TopologicalPrune = false) { | |||
829 | SmallVector<const SDNode *, 8> DeferredNodes; | |||
830 | if (Visited.count(N)) | |||
831 | return true; | |||
832 | ||||
833 | // Node Ids are assigned in three places: as a topological | |||
834 | // ordering (> 0), during legalization (values set to 0), and for | |||
835 | // new nodes (set to -1). If N has a topological id then we know | |||
836 | // that all nodes with smaller ids cannot be successors, so we | |||
837 | // need not check them. Filter out all nodes that cannot match. | |||
838 | // We add them back to the worklist before exit in case of | |||
839 | // multiple calls. Note that during selection the topological order | |||
840 | // may be violated if a node's predecessor is selected before it. We | |||
841 | // mark this at selection by negating the id of unselected successors, | |||
842 | // and restrict topological pruning to positive ids. | |||
843 | ||||
844 | int NId = N->getNodeId(); | |||
845 | // If we invalidated the Id, reconstruct the original NId. | |||
846 | if (NId < -1) | |||
847 | NId = -(NId + 1); | |||
848 | ||||
849 | bool Found = false; | |||
850 | while (!Worklist.empty()) { | |||
851 | const SDNode *M = Worklist.pop_back_val(); | |||
852 | int MId = M->getNodeId(); | |||
853 | if (TopologicalPrune && M->getOpcode() != ISD::TokenFactor && (NId > 0) && | |||
854 | (MId > 0) && (MId < NId)) { | |||
855 | DeferredNodes.push_back(M); | |||
856 | continue; | |||
857 | } | |||
858 | for (const SDValue &OpV : M->op_values()) { | |||
859 | SDNode *Op = OpV.getNode(); | |||
860 | if (Visited.insert(Op).second) | |||
861 | Worklist.push_back(Op); | |||
862 | if (Op == N) | |||
863 | Found = true; | |||
864 | } | |||
865 | if (Found) | |||
866 | break; | |||
867 | if (MaxSteps != 0 && Visited.size() >= MaxSteps) | |||
868 | break; | |||
869 | } | |||
870 | // Push deferred nodes back on worklist. | |||
871 | Worklist.append(DeferredNodes.begin(), DeferredNodes.end()); | |||
872 | // If we bailed early, conservatively return found. | |||
873 | if (MaxSteps != 0 && Visited.size() >= MaxSteps) | |||
874 | return true; | |||
875 | return Found; | |||
876 | } | |||
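  // Example (illustrative): callers keep Visited/Worklist alive across
  // queries and seed the worklist with the search roots:
  //   SmallPtrSet<const SDNode *, 32> Visited;
  //   SmallVector<const SDNode *, 16> Worklist;
  //   Worklist.push_back(Root);
  //   if (hasPredecessorHelper(N, Visited, Worklist, /*MaxSteps=*/8192))
  //     ... // conservatively treat N as reaching Root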
877 | ||||
878 | /// Return true if all the users of N are contained in Nodes. | |||
879 | /// NOTE: Requires at least one match, but doesn't require them all. | |||
880 | static bool areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N); | |||
881 | ||||
882 | /// Return the number of values used by this operation. | |||
883 | unsigned getNumOperands() const { return NumOperands; } | |||
884 | ||||
885 | /// Return the maximum number of operands that a SDNode can hold. | |||
886 | static constexpr size_t getMaxNumOperands() { | |||
887 | return std::numeric_limits<decltype(SDNode::NumOperands)>::max(); | |||
888 | } | |||
889 | ||||
890 | /// Helper method returns the integer value of a ConstantSDNode operand. | |||
891 | inline uint64_t getConstantOperandVal(unsigned Num) const; | |||
892 | ||||
893 | /// Helper method returns the APInt of a ConstantSDNode operand. | |||
894 | inline const APInt &getConstantOperandAPInt(unsigned Num) const; | |||
895 | ||||
896 | const SDValue &getOperand(unsigned Num) const { | |||
897 | assert(Num < NumOperands && "Invalid child # of SDNode!"); | |||
898 | return OperandList[Num]; | |||
899 | } | |||
900 | ||||
901 | using op_iterator = SDUse *; | |||
902 | ||||
903 | op_iterator op_begin() const { return OperandList; } | |||
904 | op_iterator op_end() const { return OperandList+NumOperands; } | |||
905 | ArrayRef<SDUse> ops() const { return makeArrayRef(op_begin(), op_end()); } | |||
906 | ||||
907 | /// Iterator for directly iterating over the operand SDValue's. | |||
908 | struct value_op_iterator | |||
909 | : iterator_adaptor_base<value_op_iterator, op_iterator, | |||
910 | std::random_access_iterator_tag, SDValue, | |||
911 | ptrdiff_t, value_op_iterator *, | |||
912 | value_op_iterator *> { | |||
913 | explicit value_op_iterator(SDUse *U = nullptr) | |||
914 | : iterator_adaptor_base(U) {} | |||
915 | ||||
916 | const SDValue &operator*() const { return I->get(); } | |||
917 | }; | |||
918 | ||||
919 | iterator_range<value_op_iterator> op_values() const { | |||
920 | return make_range(value_op_iterator(op_begin()), | |||
921 | value_op_iterator(op_end())); | |||
922 | } | |||
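  // Example (illustrative): op_values() iterates the operands as SDValues
  // rather than SDUses:
  //   for (const SDValue &Op : N->op_values())
  //     if (Op.isUndef())
  //       ... // operand is an ISD::UNDEF value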
923 | ||||
924 | SDVTList getVTList() const { | |||
925 | SDVTList X = { ValueList, NumValues }; | |||
926 | return X; | |||
927 | } | |||
928 | ||||
929 | /// If this node has a glue operand, return the node | |||
930 | /// to which the glue operand points. Otherwise return NULL. | |||
931 | SDNode *getGluedNode() const { | |||
932 | if (getNumOperands() != 0 && | |||
933 | getOperand(getNumOperands()-1).getValueType() == MVT::Glue) | |||
934 | return getOperand(getNumOperands()-1).getNode(); | |||
935 | return nullptr; | |||
936 | } | |||
937 | ||||
938 | /// If this node has a glue value with a user, return | |||
939 | /// the user (there is at most one). Otherwise return NULL. | |||
940 | SDNode *getGluedUser() const { | |||
941 | for (use_iterator UI = use_begin(), UE = use_end(); UI != UE; ++UI) | |||
942 | if (UI.getUse().get().getValueType() == MVT::Glue) | |||
943 | return *UI; | |||
944 | return nullptr; | |||
945 | } | |||
946 | ||||
947 | SDNodeFlags getFlags() const { return Flags; } | |||
948 | void setFlags(SDNodeFlags NewFlags) { Flags = NewFlags; } | |||
949 | ||||
950 | /// Clear any flags in this node that aren't also set in Flags. | |||
951 | /// If Flags is not in a defined state then this has no effect. | |||
952 | void intersectFlagsWith(const SDNodeFlags Flags); | |||
953 | ||||
954 | /// Return the number of values defined/returned by this operator. | |||
955 | unsigned getNumValues() const { return NumValues; } | |||
956 | ||||
957 | /// Return the type of a specified result. | |||
958 | EVT getValueType(unsigned ResNo) const { | |||
959 | assert(ResNo < NumValues && "Illegal result number!"); | |||
960 | return ValueList[ResNo]; | |||
961 | } | |||
962 | ||||
963 | /// Return the type of a specified result as a simple type. | |||
964 | MVT getSimpleValueType(unsigned ResNo) const { | |||
965 | return getValueType(ResNo).getSimpleVT(); | |||
966 | } | |||
967 | ||||
968 | /// Returns MVT::getSizeInBits(getValueType(ResNo)). | |||
969 | /// | |||
970 | /// If the value type is a scalable vector type, the scalable property will | |||
971 | /// be set and the runtime size will be a positive integer multiple of the | |||
972 | /// base size. | |||
973 | TypeSize getValueSizeInBits(unsigned ResNo) const { | |||
974 | return getValueType(ResNo).getSizeInBits(); | |||
975 | } | |||
976 | ||||
977 | using value_iterator = const EVT *; | |||
978 | ||||
979 | value_iterator value_begin() const { return ValueList; } | |||
980 | value_iterator value_end() const { return ValueList+NumValues; } | |||
981 | iterator_range<value_iterator> values() const { | |||
982 | return llvm::make_range(value_begin(), value_end()); | |||
983 | } | |||
984 | ||||
985 | /// Return the opcode of this operation for printing. | |||
986 | std::string getOperationName(const SelectionDAG *G = nullptr) const; | |||
987 | static const char* getIndexedModeName(ISD::MemIndexedMode AM); | |||
988 | void print_types(raw_ostream &OS, const SelectionDAG *G) const; | |||
989 | void print_details(raw_ostream &OS, const SelectionDAG *G) const; | |||
990 | void print(raw_ostream &OS, const SelectionDAG *G = nullptr) const; | |||
991 | void printr(raw_ostream &OS, const SelectionDAG *G = nullptr) const; | |||
992 | ||||
993 | /// Print a SelectionDAG node and all children down to | |||
994 | /// the leaves. The given SelectionDAG allows target-specific nodes | |||
995 | /// to be printed in human-readable form. Unlike printr, this will | |||
996 | /// print the whole DAG, including children that appear multiple | |||
997 | /// times. | |||
998 | /// | |||
999 | void printrFull(raw_ostream &O, const SelectionDAG *G = nullptr) const; | |||
1000 | ||||
1001 | /// Print a SelectionDAG node and children up to | |||
1002 | /// depth "depth." The given SelectionDAG allows target-specific | |||
1003 | /// nodes to be printed in human-readable form. Unlike printr, this | |||
1004 | /// will print children that appear multiple times wherever they are | |||
1005 | /// used. | |||
1006 | /// | |||
1007 | void printrWithDepth(raw_ostream &O, const SelectionDAG *G = nullptr, | |||
1008 | unsigned depth = 100) const; | |||
1009 | ||||
1010 | /// Dump this node, for debugging. | |||
1011 | void dump() const; | |||
1012 | ||||
1013 | /// Dump (recursively) this node and its use-def subgraph. | |||
1014 | void dumpr() const; | |||
1015 | ||||
1016 | /// Dump this node, for debugging. | |||
1017 | /// The given SelectionDAG allows target-specific nodes to be printed | |||
1018 | /// in human-readable form. | |||
1019 | void dump(const SelectionDAG *G) const; | |||
1020 | ||||
1021 | /// Dump (recursively) this node and its use-def subgraph. | |||
1022 | /// The given SelectionDAG allows target-specific nodes to be printed | |||
1023 | /// in human-readable form. | |||
1024 | void dumpr(const SelectionDAG *G) const; | |||
1025 | ||||
1026 | /// printrFull to dbgs(). The given SelectionDAG allows | |||
1027 | /// target-specific nodes to be printed in human-readable form. | |||
1028 | /// Unlike dumpr, this will print the whole DAG, including children | |||
1029 | /// that appear multiple times. | |||
1030 | void dumprFull(const SelectionDAG *G = nullptr) const; | |||
1031 | ||||
1032 | /// printrWithDepth to dbgs(). The given | |||
1033 | /// SelectionDAG allows target-specific nodes to be printed in | |||
1034 | /// human-readable form. Unlike dumpr, this will print children | |||
1035 | /// that appear multiple times wherever they are used. | |||
1036 | /// | |||
1037 | void dumprWithDepth(const SelectionDAG *G = nullptr, | |||
1038 | unsigned depth = 100) const; | |||
1039 | ||||
1040 | /// Gather unique data for the node. | |||
1041 | void Profile(FoldingSetNodeID &ID) const; | |||
1042 | ||||
1043 | /// This method should only be used by the SDUse class. | |||
1044 | void addUse(SDUse &U) { U.addToList(&UseList); } | |||
1045 | ||||
1046 | protected: | |||
1047 | static SDVTList getSDVTList(EVT VT) { | |||
1048 | SDVTList Ret = { getValueTypeList(VT), 1 }; | |||
1049 | return Ret; | |||
1050 | } | |||
1051 | ||||
1052 | /// Create an SDNode. | |||
1053 | /// | |||
1054 | /// SDNodes are created without any operands, and never own the operand | |||
1055 | /// storage. To add operands, see SelectionDAG::createOperands. | |||
1056 | SDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs) | |||
1057 | : NodeType(Opc), ValueList(VTs.VTs), NumValues(VTs.NumVTs), | |||
1058 | IROrder(Order), debugLoc(std::move(dl)) { | |||
1059 | memset(&RawSDNodeBits, 0, sizeof(RawSDNodeBits)); | |||
1060 | assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor"); | |||
1061 | assert(NumValues == VTs.NumVTs && | |||
1062 | "NumValues wasn't wide enough for its operands!"); | |||
1063 | } | |||
1064 | ||||
1065 | /// Release the operands and set this node to have zero operands. | |||
1066 | void DropOperands(); | |||
1067 | }; | |||
1068 | ||||
1069 | /// Wrapper class for IR location info (IR ordering and DebugLoc) to be passed | |||
1070 | /// into SDNode creation functions. | |||
1071 | /// When an SDNode is created from the DAGBuilder, the DebugLoc is extracted | |||
1072 | /// from the original Instruction, and IROrder is the ordinal position of | |||
1073 | /// the instruction. | |||
1074 | /// When an SDNode is created after the DAG is being built, both DebugLoc and | |||
1075 | /// the IROrder are propagated from the original SDNode. | |||
1076 | /// So the SDLoc class provides two constructors besides the default one: one | |||
1077 | /// to be used by the DAGBuilder, the other to be used by others. | |||
1078 | class SDLoc { | |||
1079 | private: | |||
1080 | DebugLoc DL; | |||
1081 | int IROrder = 0; | |||
1082 | ||||
1083 | public: | |||
1084 | SDLoc() = default; | |||
1085 | SDLoc(const SDNode *N) : DL(N->getDebugLoc()), IROrder(N->getIROrder()) {} | |||
1086 | SDLoc(const SDValue V) : SDLoc(V.getNode()) {} | |||
1087 | SDLoc(const Instruction *I, int Order) : IROrder(Order) { | |||
1088 | assert(Order >= 0 && "bad IROrder"); | |||
1089 | if (I) | |||
1090 | DL = I->getDebugLoc(); | |||
1091 | } | |||
1092 | ||||
1093 | unsigned getIROrder() const { return IROrder; } | |||
1094 | const DebugLoc &getDebugLoc() const { return DL; } | |||
1095 | }; | |||
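// Example (illustrative; SelectionDAG is only forward-declared at this
// point): SDLoc threads DebugLoc and IROrder into node-creation calls, and
// can be built from either an SDNode* or an SDValue:
//   SDValue Res = DAG.getNode(ISD::ADD, SDLoc(N), VT, LHS, RHS);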
1096 | ||||
1097 | // Define inline functions from the SDValue class. | |||
1098 | ||||
1099 | inline SDValue::SDValue(SDNode *node, unsigned resno) | |||
1100 | : Node(node), ResNo(resno) { | |||
1101 | // Explicitly check for !ResNo to avoid use-after-free, because there are | |||
1102 | // callers that use SDValue(N, 0) with a deleted N to indicate successful | |||
1103 | // combines. | |||
1104 | assert((!Node || !ResNo || ResNo < Node->getNumValues()) && | |||
1105 | "Invalid result number for the given node!"); | |||
1106 | assert(ResNo < -2U && "Cannot use result numbers reserved for DenseMaps."); | |||
1107 | } | |||
1108 | ||||
1109 | inline unsigned SDValue::getOpcode() const { | |||
1110 | return Node->getOpcode(); | |||
1111 | } | |||
1112 | ||||
1113 | inline EVT SDValue::getValueType() const { | |||
1114 | return Node->getValueType(ResNo); | |||
1115 | } | |||
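// Note (illustrative): getValueType() above dereferences Node unchecked, so
// a null SDValue must be filtered out before this call, e.g.
//   if (V)
//     EVT VT = V.getValueType();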
1116 | ||||
1117 | inline unsigned SDValue::getNumOperands() const { | |||
1118 | return Node->getNumOperands(); | |||
1119 | } | |||
1120 | ||||
1121 | inline const SDValue &SDValue::getOperand(unsigned i) const { | |||
1122 | return Node->getOperand(i); | |||
1123 | } | |||
1124 | ||||
1125 | inline uint64_t SDValue::getConstantOperandVal(unsigned i) const { | |||
1126 | return Node->getConstantOperandVal(i); | |||
1127 | } | |||
1128 | ||||
1129 | inline const APInt &SDValue::getConstantOperandAPInt(unsigned i) const { | |||
1130 | return Node->getConstantOperandAPInt(i); | |||
1131 | } | |||
1132 | ||||
1133 | inline bool SDValue::isTargetOpcode() const { | |||
1134 | return Node->isTargetOpcode(); | |||
1135 | } | |||
1136 | ||||
1137 | inline bool SDValue::isTargetMemoryOpcode() const { | |||
1138 | return Node->isTargetMemoryOpcode(); | |||
1139 | } | |||
1140 | ||||
1141 | inline bool SDValue::isMachineOpcode() const { | |||
1142 | return Node->isMachineOpcode(); | |||
1143 | } | |||
1144 | ||||
1145 | inline unsigned SDValue::getMachineOpcode() const { | |||
1146 | return Node->getMachineOpcode(); | |||
1147 | } | |||
1148 | ||||
1149 | inline bool SDValue::isUndef() const { | |||
1150 | return Node->isUndef(); | |||
1151 | } | |||
1152 | ||||
1153 | inline bool SDValue::use_empty() const { | |||
1154 | return !Node->hasAnyUseOfValue(ResNo); | |||
1155 | } | |||
1156 | ||||
1157 | inline bool SDValue::hasOneUse() const { | |||
1158 | return Node->hasNUsesOfValue(1, ResNo); | |||
1159 | } | |||
1160 | ||||
1161 | inline const DebugLoc &SDValue::getDebugLoc() const { | |||
1162 | return Node->getDebugLoc(); | |||
1163 | } | |||
1164 | ||||
1165 | inline void SDValue::dump() const { | |||
1166 | return Node->dump(); | |||
1167 | } | |||
1168 | ||||
1169 | inline void SDValue::dump(const SelectionDAG *G) const { | |||
1170 | return Node->dump(G); | |||
1171 | } | |||
1172 | ||||
1173 | inline void SDValue::dumpr() const { | |||
1174 | return Node->dumpr(); | |||
1175 | } | |||
1176 | ||||
1177 | inline void SDValue::dumpr(const SelectionDAG *G) const { | |||
1178 | return Node->dumpr(G); | |||
1179 | } | |||
1180 | ||||
1181 | // Define inline functions from the SDUse class. | |||
1182 | ||||
1183 | inline void SDUse::set(const SDValue &V) { | |||
1184 | if (Val.getNode()) removeFromList(); | |||
1185 | Val = V; | |||
1186 | if (V.getNode()) V.getNode()->addUse(*this); | |||
1187 | } | |||
1188 | ||||
1189 | inline void SDUse::setInitial(const SDValue &V) { | |||
1190 | Val = V; | |||
1191 | V.getNode()->addUse(*this); | |||
1192 | } | |||
1193 | ||||
1194 | inline void SDUse::setNode(SDNode *N) { | |||
1195 | if (Val.getNode()) removeFromList(); | |||
1196 | Val.setNode(N); | |||
1197 | if (N) N->addUse(*this); | |||
1198 | } | |||
1199 | ||||
1200 | /// This class is used to form a handle around another node that | |||
1201 | /// is persistent and is updated across invocations of replaceAllUsesWith on its | |||
1202 | /// operand. This node should be directly created by end-users and not added to | |||
1203 | /// the AllNodes list. | |||
1204 | class HandleSDNode : public SDNode { | |||
1205 | SDUse Op; | |||
1206 | ||||
1207 | public: | |||
1208 | explicit HandleSDNode(SDValue X) | |||
1209 | : SDNode(ISD::HANDLENODE, 0, DebugLoc(), getSDVTList(MVT::Other)) { | |||
1210 | // HandleSDNodes are never inserted into the DAG, so they won't be | |||
1211 | // auto-numbered. Use ID 65535 as a sentinel. | |||
1212 | PersistentId = 0xffff; | |||
1213 | ||||
1214 | // Manually set up the operand list. This node type is special in that it's | |||
1215 | // always stack allocated and SelectionDAG does not manage its operands. | |||
1216 | // TODO: This should either (a) not be in the SDNode hierarchy, or (b) not | |||
1217 | // be so special. | |||
1218 | Op.setUser(this); | |||
1219 | Op.setInitial(X); | |||
1220 | NumOperands = 1; | |||
1221 | OperandList = &Op; | |||
1222 | } | |||
1223 | ~HandleSDNode(); | |||
1224 | ||||
1225 | const SDValue &getValue() const { return Op; } | |||
1226 | }; | |||
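// Illustrative sketch (not part of this header): a stack-allocated
// HandleSDNode keeps a value registered as a use, so it is updated if DAG
// mutations replace the underlying node via replaceAllUsesWith.
inline SDValue trackAcrossMutation(SDValue V) {
  HandleSDNode Handle(V);   // Registers a use of V on the stack.
  // ... DAG mutations that may replace V's node would run here ...
  return Handle.getValue(); // Reflects any replacement of V.
}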
1227 | ||||
1228 | class AddrSpaceCastSDNode : public SDNode { | |||
1229 | private: | |||
1230 | unsigned SrcAddrSpace; | |||
1231 | unsigned DestAddrSpace; | |||
1232 | ||||
1233 | public: | |||
1234 | AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl, EVT VT, | |||
1235 | unsigned SrcAS, unsigned DestAS); | |||
1236 | ||||
1237 | unsigned getSrcAddressSpace() const { return SrcAddrSpace; } | |||
1238 | unsigned getDestAddressSpace() const { return DestAddrSpace; } | |||
1239 | ||||
1240 | static bool classof(const SDNode *N) { | |||
1241 | return N->getOpcode() == ISD::ADDRSPACECAST; | |||
1242 | } | |||
1243 | }; | |||
1244 | ||||
1245 | /// This is an abstract virtual class for memory operations. | |||
1246 | class MemSDNode : public SDNode { | |||
1247 | private: | |||
1248 | // VT of in-memory value. | |||
1249 | EVT MemoryVT; | |||
1250 | ||||
1251 | protected: | |||
1252 | /// Memory reference information. | |||
1253 | MachineMemOperand *MMO; | |||
1254 | ||||
1255 | public: | |||
1256 | MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTs, | |||
1257 | EVT memvt, MachineMemOperand *MMO); | |||
1258 | ||||
1259 | bool readMem() const { return MMO->isLoad(); } | |||
1260 | bool writeMem() const { return MMO->isStore(); } | |||
1261 | ||||
1262 | /// Returns alignment and volatility of the memory access | |||
1263 | Align getOriginalAlign() const { return MMO->getBaseAlign(); } | |||
1264 | Align getAlign() const { return MMO->getAlign(); } | |||
1265 | // FIXME: Remove once transition to getAlign is over. | |||
1266 | unsigned getAlignment() const { return MMO->getAlign().value(); } | |||
1267 | ||||
1268 | /// Return the SubclassData value, without HasDebugValue. This contains an | |||
1269 | /// encoding of the volatile flag, as well as bits used by subclasses. This | |||
1270 | /// function should only be used to compute a FoldingSetNodeID value. | |||
1271 | /// The HasDebugValue bit is masked out because the CSE map needs to match | |||
1272 | /// nodes with debug info against nodes without debug info. The same | |||
1273 | /// applies to the IsDivergent bit. | |||
1274 | unsigned getRawSubclassData() const { | |||
1275 | uint16_t Data; | |||
1276 | union { | |||
1277 | char RawSDNodeBits[sizeof(uint16_t)]; | |||
1278 | SDNodeBitfields SDNodeBits; | |||
1279 | }; | |||
1280 | memcpy(&RawSDNodeBits, &this->RawSDNodeBits, sizeof(this->RawSDNodeBits)); | |||
1281 | SDNodeBits.HasDebugValue = 0; | |||
1282 | SDNodeBits.IsDivergent = false; | |||
1283 | memcpy(&Data, &RawSDNodeBits, sizeof(RawSDNodeBits)); | |||
1284 | return Data; | |||
1285 | } | |||
1286 | ||||
1287 | bool isVolatile() const { return MemSDNodeBits.IsVolatile; } | |||
1288 | bool isNonTemporal() const { return MemSDNodeBits.IsNonTemporal; } | |||
1289 | bool isDereferenceable() const { return MemSDNodeBits.IsDereferenceable; } | |||
1290 | bool isInvariant() const { return MemSDNodeBits.IsInvariant; } | |||
1291 | ||||
1292 | // Returns the offset from the location of the access. | |||
1293 | int64_t getSrcValueOffset() const { return MMO->getOffset(); } | |||
1294 | ||||
1295 | /// Returns the AA info that describes the dereference. | |||
1296 | AAMDNodes getAAInfo() const { return MMO->getAAInfo(); } | |||
1297 | ||||
1298 | /// Returns the Ranges that describes the dereference. | |||
1299 | const MDNode *getRanges() const { return MMO->getRanges(); } | |||
1300 | ||||
1301 | /// Returns the synchronization scope ID for this memory operation. | |||
1302 | SyncScope::ID getSyncScopeID() const { return MMO->getSyncScopeID(); } | |||
1303 | ||||
1304 | /// Return the atomic ordering requirements for this memory operation. For | |||
1305 | /// cmpxchg atomic operations, return the atomic ordering requirements when | |||
1306 | /// store occurs. | |||
1307 | AtomicOrdering getSuccessOrdering() const { | |||
1308 | return MMO->getSuccessOrdering(); | |||
1309 | } | |||
1310 | ||||
1311 | /// Return a single atomic ordering that is at least as strong as both the | |||
1312 | /// success and failure orderings for an atomic operation. (For operations | |||
1313 | /// other than cmpxchg, this is equivalent to getSuccessOrdering().) | |||
1314 | AtomicOrdering getMergedOrdering() const { return MMO->getMergedOrdering(); } | |||
1315 | ||||
1316 | /// Return true if the memory operation ordering is Unordered or higher. | |||
1317 | bool isAtomic() const { return MMO->isAtomic(); } | |||
1318 | ||||
1319 | /// Returns true if the memory operation doesn't imply any ordering | |||
1320 | /// constraints on surrounding memory operations beyond the normal memory | |||
1321 | /// aliasing rules. | |||
1322 | bool isUnordered() const { return MMO->isUnordered(); } | |||
1323 | ||||
1324 | /// Returns true if the memory operation is neither atomic nor volatile. | |||
1325 | bool isSimple() const { return !isAtomic() && !isVolatile(); } | |||
1326 | ||||
1327 | /// Return the type of the in-memory value. | |||
1328 | EVT getMemoryVT() const { return MemoryVT; } | |||
1329 | ||||
1330 | /// Return a MachineMemOperand object describing the memory | |||
1331 | /// reference performed by operation. | |||
1332 | MachineMemOperand *getMemOperand() const { return MMO; } | |||
1333 | ||||
1334 | const MachinePointerInfo &getPointerInfo() const { | |||
1335 | return MMO->getPointerInfo(); | |||
1336 | } | |||
1337 | ||||
1338 | /// Return the address space for the associated pointer | |||
1339 | unsigned getAddressSpace() const { | |||
1340 | return getPointerInfo().getAddrSpace(); | |||
1341 | } | |||
1342 | ||||
1343 | /// Update this MemSDNode's MachineMemOperand information | |||
1344 | /// to reflect the alignment of NewMMO, if it has a greater alignment. | |||
1345 | /// This must only be used when the new alignment applies to all users of | |||
1346 | /// this MachineMemOperand. | |||
1347 | void refineAlignment(const MachineMemOperand *NewMMO) { | |||
1348 | MMO->refineAlignment(NewMMO); | |||
1349 | } | |||
1350 | ||||
1351 | const SDValue &getChain() const { return getOperand(0); } | |||
1352 | ||||
1353 | const SDValue &getBasePtr() const { | |||
1354 | switch (getOpcode()) { | |||
1355 | case ISD::STORE: | |||
1356 | case ISD::MSTORE: | |||
1357 | return getOperand(2); | |||
1358 | case ISD::MGATHER: | |||
1359 | case ISD::MSCATTER: | |||
1360 | return getOperand(3); | |||
1361 | default: | |||
1362 | return getOperand(1); | |||
1363 | } | |||
1364 | } | |||
1365 | ||||
1366 | // Methods to support isa and dyn_cast | |||
1367 | static bool classof(const SDNode *N) { | |||
1368 | // For some targets, we lower some target intrinsics to a MemIntrinsicNode | |||
1369 | // with either an intrinsic or a target opcode. | |||
1370 | switch (N->getOpcode()) { | |||
1371 | case ISD::LOAD: | |||
1372 | case ISD::STORE: | |||
1373 | case ISD::PREFETCH: | |||
1374 | case ISD::ATOMIC_CMP_SWAP: | |||
1375 | case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: | |||
1376 | case ISD::ATOMIC_SWAP: | |||
1377 | case ISD::ATOMIC_LOAD_ADD: | |||
1378 | case ISD::ATOMIC_LOAD_SUB: | |||
1379 | case ISD::ATOMIC_LOAD_AND: | |||
1380 | case ISD::ATOMIC_LOAD_CLR: | |||
1381 | case ISD::ATOMIC_LOAD_OR: | |||
1382 | case ISD::ATOMIC_LOAD_XOR: | |||
1383 | case ISD::ATOMIC_LOAD_NAND: | |||
1384 | case ISD::ATOMIC_LOAD_MIN: | |||
1385 | case ISD::ATOMIC_LOAD_MAX: | |||
1386 | case ISD::ATOMIC_LOAD_UMIN: | |||
1387 | case ISD::ATOMIC_LOAD_UMAX: | |||
1388 | case ISD::ATOMIC_LOAD_FADD: | |||
1389 | case ISD::ATOMIC_LOAD_FSUB: | |||
1390 | case ISD::ATOMIC_LOAD: | |||
1391 | case ISD::ATOMIC_STORE: | |||
1392 | case ISD::MLOAD: | |||
1393 | case ISD::MSTORE: | |||
1394 | case ISD::MGATHER: | |||
1395 | case ISD::MSCATTER: | |||
1396 | return true; | |||
1397 | default: | |||
1398 | return N->isMemIntrinsic() || N->isTargetMemoryOpcode(); | |||
1399 | } | |||
1400 | } | |||
1401 | }; | |||
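// Illustrative sketch (not part of this header): the classof() above lets
// dyn_cast treat loads, stores, atomics and target memory nodes uniformly.
inline bool isSimpleMemAccess(const SDNode *N) {
  if (const auto *M = dyn_cast<MemSDNode>(N))
    return M->isSimple(); // Neither atomic nor volatile.
  return false;
}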
1402 | ||||
1403 | /// This is an SDNode representing atomic operations. | |||
1404 | class AtomicSDNode : public MemSDNode { | |||
1405 | public: | |||
1406 | AtomicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTL, | |||
1407 | EVT MemVT, MachineMemOperand *MMO) | |||
1408 | : MemSDNode(Opc, Order, dl, VTL, MemVT, MMO) { | |||
1409 | assert(((Opc != ISD::ATOMIC_LOAD && Opc != ISD::ATOMIC_STORE) || | |||
1410 | MMO->isAtomic()) && "then why are we using an AtomicSDNode?"); | |||
1411 | } | |||
1412 | ||||
1413 | const SDValue &getBasePtr() const { return getOperand(1); } | |||
1414 | const SDValue &getVal() const { return getOperand(2); } | |||
1415 | ||||
1416 |   /// Returns true if this SDNode represents a cmpxchg atomic operation, | |||
1417 |   /// false otherwise. | |||
1418 | bool isCompareAndSwap() const { | |||
1419 | unsigned Op = getOpcode(); | |||
1420 | return Op == ISD::ATOMIC_CMP_SWAP || | |||
1421 | Op == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS; | |||
1422 | } | |||
1423 | ||||
1424 |   /// For cmpxchg atomic operations, return the atomic ordering requirements | |||
1425 |   /// when the store does not occur. | |||
1426 | AtomicOrdering getFailureOrdering() const { | |||
1427 |     assert(isCompareAndSwap() && "Must be cmpxchg operation"); | |||
1428 | return MMO->getFailureOrdering(); | |||
1429 | } | |||
1430 | ||||
1431 | // Methods to support isa and dyn_cast | |||
1432 | static bool classof(const SDNode *N) { | |||
1433 | return N->getOpcode() == ISD::ATOMIC_CMP_SWAP || | |||
1434 | N->getOpcode() == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS || | |||
1435 | N->getOpcode() == ISD::ATOMIC_SWAP || | |||
1436 | N->getOpcode() == ISD::ATOMIC_LOAD_ADD || | |||
1437 | N->getOpcode() == ISD::ATOMIC_LOAD_SUB || | |||
1438 | N->getOpcode() == ISD::ATOMIC_LOAD_AND || | |||
1439 | N->getOpcode() == ISD::ATOMIC_LOAD_CLR || | |||
1440 | N->getOpcode() == ISD::ATOMIC_LOAD_OR || | |||
1441 | N->getOpcode() == ISD::ATOMIC_LOAD_XOR || | |||
1442 | N->getOpcode() == ISD::ATOMIC_LOAD_NAND || | |||
1443 | N->getOpcode() == ISD::ATOMIC_LOAD_MIN || | |||
1444 | N->getOpcode() == ISD::ATOMIC_LOAD_MAX || | |||
1445 | N->getOpcode() == ISD::ATOMIC_LOAD_UMIN || | |||
1446 | N->getOpcode() == ISD::ATOMIC_LOAD_UMAX || | |||
1447 | N->getOpcode() == ISD::ATOMIC_LOAD_FADD || | |||
1448 | N->getOpcode() == ISD::ATOMIC_LOAD_FSUB || | |||
1449 | N->getOpcode() == ISD::ATOMIC_LOAD || | |||
1450 | N->getOpcode() == ISD::ATOMIC_STORE; | |||
1451 | } | |||
1452 | }; | |||
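// --- Usage sketch (illustrative): getFailureOrdering() asserts that the node
// is a compare-and-swap, so the check must come first. isAcquireOrStronger()
// is the helper declared in llvm/Support/AtomicOrdering.h.
static bool cmpxchgFailureIsAcquire(const SDNode *N) {
  const auto *A = dyn_cast<AtomicSDNode>(N);
  if (!A || !A->isCompareAndSwap())
    return false;
  return isAcquireOrStronger(A->getFailureOrdering());
}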
1453 | ||||
1454 | /// This SDNode is used for target intrinsics that touch | |||
1455 | /// memory and need an associated MachineMemOperand. Its opcode may be | |||
1456 | /// INTRINSIC_VOID, INTRINSIC_W_CHAIN, PREFETCH, or a target-specific opcode | |||
1457 | /// with a value not less than FIRST_TARGET_MEMORY_OPCODE. | |||
1458 | class MemIntrinsicSDNode : public MemSDNode { | |||
1459 | public: | |||
1460 | MemIntrinsicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, | |||
1461 | SDVTList VTs, EVT MemoryVT, MachineMemOperand *MMO) | |||
1462 | : MemSDNode(Opc, Order, dl, VTs, MemoryVT, MMO) { | |||
1463 | SDNodeBits.IsMemIntrinsic = true; | |||
1464 | } | |||
1465 | ||||
1466 | // Methods to support isa and dyn_cast | |||
1467 | static bool classof(const SDNode *N) { | |||
1468 |     // We lower some target intrinsics to their target opcode early, so a | |||
1469 |     // node with a target opcode can be of this class. | |||
1470 | return N->isMemIntrinsic() || | |||
1471 | N->getOpcode() == ISD::PREFETCH || | |||
1472 | N->isTargetMemoryOpcode(); | |||
1473 | } | |||
1474 | }; | |||
1475 | ||||
1476 | /// This SDNode is used to implement the code generator | |||
1477 | /// support for the llvm IR shufflevector instruction. It combines elements | |||
1478 | /// from two input vectors into a new input vector, with the selection and | |||
1479 | /// ordering of elements determined by an array of integers, referred to as | |||
1480 | /// the shuffle mask. For input vectors of width N, mask indices of 0..N-1 | |||
1481 | /// refer to elements from the LHS input, and indices N..2N-1 to the RHS. | |||
1482 | /// An index of -1 is treated as undef, such that the code generator may put | |||
1483 | /// any value in the corresponding element of the result. | |||
1484 | class ShuffleVectorSDNode : public SDNode { | |||
1485 | // The memory for Mask is owned by the SelectionDAG's OperandAllocator, and | |||
1486 | // is freed when the SelectionDAG object is destroyed. | |||
1487 | const int *Mask; | |||
1488 | ||||
1489 | protected: | |||
1490 | friend class SelectionDAG; | |||
1491 | ||||
1492 | ShuffleVectorSDNode(EVT VT, unsigned Order, const DebugLoc &dl, const int *M) | |||
1493 | : SDNode(ISD::VECTOR_SHUFFLE, Order, dl, getSDVTList(VT)), Mask(M) {} | |||
1494 | ||||
1495 | public: | |||
1496 | ArrayRef<int> getMask() const { | |||
1497 | EVT VT = getValueType(0); | |||
1498 | return makeArrayRef(Mask, VT.getVectorNumElements()); | |||
1499 | } | |||
1500 | ||||
1501 | int getMaskElt(unsigned Idx) const { | |||
1502 |     assert(Idx < getValueType(0).getVectorNumElements() && "Idx out of range!"); | |||
1503 | return Mask[Idx]; | |||
1504 | } | |||
1505 | ||||
1506 | bool isSplat() const { return isSplatMask(Mask, getValueType(0)); } | |||
1507 | ||||
1508 | int getSplatIndex() const { | |||
1509 |     assert(isSplat() && "Cannot get splat index for non-splat!"); | |||
1510 | EVT VT = getValueType(0); | |||
1511 | for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) | |||
1512 | if (Mask[i] >= 0) | |||
1513 | return Mask[i]; | |||
1514 | ||||
1515 | // We can choose any index value here and be correct because all elements | |||
1516 | // are undefined. Return 0 for better potential for callers to simplify. | |||
1517 | return 0; | |||
1518 | } | |||
1519 | ||||
1520 | static bool isSplatMask(const int *Mask, EVT VT); | |||
1521 | ||||
1522 | /// Change values in a shuffle permute mask assuming | |||
1523 | /// the two vector operands have swapped position. | |||
1524 | static void commuteMask(MutableArrayRef<int> Mask) { | |||
1525 | unsigned NumElems = Mask.size(); | |||
1526 | for (unsigned i = 0; i != NumElems; ++i) { | |||
1527 | int idx = Mask[i]; | |||
1528 | if (idx < 0) | |||
1529 | continue; | |||
1530 | else if (idx < (int)NumElems) | |||
1531 | Mask[i] = idx + NumElems; | |||
1532 | else | |||
1533 | Mask[i] = idx - NumElems; | |||
1534 | } | |||
1535 | } | |||
1536 | ||||
1537 | static bool classof(const SDNode *N) { | |||
1538 | return N->getOpcode() == ISD::VECTOR_SHUFFLE; | |||
1539 | } | |||
1540 | }; | |||
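// --- Usage sketch (illustrative): commuteMask() on a 4-element shuffle.
// Swapping the operands maps LHS lanes (0..N-1) to the RHS (+N) and vice
// versa; -1 (undef) entries are left untouched.
static void commuteMaskExample() {
  int Mask[] = {0, 5, -1, 2};              // N = 4
  ShuffleVectorSDNode::commuteMask(Mask);  // Mask is now {4, 1, -1, 6}
}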
1541 | ||||
1542 | class ConstantSDNode : public SDNode { | |||
1543 | friend class SelectionDAG; | |||
1544 | ||||
1545 | const ConstantInt *Value; | |||
1546 | ||||
1547 | ConstantSDNode(bool isTarget, bool isOpaque, const ConstantInt *val, EVT VT) | |||
1548 | : SDNode(isTarget ? ISD::TargetConstant : ISD::Constant, 0, DebugLoc(), | |||
1549 | getSDVTList(VT)), | |||
1550 | Value(val) { | |||
1551 | ConstantSDNodeBits.IsOpaque = isOpaque; | |||
1552 | } | |||
1553 | ||||
1554 | public: | |||
1555 | const ConstantInt *getConstantIntValue() const { return Value; } | |||
1556 | const APInt &getAPIntValue() const { return Value->getValue(); } | |||
1557 | uint64_t getZExtValue() const { return Value->getZExtValue(); } | |||
1558 | int64_t getSExtValue() const { return Value->getSExtValue(); } | |||
1559 |   uint64_t getLimitedValue(uint64_t Limit = UINT64_MAX) { | |||
1560 | return Value->getLimitedValue(Limit); | |||
1561 | } | |||
1562 | MaybeAlign getMaybeAlignValue() const { return Value->getMaybeAlignValue(); } | |||
1563 | Align getAlignValue() const { return Value->getAlignValue(); } | |||
1564 | ||||
1565 | bool isOne() const { return Value->isOne(); } | |||
1566 | bool isNullValue() const { return Value->isZero(); } | |||
1567 | bool isAllOnesValue() const { return Value->isMinusOne(); } | |||
1568 | bool isMaxSignedValue() const { return Value->isMaxValue(true); } | |||
1569 | bool isMinSignedValue() const { return Value->isMinValue(true); } | |||
1570 | ||||
1571 | bool isOpaque() const { return ConstantSDNodeBits.IsOpaque; } | |||
1572 | ||||
1573 | static bool classof(const SDNode *N) { | |||
1574 | return N->getOpcode() == ISD::Constant || | |||
1575 | N->getOpcode() == ISD::TargetConstant; | |||
1576 | } | |||
1577 | }; | |||
1578 | ||||
1579 | uint64_t SDNode::getConstantOperandVal(unsigned Num) const { | |||
1580 | return cast<ConstantSDNode>(getOperand(Num))->getZExtValue(); | |||
1581 | } | |||
1582 | ||||
1583 | const APInt &SDNode::getConstantOperandAPInt(unsigned Num) const { | |||
1584 | return cast<ConstantSDNode>(getOperand(Num))->getAPIntValue(); | |||
1585 | } | |||
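// --- Usage sketch (illustrative): the two accessors above make immediate
// operands easy to read in combines, e.g. recognizing a left shift by one:
static bool isShiftLeftByOne(const SDNode *N) {
  return N->getOpcode() == ISD::SHL &&
         isa<ConstantSDNode>(N->getOperand(1)) &&
         N->getConstantOperandVal(1) == 1;
}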
1586 | ||||
1587 | class ConstantFPSDNode : public SDNode { | |||
1588 | friend class SelectionDAG; | |||
1589 | ||||
1590 | const ConstantFP *Value; | |||
1591 | ||||
1592 | ConstantFPSDNode(bool isTarget, const ConstantFP *val, EVT VT) | |||
1593 | : SDNode(isTarget ? ISD::TargetConstantFP : ISD::ConstantFP, 0, | |||
1594 | DebugLoc(), getSDVTList(VT)), | |||
1595 | Value(val) {} | |||
1596 | ||||
1597 | public: | |||
1598 | const APFloat& getValueAPF() const { return Value->getValueAPF(); } | |||
1599 | const ConstantFP *getConstantFPValue() const { return Value; } | |||
1600 | ||||
1601 | /// Return true if the value is positive or negative zero. | |||
1602 | bool isZero() const { return Value->isZero(); } | |||
1603 | ||||
1604 | /// Return true if the value is a NaN. | |||
1605 | bool isNaN() const { return Value->isNaN(); } | |||
1606 | ||||
1607 |   /// Return true if the value is an infinity. | |||
1608 | bool isInfinity() const { return Value->isInfinity(); } | |||
1609 | ||||
1610 | /// Return true if the value is negative. | |||
1611 | bool isNegative() const { return Value->isNegative(); } | |||
1612 | ||||
1613 | /// We don't rely on operator== working on double values, as | |||
1614 | /// it returns true for things that are clearly not equal, like -0.0 and 0.0. | |||
1615 | /// As such, this method can be used to do an exact bit-for-bit comparison of | |||
1616 | /// two floating point values. | |||
1617 |   /// | |||
1618 | /// We leave the version with the double argument here because it's just so | |||
1619 | /// convenient to write "2.0" and the like. Without this function we'd | |||
1620 | /// have to duplicate its logic everywhere it's called. | |||
1621 | bool isExactlyValue(double V) const { | |||
1622 | return Value->getValueAPF().isExactlyValue(V); | |||
1623 | } | |||
1624 | bool isExactlyValue(const APFloat& V) const; | |||
1625 | ||||
1626 | static bool isValueValidForType(EVT VT, const APFloat& Val); | |||
1627 | ||||
1628 | static bool classof(const SDNode *N) { | |||
1629 | return N->getOpcode() == ISD::ConstantFP || | |||
1630 | N->getOpcode() == ISD::TargetConstantFP; | |||
1631 | } | |||
1632 | }; | |||
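// --- Usage sketch (illustrative): isExactlyValue() compares the underlying
// bits, so it distinguishes -0.0 from 0.0 where a plain double compare would
// not.
static bool isExactlyTwo(SDValue V) {
  const auto *CFP = dyn_cast<ConstantFPSDNode>(V);
  return CFP && CFP->isExactlyValue(2.0);
}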
1633 | ||||
1634 | /// Returns true if \p V is a constant integer zero. | |||
1635 | bool isNullConstant(SDValue V); | |||
1636 | ||||
1637 | /// Returns true if \p V is an FP constant with a value of positive zero. | |||
1638 | bool isNullFPConstant(SDValue V); | |||
1639 | ||||
1640 | /// Returns true if \p V is an integer constant with all bits set. | |||
1641 | bool isAllOnesConstant(SDValue V); | |||
1642 | ||||
1643 | /// Returns true if \p V is a constant integer one. | |||
1644 | bool isOneConstant(SDValue V); | |||
1645 | ||||
1646 | /// Return the non-bitcasted source operand of \p V if it exists. | |||
1647 | /// If \p V is not a bitcasted value, it is returned as-is. | |||
1648 | SDValue peekThroughBitcasts(SDValue V); | |||
1649 | ||||
1650 | /// Return the non-bitcasted and one-use source operand of \p V if it exists. | |||
1651 | /// If \p V is not a bitcasted one-use value, it is returned as-is. | |||
1652 | SDValue peekThroughOneUseBitcasts(SDValue V); | |||
1653 | ||||
1654 | /// Return the non-extracted vector source operand of \p V if it exists. | |||
1655 | /// If \p V is not an extracted subvector, it is returned as-is. | |||
1656 | SDValue peekThroughExtractSubvectors(SDValue V); | |||
1657 | ||||
1658 | /// Returns true if \p V is a bitwise not operation. Assumes that an all ones | |||
1659 | /// constant is canonicalized to be operand 1. | |||
1660 | bool isBitwiseNot(SDValue V, bool AllowUndefs = false); | |||
1661 | ||||
1662 | /// Returns the SDNode if it is a constant splat BuildVector or constant int. | |||
1663 | ConstantSDNode *isConstOrConstSplat(SDValue N, bool AllowUndefs = false, | |||
1664 | bool AllowTruncation = false); | |||
1665 | ||||
1666 | /// Returns the SDNode if it is a demanded constant splat BuildVector or | |||
1667 | /// constant int. | |||
1668 | ConstantSDNode *isConstOrConstSplat(SDValue N, const APInt &DemandedElts, | |||
1669 | bool AllowUndefs = false, | |||
1670 | bool AllowTruncation = false); | |||
1671 | ||||
1672 | /// Returns the SDNode if it is a constant splat BuildVector or constant float. | |||
1673 | ConstantFPSDNode *isConstOrConstSplatFP(SDValue N, bool AllowUndefs = false); | |||
1674 | ||||
1675 | /// Returns the SDNode if it is a demanded constant splat BuildVector or | |||
1676 | /// constant float. | |||
1677 | ConstantFPSDNode *isConstOrConstSplatFP(SDValue N, const APInt &DemandedElts, | |||
1678 | bool AllowUndefs = false); | |||
1679 | ||||
1680 | /// Return true if the value is a constant 0 integer or a splatted vector of | |||
1681 | /// a constant 0 integer (with no undefs by default). | |||
1682 | /// Build vector implicit truncation is not an issue for null values. | |||
1683 | bool isNullOrNullSplat(SDValue V, bool AllowUndefs = false); | |||
1684 | ||||
1685 | /// Return true if the value is a constant 1 integer or a splatted vector of a | |||
1686 | /// constant 1 integer (with no undefs). | |||
1687 | /// Does not permit build vector implicit truncation. | |||
1688 | bool isOneOrOneSplat(SDValue V, bool AllowUndefs = false); | |||
1689 | ||||
1690 | /// Return true if the value is a constant -1 integer or a splatted vector of a | |||
1691 | /// constant -1 integer (with no undefs). | |||
1692 | /// Does not permit build vector implicit truncation. | |||
1693 | bool isAllOnesOrAllOnesSplat(SDValue V, bool AllowUndefs = false); | |||
1694 | ||||
1695 | /// Return true if \p V is either an integer or FP constant. | |||
1696 | inline bool isIntOrFPConstant(SDValue V) { | |||
1697 | return isa<ConstantSDNode>(V) || isa<ConstantFPSDNode>(V); | |||
1698 | } | |||
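// --- Usage sketch (illustrative): the predicates above let pattern checks
// read declaratively, e.g. matching (sub 0, x) as an integer negation:
static bool isIntegerNegation(SDValue V) {
  return V.getOpcode() == ISD::SUB && isNullConstant(V.getOperand(0));
}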
1699 | ||||
1700 | class GlobalAddressSDNode : public SDNode { | |||
1701 | friend class SelectionDAG; | |||
1702 | ||||
1703 | const GlobalValue *TheGlobal; | |||
1704 | int64_t Offset; | |||
1705 | unsigned TargetFlags; | |||
1706 | ||||
1707 | GlobalAddressSDNode(unsigned Opc, unsigned Order, const DebugLoc &DL, | |||
1708 | const GlobalValue *GA, EVT VT, int64_t o, | |||
1709 | unsigned TF); | |||
1710 | ||||
1711 | public: | |||
1712 | const GlobalValue *getGlobal() const { return TheGlobal; } | |||
1713 | int64_t getOffset() const { return Offset; } | |||
1714 | unsigned getTargetFlags() const { return TargetFlags; } | |||
1715 | // Return the address space this GlobalAddress belongs to. | |||
1716 | unsigned getAddressSpace() const; | |||
1717 | ||||
1718 | static bool classof(const SDNode *N) { | |||
1719 | return N->getOpcode() == ISD::GlobalAddress || | |||
1720 | N->getOpcode() == ISD::TargetGlobalAddress || | |||
1721 | N->getOpcode() == ISD::GlobalTLSAddress || | |||
1722 | N->getOpcode() == ISD::TargetGlobalTLSAddress; | |||
1723 | } | |||
1724 | }; | |||
1725 | ||||
1726 | class FrameIndexSDNode : public SDNode { | |||
1727 | friend class SelectionDAG; | |||
1728 | ||||
1729 | int FI; | |||
1730 | ||||
1731 | FrameIndexSDNode(int fi, EVT VT, bool isTarg) | |||
1732 | : SDNode(isTarg ? ISD::TargetFrameIndex : ISD::FrameIndex, | |||
1733 | 0, DebugLoc(), getSDVTList(VT)), FI(fi) { | |||
1734 | } | |||
1735 | ||||
1736 | public: | |||
1737 | int getIndex() const { return FI; } | |||
1738 | ||||
1739 | static bool classof(const SDNode *N) { | |||
1740 | return N->getOpcode() == ISD::FrameIndex || | |||
1741 | N->getOpcode() == ISD::TargetFrameIndex; | |||
1742 | } | |||
1743 | }; | |||
1744 | ||||
1745 | /// This SDNode is used for LIFETIME_START/LIFETIME_END values, which indicate | |||
1746 | /// the offset and size of the region in the underlying FrameIndex whose | |||
1746 | /// lifetime starts or ends. | |||
1747 | class LifetimeSDNode : public SDNode { | |||
1748 | friend class SelectionDAG; | |||
1749 | int64_t Size; | |||
1750 | int64_t Offset; // -1 if offset is unknown. | |||
1751 | ||||
1752 | LifetimeSDNode(unsigned Opcode, unsigned Order, const DebugLoc &dl, | |||
1753 | SDVTList VTs, int64_t Size, int64_t Offset) | |||
1754 | : SDNode(Opcode, Order, dl, VTs), Size(Size), Offset(Offset) {} | |||
1755 | public: | |||
1756 | int64_t getFrameIndex() const { | |||
1757 | return cast<FrameIndexSDNode>(getOperand(1))->getIndex(); | |||
1758 | } | |||
1759 | ||||
1760 | bool hasOffset() const { return Offset >= 0; } | |||
1761 | int64_t getOffset() const { | |||
1762 |     assert(hasOffset() && "offset is unknown"); | |||
1763 | return Offset; | |||
1764 | } | |||
1765 | int64_t getSize() const { | |||
1766 |     assert(hasOffset() && "offset is unknown"); | |||
1767 | return Size; | |||
1768 | } | |||
1769 | ||||
1770 | // Methods to support isa and dyn_cast | |||
1771 | static bool classof(const SDNode *N) { | |||
1772 | return N->getOpcode() == ISD::LIFETIME_START || | |||
1773 | N->getOpcode() == ISD::LIFETIME_END; | |||
1774 | } | |||
1775 | }; | |||
1776 | ||||
1777 | /// This SDNode is used for PSEUDO_PROBE values, which hold the function GUID | |||
1778 | /// and the index of the basic block being probed. A pseudo probe serves as a | |||
1779 | /// placeholder and will be removed at the end of compilation. It has no | |||
1780 | /// operands because we do not want instruction selection to deal with any. | |||
1781 | class PseudoProbeSDNode : public SDNode { | |||
1782 | friend class SelectionDAG; | |||
1783 | uint64_t Guid; | |||
1784 | uint64_t Index; | |||
1785 | uint32_t Attributes; | |||
1786 | ||||
1787 | PseudoProbeSDNode(unsigned Opcode, unsigned Order, const DebugLoc &Dl, | |||
1788 | SDVTList VTs, uint64_t Guid, uint64_t Index, uint32_t Attr) | |||
1789 | : SDNode(Opcode, Order, Dl, VTs), Guid(Guid), Index(Index), | |||
1790 | Attributes(Attr) {} | |||
1791 | ||||
1792 | public: | |||
1793 | uint64_t getGuid() const { return Guid; } | |||
1794 | uint64_t getIndex() const { return Index; } | |||
1795 | uint32_t getAttributes() const { return Attributes; } | |||
1796 | ||||
1797 | // Methods to support isa and dyn_cast | |||
1798 | static bool classof(const SDNode *N) { | |||
1799 | return N->getOpcode() == ISD::PSEUDO_PROBE; | |||
1800 | } | |||
1801 | }; | |||
1802 | ||||
1803 | class JumpTableSDNode : public SDNode { | |||
1804 | friend class SelectionDAG; | |||
1805 | ||||
1806 | int JTI; | |||
1807 | unsigned TargetFlags; | |||
1808 | ||||
1809 | JumpTableSDNode(int jti, EVT VT, bool isTarg, unsigned TF) | |||
1810 | : SDNode(isTarg ? ISD::TargetJumpTable : ISD::JumpTable, | |||
1811 | 0, DebugLoc(), getSDVTList(VT)), JTI(jti), TargetFlags(TF) { | |||
1812 | } | |||
1813 | ||||
1814 | public: | |||
1815 | int getIndex() const { return JTI; } | |||
1816 | unsigned getTargetFlags() const { return TargetFlags; } | |||
1817 | ||||
1818 | static bool classof(const SDNode *N) { | |||
1819 | return N->getOpcode() == ISD::JumpTable || | |||
1820 | N->getOpcode() == ISD::TargetJumpTable; | |||
1821 | } | |||
1822 | }; | |||
1823 | ||||
1824 | class ConstantPoolSDNode : public SDNode { | |||
1825 | friend class SelectionDAG; | |||
1826 | ||||
1827 | union { | |||
1828 | const Constant *ConstVal; | |||
1829 | MachineConstantPoolValue *MachineCPVal; | |||
1830 | } Val; | |||
1831 | int Offset; // It's a MachineConstantPoolValue if top bit is set. | |||
1832 | Align Alignment; // Minimum alignment requirement of CP. | |||
1833 | unsigned TargetFlags; | |||
1834 | ||||
1835 | ConstantPoolSDNode(bool isTarget, const Constant *c, EVT VT, int o, | |||
1836 | Align Alignment, unsigned TF) | |||
1837 | : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, 0, | |||
1838 | DebugLoc(), getSDVTList(VT)), | |||
1839 | Offset(o), Alignment(Alignment), TargetFlags(TF) { | |||
1840 |     assert(Offset >= 0 && "Offset is too large"); | |||
1841 | Val.ConstVal = c; | |||
1842 | } | |||
1843 | ||||
1844 | ConstantPoolSDNode(bool isTarget, MachineConstantPoolValue *v, EVT VT, int o, | |||
1845 | Align Alignment, unsigned TF) | |||
1846 | : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, 0, | |||
1847 | DebugLoc(), getSDVTList(VT)), | |||
1848 | Offset(o), Alignment(Alignment), TargetFlags(TF) { | |||
1849 |     assert(Offset >= 0 && "Offset is too large"); | |||
1850 | Val.MachineCPVal = v; | |||
1851 |     Offset |= 1 << (sizeof(unsigned)*CHAR_BIT-1); | |||
1852 | } | |||
1853 | ||||
1854 | public: | |||
1855 | bool isMachineConstantPoolEntry() const { | |||
1856 | return Offset < 0; | |||
1857 | } | |||
1858 | ||||
1859 | const Constant *getConstVal() const { | |||
1860 |     assert(!isMachineConstantPoolEntry() && "Wrong constantpool type"); | |||
1861 | return Val.ConstVal; | |||
1862 | } | |||
1863 | ||||
1864 | MachineConstantPoolValue *getMachineCPVal() const { | |||
1865 |     assert(isMachineConstantPoolEntry() && "Wrong constantpool type"); | |||
1866 | return Val.MachineCPVal; | |||
1867 | } | |||
1868 | ||||
1869 | int getOffset() const { | |||
1870 |     return Offset & ~(1 << (sizeof(unsigned)*CHAR_BIT-1)); | |||
1871 | } | |||
1872 | ||||
1873 |   // Return the minimum alignment requirement of this constant pool object; | |||
1874 |   // an Align is always at least 1, so there is no separate "default" value. | |||
1875 | Align getAlign() const { return Alignment; } | |||
1876 | unsigned getTargetFlags() const { return TargetFlags; } | |||
1877 | ||||
1878 | Type *getType() const; | |||
1879 | ||||
1880 | static bool classof(const SDNode *N) { | |||
1881 | return N->getOpcode() == ISD::ConstantPool || | |||
1882 | N->getOpcode() == ISD::TargetConstantPool; | |||
1883 | } | |||
1884 | }; | |||
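// --- Worked example of the Offset encoding above: the sign bit of the 32-bit
// Offset field doubles as the MachineConstantPoolValue flag.
//   plain Constant, offset 16:  Offset = 0x00000010, getOffset() == 16
//   MachineCPVal,   offset 16:  Offset = 0x80000010 (flag ORed in by the
//                               constructor), still getOffset() == 16
// isMachineConstantPoolEntry() simply tests Offset < 0, and getOffset()
// masks the flag bit off again.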
1885 | ||||
1886 | /// Completely target-dependent object reference. | |||
1887 | class TargetIndexSDNode : public SDNode { | |||
1888 | friend class SelectionDAG; | |||
1889 | ||||
1890 | unsigned TargetFlags; | |||
1891 | int Index; | |||
1892 | int64_t Offset; | |||
1893 | ||||
1894 | public: | |||
1895 | TargetIndexSDNode(int Idx, EVT VT, int64_t Ofs, unsigned TF) | |||
1896 | : SDNode(ISD::TargetIndex, 0, DebugLoc(), getSDVTList(VT)), | |||
1897 | TargetFlags(TF), Index(Idx), Offset(Ofs) {} | |||
1898 | ||||
1899 | unsigned getTargetFlags() const { return TargetFlags; } | |||
1900 | int getIndex() const { return Index; } | |||
1901 | int64_t getOffset() const { return Offset; } | |||
1902 | ||||
1903 | static bool classof(const SDNode *N) { | |||
1904 | return N->getOpcode() == ISD::TargetIndex; | |||
1905 | } | |||
1906 | }; | |||
1907 | ||||
1908 | class BasicBlockSDNode : public SDNode { | |||
1909 | friend class SelectionDAG; | |||
1910 | ||||
1911 | MachineBasicBlock *MBB; | |||
1912 | ||||
1913 | /// Debug info is meaningful and potentially useful here, but we create | |||
1914 | /// blocks out of order when they're jumped to, which makes it a bit | |||
1915 | /// harder. Let's see if we need it first. | |||
1916 | explicit BasicBlockSDNode(MachineBasicBlock *mbb) | |||
1917 | : SDNode(ISD::BasicBlock, 0, DebugLoc(), getSDVTList(MVT::Other)), MBB(mbb) | |||
1918 | {} | |||
1919 | ||||
1920 | public: | |||
1921 | MachineBasicBlock *getBasicBlock() const { return MBB; } | |||
1922 | ||||
1923 | static bool classof(const SDNode *N) { | |||
1924 | return N->getOpcode() == ISD::BasicBlock; | |||
1925 | } | |||
1926 | }; | |||
1927 | ||||
1928 | /// A "pseudo-class" with methods for operating on BUILD_VECTORs. | |||
1929 | class BuildVectorSDNode : public SDNode { | |||
1930 | public: | |||
1931 | // These are constructed as SDNodes and then cast to BuildVectorSDNodes. | |||
1932 | explicit BuildVectorSDNode() = delete; | |||
1933 | ||||
1934 | /// Check if this is a constant splat, and if so, find the | |||
1935 | /// smallest element size that splats the vector. If MinSplatBits is | |||
1936 | /// nonzero, the element size must be at least that large. Note that the | |||
1937 | /// splat element may be the entire vector (i.e., a one element vector). | |||
1938 | /// Returns the splat element value in SplatValue. Any undefined bits in | |||
1939 | /// that value are zero, and the corresponding bits in the SplatUndef mask | |||
1940 | /// are set. The SplatBitSize value is set to the splat element size in | |||
1941 | /// bits. HasAnyUndefs is set to true if any bits in the vector are | |||
1942 | /// undefined. isBigEndian describes the endianness of the target. | |||
1943 | bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef, | |||
1944 | unsigned &SplatBitSize, bool &HasAnyUndefs, | |||
1945 | unsigned MinSplatBits = 0, | |||
1946 | bool isBigEndian = false) const; | |||
1947 | ||||
1948 | /// Returns the demanded splatted value or a null value if this is not a | |||
1949 | /// splat. | |||
1950 | /// | |||
1951 | /// The DemandedElts mask indicates the elements that must be in the splat. | |||
1952 | /// If passed a non-null UndefElements bitvector, it will resize it to match | |||
1953 | /// the vector width and set the bits where elements are undef. | |||
1954 | SDValue getSplatValue(const APInt &DemandedElts, | |||
1955 | BitVector *UndefElements = nullptr) const; | |||
1956 | ||||
1957 | /// Returns the splatted value or a null value if this is not a splat. | |||
1958 | /// | |||
1959 | /// If passed a non-null UndefElements bitvector, it will resize it to match | |||
1960 | /// the vector width and set the bits where elements are undef. | |||
1961 | SDValue getSplatValue(BitVector *UndefElements = nullptr) const; | |||
1962 | ||||
1963 | /// Find the shortest repeating sequence of values in the build vector. | |||
1964 | /// | |||
1965 | /// e.g. { u, X, u, X, u, u, X, u } -> { X } | |||
1966 | /// { X, Y, u, Y, u, u, X, u } -> { X, Y } | |||
1967 | /// | |||
1968 | /// Currently this must be a power-of-2 build vector. | |||
1969 | /// The DemandedElts mask indicates the elements that must be present, | |||
1970 | /// undemanded elements in Sequence may be null (SDValue()). If passed a | |||
1971 | /// non-null UndefElements bitvector, it will resize it to match the original | |||
1972 | /// vector width and set the bits where elements are undef. If result is | |||
1973 | /// false, Sequence will be empty. | |||
1974 | bool getRepeatedSequence(const APInt &DemandedElts, | |||
1975 | SmallVectorImpl<SDValue> &Sequence, | |||
1976 | BitVector *UndefElements = nullptr) const; | |||
1977 | ||||
1978 | /// Find the shortest repeating sequence of values in the build vector. | |||
1979 | /// | |||
1980 | /// e.g. { u, X, u, X, u, u, X, u } -> { X } | |||
1981 | /// { X, Y, u, Y, u, u, X, u } -> { X, Y } | |||
1982 | /// | |||
1983 | /// Currently this must be a power-of-2 build vector. | |||
1984 | /// If passed a non-null UndefElements bitvector, it will resize it to match | |||
1985 | /// the original vector width and set the bits where elements are undef. | |||
1986 | /// If result is false, Sequence will be empty. | |||
1987 | bool getRepeatedSequence(SmallVectorImpl<SDValue> &Sequence, | |||
1988 | BitVector *UndefElements = nullptr) const; | |||
1989 | ||||
1990 | /// Returns the demanded splatted constant or null if this is not a constant | |||
1991 | /// splat. | |||
1992 | /// | |||
1993 | /// The DemandedElts mask indicates the elements that must be in the splat. | |||
1994 | /// If passed a non-null UndefElements bitvector, it will resize it to match | |||
1995 | /// the vector width and set the bits where elements are undef. | |||
1996 | ConstantSDNode * | |||
1997 | getConstantSplatNode(const APInt &DemandedElts, | |||
1998 | BitVector *UndefElements = nullptr) const; | |||
1999 | ||||
2000 | /// Returns the splatted constant or null if this is not a constant | |||
2001 | /// splat. | |||
2002 | /// | |||
2003 | /// If passed a non-null UndefElements bitvector, it will resize it to match | |||
2004 | /// the vector width and set the bits where elements are undef. | |||
2005 | ConstantSDNode * | |||
2006 | getConstantSplatNode(BitVector *UndefElements = nullptr) const; | |||
2007 | ||||
2008 | /// Returns the demanded splatted constant FP or null if this is not a | |||
2009 | /// constant FP splat. | |||
2010 | /// | |||
2011 | /// The DemandedElts mask indicates the elements that must be in the splat. | |||
2012 | /// If passed a non-null UndefElements bitvector, it will resize it to match | |||
2013 | /// the vector width and set the bits where elements are undef. | |||
2014 | ConstantFPSDNode * | |||
2015 | getConstantFPSplatNode(const APInt &DemandedElts, | |||
2016 | BitVector *UndefElements = nullptr) const; | |||
2017 | ||||
2018 | /// Returns the splatted constant FP or null if this is not a constant | |||
2019 | /// FP splat. | |||
2020 | /// | |||
2021 | /// If passed a non-null UndefElements bitvector, it will resize it to match | |||
2022 | /// the vector width and set the bits where elements are undef. | |||
2023 | ConstantFPSDNode * | |||
2024 | getConstantFPSplatNode(BitVector *UndefElements = nullptr) const; | |||
2025 | ||||
2026 | /// If this is a constant FP splat and the splatted constant FP is an | |||
2027 |   /// exact power of 2, return the log base 2 integer value. Otherwise, | |||
2028 | /// return -1. | |||
2029 | /// | |||
2030 | /// The BitWidth specifies the necessary bit precision. | |||
2031 | int32_t getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements, | |||
2032 | uint32_t BitWidth) const; | |||
2033 | ||||
2034 | bool isConstant() const; | |||
2035 | ||||
2036 | static bool classof(const SDNode *N) { | |||
2037 | return N->getOpcode() == ISD::BUILD_VECTOR; | |||
2038 | } | |||
2039 | }; | |||
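// --- Usage sketch (illustrative): detect an all-ones constant splat. The
// defaults assume MinSplatBits = 0 and a little-endian target.
static bool isAllOnesSplat(const BuildVectorSDNode *BV) {
  APInt SplatValue, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  return BV->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
                             HasAnyUndefs) &&
         !HasAnyUndefs && SplatValue.isAllOnesValue();
}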
2040 | ||||
2041 | /// An SDNode that holds an arbitrary LLVM IR Value. This is | |||
2042 | /// used when the SelectionDAG needs to make a simple reference to something | |||
2043 | /// in the LLVM IR representation. | |||
2044 | /// | |||
2045 | class SrcValueSDNode : public SDNode { | |||
2046 | friend class SelectionDAG; | |||
2047 | ||||
2048 | const Value *V; | |||
2049 | ||||
2050 | /// Create a SrcValue for a general value. | |||
2051 | explicit SrcValueSDNode(const Value *v) | |||
2052 | : SDNode(ISD::SRCVALUE, 0, DebugLoc(), getSDVTList(MVT::Other)), V(v) {} | |||
2053 | ||||
2054 | public: | |||
2055 | /// Return the contained Value. | |||
2056 | const Value *getValue() const { return V; } | |||
2057 | ||||
2058 | static bool classof(const SDNode *N) { | |||
2059 | return N->getOpcode() == ISD::SRCVALUE; | |||
2060 | } | |||
2061 | }; | |||
2062 | ||||
2063 | class MDNodeSDNode : public SDNode { | |||
2064 | friend class SelectionDAG; | |||
2065 | ||||
2066 | const MDNode *MD; | |||
2067 | ||||
2068 | explicit MDNodeSDNode(const MDNode *md) | |||
2069 | : SDNode(ISD::MDNODE_SDNODE, 0, DebugLoc(), getSDVTList(MVT::Other)), MD(md) | |||
2070 | {} | |||
2071 | ||||
2072 | public: | |||
2073 | const MDNode *getMD() const { return MD; } | |||
2074 | ||||
2075 | static bool classof(const SDNode *N) { | |||
2076 | return N->getOpcode() == ISD::MDNODE_SDNODE; | |||
2077 | } | |||
2078 | }; | |||
2079 | ||||
2080 | class RegisterSDNode : public SDNode { | |||
2081 | friend class SelectionDAG; | |||
2082 | ||||
2083 | Register Reg; | |||
2084 | ||||
2085 | RegisterSDNode(Register reg, EVT VT) | |||
2086 | : SDNode(ISD::Register, 0, DebugLoc(), getSDVTList(VT)), Reg(reg) {} | |||
2087 | ||||
2088 | public: | |||
2089 | Register getReg() const { return Reg; } | |||
2090 | ||||
2091 | static bool classof(const SDNode *N) { | |||
2092 | return N->getOpcode() == ISD::Register; | |||
2093 | } | |||
2094 | }; | |||
2095 | ||||
2096 | class RegisterMaskSDNode : public SDNode { | |||
2097 | friend class SelectionDAG; | |||
2098 | ||||
2099 | // The memory for RegMask is not owned by the node. | |||
2100 | const uint32_t *RegMask; | |||
2101 | ||||
2102 | RegisterMaskSDNode(const uint32_t *mask) | |||
2103 | : SDNode(ISD::RegisterMask, 0, DebugLoc(), getSDVTList(MVT::Untyped)), | |||
2104 | RegMask(mask) {} | |||
2105 | ||||
2106 | public: | |||
2107 | const uint32_t *getRegMask() const { return RegMask; } | |||
2108 | ||||
2109 | static bool classof(const SDNode *N) { | |||
2110 | return N->getOpcode() == ISD::RegisterMask; | |||
2111 | } | |||
2112 | }; | |||
2113 | ||||
2114 | class BlockAddressSDNode : public SDNode { | |||
2115 | friend class SelectionDAG; | |||
2116 | ||||
2117 | const BlockAddress *BA; | |||
2118 | int64_t Offset; | |||
2119 | unsigned TargetFlags; | |||
2120 | ||||
2121 | BlockAddressSDNode(unsigned NodeTy, EVT VT, const BlockAddress *ba, | |||
2122 | int64_t o, unsigned Flags) | |||
2123 | : SDNode(NodeTy, 0, DebugLoc(), getSDVTList(VT)), | |||
2124 | BA(ba), Offset(o), TargetFlags(Flags) {} | |||
2125 | ||||
2126 | public: | |||
2127 | const BlockAddress *getBlockAddress() const { return BA; } | |||
2128 | int64_t getOffset() const { return Offset; } | |||
2129 | unsigned getTargetFlags() const { return TargetFlags; } | |||
2130 | ||||
2131 | static bool classof(const SDNode *N) { | |||
2132 | return N->getOpcode() == ISD::BlockAddress || | |||
2133 | N->getOpcode() == ISD::TargetBlockAddress; | |||
2134 | } | |||
2135 | }; | |||
2136 | ||||
2137 | class LabelSDNode : public SDNode { | |||
2138 | friend class SelectionDAG; | |||
2139 | ||||
2140 | MCSymbol *Label; | |||
2141 | ||||
2142 | LabelSDNode(unsigned Opcode, unsigned Order, const DebugLoc &dl, MCSymbol *L) | |||
2143 | : SDNode(Opcode, Order, dl, getSDVTList(MVT::Other)), Label(L) { | |||
2144 |     assert(LabelSDNode::classof(this) && "not a label opcode"); | |||
2145 | } | |||
2146 | ||||
2147 | public: | |||
2148 | MCSymbol *getLabel() const { return Label; } | |||
2149 | ||||
2150 | static bool classof(const SDNode *N) { | |||
2151 | return N->getOpcode() == ISD::EH_LABEL || | |||
2152 | N->getOpcode() == ISD::ANNOTATION_LABEL; | |||
2153 | } | |||
2154 | }; | |||
2155 | ||||
2156 | class ExternalSymbolSDNode : public SDNode { | |||
2157 | friend class SelectionDAG; | |||
2158 | ||||
2159 | const char *Symbol; | |||
2160 | unsigned TargetFlags; | |||
2161 | ||||
2162 | ExternalSymbolSDNode(bool isTarget, const char *Sym, unsigned TF, EVT VT) | |||
2163 | : SDNode(isTarget ? ISD::TargetExternalSymbol : ISD::ExternalSymbol, 0, | |||
2164 | DebugLoc(), getSDVTList(VT)), | |||
2165 | Symbol(Sym), TargetFlags(TF) {} | |||
2166 | ||||
2167 | public: | |||
2168 | const char *getSymbol() const { return Symbol; } | |||
2169 | unsigned getTargetFlags() const { return TargetFlags; } | |||
2170 | ||||
2171 | static bool classof(const SDNode *N) { | |||
2172 | return N->getOpcode() == ISD::ExternalSymbol || | |||
2173 | N->getOpcode() == ISD::TargetExternalSymbol; | |||
2174 | } | |||
2175 | }; | |||
2176 | ||||
2177 | class MCSymbolSDNode : public SDNode { | |||
2178 | friend class SelectionDAG; | |||
2179 | ||||
2180 | MCSymbol *Symbol; | |||
2181 | ||||
2182 | MCSymbolSDNode(MCSymbol *Symbol, EVT VT) | |||
2183 | : SDNode(ISD::MCSymbol, 0, DebugLoc(), getSDVTList(VT)), Symbol(Symbol) {} | |||
2184 | ||||
2185 | public: | |||
2186 | MCSymbol *getMCSymbol() const { return Symbol; } | |||
2187 | ||||
2188 | static bool classof(const SDNode *N) { | |||
2189 | return N->getOpcode() == ISD::MCSymbol; | |||
2190 | } | |||
2191 | }; | |||
2192 | ||||
2193 | class CondCodeSDNode : public SDNode { | |||
2194 | friend class SelectionDAG; | |||
2195 | ||||
2196 | ISD::CondCode Condition; | |||
2197 | ||||
2198 | explicit CondCodeSDNode(ISD::CondCode Cond) | |||
2199 | : SDNode(ISD::CONDCODE, 0, DebugLoc(), getSDVTList(MVT::Other)), | |||
2200 | Condition(Cond) {} | |||
2201 | ||||
2202 | public: | |||
2203 | ISD::CondCode get() const { return Condition; } | |||
2204 | ||||
2205 | static bool classof(const SDNode *N) { | |||
2206 | return N->getOpcode() == ISD::CONDCODE; | |||
2207 | } | |||
2208 | }; | |||
2209 | ||||
2210 | /// This class is used to represent EVTs, which are used | |||
2211 | /// to parameterize some operations. | |||
2212 | class VTSDNode : public SDNode { | |||
2213 | friend class SelectionDAG; | |||
2214 | ||||
2215 | EVT ValueType; | |||
2216 | ||||
2217 | explicit VTSDNode(EVT VT) | |||
2218 | : SDNode(ISD::VALUETYPE, 0, DebugLoc(), getSDVTList(MVT::Other)), | |||
2219 | ValueType(VT) {} | |||
2220 | ||||
2221 | public: | |||
2222 | EVT getVT() const { return ValueType; } | |||
2223 | ||||
2224 | static bool classof(const SDNode *N) { | |||
2225 | return N->getOpcode() == ISD::VALUETYPE; | |||
2226 | } | |||
2227 | }; | |||
2228 | ||||
2229 | /// Base class for LoadSDNode and StoreSDNode | |||
2230 | class LSBaseSDNode : public MemSDNode { | |||
2231 | public: | |||
2232 | LSBaseSDNode(ISD::NodeType NodeTy, unsigned Order, const DebugLoc &dl, | |||
2233 | SDVTList VTs, ISD::MemIndexedMode AM, EVT MemVT, | |||
2234 | MachineMemOperand *MMO) | |||
2235 | : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) { | |||
2236 | LSBaseSDNodeBits.AddressingMode = AM; | |||
2237 |     assert(getAddressingMode() == AM && "Value truncated"); | |||
2238 | } | |||
2239 | ||||
2240 | const SDValue &getOffset() const { | |||
2241 | return getOperand(getOpcode() == ISD::LOAD ? 2 : 3); | |||
2242 | } | |||
2243 | ||||
2244 | /// Return the addressing mode for this load or store: | |||
2245 | /// unindexed, pre-inc, pre-dec, post-inc, or post-dec. | |||
2246 | ISD::MemIndexedMode getAddressingMode() const { | |||
2247 | return static_cast<ISD::MemIndexedMode>(LSBaseSDNodeBits.AddressingMode); | |||
2248 | } | |||
2249 | ||||
2250 | /// Return true if this is a pre/post inc/dec load/store. | |||
2251 | bool isIndexed() const { return getAddressingMode() != ISD::UNINDEXED; } | |||
2252 | ||||
2253 | /// Return true if this is NOT a pre/post inc/dec load/store. | |||
2254 | bool isUnindexed() const { return getAddressingMode() == ISD::UNINDEXED; } | |||
2255 | ||||
2256 | static bool classof(const SDNode *N) { | |||
2257 | return N->getOpcode() == ISD::LOAD || | |||
2258 | N->getOpcode() == ISD::STORE; | |||
2259 | } | |||
2260 | }; | |||
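// --- Usage sketch (illustrative): indexed (pre/post inc/dec) forms produce
// an extra updated-pointer result, so generic folds often restrict
// themselves to the unindexed case first:
static bool isPlainLoadOrStore(const SDNode *N) {
  const auto *LS = dyn_cast<LSBaseSDNode>(N);
  return LS && LS->isUnindexed();
}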
2261 | ||||
2262 | /// This class is used to represent ISD::LOAD nodes. | |||
2263 | class LoadSDNode : public LSBaseSDNode { | |||
2264 | friend class SelectionDAG; | |||
2265 | ||||
2266 | LoadSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs, | |||
2267 | ISD::MemIndexedMode AM, ISD::LoadExtType ETy, EVT MemVT, | |||
2268 | MachineMemOperand *MMO) | |||
2269 | : LSBaseSDNode(ISD::LOAD, Order, dl, VTs, AM, MemVT, MMO) { | |||
2270 | LoadSDNodeBits.ExtTy = ETy; | |||
2271 |     assert(readMem() && "Load MachineMemOperand is not a load!"); | |||
2272 |     assert(!writeMem() && "Load MachineMemOperand is a store!"); | |||
2273 | } | |||
2274 | ||||
2275 | public: | |||
2276 | /// Return whether this is a plain node, | |||
2277 | /// or one of the varieties of value-extending loads. | |||
2278 | ISD::LoadExtType getExtensionType() const { | |||
2279 | return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy); | |||
2280 | } | |||
2281 | ||||
2282 | const SDValue &getBasePtr() const { return getOperand(1); } | |||
2283 | const SDValue &getOffset() const { return getOperand(2); } | |||
2284 | ||||
2285 | static bool classof(const SDNode *N) { | |||
2286 | return N->getOpcode() == ISD::LOAD; | |||
2287 | } | |||
2288 | }; | |||
2289 | ||||
2290 | /// This class is used to represent ISD::STORE nodes. | |||
2291 | class StoreSDNode : public LSBaseSDNode { | |||
2292 | friend class SelectionDAG; | |||
2293 | ||||
2294 | StoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs, | |||
2295 | ISD::MemIndexedMode AM, bool isTrunc, EVT MemVT, | |||
2296 | MachineMemOperand *MMO) | |||
2297 | : LSBaseSDNode(ISD::STORE, Order, dl, VTs, AM, MemVT, MMO) { | |||
2298 | StoreSDNodeBits.IsTruncating = isTrunc; | |||
2299 |     assert(!readMem() && "Store MachineMemOperand is a load!"); | |||
2300 |     assert(writeMem() && "Store MachineMemOperand is not a store!"); | |||
2301 | } | |||
2302 | ||||
2303 | public: | |||
2304 | /// Return true if the op does a truncation before store. | |||
2305 | /// For integers this is the same as doing a TRUNCATE and storing the result. | |||
2306 | /// For floats, it is the same as doing an FP_ROUND and storing the result. | |||
2307 | bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; } | |||
2308 | void setTruncatingStore(bool Truncating) { | |||
2309 | StoreSDNodeBits.IsTruncating = Truncating; | |||
2310 | } | |||
2311 | ||||
2312 | const SDValue &getValue() const { return getOperand(1); } | |||
2313 | const SDValue &getBasePtr() const { return getOperand(2); } | |||
2314 | const SDValue &getOffset() const { return getOperand(3); } | |||
2315 | ||||
2316 | static bool classof(const SDNode *N) { | |||
2317 | return N->getOpcode() == ISD::STORE; | |||
2318 | } | |||
2319 | }; | |||
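// --- Usage sketch (illustrative): a typical guard before rewriting a store,
// skipping indexed and truncating forms whose memory width differs from the
// stored value's type.
static bool isFullWidthSimpleStore(const SDNode *N) {
  const auto *ST = dyn_cast<StoreSDNode>(N);
  return ST && ST->isUnindexed() && !ST->isTruncatingStore();
}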
2320 | ||||
2321 | /// This base class is used to represent MLOAD and MSTORE nodes | |||
2322 | class MaskedLoadStoreSDNode : public MemSDNode { | |||
2323 | public: | |||
2324 | friend class SelectionDAG; | |||
2325 | ||||
2326 | MaskedLoadStoreSDNode(ISD::NodeType NodeTy, unsigned Order, | |||
2327 | const DebugLoc &dl, SDVTList VTs, | |||
2328 | ISD::MemIndexedMode AM, EVT MemVT, | |||
2329 | MachineMemOperand *MMO) | |||
2330 | : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) { | |||
2331 | LSBaseSDNodeBits.AddressingMode = AM; | |||
2332 |     assert(getAddressingMode() == AM && "Value truncated"); | |||
2333 | } | |||
2334 | ||||
2335 | // MaskedLoadSDNode (Chain, ptr, offset, mask, passthru) | |||
2336 | // MaskedStoreSDNode (Chain, data, ptr, offset, mask) | |||
2337 | // Mask is a vector of i1 elements | |||
2338 | const SDValue &getOffset() const { | |||
2339 | return getOperand(getOpcode() == ISD::MLOAD ? 2 : 3); | |||
2340 | } | |||
2341 | const SDValue &getMask() const { | |||
2342 | return getOperand(getOpcode() == ISD::MLOAD ? 3 : 4); | |||
2343 | } | |||
2344 | ||||
2345 | /// Return the addressing mode for this load or store: | |||
2346 | /// unindexed, pre-inc, pre-dec, post-inc, or post-dec. | |||
2347 | ISD::MemIndexedMode getAddressingMode() const { | |||
2348 | return static_cast<ISD::MemIndexedMode>(LSBaseSDNodeBits.AddressingMode); | |||
2349 | } | |||
2350 | ||||
2351 | /// Return true if this is a pre/post inc/dec load/store. | |||
2352 | bool isIndexed() const { return getAddressingMode() != ISD::UNINDEXED; } | |||
2353 | ||||
2354 | /// Return true if this is NOT a pre/post inc/dec load/store. | |||
2355 | bool isUnindexed() const { return getAddressingMode() == ISD::UNINDEXED; } | |||
2356 | ||||
2357 | static bool classof(const SDNode *N) { | |||
2358 | return N->getOpcode() == ISD::MLOAD || | |||
2359 | N->getOpcode() == ISD::MSTORE; | |||
2360 | } | |||
2361 | }; | |||
2362 | ||||
2363 | /// This class is used to represent an MLOAD node | |||
2364 | class MaskedLoadSDNode : public MaskedLoadStoreSDNode { | |||
2365 | public: | |||
2366 | friend class SelectionDAG; | |||
2367 | ||||
2368 | MaskedLoadSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs, | |||
2369 | ISD::MemIndexedMode AM, ISD::LoadExtType ETy, | |||
2370 | bool IsExpanding, EVT MemVT, MachineMemOperand *MMO) | |||
2371 | : MaskedLoadStoreSDNode(ISD::MLOAD, Order, dl, VTs, AM, MemVT, MMO) { | |||
2372 | LoadSDNodeBits.ExtTy = ETy; | |||
2373 | LoadSDNodeBits.IsExpanding = IsExpanding; | |||
2374 | } | |||
2375 | ||||
2376 | ISD::LoadExtType getExtensionType() const { | |||
2377 | return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy); | |||
2378 | } | |||
2379 | ||||
2380 | const SDValue &getBasePtr() const { return getOperand(1); } | |||
2381 | const SDValue &getOffset() const { return getOperand(2); } | |||
2382 | const SDValue &getMask() const { return getOperand(3); } | |||
2383 | const SDValue &getPassThru() const { return getOperand(4); } | |||
2384 | ||||
2385 | static bool classof(const SDNode *N) { | |||
2386 | return N->getOpcode() == ISD::MLOAD; | |||
2387 | } | |||
2388 | ||||
2389 | bool isExpandingLoad() const { return LoadSDNodeBits.IsExpanding; } | |||
2390 | }; | |||
2391 | ||||
2392 | /// This class is used to represent an MSTORE node | |||
2393 | class MaskedStoreSDNode : public MaskedLoadStoreSDNode { | |||
2394 | public: | |||
2395 | friend class SelectionDAG; | |||
2396 | ||||
2397 | MaskedStoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs, | |||
2398 | ISD::MemIndexedMode AM, bool isTrunc, bool isCompressing, | |||
2399 | EVT MemVT, MachineMemOperand *MMO) | |||
2400 | : MaskedLoadStoreSDNode(ISD::MSTORE, Order, dl, VTs, AM, MemVT, MMO) { | |||
2401 | StoreSDNodeBits.IsTruncating = isTrunc; | |||
2402 | StoreSDNodeBits.IsCompressing = isCompressing; | |||
2403 | } | |||
2404 | ||||
2405 | /// Return true if the op does a truncation before store. | |||
2406 | /// For integers this is the same as doing a TRUNCATE and storing the result. | |||
2407 | /// For floats, it is the same as doing an FP_ROUND and storing the result. | |||
2408 | bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; } | |||
2409 | ||||
2410 | /// Returns true if the op does a compression to the vector before storing. | |||
2411 | /// The node contiguously stores the active elements (integers or floats) | |||
2412 | /// in src (those with their respective bit set in writemask k) to unaligned | |||
2413 | /// memory at base_addr. | |||
2414 | bool isCompressingStore() const { return StoreSDNodeBits.IsCompressing; } | |||
2415 | ||||
2416 | const SDValue &getValue() const { return getOperand(1); } | |||
2417 | const SDValue &getBasePtr() const { return getOperand(2); } | |||
2418 | const SDValue &getOffset() const { return getOperand(3); } | |||
2419 | const SDValue &getMask() const { return getOperand(4); } | |||
2420 | ||||
2421 | static bool classof(const SDNode *N) { | |||
2422 | return N->getOpcode() == ISD::MSTORE; | |||
2423 | } | |||
2424 | }; | |||
2425 | ||||
2426 | /// This is a base class used to represent | |||
2427 | /// MGATHER and MSCATTER nodes | |||
2428 | /// | |||
2429 | class MaskedGatherScatterSDNode : public MemSDNode { | |||
2430 | public: | |||
2431 | friend class SelectionDAG; | |||
2432 | ||||
2433 | MaskedGatherScatterSDNode(ISD::NodeType NodeTy, unsigned Order, | |||
2434 | const DebugLoc &dl, SDVTList VTs, EVT MemVT, | |||
2435 | MachineMemOperand *MMO, ISD::MemIndexType IndexType) | |||
2436 | : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) { | |||
2437 | LSBaseSDNodeBits.AddressingMode = IndexType; | |||
2438 |     assert(getIndexType() == IndexType && "Value truncated"); | |||
2439 | } | |||
2440 | ||||
2441 | /// How is Index applied to BasePtr when computing addresses. | |||
2442 | ISD::MemIndexType getIndexType() const { | |||
2443 | return static_cast<ISD::MemIndexType>(LSBaseSDNodeBits.AddressingMode); | |||
2444 | } | |||
2445 | void setIndexType(ISD::MemIndexType IndexType) { | |||
2446 | LSBaseSDNodeBits.AddressingMode = IndexType; | |||
2447 | } | |||
2448 | bool isIndexScaled() const { | |||
2449 | return (getIndexType() == ISD::SIGNED_SCALED) || | |||
2450 | (getIndexType() == ISD::UNSIGNED_SCALED); | |||
2451 | } | |||
2452 | bool isIndexSigned() const { | |||
2453 | return (getIndexType() == ISD::SIGNED_SCALED) || | |||
2454 | (getIndexType() == ISD::SIGNED_UNSCALED); | |||
2455 | } | |||
2456 | ||||
2457 |   // In both nodes the mask is Op2 and the base address is Op3: | |||
2458 | // MaskedGatherSDNode (Chain, passthru, mask, base, index, scale) | |||
2459 | // MaskedScatterSDNode (Chain, value, mask, base, index, scale) | |||
2460 | // Mask is a vector of i1 elements | |||
2461 | const SDValue &getBasePtr() const { return getOperand(3); } | |||
2462 | const SDValue &getIndex() const { return getOperand(4); } | |||
2463 | const SDValue &getMask() const { return getOperand(2); } | |||
2464 | const SDValue &getScale() const { return getOperand(5); } | |||
2465 | ||||
2466 | static bool classof(const SDNode *N) { | |||
2467 | return N->getOpcode() == ISD::MGATHER || | |||
2468 | N->getOpcode() == ISD::MSCATTER; | |||
2469 | } | |||
2470 | }; | |||
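// --- Usage sketch (illustrative, hypothetical predicate): a target whose
// gather/scatter addressing only supports signed, scaled indices could key
// its legalization off these queries:
static bool gatherScatterIndexNeedsFixup(const MaskedGatherScatterSDNode *N) {
  return !(N->isIndexSigned() && N->isIndexScaled());
}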
2471 | ||||
2472 | /// This class is used to represent an MGATHER node | |||
2473 | /// | |||
2474 | class MaskedGatherSDNode : public MaskedGatherScatterSDNode { | |||
2475 | public: | |||
2476 | friend class SelectionDAG; | |||
2477 | ||||
2478 | MaskedGatherSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs, | |||
2479 | EVT MemVT, MachineMemOperand *MMO, | |||
2480 | ISD::MemIndexType IndexType, ISD::LoadExtType ETy) | |||
2481 | : MaskedGatherScatterSDNode(ISD::MGATHER, Order, dl, VTs, MemVT, MMO, | |||
2482 | IndexType) { | |||
2483 | LoadSDNodeBits.ExtTy = ETy; | |||
2484 | } | |||
2485 | ||||
2486 | const SDValue &getPassThru() const { return getOperand(1); } | |||
2487 | ||||
2488 | ISD::LoadExtType getExtensionType() const { | |||
2489 | return ISD::LoadExtType(LoadSDNodeBits.ExtTy); | |||
2490 | } | |||
2491 | ||||
2492 | static bool classof(const SDNode *N) { | |||
2493 | return N->getOpcode() == ISD::MGATHER; | |||
2494 | } | |||
2495 | }; | |||
2496 | ||||
2497 | /// This class is used to represent an MSCATTER node | |||
2498 | /// | |||
2499 | class MaskedScatterSDNode : public MaskedGatherScatterSDNode { | |||
2500 | public: | |||
2501 | friend class SelectionDAG; | |||
2502 | ||||
2503 | MaskedScatterSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs, | |||
2504 | EVT MemVT, MachineMemOperand *MMO, | |||
2505 | ISD::MemIndexType IndexType, bool IsTrunc) | |||
2506 | : MaskedGatherScatterSDNode(ISD::MSCATTER, Order, dl, VTs, MemVT, MMO, | |||
2507 | IndexType) { | |||
2508 | StoreSDNodeBits.IsTruncating = IsTrunc; | |||
2509 | } | |||
2510 | ||||
2511 | /// Return true if the op does a truncation before store. | |||
2512 | /// For integers this is the same as doing a TRUNCATE and storing the result. | |||
2513 | /// For floats, it is the same as doing an FP_ROUND and storing the result. | |||
2514 | bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; } | |||
2515 | ||||
2516 | const SDValue &getValue() const { return getOperand(1); } | |||
2517 | ||||
2518 | static bool classof(const SDNode *N) { | |||
2519 | return N->getOpcode() == ISD::MSCATTER; | |||
2520 | } | |||
2521 | }; | |||

/// An SDNode that represents everything that will be needed
/// to construct a MachineInstr. These nodes are created during the
/// instruction selection proper phase.
///
/// Note that the only supported way to set the `memoperands` is by calling the
/// `SelectionDAG::setNodeMemRefs` function as the memory management happens
/// inside the DAG rather than in the node.
class MachineSDNode : public SDNode {
private:
  friend class SelectionDAG;

  MachineSDNode(unsigned Opc, unsigned Order, const DebugLoc &DL, SDVTList VTs)
      : SDNode(Opc, Order, DL, VTs) {}

  // We use a pointer union between a single `MachineMemOperand` pointer and
  // a pointer to an array of `MachineMemOperand` pointers. This is null when
  // the number of these is zero, the single-pointer variant is used when the
  // number is one, and the array is used for larger numbers.
  //
  // The array is allocated via the `SelectionDAG`'s allocator and so will
  // always live until the DAG is cleaned up and doesn't require ownership
  // here.
  //
  // We can't use something simpler like `TinyPtrVector` here because `SDNode`
  // subclasses aren't managed in a conforming C++ manner. See the comments on
  // `SelectionDAG::MorphNodeTo` which detail what goes on, but the constraint
  // here is that these don't manage memory with their constructor or
  // destructor and can be initialized to a good state even if they start off
  // uninitialized.
  PointerUnion<MachineMemOperand *, MachineMemOperand **> MemRefs = {};

  // Note that this could be folded into the above `MemRefs` member if doing
  // so is advantageous at some point. We don't need to store this in most
  // cases. However, at the moment this doesn't appear to make the allocation
  // any smaller and makes the code somewhat simpler to read.
  int NumMemRefs = 0;

public:
  using mmo_iterator = ArrayRef<MachineMemOperand *>::const_iterator;

  ArrayRef<MachineMemOperand *> memoperands() const {
    // Special case the common cases.
    if (NumMemRefs == 0)
      return {};
    if (NumMemRefs == 1)
      return makeArrayRef(MemRefs.getAddrOfPtr1(), 1);

    // Otherwise we have an actual array.
    return makeArrayRef(MemRefs.get<MachineMemOperand **>(), NumMemRefs);
  }
  mmo_iterator memoperands_begin() const { return memoperands().begin(); }
  mmo_iterator memoperands_end() const { return memoperands().end(); }
  bool memoperands_empty() const { return memoperands().empty(); }

  /// Clear out the memory reference descriptor list.
  void clearMemRefs() {
    MemRefs = nullptr;
    NumMemRefs = 0;
  }

  static bool classof(const SDNode *N) { return N->isMachineOpcode(); }
};
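
// Illustrative sketch, not part of the original header: a hypothetical helper
// that walks a MachineSDNode's memoperands. Note the operands themselves are
// attached by SelectionDAG::setNodeMemRefs, never by the node.
inline uint64_t totalMemRefBytes(const MachineSDNode *MN) {
  uint64_t Bytes = 0;
  for (const MachineMemOperand *MMO : MN->memoperands())
    Bytes += MMO->getSize(); // Size in bytes of each memory reference.
  return Bytes;
}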

/// An SDNode that records whether a register contains a value guaranteed to
/// be aligned accordingly.
class AssertAlignSDNode : public SDNode {
  Align Alignment;

public:
  AssertAlignSDNode(unsigned Order, const DebugLoc &DL, EVT VT, Align A)
      : SDNode(ISD::AssertAlign, Order, DL, getSDVTList(VT)), Alignment(A) {}

  Align getAlign() const { return Alignment; }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::AssertAlign;
  }
};
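
// Illustrative sketch, not part of the original header: a hypothetical helper
// that reads the alignment guarantee off an ISD::AssertAlign node, falling
// back to byte alignment for any other node.
inline Align assertedAlignOrOne(const SDNode *N) {
  if (const auto *AA = dyn_cast<AssertAlignSDNode>(N))
    return AA->getAlign();
  return Align(1); // No guarantee recorded.
}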

class SDNodeIterator {
  const SDNode *Node;
  unsigned Operand;

  SDNodeIterator(const SDNode *N, unsigned Op) : Node(N), Operand(Op) {}

public:
  using iterator_category = std::forward_iterator_tag;
  using value_type = SDNode;
  using difference_type = std::ptrdiff_t;
  using pointer = value_type *;
  using reference = value_type &;

  bool operator==(const SDNodeIterator &x) const {
    return Operand == x.Operand;
  }
  bool operator!=(const SDNodeIterator &x) const { return !operator==(x); }

  pointer operator*() const {
    return Node->getOperand(Operand).getNode();
  }
  pointer operator->() const { return operator*(); }

  SDNodeIterator &operator++() { // Preincrement
    ++Operand;
    return *this;
  }
  SDNodeIterator operator++(int) { // Postincrement
    SDNodeIterator tmp = *this;
    ++*this;
    return tmp;
  }
  size_t operator-(SDNodeIterator Other) const {
    assert(Node == Other.Node &&
           "Cannot compare iterators of two different nodes!");
    return Operand - Other.Operand;
  }

  static SDNodeIterator begin(const SDNode *N) { return SDNodeIterator(N, 0); }
  static SDNodeIterator end(const SDNode *N) {
    return SDNodeIterator(N, N->getNumOperands());
  }

  unsigned getOperand() const { return Operand; }
  const SDNode *getNode() const { return Node; }
};
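
// Illustrative sketch, not part of the original header: a hypothetical helper
// walking a node's operand producers with SDNodeIterator. Dereferencing the
// iterator yields the SDNode feeding that operand, not the SDValue.
inline unsigned countOperandNodes(const SDNode *N) {
  unsigned Count = 0;
  for (SDNodeIterator I = SDNodeIterator::begin(N), E = SDNodeIterator::end(N);
       I != E; ++I)
    ++Count; // *I is the defining SDNode of operand I.getOperand().
  return Count;
}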

template <> struct GraphTraits<SDNode *> {
  using NodeRef = SDNode *;
  using ChildIteratorType = SDNodeIterator;

  static NodeRef getEntryNode(SDNode *N) { return N; }

  static ChildIteratorType child_begin(NodeRef N) {
    return SDNodeIterator::begin(N);
  }

  static ChildIteratorType child_end(NodeRef N) {
    return SDNodeIterator::end(N);
  }
};
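
// Illustrative sketch, not part of the original header: this GraphTraits
// specialization is what allows generic graph utilities to walk a DAG from a
// root node, for example (assuming llvm/ADT/DepthFirstIterator.h is included):
//
//   for (SDNode *M : depth_first(Root))
//     M->dump(); // Visits Root and every transitive operand producer once.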

/// A representation of the largest SDNode, for use in sizeof().
///
/// This needs to be a union because the largest node differs on 32 bit systems
/// with 4 and 8 byte pointer alignment, respectively.
using LargestSDNode = AlignedCharArrayUnion<AtomicSDNode, TargetIndexSDNode,
                                            BlockAddressSDNode,
                                            GlobalAddressSDNode,
                                            PseudoProbeSDNode>;

/// The SDNode class with the greatest alignment requirement.
using MostAlignedSDNode = GlobalAddressSDNode;
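
// Illustrative sketch, not part of the original header: these two aliases let
// a node allocator pick a slab size and alignment that fit any SDNode,
// roughly along the lines of:
//
//   void *Mem = Allocator.Allocate(sizeof(LargestSDNode),
//                                  alignof(MostAlignedSDNode));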

namespace ISD {

/// Returns true if the specified node is a non-extending and unindexed load.
inline bool isNormalLoad(const SDNode *N) {
  const LoadSDNode *Ld = dyn_cast<LoadSDNode>(N);
  return Ld && Ld->getExtensionType() == ISD::NON_EXTLOAD &&
         Ld->getAddressingMode() == ISD::UNINDEXED;
}

/// Returns true if the specified node is a non-extending load.
inline bool isNON_EXTLoad(const SDNode *N) {
  return isa<LoadSDNode>(N) &&
         cast<LoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD;
}

/// Returns true if the specified node is an EXTLOAD.
inline bool isEXTLoad(const SDNode *N) {
  return isa<LoadSDNode>(N) &&
         cast<LoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD;
}

/// Returns true if the specified node is a SEXTLOAD.
inline bool isSEXTLoad(const SDNode *N) {
  return isa<LoadSDNode>(N) &&
         cast<LoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD;
}

/// Returns true if the specified node is a ZEXTLOAD.
inline bool isZEXTLoad(const SDNode *N) {
  return isa<LoadSDNode>(N) &&
         cast<LoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD;
}

/// Returns true if the specified node is an unindexed load.
inline bool isUNINDEXEDLoad(const SDNode *N) {
  return isa<LoadSDNode>(N) &&
         cast<LoadSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
}

/// Returns true if the specified node is a non-truncating
/// and unindexed store.
inline bool isNormalStore(const SDNode *N) {
  const StoreSDNode *St = dyn_cast<StoreSDNode>(N);
  return St && !St->isTruncatingStore() &&
         St->getAddressingMode() == ISD::UNINDEXED;
}

/// Returns true if the specified node is an unindexed store.
inline bool isUNINDEXEDStore(const SDNode *N) {
  return isa<StoreSDNode>(N) &&
         cast<StoreSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
}
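
// Illustrative sketch, not part of the original header: a hypothetical guard
// of the kind DAG combines build from these predicates, accepting only plain,
// non-volatile loads.
inline bool isSimpleCombinableLoad(const SDNode *N) {
  // isNormalLoad already folds the NON_EXTLOAD and UNINDEXED checks together.
  return isNormalLoad(N) && !cast<LoadSDNode>(N)->isVolatile();
}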

/// Attempt to match a unary predicate against a scalar/splat constant or
/// every element of a constant BUILD_VECTOR.
/// If AllowUndefs is true, then UNDEF elements will pass nullptr to Match.
bool matchUnaryPredicate(SDValue Op,
                         std::function<bool(ConstantSDNode *)> Match,
                         bool AllowUndefs = false);

/// Attempt to match a binary predicate against a pair of scalar/splat
/// constants or every element of a pair of constant BUILD_VECTORs.
/// If AllowUndefs is true, then UNDEF elements will pass nullptr to Match.
/// If AllowTypeMismatch is true then RetType + ArgTypes don't need to match.
bool matchBinaryPredicate(
    SDValue LHS, SDValue RHS,
    std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match,
    bool AllowUndefs = false, bool AllowTypeMismatch = false);
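
// Illustrative sketch, not part of the original header: a typical call site
// for matchUnaryPredicate, asking the same question of a splat constant or of
// every BUILD_VECTOR element uniformly.
//
//   bool AllPow2 = ISD::matchUnaryPredicate(Op, [](ConstantSDNode *C) {
//     return C && C->getAPIntValue().isPowerOf2();
//   });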

/// Returns true if the specified value is the overflow result from one
/// of the overflow intrinsic nodes.
inline bool isOverflowIntrOpRes(SDValue Op) {
  unsigned Opc = Op.getOpcode();
  return (Op.getResNo() == 1 &&
          (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
           Opc == ISD::USUBO || Opc == ISD::SMULO || Opc == ISD::UMULO));
}
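
// Illustrative sketch, not part of the original header: overflow nodes yield
// two results, the arithmetic value (result 0) and the overflow flag
// (result 1); isOverflowIntrOpRes fires only for the latter. Roughly:
//
//   SDValue Sum = DAG.getNode(ISD::SADDO, DL, VTs, A, B); // value, result 0
//   SDValue Ovf = Sum.getValue(1);                        // flag, result 1
//   assert(ISD::isOverflowIntrOpRes(Ovf) && "expected the overflow result");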

} // end namespace ISD

} // end namespace llvm

#endif // LLVM_CODEGEN_SELECTIONDAGNODES_H