File:    build/source/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
Warning: line 13850, column 22: Called C++ object pointer is null
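
Note: the flagged line (13850) lies beyond the excerpt below. This checker
fires when a member function is invoked through a pointer that is null on at
least one path. A minimal sketch of the pattern, using a hypothetical helper
name:

  const AArch64Subtarget *ST = getSubtargetOrNull(); // hypothetical; may return nullptr
  bool HasNEON = ST->hasNEON(); // warning: called C++ object pointer is null
  // Typical fix: guard (or assert non-null) before the call.
  if (ST)
    HasNEON = ST->hasNEON();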
//===-- AArch64ISelLowering.cpp - AArch64 DAG Lowering Implementation ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the AArch64TargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "AArch64ISelLowering.h"
#include "AArch64CallingConvention.h"
#include "AArch64ExpandImm.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64PerfectShuffle.h"
#include "AArch64RegisterInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ObjCARCUtil.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/OperandTraits.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <bitset>
#include <cassert>
#include <cctype>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <limits>
#include <optional>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "aarch64-lower"

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumShiftInserts, "Number of vector shift inserts");
STATISTIC(NumOptimizedImms, "Number of times immediates were optimized");

// FIXME: The necessary dtprel relocations don't seem to be supported
// well in the GNU bfd and gold linkers at the moment. Therefore, by
// default, for now, fall back to GeneralDynamic code generation.
cl::opt<bool> EnableAArch64ELFLocalDynamicTLSGeneration(
    "aarch64-elf-ldtls-generation", cl::Hidden,
    cl::desc("Allow AArch64 Local Dynamic TLS code generation"),
    cl::init(false));

static cl::opt<bool>
    EnableOptimizeLogicalImm("aarch64-enable-logical-imm", cl::Hidden,
                             cl::desc("Enable AArch64 logical imm instruction "
                                      "optimization"),
                             cl::init(true));

// Temporary option added for the purpose of testing functionality added
// to DAGCombiner.cpp in D92230. It is expected that this can be removed in
// the future, once both implementations are based on MGATHER rather than
// the GLD1 nodes added for the SVE gather load intrinsics.
static cl::opt<bool>
    EnableCombineMGatherIntrinsics("aarch64-enable-mgather-combine", cl::Hidden,
                                   cl::desc("Combine extends of AArch64 masked "
                                            "gather intrinsics"),
                                   cl::init(true));

// XOR, OR and CMP all use ALU ports, and the data dependency becomes the
// bottleneck after this transform on high-end CPUs. This maximum leaf-node
// limit guards that the cmp+ccmp transform remains profitable.
static cl::opt<unsigned> MaxXors("aarch64-max-xors", cl::init(16), cl::Hidden,
                                 cl::desc("Maximum of xors"));

/// Value type used for condition codes.
static const MVT MVT_CC = MVT::i32;

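// Maps a scalar element type to the "packed" scalable vector type whose lanes
// exactly fill one 128-bit SVE granule, e.g. i8 -> nxv16i8 and f64 -> nxv2f64.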
static inline EVT getPackedSVEVectorVT(EVT VT) {
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    llvm_unreachable("unexpected element type for vector");
  case MVT::i8:
    return MVT::nxv16i8;
  case MVT::i16:
    return MVT::nxv8i16;
  case MVT::i32:
    return MVT::nxv4i32;
  case MVT::i64:
    return MVT::nxv2i64;
  case MVT::f16:
    return MVT::nxv8f16;
  case MVT::f32:
    return MVT::nxv4f32;
  case MVT::f64:
    return MVT::nxv2f64;
  case MVT::bf16:
    return MVT::nxv8bf16;
  }
}

// NOTE: Currently there's only a need to return integer vector types. If this
// changes then just add an extra "type" parameter.
static inline EVT getPackedSVEVectorVT(ElementCount EC) {
  switch (EC.getKnownMinValue()) {
  default:
    llvm_unreachable("unexpected element count for vector");
  case 16:
    return MVT::nxv16i8;
  case 8:
    return MVT::nxv8i16;
  case 4:
    return MVT::nxv4i32;
  case 2:
    return MVT::nxv2i64;
  }
}

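// Maps a scalable predicate type (nxvNi1) to the widest integer vector type
// with the same lane count, which predicate operations can be promoted to.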
static inline EVT getPromotedVTForPredicate(EVT VT) {
  assert(VT.isScalableVector() && (VT.getVectorElementType() == MVT::i1) &&
         "Expected scalable predicate vector type!");
  switch (VT.getVectorMinNumElements()) {
  default:
    llvm_unreachable("unexpected element count for vector");
  case 2:
    return MVT::nxv2i64;
  case 4:
    return MVT::nxv4i32;
  case 8:
    return MVT::nxv8i16;
  case 16:
    return MVT::nxv16i8;
  }
}

/// Returns true if VT's elements occupy the lowest bit positions of its
/// associated register class without any intervening space.
///
/// For example, nxv2f16, nxv4f16 and nxv8f16 are legal types that belong to the
/// same register class, but only nxv8f16 can be treated as a packed vector.
static inline bool isPackedVectorType(EVT VT, SelectionDAG &DAG) {
  assert(VT.isVector() && DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
         "Expected legal vector type!");
  return VT.isFixedLengthVector() ||
         VT.getSizeInBits().getKnownMinSize() == AArch64::SVEBitsPerBlock;
}

// Returns true for ####_MERGE_PASSTHRU opcodes, whose operands have a leading
// predicate and end with a passthru value matching the result type.
static bool isMergePassthruOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    return false;
  case AArch64ISD::BITREVERSE_MERGE_PASSTHRU:
  case AArch64ISD::BSWAP_MERGE_PASSTHRU:
  case AArch64ISD::REVH_MERGE_PASSTHRU:
  case AArch64ISD::REVW_MERGE_PASSTHRU:
  case AArch64ISD::REVD_MERGE_PASSTHRU:
  case AArch64ISD::CTLZ_MERGE_PASSTHRU:
  case AArch64ISD::CTPOP_MERGE_PASSTHRU:
  case AArch64ISD::DUP_MERGE_PASSTHRU:
  case AArch64ISD::ABS_MERGE_PASSTHRU:
  case AArch64ISD::NEG_MERGE_PASSTHRU:
  case AArch64ISD::FNEG_MERGE_PASSTHRU:
  case AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU:
  case AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU:
  case AArch64ISD::FCEIL_MERGE_PASSTHRU:
  case AArch64ISD::FFLOOR_MERGE_PASSTHRU:
  case AArch64ISD::FNEARBYINT_MERGE_PASSTHRU:
  case AArch64ISD::FRINT_MERGE_PASSTHRU:
  case AArch64ISD::FROUND_MERGE_PASSTHRU:
  case AArch64ISD::FROUNDEVEN_MERGE_PASSTHRU:
  case AArch64ISD::FTRUNC_MERGE_PASSTHRU:
  case AArch64ISD::FP_ROUND_MERGE_PASSTHRU:
  case AArch64ISD::FP_EXTEND_MERGE_PASSTHRU:
  case AArch64ISD::SINT_TO_FP_MERGE_PASSTHRU:
  case AArch64ISD::UINT_TO_FP_MERGE_PASSTHRU:
  case AArch64ISD::FCVTZU_MERGE_PASSTHRU:
  case AArch64ISD::FCVTZS_MERGE_PASSTHRU:
  case AArch64ISD::FSQRT_MERGE_PASSTHRU:
  case AArch64ISD::FRECPX_MERGE_PASSTHRU:
  case AArch64ISD::FABS_MERGE_PASSTHRU:
    return true;
  }
}

// Returns true if inactive lanes are known to be zeroed by construction.
static bool isZeroingInactiveLanes(SDValue Op) {
  switch (Op.getOpcode()) {
  default:
    // We guarantee i1 splat_vectors to zero the other lanes by
    // implementing it with ptrue and possibly a punpklo for nxv1i1.
    if (ISD::isConstantSplatVectorAllOnes(Op.getNode()))
      return true;
    return false;
  case AArch64ISD::PTRUE:
  case AArch64ISD::SETCC_MERGE_ZERO:
    return true;
  case ISD::INTRINSIC_WO_CHAIN:
    switch (Op.getConstantOperandVal(0)) {
    default:
      return false;
    case Intrinsic::aarch64_sve_ptrue:
    case Intrinsic::aarch64_sve_pnext:
    case Intrinsic::aarch64_sve_cmpeq:
    case Intrinsic::aarch64_sve_cmpne:
    case Intrinsic::aarch64_sve_cmpge:
    case Intrinsic::aarch64_sve_cmpgt:
    case Intrinsic::aarch64_sve_cmphs:
    case Intrinsic::aarch64_sve_cmphi:
    case Intrinsic::aarch64_sve_cmpeq_wide:
    case Intrinsic::aarch64_sve_cmpne_wide:
    case Intrinsic::aarch64_sve_cmpge_wide:
    case Intrinsic::aarch64_sve_cmpgt_wide:
    case Intrinsic::aarch64_sve_cmplt_wide:
    case Intrinsic::aarch64_sve_cmple_wide:
    case Intrinsic::aarch64_sve_cmphs_wide:
    case Intrinsic::aarch64_sve_cmphi_wide:
    case Intrinsic::aarch64_sve_cmplo_wide:
    case Intrinsic::aarch64_sve_cmpls_wide:
    case Intrinsic::aarch64_sve_fcmpeq:
    case Intrinsic::aarch64_sve_fcmpne:
    case Intrinsic::aarch64_sve_fcmpge:
    case Intrinsic::aarch64_sve_fcmpgt:
    case Intrinsic::aarch64_sve_fcmpuo:
      return true;
    }
  }
}

AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
                                             const AArch64Subtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  // AArch64 doesn't have comparisons which set GPRs or setcc instructions, so
  // we have to make something up. Arbitrarily, choose ZeroOrOne.
  setBooleanContents(ZeroOrOneBooleanContent);
  // When comparing vectors the result sets the different elements in the
  // vector to all-one or all-zero.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  // Set up the register classes.
  addRegisterClass(MVT::i32, &AArch64::GPR32allRegClass);
  addRegisterClass(MVT::i64, &AArch64::GPR64allRegClass);

  if (Subtarget->hasLS64()) {
    addRegisterClass(MVT::i64x8, &AArch64::GPR64x8ClassRegClass);
    setOperationAction(ISD::LOAD, MVT::i64x8, Custom);
    setOperationAction(ISD::STORE, MVT::i64x8, Custom);
  }

  if (Subtarget->hasFPARMv8()) {
    addRegisterClass(MVT::f16, &AArch64::FPR16RegClass);
    addRegisterClass(MVT::bf16, &AArch64::FPR16RegClass);
    addRegisterClass(MVT::f32, &AArch64::FPR32RegClass);
    addRegisterClass(MVT::f64, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::f128, &AArch64::FPR128RegClass);
  }

  if (Subtarget->hasNEON()) {
    addRegisterClass(MVT::v16i8, &AArch64::FPR8RegClass);
    addRegisterClass(MVT::v8i16, &AArch64::FPR16RegClass);
    // Someone set us up the NEON.
    addDRTypeForNEON(MVT::v2f32);
    addDRTypeForNEON(MVT::v8i8);
    addDRTypeForNEON(MVT::v4i16);
    addDRTypeForNEON(MVT::v2i32);
    addDRTypeForNEON(MVT::v1i64);
    addDRTypeForNEON(MVT::v1f64);
    addDRTypeForNEON(MVT::v4f16);
    if (Subtarget->hasBF16())
      addDRTypeForNEON(MVT::v4bf16);

    addQRTypeForNEON(MVT::v4f32);
    addQRTypeForNEON(MVT::v2f64);
    addQRTypeForNEON(MVT::v16i8);
    addQRTypeForNEON(MVT::v8i16);
    addQRTypeForNEON(MVT::v4i32);
    addQRTypeForNEON(MVT::v2i64);
    addQRTypeForNEON(MVT::v8f16);
    if (Subtarget->hasBF16())
      addQRTypeForNEON(MVT::v8bf16);
  }

  if (Subtarget->hasSVEorSME()) {
    // Add legal sve predicate types
    addRegisterClass(MVT::nxv1i1, &AArch64::PPRRegClass);
    addRegisterClass(MVT::nxv2i1, &AArch64::PPRRegClass);
    addRegisterClass(MVT::nxv4i1, &AArch64::PPRRegClass);
    addRegisterClass(MVT::nxv8i1, &AArch64::PPRRegClass);
    addRegisterClass(MVT::nxv16i1, &AArch64::PPRRegClass);

    // Add legal sve data types
    addRegisterClass(MVT::nxv16i8, &AArch64::ZPRRegClass);
    addRegisterClass(MVT::nxv8i16, &AArch64::ZPRRegClass);
    addRegisterClass(MVT::nxv4i32, &AArch64::ZPRRegClass);
    addRegisterClass(MVT::nxv2i64, &AArch64::ZPRRegClass);

    addRegisterClass(MVT::nxv2f16, &AArch64::ZPRRegClass);
    addRegisterClass(MVT::nxv4f16, &AArch64::ZPRRegClass);
    addRegisterClass(MVT::nxv8f16, &AArch64::ZPRRegClass);
    addRegisterClass(MVT::nxv2f32, &AArch64::ZPRRegClass);
    addRegisterClass(MVT::nxv4f32, &AArch64::ZPRRegClass);
    addRegisterClass(MVT::nxv2f64, &AArch64::ZPRRegClass);

    if (Subtarget->hasBF16()) {
      addRegisterClass(MVT::nxv2bf16, &AArch64::ZPRRegClass);
      addRegisterClass(MVT::nxv4bf16, &AArch64::ZPRRegClass);
      addRegisterClass(MVT::nxv8bf16, &AArch64::ZPRRegClass);
    }

    if (Subtarget->useSVEForFixedLengthVectors()) {
      for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
        if (useSVEForFixedLengthVectorVT(VT))
          addRegisterClass(VT, &AArch64::ZPRRegClass);

      for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
        if (useSVEForFixedLengthVectorVT(VT))
          addRegisterClass(VT, &AArch64::ZPRRegClass);
    }
  }

  // Compute derived properties from the register classes
  computeRegisterProperties(Subtarget->getRegisterInfo());

  // Provide all sorts of operation actions
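  // (Legal: select the node as-is; Custom: route through LowerOperation;
  // Expand: rewrite in terms of other nodes; Promote: widen to a larger type;
  // LibCall: lower to a runtime library call.)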
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::SETCC, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::i64, Custom);
  setOperationAction(ISD::SETCC, MVT::f16, Custom);
  setOperationAction(ISD::SETCC, MVT::f32, Custom);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);
  setOperationAction(ISD::STRICT_FSETCC, MVT::f16, Custom);
  setOperationAction(ISD::STRICT_FSETCC, MVT::f32, Custom);
  setOperationAction(ISD::STRICT_FSETCC, MVT::f64, Custom);
  setOperationAction(ISD::STRICT_FSETCCS, MVT::f16, Custom);
  setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Custom);
  setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Custom);
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  setOperationAction(ISD::BR_CC, MVT::i64, Custom);
  setOperationAction(ISD::BR_CC, MVT::f16, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f64, Custom);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT, MVT::f16, Custom);
  setOperationAction(ISD::SELECT, MVT::bf16, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f16, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::bf16, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);
  setOperationAction(ISD::SETCCCARRY, MVT::i64, Custom);

  setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);

  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f80, Expand);

  setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);

  // Custom lowering hooks are needed for XOR
  // to fold it into CSINC/CSINV.
  setOperationAction(ISD::XOR, MVT::i32, Custom);
  setOperationAction(ISD::XOR, MVT::i64, Custom);

  // Virtually no operation on f128 is legal, but LLVM can't expand them when
  // there's a valid register class, so we need custom operations in most cases.
  setOperationAction(ISD::FABS, MVT::f128, Expand);
  setOperationAction(ISD::FADD, MVT::f128, LibCall);
  setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
  setOperationAction(ISD::FCOS, MVT::f128, Expand);
  setOperationAction(ISD::FDIV, MVT::f128, LibCall);
  setOperationAction(ISD::FMA, MVT::f128, Expand);
  setOperationAction(ISD::FMUL, MVT::f128, LibCall);
  setOperationAction(ISD::FNEG, MVT::f128, Expand);
  setOperationAction(ISD::FPOW, MVT::f128, Expand);
  setOperationAction(ISD::FREM, MVT::f128, Expand);
  setOperationAction(ISD::FRINT, MVT::f128, Expand);
  setOperationAction(ISD::FSIN, MVT::f128, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f128, Expand);
  setOperationAction(ISD::FSQRT, MVT::f128, Expand);
  setOperationAction(ISD::FSUB, MVT::f128, LibCall);
  setOperationAction(ISD::FTRUNC, MVT::f128, Expand);
  setOperationAction(ISD::SETCC, MVT::f128, Custom);
  setOperationAction(ISD::STRICT_FSETCC, MVT::f128, Custom);
  setOperationAction(ISD::STRICT_FSETCCS, MVT::f128, Custom);
  setOperationAction(ISD::BR_CC, MVT::f128, Custom);
  setOperationAction(ISD::SELECT, MVT::f128, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);
  setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);
  // FIXME: f128 FMINIMUM and FMAXIMUM (including STRICT versions) currently
  // aren't handled.

  // Lowering for many of the conversions is actually specified by the non-f128
  // type. The LowerXXX function will be trivial when f128 isn't involved.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i128, Custom);
  setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
  setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i128, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i128, Custom);
  setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
  setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom);
  setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i128, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i128, Custom);
  setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i128, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i128, Custom);
  setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i128, Custom);
  setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
  setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
  setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);
  setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Custom);
  setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Custom);
  setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Custom);

  setOperationAction(ISD::FP_TO_UINT_SAT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_UINT_SAT, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_SINT_SAT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_SINT_SAT, MVT::i64, Custom);

  // Variable arguments.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  // Variable-sized objects.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  if (Subtarget->isTargetWindows())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
  else
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);

  // Constant pool entries
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);

  // BlockAddress
  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);

  // AArch64 lacks both left-rotate and popcount instructions.
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i64, Expand);
  for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
  }

  // AArch64 doesn't have i32 MULH{S|U}.
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  setOperationAction(ISD::MULHS, MVT::i32, Expand);

  // AArch64 doesn't have {U|S}MUL_LOHI.
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);

  if (Subtarget->hasCSSC()) {
    setOperationAction(ISD::CTPOP, MVT::i32, Legal);
    setOperationAction(ISD::CTPOP, MVT::i64, Legal);
    setOperationAction(ISD::CTPOP, MVT::i128, Expand);

    setOperationAction(ISD::PARITY, MVT::i128, Expand);

    setOperationAction(ISD::CTTZ, MVT::i32, Legal);
    setOperationAction(ISD::CTTZ, MVT::i64, Legal);
    setOperationAction(ISD::CTTZ, MVT::i128, Expand);

    setOperationAction(ISD::ABS, MVT::i32, Legal);
    setOperationAction(ISD::ABS, MVT::i64, Legal);

    setOperationAction(ISD::SMAX, MVT::i32, Legal);
    setOperationAction(ISD::SMAX, MVT::i64, Legal);
    setOperationAction(ISD::UMAX, MVT::i32, Legal);
    setOperationAction(ISD::UMAX, MVT::i64, Legal);

    setOperationAction(ISD::SMIN, MVT::i32, Legal);
    setOperationAction(ISD::SMIN, MVT::i64, Legal);
    setOperationAction(ISD::UMIN, MVT::i32, Legal);
    setOperationAction(ISD::UMIN, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32, Custom);
    setOperationAction(ISD::CTPOP, MVT::i64, Custom);
    setOperationAction(ISD::CTPOP, MVT::i128, Custom);

    setOperationAction(ISD::PARITY, MVT::i64, Custom);
    setOperationAction(ISD::PARITY, MVT::i128, Custom);

    setOperationAction(ISD::ABS, MVT::i32, Custom);
    setOperationAction(ISD::ABS, MVT::i64, Custom);
  }

  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
  for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
  }
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  // Custom lower Add/Sub/Mul with overflow.
  setOperationAction(ISD::SADDO, MVT::i32, Custom);
  setOperationAction(ISD::SADDO, MVT::i64, Custom);
  setOperationAction(ISD::UADDO, MVT::i32, Custom);
  setOperationAction(ISD::UADDO, MVT::i64, Custom);
  setOperationAction(ISD::SSUBO, MVT::i32, Custom);
  setOperationAction(ISD::SSUBO, MVT::i64, Custom);
  setOperationAction(ISD::USUBO, MVT::i32, Custom);
  setOperationAction(ISD::USUBO, MVT::i64, Custom);
  setOperationAction(ISD::SMULO, MVT::i32, Custom);
  setOperationAction(ISD::SMULO, MVT::i64, Custom);
  setOperationAction(ISD::UMULO, MVT::i32, Custom);
  setOperationAction(ISD::UMULO, MVT::i64, Custom);

  setOperationAction(ISD::ADDCARRY, MVT::i32, Custom);
  setOperationAction(ISD::ADDCARRY, MVT::i64, Custom);
  setOperationAction(ISD::SUBCARRY, MVT::i32, Custom);
  setOperationAction(ISD::SUBCARRY, MVT::i64, Custom);
  setOperationAction(ISD::SADDO_CARRY, MVT::i32, Custom);
  setOperationAction(ISD::SADDO_CARRY, MVT::i64, Custom);
  setOperationAction(ISD::SSUBO_CARRY, MVT::i32, Custom);
  setOperationAction(ISD::SSUBO_CARRY, MVT::i64, Custom);

  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
  if (Subtarget->hasFullFP16())
    setOperationAction(ISD::FCOPYSIGN, MVT::f16, Custom);
  else
    setOperationAction(ISD::FCOPYSIGN, MVT::f16, Promote);

  for (auto Op : {ISD::FREM, ISD::FPOW, ISD::FPOWI,
                  ISD::FCOS, ISD::FSIN, ISD::FSINCOS,
                  ISD::FEXP, ISD::FEXP2, ISD::FLOG,
                  ISD::FLOG2, ISD::FLOG10, ISD::STRICT_FREM,
                  ISD::STRICT_FPOW, ISD::STRICT_FPOWI, ISD::STRICT_FCOS,
                  ISD::STRICT_FSIN, ISD::STRICT_FEXP, ISD::STRICT_FEXP2,
                  ISD::STRICT_FLOG, ISD::STRICT_FLOG2, ISD::STRICT_FLOG10}) {
    setOperationAction(Op, MVT::f16, Promote);
    setOperationAction(Op, MVT::v4f16, Expand);
    setOperationAction(Op, MVT::v8f16, Expand);
  }

  if (!Subtarget->hasFullFP16()) {
    for (auto Op :
         {ISD::SETCC, ISD::SELECT_CC,
          ISD::BR_CC, ISD::FADD, ISD::FSUB,
          ISD::FMUL, ISD::FDIV, ISD::FMA,
          ISD::FNEG, ISD::FABS, ISD::FCEIL,
          ISD::FSQRT, ISD::FFLOOR, ISD::FNEARBYINT,
          ISD::FRINT, ISD::FROUND, ISD::FROUNDEVEN,
          ISD::FTRUNC, ISD::FMINNUM, ISD::FMAXNUM,
          ISD::FMINIMUM, ISD::FMAXIMUM, ISD::STRICT_FADD,
          ISD::STRICT_FSUB, ISD::STRICT_FMUL, ISD::STRICT_FDIV,
          ISD::STRICT_FMA, ISD::STRICT_FCEIL, ISD::STRICT_FFLOOR,
          ISD::STRICT_FSQRT, ISD::STRICT_FRINT, ISD::STRICT_FNEARBYINT,
          ISD::STRICT_FROUND, ISD::STRICT_FTRUNC, ISD::STRICT_FROUNDEVEN,
          ISD::STRICT_FMINNUM, ISD::STRICT_FMAXNUM, ISD::STRICT_FMINIMUM,
          ISD::STRICT_FMAXIMUM})
      setOperationAction(Op, MVT::f16, Promote);

    // Round-to-integer operations need custom lowering for fp16, as Promote
    // doesn't work because the result type is integer.
    for (auto Op : {ISD::STRICT_LROUND, ISD::STRICT_LLROUND, ISD::STRICT_LRINT,
                    ISD::STRICT_LLRINT})
      setOperationAction(Op, MVT::f16, Custom);

    // promote v4f16 to v4f32 when that is known to be safe.
    setOperationPromotedToType(ISD::FADD, MVT::v4f16, MVT::v4f32);
    setOperationPromotedToType(ISD::FSUB, MVT::v4f16, MVT::v4f32);
    setOperationPromotedToType(ISD::FMUL, MVT::v4f16, MVT::v4f32);
    setOperationPromotedToType(ISD::FDIV, MVT::v4f16, MVT::v4f32);

    setOperationAction(ISD::FABS, MVT::v4f16, Expand);
    setOperationAction(ISD::FNEG, MVT::v4f16, Expand);
    setOperationAction(ISD::FROUND, MVT::v4f16, Expand);
    setOperationAction(ISD::FROUNDEVEN, MVT::v4f16, Expand);
    setOperationAction(ISD::FMA, MVT::v4f16, Expand);
    setOperationAction(ISD::SETCC, MVT::v4f16, Expand);
    setOperationAction(ISD::BR_CC, MVT::v4f16, Expand);
    setOperationAction(ISD::SELECT, MVT::v4f16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::v4f16, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v4f16, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::v4f16, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v4f16, Expand);
    setOperationAction(ISD::FCEIL, MVT::v4f16, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f16, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f16, Expand);
    setOperationAction(ISD::FSQRT, MVT::v4f16, Expand);

    setOperationAction(ISD::FABS, MVT::v8f16, Expand);
    setOperationAction(ISD::FADD, MVT::v8f16, Expand);
    setOperationAction(ISD::FCEIL, MVT::v8f16, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::v8f16, Expand);
    setOperationAction(ISD::FDIV, MVT::v8f16, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v8f16, Expand);
    setOperationAction(ISD::FMA, MVT::v8f16, Expand);
    setOperationAction(ISD::FMUL, MVT::v8f16, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v8f16, Expand);
    setOperationAction(ISD::FNEG, MVT::v8f16, Expand);
    setOperationAction(ISD::FROUND, MVT::v8f16, Expand);
    setOperationAction(ISD::FROUNDEVEN, MVT::v8f16, Expand);
    setOperationAction(ISD::FRINT, MVT::v8f16, Expand);
    setOperationAction(ISD::FSQRT, MVT::v8f16, Expand);
    setOperationAction(ISD::FSUB, MVT::v8f16, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v8f16, Expand);
    setOperationAction(ISD::SETCC, MVT::v8f16, Expand);
    setOperationAction(ISD::BR_CC, MVT::v8f16, Expand);
    setOperationAction(ISD::SELECT, MVT::v8f16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::v8f16, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v8f16, Expand);
  }

  // AArch64 has implementations of a lot of rounding-like FP operations.
  for (auto Op :
       {ISD::FFLOOR, ISD::FNEARBYINT, ISD::FCEIL,
        ISD::FRINT, ISD::FTRUNC, ISD::FROUND,
        ISD::FROUNDEVEN, ISD::FMINNUM, ISD::FMAXNUM,
        ISD::FMINIMUM, ISD::FMAXIMUM, ISD::LROUND,
        ISD::LLROUND, ISD::LRINT, ISD::LLRINT,
        ISD::STRICT_FFLOOR, ISD::STRICT_FCEIL, ISD::STRICT_FNEARBYINT,
        ISD::STRICT_FRINT, ISD::STRICT_FTRUNC, ISD::STRICT_FROUNDEVEN,
        ISD::STRICT_FROUND, ISD::STRICT_FMINNUM, ISD::STRICT_FMAXNUM,
        ISD::STRICT_FMINIMUM, ISD::STRICT_FMAXIMUM, ISD::STRICT_LROUND,
        ISD::STRICT_LLROUND, ISD::STRICT_LRINT, ISD::STRICT_LLRINT}) {
    for (MVT Ty : {MVT::f32, MVT::f64})
      setOperationAction(Op, Ty, Legal);
    if (Subtarget->hasFullFP16())
      setOperationAction(Op, MVT::f16, Legal);
  }

  // Basic strict FP operations are legal
  for (auto Op : {ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL,
                  ISD::STRICT_FDIV, ISD::STRICT_FMA, ISD::STRICT_FSQRT}) {
    for (MVT Ty : {MVT::f32, MVT::f64})
      setOperationAction(Op, Ty, Legal);
    if (Subtarget->hasFullFP16())
      setOperationAction(Op, MVT::f16, Legal);
  }

  // Strict conversion to a larger type is legal
  for (auto VT : {MVT::f32, MVT::f64})
    setOperationAction(ISD::STRICT_FP_EXTEND, VT, Legal);

  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
  setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);

  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i128, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Custom);

  // Generate outline atomics library calls only if LSE was not specified for
  // subtarget
  if (Subtarget->outlineAtomics() && !Subtarget->hasLSE()) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i8, LibCall);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i16, LibCall);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, LibCall);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, LibCall);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i128, LibCall);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i8, LibCall);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i16, LibCall);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, LibCall);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i8, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i16, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i8, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i16, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i64, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_CLR, MVT::i8, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_CLR, MVT::i16, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_CLR, MVT::i32, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_CLR, MVT::i64, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i8, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i16, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, LibCall);
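    // The macros below register the four memory-ordering variants
    // (_relax, _acq, _rel, _acq_rel) of each outline-atomic helper for the
    // 1/2/4/8(/16)-byte widths, e.g. __aarch64_cas4_acq_rel.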
#define LCALLNAMES(A, B, N)                                                    \
  setLibcallName(A##N##_RELAX, #B #N "_relax");                                \
  setLibcallName(A##N##_ACQ, #B #N "_acq");                                    \
  setLibcallName(A##N##_REL, #B #N "_rel");                                    \
  setLibcallName(A##N##_ACQ_REL, #B #N "_acq_rel");
#define LCALLNAME4(A, B)                                                       \
  LCALLNAMES(A, B, 1)                                                          \
  LCALLNAMES(A, B, 2) LCALLNAMES(A, B, 4) LCALLNAMES(A, B, 8)
#define LCALLNAME5(A, B)                                                       \
  LCALLNAMES(A, B, 1)                                                          \
  LCALLNAMES(A, B, 2)                                                          \
  LCALLNAMES(A, B, 4) LCALLNAMES(A, B, 8) LCALLNAMES(A, B, 16)
    LCALLNAME5(RTLIB::OUTLINE_ATOMIC_CAS, __aarch64_cas)
    LCALLNAME4(RTLIB::OUTLINE_ATOMIC_SWP, __aarch64_swp)
    LCALLNAME4(RTLIB::OUTLINE_ATOMIC_LDADD, __aarch64_ldadd)
    LCALLNAME4(RTLIB::OUTLINE_ATOMIC_LDSET, __aarch64_ldset)
    LCALLNAME4(RTLIB::OUTLINE_ATOMIC_LDCLR, __aarch64_ldclr)
    LCALLNAME4(RTLIB::OUTLINE_ATOMIC_LDEOR, __aarch64_ldeor)
#undef LCALLNAMES
#undef LCALLNAME4
#undef LCALLNAME5
  }

  // 128-bit loads and stores can be done without expanding
  setOperationAction(ISD::LOAD, MVT::i128, Custom);
  setOperationAction(ISD::STORE, MVT::i128, Custom);

  // Aligned 128-bit loads and stores are single-copy atomic according to the
  // v8.4a spec.
  if (Subtarget->hasLSE2()) {
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i128, Custom);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i128, Custom);
  }

  // 256 bit non-temporal stores can be lowered to STNP. Do this as part of the
  // custom lowering, as there are no un-paired non-temporal stores and
  // legalization will break up 256 bit inputs.
  setOperationAction(ISD::STORE, MVT::v32i8, Custom);
  setOperationAction(ISD::STORE, MVT::v16i16, Custom);
  setOperationAction(ISD::STORE, MVT::v16f16, Custom);
  setOperationAction(ISD::STORE, MVT::v8i32, Custom);
  setOperationAction(ISD::STORE, MVT::v8f32, Custom);
  setOperationAction(ISD::STORE, MVT::v4f64, Custom);
  setOperationAction(ISD::STORE, MVT::v4i64, Custom);

  // 256 bit non-temporal loads can be lowered to LDNP. This is done using
  // custom lowering, as there are no un-paired non-temporal loads, and
  // legalization will break up 256 bit inputs.
  setOperationAction(ISD::LOAD, MVT::v32i8, Custom);
  setOperationAction(ISD::LOAD, MVT::v16i16, Custom);
  setOperationAction(ISD::LOAD, MVT::v16f16, Custom);
  setOperationAction(ISD::LOAD, MVT::v8i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v8f32, Custom);
  setOperationAction(ISD::LOAD, MVT::v4f64, Custom);
  setOperationAction(ISD::LOAD, MVT::v4i64, Custom);

  // Lower READCYCLECOUNTER using an mrs from PMCCNTR_EL0.
  // This requires the Performance Monitors extension.
  if (Subtarget->hasPerfMon())
    setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);

  if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr &&
      getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) {
    // Issue __sincos_stret if available.
    setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
    setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
  } else {
    setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  }

  if (Subtarget->getTargetTriple().isOSMSVCRT()) {
    // MSVCRT doesn't have powi; fall back to pow
    setLibcallName(RTLIB::POWI_F32, nullptr);
    setLibcallName(RTLIB::POWI_F64, nullptr);
  }

  // Make floating-point constants legal for the large code model, so they don't
  // become loads from the constant pool.
  if (Subtarget->isTargetMachO() && TM.getCodeModel() == CodeModel::Large) {
    setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
    setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
  }

  // AArch64 does not have floating-point extending loads, i1 sign-extending
  // load, floating-point truncating stores, or v2i32->v2i16 truncating store.
  for (MVT VT : MVT::fp_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f80, Expand);
  }
  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Expand);

  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f128, MVT::f80, Expand);
  setTruncStoreAction(MVT::f128, MVT::f64, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f16, Expand);

  setOperationAction(ISD::BITCAST, MVT::i16, Custom);
  setOperationAction(ISD::BITCAST, MVT::f16, Custom);
  setOperationAction(ISD::BITCAST, MVT::bf16, Custom);

  // Indexed loads and stores are supported.
  for (unsigned im = (unsigned)ISD::PRE_INC;
       im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
    setIndexedLoadAction(im, MVT::i8, Legal);
    setIndexedLoadAction(im, MVT::i16, Legal);
    setIndexedLoadAction(im, MVT::i32, Legal);
    setIndexedLoadAction(im, MVT::i64, Legal);
    setIndexedLoadAction(im, MVT::f64, Legal);
    setIndexedLoadAction(im, MVT::f32, Legal);
    setIndexedLoadAction(im, MVT::f16, Legal);
    setIndexedLoadAction(im, MVT::bf16, Legal);
    setIndexedStoreAction(im, MVT::i8, Legal);
    setIndexedStoreAction(im, MVT::i16, Legal);
    setIndexedStoreAction(im, MVT::i32, Legal);
    setIndexedStoreAction(im, MVT::i64, Legal);
    setIndexedStoreAction(im, MVT::f64, Legal);
    setIndexedStoreAction(im, MVT::f32, Legal);
    setIndexedStoreAction(im, MVT::f16, Legal);
    setIndexedStoreAction(im, MVT::bf16, Legal);
  }

  // Trap.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
  setOperationAction(ISD::UBSANTRAP, MVT::Other, Legal);

  // We combine OR nodes for bitfield operations.
  setTargetDAGCombine(ISD::OR);
  // Try to create BICs for vector ANDs.
  setTargetDAGCombine(ISD::AND);

  // Vector add and sub nodes may conceal a high-half opportunity.
  // Also, try to fold ADD into CSINC/CSINV.
  setTargetDAGCombine({ISD::ADD, ISD::ABS, ISD::SUB, ISD::XOR, ISD::SINT_TO_FP,
                       ISD::UINT_TO_FP});

  setTargetDAGCombine({ISD::FP_TO_SINT, ISD::FP_TO_UINT, ISD::FP_TO_SINT_SAT,
                       ISD::FP_TO_UINT_SAT, ISD::FDIV});

  // Try and combine setcc with csel
  setTargetDAGCombine(ISD::SETCC);

  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);

  setTargetDAGCombine({ISD::ANY_EXTEND, ISD::ZERO_EXTEND, ISD::SIGN_EXTEND,
                       ISD::VECTOR_SPLICE, ISD::SIGN_EXTEND_INREG,
                       ISD::CONCAT_VECTORS, ISD::EXTRACT_SUBVECTOR,
                       ISD::INSERT_SUBVECTOR, ISD::STORE, ISD::BUILD_VECTOR});
  setTargetDAGCombine(ISD::LOAD);

  setTargetDAGCombine(ISD::MSTORE);

  setTargetDAGCombine(ISD::MUL);

  setTargetDAGCombine({ISD::SELECT, ISD::VSELECT});

  setTargetDAGCombine({ISD::INTRINSIC_VOID, ISD::INTRINSIC_W_CHAIN,
                       ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
                       ISD::VECREDUCE_ADD, ISD::STEP_VECTOR});

  setTargetDAGCombine({ISD::MGATHER, ISD::MSCATTER});

  setTargetDAGCombine(ISD::FP_EXTEND);

  setTargetDAGCombine(ISD::GlobalAddress);

  setTargetDAGCombine(ISD::CTLZ);

  // In case of strict alignment, avoid an excessive number of byte wide stores.
  MaxStoresPerMemsetOptSize = 8;
  MaxStoresPerMemset =
      Subtarget->requiresStrictAlign() ? MaxStoresPerMemsetOptSize : 32;

  MaxGluedStoresPerMemcpy = 4;
  MaxStoresPerMemcpyOptSize = 4;
  MaxStoresPerMemcpy =
      Subtarget->requiresStrictAlign() ? MaxStoresPerMemcpyOptSize : 16;

  MaxStoresPerMemmoveOptSize = 4;
  MaxStoresPerMemmove = 4;

  MaxLoadsPerMemcmpOptSize = 4;
  MaxLoadsPerMemcmp =
      Subtarget->requiresStrictAlign() ? MaxLoadsPerMemcmpOptSize : 8;

  setStackPointerRegisterToSaveRestore(AArch64::SP);

  setSchedulingPreference(Sched::Hybrid);

  EnableExtLdPromotion = true;

  // Set required alignment.
  setMinFunctionAlignment(Align(4));
  // Set preferred alignments.
  setPrefLoopAlignment(Align(1ULL << STI.getPrefLoopLogAlignment()));
  setMaxBytesForAlignment(STI.getMaxBytesForLoopAlignment());
  setPrefFunctionAlignment(Align(1ULL << STI.getPrefFunctionLogAlignment()));

  // Only change the limit for entries in a jump table if specified by
  // the sub target, but not at the command line.
  unsigned MaxJT = STI.getMaximumJumpTableSize();
  if (MaxJT && getMaximumJumpTableSize() == UINT_MAX)
    setMaximumJumpTableSize(MaxJT);

  setHasExtractBitsInsn(true);

  setMaxDivRemBitWidthSupported(128);

  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  if (Subtarget->hasNEON()) {
    // FIXME: v1f64 shouldn't be legal if we can avoid it, because it leads to
    // silliness like this:
    for (auto Op :
         {ISD::SELECT, ISD::SELECT_CC, ISD::SETCC,
          ISD::BR_CC, ISD::FADD, ISD::FSUB,
          ISD::FMUL, ISD::FDIV, ISD::FMA,
          ISD::FNEG, ISD::FABS, ISD::FCEIL,
          ISD::FSQRT, ISD::FFLOOR, ISD::FNEARBYINT,
          ISD::FRINT, ISD::FROUND, ISD::FROUNDEVEN,
          ISD::FTRUNC, ISD::FMINNUM, ISD::FMAXNUM,
          ISD::FMINIMUM, ISD::FMAXIMUM, ISD::STRICT_FADD,
          ISD::STRICT_FSUB, ISD::STRICT_FMUL, ISD::STRICT_FDIV,
          ISD::STRICT_FMA, ISD::STRICT_FCEIL, ISD::STRICT_FFLOOR,
          ISD::STRICT_FSQRT, ISD::STRICT_FRINT, ISD::STRICT_FNEARBYINT,
          ISD::STRICT_FROUND, ISD::STRICT_FTRUNC, ISD::STRICT_FROUNDEVEN,
          ISD::STRICT_FMINNUM, ISD::STRICT_FMAXNUM, ISD::STRICT_FMINIMUM,
          ISD::STRICT_FMAXIMUM})
      setOperationAction(Op, MVT::v1f64, Expand);

    for (auto Op :
         {ISD::FP_TO_SINT, ISD::FP_TO_UINT, ISD::SINT_TO_FP, ISD::UINT_TO_FP,
          ISD::FP_ROUND, ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT, ISD::MUL,
          ISD::STRICT_FP_TO_SINT, ISD::STRICT_FP_TO_UINT,
          ISD::STRICT_SINT_TO_FP, ISD::STRICT_UINT_TO_FP, ISD::STRICT_FP_ROUND})
      setOperationAction(Op, MVT::v1i64, Expand);

    // AArch64 doesn't have direct vector ->f32 conversion instructions for
    // elements smaller than i32, so promote the input to i32 first.
    setOperationPromotedToType(ISD::UINT_TO_FP, MVT::v4i8, MVT::v4i32);
    setOperationPromotedToType(ISD::SINT_TO_FP, MVT::v4i8, MVT::v4i32);

    // Similarly, there is no direct i32 -> f64 vector conversion instruction,
    // nor a direct i32 -> f16 one. Set these to Custom, so the conversion
    // happens in two steps: v4i32 -> v4f32 -> v4f16
1032 | for (auto Op : {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::STRICT_SINT_TO_FP, | ||||
1033 | ISD::STRICT_UINT_TO_FP}) | ||||
1034 | for (auto VT : {MVT::v2i32, MVT::v2i64, MVT::v4i32}) | ||||
1035 | setOperationAction(Op, VT, Custom); | ||||
1036 | |||||
1037 | if (Subtarget->hasFullFP16()) { | ||||
1038 | setOperationAction(ISD::ConstantFP, MVT::f16, Legal); | ||||
1039 | |||||
1040 | setOperationAction(ISD::SINT_TO_FP, MVT::v8i8, Custom); | ||||
1041 | setOperationAction(ISD::UINT_TO_FP, MVT::v8i8, Custom); | ||||
1042 | setOperationAction(ISD::SINT_TO_FP, MVT::v16i8, Custom); | ||||
1043 | setOperationAction(ISD::UINT_TO_FP, MVT::v16i8, Custom); | ||||
1044 | setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom); | ||||
1045 | setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom); | ||||
1046 | setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Custom); | ||||
1047 | setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom); | ||||
1048 | } else { | ||||
1049 | // When AArch64 doesn't have full fp16 support, promote the input | ||||
1050 | // to i32 first. | ||||
1051 | setOperationPromotedToType(ISD::SINT_TO_FP, MVT::v8i8, MVT::v8i32); | ||||
1052 | setOperationPromotedToType(ISD::UINT_TO_FP, MVT::v8i8, MVT::v8i32); | ||||
1053 | setOperationPromotedToType(ISD::UINT_TO_FP, MVT::v16i8, MVT::v16i32); | ||||
1054 | setOperationPromotedToType(ISD::SINT_TO_FP, MVT::v16i8, MVT::v16i32); | ||||
1055 | setOperationPromotedToType(ISD::UINT_TO_FP, MVT::v4i16, MVT::v4i32); | ||||
1056 | setOperationPromotedToType(ISD::SINT_TO_FP, MVT::v4i16, MVT::v4i32); | ||||
1057 | setOperationPromotedToType(ISD::SINT_TO_FP, MVT::v8i16, MVT::v8i32); | ||||
1058 | setOperationPromotedToType(ISD::UINT_TO_FP, MVT::v8i16, MVT::v8i32); | ||||
1059 | } | ||||
1060 | |||||
1061 | setOperationAction(ISD::CTLZ, MVT::v1i64, Expand); | ||||
1062 | setOperationAction(ISD::CTLZ, MVT::v2i64, Expand); | ||||
1063 | setOperationAction(ISD::BITREVERSE, MVT::v8i8, Legal); | ||||
1064 | setOperationAction(ISD::BITREVERSE, MVT::v16i8, Legal); | ||||
1065 | setOperationAction(ISD::BITREVERSE, MVT::v2i32, Custom); | ||||
1066 | setOperationAction(ISD::BITREVERSE, MVT::v4i32, Custom); | ||||
1067 | setOperationAction(ISD::BITREVERSE, MVT::v1i64, Custom); | ||||
1068 | setOperationAction(ISD::BITREVERSE, MVT::v2i64, Custom); | ||||
1069 | for (auto VT : {MVT::v1i64, MVT::v2i64}) { | ||||
1070 | setOperationAction(ISD::UMAX, VT, Custom); | ||||
1071 | setOperationAction(ISD::SMAX, VT, Custom); | ||||
1072 | setOperationAction(ISD::UMIN, VT, Custom); | ||||
1073 | setOperationAction(ISD::SMIN, VT, Custom); | ||||
1074 | } | ||||
1075 | |||||
1076 | // AArch64 doesn't have MUL.2d: | ||||
1077 | setOperationAction(ISD::MUL, MVT::v2i64, Expand); | ||||
1078 | // Custom handling for some quad-vector types to detect MULL. | ||||
1079 | setOperationAction(ISD::MUL, MVT::v8i16, Custom); | ||||
1080 | setOperationAction(ISD::MUL, MVT::v4i32, Custom); | ||||
1081 | setOperationAction(ISD::MUL, MVT::v2i64, Custom); | ||||
1082 | |||||
1083 | // Saturates | ||||
1084 | for (MVT VT : { MVT::v8i8, MVT::v4i16, MVT::v2i32, | ||||
1085 | MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) { | ||||
1086 | setOperationAction(ISD::SADDSAT, VT, Legal); | ||||
1087 | setOperationAction(ISD::UADDSAT, VT, Legal); | ||||
1088 | setOperationAction(ISD::SSUBSAT, VT, Legal); | ||||
1089 | setOperationAction(ISD::USUBSAT, VT, Legal); | ||||
1090 | } | ||||
1091 | |||||
1092 | for (MVT VT : {MVT::v8i8, MVT::v4i16, MVT::v2i32, MVT::v16i8, MVT::v8i16, | ||||
1093 | MVT::v4i32}) { | ||||
1094 | setOperationAction(ISD::AVGFLOORS, VT, Legal); | ||||
1095 | setOperationAction(ISD::AVGFLOORU, VT, Legal); | ||||
1096 | setOperationAction(ISD::AVGCEILS, VT, Legal); | ||||
1097 | setOperationAction(ISD::AVGCEILU, VT, Legal); | ||||
1098 | setOperationAction(ISD::ABDS, VT, Legal); | ||||
1099 | setOperationAction(ISD::ABDU, VT, Legal); | ||||
1100 | } | ||||
1101 | |||||
1102 | // Vector reductions | ||||
1103 | for (MVT VT : { MVT::v4f16, MVT::v2f32, | ||||
1104 | MVT::v8f16, MVT::v4f32, MVT::v2f64 }) { | ||||
1105 | if (VT.getVectorElementType() != MVT::f16 || Subtarget->hasFullFP16()) { | ||||
1106 | setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom); | ||||
1107 | setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom); | ||||
1108 | |||||
1109 | setOperationAction(ISD::VECREDUCE_FADD, VT, Legal); | ||||
1110 | } | ||||
1111 | } | ||||
1112 | for (MVT VT : { MVT::v8i8, MVT::v4i16, MVT::v2i32, | ||||
1113 | MVT::v16i8, MVT::v8i16, MVT::v4i32 }) { | ||||
1114 | setOperationAction(ISD::VECREDUCE_ADD, VT, Custom); | ||||
1115 | setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom); | ||||
1116 | setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom); | ||||
1117 | setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom); | ||||
1118 | setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom); | ||||
1119 | } | ||||
1120 | setOperationAction(ISD::VECREDUCE_ADD, MVT::v2i64, Custom); | ||||
1121 | |||||
1122 | setOperationAction(ISD::ANY_EXTEND, MVT::v4i32, Legal); | ||||
1123 | setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand); | ||||
1124 | // Likewise, narrowing and extending vector loads/stores aren't handled | ||||
1125 | // directly. | ||||
1126 | for (MVT VT : MVT::fixedlen_vector_valuetypes()) { | ||||
1127 | setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand); | ||||
1128 | |||||
1129 | if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32) { | ||||
1130 | setOperationAction(ISD::MULHS, VT, Legal); | ||||
1131 | setOperationAction(ISD::MULHU, VT, Legal); | ||||
1132 | } else { | ||||
1133 | setOperationAction(ISD::MULHS, VT, Expand); | ||||
1134 | setOperationAction(ISD::MULHU, VT, Expand); | ||||
1135 | } | ||||
1136 | setOperationAction(ISD::SMUL_LOHI, VT, Expand); | ||||
1137 | setOperationAction(ISD::UMUL_LOHI, VT, Expand); | ||||
1138 | |||||
1139 | setOperationAction(ISD::BSWAP, VT, Expand); | ||||
1140 | setOperationAction(ISD::CTTZ, VT, Expand); | ||||
1141 | |||||
1142 | for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) { | ||||
1143 | setTruncStoreAction(VT, InnerVT, Expand); | ||||
1144 | setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand); | ||||
1145 | setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand); | ||||
1146 | setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand); | ||||
1147 | } | ||||
1148 | } | ||||
1149 | |||||
1150 | // AArch64 has implementations of a lot of rounding-like FP operations. | ||||
1151 | for (auto Op : | ||||
1152 | {ISD::FFLOOR, ISD::FNEARBYINT, ISD::FCEIL, ISD::FRINT, ISD::FTRUNC, | ||||
1153 | ISD::FROUND, ISD::FROUNDEVEN, ISD::STRICT_FFLOOR, | ||||
1154 | ISD::STRICT_FNEARBYINT, ISD::STRICT_FCEIL, ISD::STRICT_FRINT, | ||||
1155 | ISD::STRICT_FTRUNC, ISD::STRICT_FROUND, ISD::STRICT_FROUNDEVEN}) { | ||||
1156 | for (MVT Ty : {MVT::v2f32, MVT::v4f32, MVT::v2f64}) | ||||
1157 | setOperationAction(Op, Ty, Legal); | ||||
1158 | if (Subtarget->hasFullFP16()) | ||||
1159 | for (MVT Ty : {MVT::v4f16, MVT::v8f16}) | ||||
1160 | setOperationAction(Op, Ty, Legal); | ||||
1161 | } | ||||
1162 | |||||
1163 | setTruncStoreAction(MVT::v4i16, MVT::v4i8, Custom); | ||||
1164 | |||||
1165 | setLoadExtAction(ISD::EXTLOAD, MVT::v4i16, MVT::v4i8, Custom); | ||||
1166 | setLoadExtAction(ISD::SEXTLOAD, MVT::v4i16, MVT::v4i8, Custom); | ||||
1167 | setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i16, MVT::v4i8, Custom); | ||||
1168 | setLoadExtAction(ISD::EXTLOAD, MVT::v4i32, MVT::v4i8, Custom); | ||||
1169 | setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i8, Custom); | ||||
1170 | setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i8, Custom); | ||||
1171 | |||||
1172 | // ADDP custom lowering | ||||
1173 | for (MVT VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) | ||||
1174 | setOperationAction(ISD::ADD, VT, Custom); | ||||
1175 | // FADDP custom lowering | ||||
1176 | for (MVT VT : { MVT::v16f16, MVT::v8f32, MVT::v4f64 }) | ||||
1177 | setOperationAction(ISD::FADD, VT, Custom); | ||||
1178 | } | ||||
1179 | |||||
1180 | if (Subtarget->hasSME()) { | ||||
1181 | setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom); | ||||
1182 | } | ||||
1183 | |||||
1184 | // FIXME: Move lowering for more nodes here if those are common between | ||||
1185 | // SVE and SME. | ||||
1186 | if (Subtarget->hasSVEorSME()) { | ||||
1187 | for (auto VT : | ||||
1188 | {MVT::nxv16i1, MVT::nxv8i1, MVT::nxv4i1, MVT::nxv2i1, MVT::nxv1i1}) { | ||||
1189 | setOperationAction(ISD::SPLAT_VECTOR, VT, Custom); | ||||
1190 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); | ||||
1191 | } | ||||
1192 | } | ||||
1193 | |||||
1194 | if (Subtarget->hasSME()) | ||||
1195 | setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom); | ||||
1196 | |||||
1197 | if (Subtarget->hasSVE()) { | ||||
1198 | for (auto VT : {MVT::nxv16i8, MVT::nxv8i16, MVT::nxv4i32, MVT::nxv2i64}) { | ||||
1199 | setOperationAction(ISD::BITREVERSE, VT, Custom); | ||||
1200 | setOperationAction(ISD::BSWAP, VT, Custom); | ||||
1201 | setOperationAction(ISD::CTLZ, VT, Custom); | ||||
1202 | setOperationAction(ISD::CTPOP, VT, Custom); | ||||
1203 | setOperationAction(ISD::CTTZ, VT, Custom); | ||||
1204 | setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); | ||||
1205 | setOperationAction(ISD::UINT_TO_FP, VT, Custom); | ||||
1206 | setOperationAction(ISD::SINT_TO_FP, VT, Custom); | ||||
1207 | setOperationAction(ISD::FP_TO_UINT, VT, Custom); | ||||
1208 | setOperationAction(ISD::FP_TO_SINT, VT, Custom); | ||||
1209 | setOperationAction(ISD::MGATHER, VT, Custom); | ||||
1210 | setOperationAction(ISD::MSCATTER, VT, Custom); | ||||
1211 | setOperationAction(ISD::MLOAD, VT, Custom); | ||||
1212 | setOperationAction(ISD::MUL, VT, Custom); | ||||
1213 | setOperationAction(ISD::MULHS, VT, Custom); | ||||
1214 | setOperationAction(ISD::MULHU, VT, Custom); | ||||
1215 | setOperationAction(ISD::SPLAT_VECTOR, VT, Legal); | ||||
1216 | setOperationAction(ISD::VECTOR_SPLICE, VT, Custom); | ||||
1217 | setOperationAction(ISD::SELECT, VT, Custom); | ||||
1218 | setOperationAction(ISD::SETCC, VT, Custom); | ||||
1219 | setOperationAction(ISD::SDIV, VT, Custom); | ||||
1220 | setOperationAction(ISD::UDIV, VT, Custom); | ||||
1221 | setOperationAction(ISD::SMIN, VT, Custom); | ||||
1222 | setOperationAction(ISD::UMIN, VT, Custom); | ||||
1223 | setOperationAction(ISD::SMAX, VT, Custom); | ||||
1224 | setOperationAction(ISD::UMAX, VT, Custom); | ||||
1225 | setOperationAction(ISD::SHL, VT, Custom); | ||||
1226 | setOperationAction(ISD::SRL, VT, Custom); | ||||
1227 | setOperationAction(ISD::SRA, VT, Custom); | ||||
1228 | setOperationAction(ISD::ABS, VT, Custom); | ||||
1229 | setOperationAction(ISD::ABDS, VT, Custom); | ||||
1230 | setOperationAction(ISD::ABDU, VT, Custom); | ||||
1231 | setOperationAction(ISD::VECREDUCE_ADD, VT, Custom); | ||||
1232 | setOperationAction(ISD::VECREDUCE_AND, VT, Custom); | ||||
1233 | setOperationAction(ISD::VECREDUCE_OR, VT, Custom); | ||||
1234 | setOperationAction(ISD::VECREDUCE_XOR, VT, Custom); | ||||
1235 | setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom); | ||||
1236 | setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom); | ||||
1237 | setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom); | ||||
1238 | setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom); | ||||
1239 | |||||
1240 | setOperationAction(ISD::UMUL_LOHI, VT, Expand); | ||||
1241 | setOperationAction(ISD::SMUL_LOHI, VT, Expand); | ||||
1242 | setOperationAction(ISD::SELECT_CC, VT, Expand); | ||||
1243 | setOperationAction(ISD::ROTL, VT, Expand); | ||||
1244 | setOperationAction(ISD::ROTR, VT, Expand); | ||||
1245 | |||||
1246 | setOperationAction(ISD::SADDSAT, VT, Legal); | ||||
1247 | setOperationAction(ISD::UADDSAT, VT, Legal); | ||||
1248 | setOperationAction(ISD::SSUBSAT, VT, Legal); | ||||
1249 | setOperationAction(ISD::USUBSAT, VT, Legal); | ||||
1250 | setOperationAction(ISD::UREM, VT, Expand); | ||||
1251 | setOperationAction(ISD::SREM, VT, Expand); | ||||
1252 | setOperationAction(ISD::SDIVREM, VT, Expand); | ||||
1253 | setOperationAction(ISD::UDIVREM, VT, Expand); | ||||
1254 | } | ||||
1255 | |||||
1256 | // Illegal unpacked integer vector types. | ||||
1257 | for (auto VT : {MVT::nxv8i8, MVT::nxv4i16, MVT::nxv2i32}) { | ||||
1258 | setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); | ||||
1259 | setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); | ||||
1260 | } | ||||
1261 | |||||
1262 | // Legalize unpacked bitcasts to REINTERPRET_CAST. | ||||
1263 | for (auto VT : {MVT::nxv2i16, MVT::nxv4i16, MVT::nxv2i32, MVT::nxv2bf16, | ||||
1264 | MVT::nxv4bf16, MVT::nxv2f16, MVT::nxv4f16, MVT::nxv2f32}) | ||||
1265 | setOperationAction(ISD::BITCAST, VT, Custom); | ||||
1266 | |||||
1267 | for (auto VT : | ||||
1268 | { MVT::nxv2i8, MVT::nxv2i16, MVT::nxv2i32, MVT::nxv2i64, MVT::nxv4i8, | ||||
1269 | MVT::nxv4i16, MVT::nxv4i32, MVT::nxv8i8, MVT::nxv8i16 }) | ||||
1270 | setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Legal); | ||||
1271 | |||||
1272 | for (auto VT : | ||||
1273 | {MVT::nxv16i1, MVT::nxv8i1, MVT::nxv4i1, MVT::nxv2i1, MVT::nxv1i1}) { | ||||
1274 | setOperationAction(ISD::CONCAT_VECTORS, VT, Custom); | ||||
1275 | setOperationAction(ISD::SELECT, VT, Custom); | ||||
1276 | setOperationAction(ISD::SETCC, VT, Custom); | ||||
1277 | setOperationAction(ISD::TRUNCATE, VT, Custom); | ||||
1278 | setOperationAction(ISD::VECREDUCE_AND, VT, Custom); | ||||
1279 | setOperationAction(ISD::VECREDUCE_OR, VT, Custom); | ||||
1280 | setOperationAction(ISD::VECREDUCE_XOR, VT, Custom); | ||||
1281 | |||||
1282 | setOperationAction(ISD::SELECT_CC, VT, Expand); | ||||
1283 | setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); | ||||
1284 | setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); | ||||
1285 | |||||
1286 | // There are no legal MVT::nxv16f## based types. | ||||
1287 | if (VT != MVT::nxv16i1) { | ||||
1288 | setOperationAction(ISD::SINT_TO_FP, VT, Custom); | ||||
1289 | setOperationAction(ISD::UINT_TO_FP, VT, Custom); | ||||
1290 | } | ||||
1291 | } | ||||
1292 | |||||
1293 | // NEON doesn't support masked loads/stores/gathers/scatters, but SVE does | ||||
1294 | for (auto VT : {MVT::v4f16, MVT::v8f16, MVT::v2f32, MVT::v4f32, MVT::v1f64, | ||||
1295 | MVT::v2f64, MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16, | ||||
1296 | MVT::v2i32, MVT::v4i32, MVT::v1i64, MVT::v2i64}) { | ||||
1297 | setOperationAction(ISD::MLOAD, VT, Custom); | ||||
1298 | setOperationAction(ISD::MSTORE, VT, Custom); | ||||
1299 | setOperationAction(ISD::MGATHER, VT, Custom); | ||||
1300 | setOperationAction(ISD::MSCATTER, VT, Custom); | ||||
1301 | } | ||||
1302 | |||||
1303 | // Firstly, exclude all scalable vector extending loads and truncating | ||||
1304 | // stores, covering both integer and floating-point scalable vectors. | ||||
1305 | for (MVT VT : MVT::scalable_vector_valuetypes()) { | ||||
1306 | for (MVT InnerVT : MVT::scalable_vector_valuetypes()) { | ||||
1307 | setTruncStoreAction(VT, InnerVT, Expand); | ||||
1308 | setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand); | ||||
1309 | setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand); | ||||
1310 | setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand); | ||||
1311 | } | ||||
1312 | } | ||||
1313 | |||||
1314 | // Then, selectively enable those which we directly support. | ||||
1315 | setTruncStoreAction(MVT::nxv2i64, MVT::nxv2i8, Legal); | ||||
1316 | setTruncStoreAction(MVT::nxv2i64, MVT::nxv2i16, Legal); | ||||
1317 | setTruncStoreAction(MVT::nxv2i64, MVT::nxv2i32, Legal); | ||||
1318 | setTruncStoreAction(MVT::nxv4i32, MVT::nxv4i8, Legal); | ||||
1319 | setTruncStoreAction(MVT::nxv4i32, MVT::nxv4i16, Legal); | ||||
1320 | setTruncStoreAction(MVT::nxv8i16, MVT::nxv8i8, Legal); | ||||
1321 | for (auto Op : {ISD::ZEXTLOAD, ISD::SEXTLOAD, ISD::EXTLOAD}) { | ||||
1322 | setLoadExtAction(Op, MVT::nxv2i64, MVT::nxv2i8, Legal); | ||||
1323 | setLoadExtAction(Op, MVT::nxv2i64, MVT::nxv2i16, Legal); | ||||
1324 | setLoadExtAction(Op, MVT::nxv2i64, MVT::nxv2i32, Legal); | ||||
1325 | setLoadExtAction(Op, MVT::nxv4i32, MVT::nxv4i8, Legal); | ||||
1326 | setLoadExtAction(Op, MVT::nxv4i32, MVT::nxv4i16, Legal); | ||||
1327 | setLoadExtAction(Op, MVT::nxv8i16, MVT::nxv8i8, Legal); | ||||
1328 | } | ||||
1329 | |||||
1330 | // SVE supports truncating stores of 64- and 128-bit vectors | ||||
1331 | setTruncStoreAction(MVT::v2i64, MVT::v2i8, Custom); | ||||
1332 | setTruncStoreAction(MVT::v2i64, MVT::v2i16, Custom); | ||||
1333 | setTruncStoreAction(MVT::v2i64, MVT::v2i32, Custom); | ||||
1334 | setTruncStoreAction(MVT::v2i32, MVT::v2i8, Custom); | ||||
1335 | setTruncStoreAction(MVT::v2i32, MVT::v2i16, Custom); | ||||
1336 | |||||
1337 | for (auto VT : {MVT::nxv2f16, MVT::nxv4f16, MVT::nxv8f16, MVT::nxv2f32, | ||||
1338 | MVT::nxv4f32, MVT::nxv2f64}) { | ||||
1339 | setOperationAction(ISD::CONCAT_VECTORS, VT, Custom); | ||||
1340 | setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); | ||||
1341 | setOperationAction(ISD::MGATHER, VT, Custom); | ||||
1342 | setOperationAction(ISD::MSCATTER, VT, Custom); | ||||
1343 | setOperationAction(ISD::MLOAD, VT, Custom); | ||||
1344 | setOperationAction(ISD::SPLAT_VECTOR, VT, Legal); | ||||
1345 | setOperationAction(ISD::SELECT, VT, Custom); | ||||
1346 | setOperationAction(ISD::FADD, VT, Custom); | ||||
1347 | setOperationAction(ISD::FCOPYSIGN, VT, Custom); | ||||
1348 | setOperationAction(ISD::FDIV, VT, Custom); | ||||
1349 | setOperationAction(ISD::FMA, VT, Custom); | ||||
1350 | setOperationAction(ISD::FMAXIMUM, VT, Custom); | ||||
1351 | setOperationAction(ISD::FMAXNUM, VT, Custom); | ||||
1352 | setOperationAction(ISD::FMINIMUM, VT, Custom); | ||||
1353 | setOperationAction(ISD::FMINNUM, VT, Custom); | ||||
1354 | setOperationAction(ISD::FMUL, VT, Custom); | ||||
1355 | setOperationAction(ISD::FNEG, VT, Custom); | ||||
1356 | setOperationAction(ISD::FSUB, VT, Custom); | ||||
1357 | setOperationAction(ISD::FCEIL, VT, Custom); | ||||
1358 | setOperationAction(ISD::FFLOOR, VT, Custom); | ||||
1359 | setOperationAction(ISD::FNEARBYINT, VT, Custom); | ||||
1360 | setOperationAction(ISD::FRINT, VT, Custom); | ||||
1361 | setOperationAction(ISD::FROUND, VT, Custom); | ||||
1362 | setOperationAction(ISD::FROUNDEVEN, VT, Custom); | ||||
1363 | setOperationAction(ISD::FTRUNC, VT, Custom); | ||||
1364 | setOperationAction(ISD::FSQRT, VT, Custom); | ||||
1365 | setOperationAction(ISD::FABS, VT, Custom); | ||||
1366 | setOperationAction(ISD::FP_EXTEND, VT, Custom); | ||||
1367 | setOperationAction(ISD::FP_ROUND, VT, Custom); | ||||
1368 | setOperationAction(ISD::VECREDUCE_FADD, VT, Custom); | ||||
1369 | setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom); | ||||
1370 | setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom); | ||||
1371 | setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom); | ||||
1372 | setOperationAction(ISD::VECTOR_SPLICE, VT, Custom); | ||||
1373 | |||||
1374 | setOperationAction(ISD::SELECT_CC, VT, Expand); | ||||
1375 | setOperationAction(ISD::FREM, VT, Expand); | ||||
1376 | setOperationAction(ISD::FPOW, VT, Expand); | ||||
1377 | setOperationAction(ISD::FPOWI, VT, Expand); | ||||
1378 | setOperationAction(ISD::FCOS, VT, Expand); | ||||
1379 | setOperationAction(ISD::FSIN, VT, Expand); | ||||
1380 | setOperationAction(ISD::FSINCOS, VT, Expand); | ||||
1381 | setOperationAction(ISD::FEXP, VT, Expand); | ||||
1382 | setOperationAction(ISD::FEXP2, VT, Expand); | ||||
1383 | setOperationAction(ISD::FLOG, VT, Expand); | ||||
1384 | setOperationAction(ISD::FLOG2, VT, Expand); | ||||
1385 | setOperationAction(ISD::FLOG10, VT, Expand); | ||||
1386 | |||||
1387 | setCondCodeAction(ISD::SETO, VT, Expand); | ||||
1388 | setCondCodeAction(ISD::SETOLT, VT, Expand); | ||||
1389 | setCondCodeAction(ISD::SETLT, VT, Expand); | ||||
1390 | setCondCodeAction(ISD::SETOLE, VT, Expand); | ||||
1391 | setCondCodeAction(ISD::SETLE, VT, Expand); | ||||
1392 | setCondCodeAction(ISD::SETULT, VT, Expand); | ||||
1393 | setCondCodeAction(ISD::SETULE, VT, Expand); | ||||
1394 | setCondCodeAction(ISD::SETUGE, VT, Expand); | ||||
1395 | setCondCodeAction(ISD::SETUGT, VT, Expand); | ||||
1396 | setCondCodeAction(ISD::SETUEQ, VT, Expand); | ||||
1397 | setCondCodeAction(ISD::SETONE, VT, Expand); | ||||
1398 | } | ||||
1399 | |||||
1400 | for (auto VT : {MVT::nxv2bf16, MVT::nxv4bf16, MVT::nxv8bf16}) { | ||||
1401 | setOperationAction(ISD::CONCAT_VECTORS, VT, Custom); | ||||
1402 | setOperationAction(ISD::MGATHER, VT, Custom); | ||||
1403 | setOperationAction(ISD::MSCATTER, VT, Custom); | ||||
1404 | setOperationAction(ISD::MLOAD, VT, Custom); | ||||
1405 | setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); | ||||
1406 | setOperationAction(ISD::SPLAT_VECTOR, VT, Legal); | ||||
1407 | } | ||||
1408 | |||||
1409 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom); | ||||
1410 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom); | ||||
1411 | |||||
1412 | // NEON doesn't support integer divides, but SVE does | ||||
1413 | for (auto VT : {MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16, MVT::v2i32, | ||||
1414 | MVT::v4i32, MVT::v1i64, MVT::v2i64}) { | ||||
1415 | setOperationAction(ISD::SDIV, VT, Custom); | ||||
1416 | setOperationAction(ISD::UDIV, VT, Custom); | ||||
1417 | } | ||||
1418 | |||||
1419 | // NEON doesn't support 64-bit vector integer muls, but SVE does. | ||||
1420 | setOperationAction(ISD::MUL, MVT::v1i64, Custom); | ||||
1421 | setOperationAction(ISD::MUL, MVT::v2i64, Custom); | ||||
1422 | |||||
1423 | // NEON doesn't support across-vector reductions, but SVE does. | ||||
1424 | for (auto VT : {MVT::v4f16, MVT::v8f16, MVT::v2f32, MVT::v4f32, MVT::v2f64}) | ||||
1425 | setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom); | ||||
1426 | |||||
1427 | if (Subtarget->forceStreamingCompatibleSVE()) { | ||||
1428 | setTruncStoreAction(MVT::v2f32, MVT::v2f16, Custom); | ||||
1429 | setTruncStoreAction(MVT::v4f32, MVT::v4f16, Custom); | ||||
1430 | setTruncStoreAction(MVT::v8f32, MVT::v8f16, Custom); | ||||
1431 | setTruncStoreAction(MVT::v1f64, MVT::v1f16, Custom); | ||||
1432 | setTruncStoreAction(MVT::v2f64, MVT::v2f16, Custom); | ||||
1433 | setTruncStoreAction(MVT::v4f64, MVT::v4f16, Custom); | ||||
1434 | setTruncStoreAction(MVT::v1f64, MVT::v1f32, Custom); | ||||
1435 | setTruncStoreAction(MVT::v2f64, MVT::v2f32, Custom); | ||||
1436 | setTruncStoreAction(MVT::v4f64, MVT::v4f32, Custom); | ||||
1437 | for (MVT VT : {MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16, MVT::v2i32, | ||||
1438 | MVT::v4i32, MVT::v1i64, MVT::v2i64}) | ||||
1439 | addTypeForStreamingSVE(VT); | ||||
1440 | |||||
1441 | for (MVT VT : | ||||
1442 | {MVT::v4f16, MVT::v8f16, MVT::v2f32, MVT::v4f32, MVT::v2f64}) | ||||
1443 | addTypeForStreamingSVE(VT); | ||||
1444 | } | ||||
1445 | |||||
1446 | // NOTE: Currently this has to happen after computeRegisterProperties rather | ||||
1447 | // than the preferred option of combining it with the addRegisterClass call. | ||||
1448 | if (Subtarget->useSVEForFixedLengthVectors()) { | ||||
1449 | for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) | ||||
1450 | if (useSVEForFixedLengthVectorVT(VT)) | ||||
1451 | addTypeForFixedLengthSVE(VT); | ||||
1452 | for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) | ||||
1453 | if (useSVEForFixedLengthVectorVT(VT)) | ||||
1454 | addTypeForFixedLengthSVE(VT); | ||||
1455 | |||||
1456 | // A 64-bit result can come from an input wider than a NEON register. | ||||
1457 | for (auto VT : {MVT::v8i8, MVT::v4i16}) | ||||
1458 | setOperationAction(ISD::TRUNCATE, VT, Custom); | ||||
1459 | setOperationAction(ISD::FP_ROUND, MVT::v4f16, Custom); | ||||
1460 | |||||
1461 | // A 128-bit result implies an input wider than a NEON register. | ||||
1462 | for (auto VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32}) | ||||
1463 | setOperationAction(ISD::TRUNCATE, VT, Custom); | ||||
1464 | for (auto VT : {MVT::v8f16, MVT::v4f32}) | ||||
1465 | setOperationAction(ISD::FP_ROUND, VT, Custom); | ||||
1466 | |||||
1467 | // These operations are not supported on NEON but SVE can do them. | ||||
1468 | setOperationAction(ISD::BITREVERSE, MVT::v1i64, Custom); | ||||
1469 | setOperationAction(ISD::CTLZ, MVT::v1i64, Custom); | ||||
1470 | setOperationAction(ISD::CTLZ, MVT::v2i64, Custom); | ||||
1471 | setOperationAction(ISD::CTTZ, MVT::v1i64, Custom); | ||||
1472 | setOperationAction(ISD::MULHS, MVT::v1i64, Custom); | ||||
1473 | setOperationAction(ISD::MULHS, MVT::v2i64, Custom); | ||||
1474 | setOperationAction(ISD::MULHU, MVT::v1i64, Custom); | ||||
1475 | setOperationAction(ISD::MULHU, MVT::v2i64, Custom); | ||||
1476 | setOperationAction(ISD::SMAX, MVT::v1i64, Custom); | ||||
1477 | setOperationAction(ISD::SMAX, MVT::v2i64, Custom); | ||||
1478 | setOperationAction(ISD::SMIN, MVT::v1i64, Custom); | ||||
1479 | setOperationAction(ISD::SMIN, MVT::v2i64, Custom); | ||||
1480 | setOperationAction(ISD::UMAX, MVT::v1i64, Custom); | ||||
1481 | setOperationAction(ISD::UMAX, MVT::v2i64, Custom); | ||||
1482 | setOperationAction(ISD::UMIN, MVT::v1i64, Custom); | ||||
1483 | setOperationAction(ISD::UMIN, MVT::v2i64, Custom); | ||||
1484 | setOperationAction(ISD::VECREDUCE_SMAX, MVT::v2i64, Custom); | ||||
1485 | setOperationAction(ISD::VECREDUCE_SMIN, MVT::v2i64, Custom); | ||||
1486 | setOperationAction(ISD::VECREDUCE_UMAX, MVT::v2i64, Custom); | ||||
1487 | setOperationAction(ISD::VECREDUCE_UMIN, MVT::v2i64, Custom); | ||||
1488 | |||||
1489 | // Int operations with no NEON support. | ||||
1490 | for (auto VT : {MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16, | ||||
1491 | MVT::v2i32, MVT::v4i32, MVT::v2i64}) { | ||||
1492 | setOperationAction(ISD::BITREVERSE, VT, Custom); | ||||
1493 | setOperationAction(ISD::CTTZ, VT, Custom); | ||||
1494 | setOperationAction(ISD::VECREDUCE_AND, VT, Custom); | ||||
1495 | setOperationAction(ISD::VECREDUCE_OR, VT, Custom); | ||||
1496 | setOperationAction(ISD::VECREDUCE_XOR, VT, Custom); | ||||
1497 | } | ||||
1498 | |||||
1499 | |||||
1500 | // Use SVE for vectors with more than 2 elements. | ||||
1501 | for (auto VT : {MVT::v4f16, MVT::v8f16, MVT::v4f32}) | ||||
1502 | setOperationAction(ISD::VECREDUCE_FADD, VT, Custom); | ||||
1503 | } | ||||
1504 | |||||
1505 | setOperationPromotedToType(ISD::VECTOR_SPLICE, MVT::nxv2i1, MVT::nxv2i64); | ||||
1506 | setOperationPromotedToType(ISD::VECTOR_SPLICE, MVT::nxv4i1, MVT::nxv4i32); | ||||
1507 | setOperationPromotedToType(ISD::VECTOR_SPLICE, MVT::nxv8i1, MVT::nxv8i16); | ||||
1508 | setOperationPromotedToType(ISD::VECTOR_SPLICE, MVT::nxv16i1, MVT::nxv16i8); | ||||
1509 | |||||
1510 | setOperationAction(ISD::VSCALE, MVT::i32, Custom); | ||||
1511 | } | ||||
1512 | |||||
1513 | if (Subtarget->hasMOPS() && Subtarget->hasMTE()) { | ||||
1514 | // Only required for llvm.aarch64.mops.memset.tag | ||||
1515 | setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom); | ||||
1516 | } | ||||
1517 | |||||
1518 | PredictableSelectIsExpensive = Subtarget->predictableSelectIsExpensive(); | ||||
1519 | |||||
1520 | IsStrictFPEnabled = true; | ||||
1521 | } | ||||
1522 | |||||
1523 | void AArch64TargetLowering::addTypeForNEON(MVT VT) { | ||||
1524 | assert(VT.isVector() && "VT should be a vector type"); | ||||
1525 | |||||
1526 | if (VT.isFloatingPoint()) { | ||||
1527 | MVT PromoteTo = EVT(VT).changeVectorElementTypeToInteger().getSimpleVT(); | ||||
1528 | setOperationPromotedToType(ISD::LOAD, VT, PromoteTo); | ||||
1529 | setOperationPromotedToType(ISD::STORE, VT, PromoteTo); | ||||
1530 | } | ||||
1531 | |||||
1532 | // Mark vector float intrinsics as expand. | ||||
1533 | if (VT == MVT::v2f32 || VT == MVT::v4f32 || VT == MVT::v2f64) { | ||||
1534 | setOperationAction(ISD::FSIN, VT, Expand); | ||||
1535 | setOperationAction(ISD::FCOS, VT, Expand); | ||||
1536 | setOperationAction(ISD::FPOW, VT, Expand); | ||||
1537 | setOperationAction(ISD::FLOG, VT, Expand); | ||||
1538 | setOperationAction(ISD::FLOG2, VT, Expand); | ||||
1539 | setOperationAction(ISD::FLOG10, VT, Expand); | ||||
1540 | setOperationAction(ISD::FEXP, VT, Expand); | ||||
1541 | setOperationAction(ISD::FEXP2, VT, Expand); | ||||
1542 | } | ||||
1543 | |||||
1544 | // But we do support custom-lowering for FCOPYSIGN. | ||||
1545 | if (VT == MVT::v2f32 || VT == MVT::v4f32 || VT == MVT::v2f64 || | ||||
1546 | ((VT == MVT::v4f16 || VT == MVT::v8f16) && Subtarget->hasFullFP16())) | ||||
1547 | setOperationAction(ISD::FCOPYSIGN, VT, Custom); | ||||
1548 | |||||
1549 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); | ||||
1550 | setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); | ||||
1551 | setOperationAction(ISD::BUILD_VECTOR, VT, Custom); | ||||
1552 | setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); | ||||
1553 | setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); | ||||
1554 | setOperationAction(ISD::SRA, VT, Custom); | ||||
1555 | setOperationAction(ISD::SRL, VT, Custom); | ||||
1556 | setOperationAction(ISD::SHL, VT, Custom); | ||||
1557 | setOperationAction(ISD::OR, VT, Custom); | ||||
1558 | setOperationAction(ISD::SETCC, VT, Custom); | ||||
1559 | setOperationAction(ISD::CONCAT_VECTORS, VT, Legal); | ||||
1560 | |||||
1561 | setOperationAction(ISD::SELECT, VT, Expand); | ||||
1562 | setOperationAction(ISD::SELECT_CC, VT, Expand); | ||||
1563 | setOperationAction(ISD::VSELECT, VT, Expand); | ||||
1564 | for (MVT InnerVT : MVT::all_valuetypes()) | ||||
1565 | setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand); | ||||
1566 | |||||
1567 | // CNT supports only B element sizes; wider element types then use UADDLP to widen. | ||||
1568 | if (VT != MVT::v8i8 && VT != MVT::v16i8) | ||||
1569 | setOperationAction(ISD::CTPOP, VT, Custom); | ||||
1570 | |||||
1571 | setOperationAction(ISD::UDIV, VT, Expand); | ||||
1572 | setOperationAction(ISD::SDIV, VT, Expand); | ||||
1573 | setOperationAction(ISD::UREM, VT, Expand); | ||||
1574 | setOperationAction(ISD::SREM, VT, Expand); | ||||
1575 | setOperationAction(ISD::FREM, VT, Expand); | ||||
1576 | |||||
1577 | for (unsigned Opcode : | ||||
1578 | {ISD::FP_TO_SINT, ISD::FP_TO_UINT, ISD::FP_TO_SINT_SAT, | ||||
1579 | ISD::FP_TO_UINT_SAT, ISD::STRICT_FP_TO_SINT, ISD::STRICT_FP_TO_UINT}) | ||||
1580 | setOperationAction(Opcode, VT, Custom); | ||||
1581 | |||||
1582 | if (!VT.isFloatingPoint()) | ||||
1583 | setOperationAction(ISD::ABS, VT, Legal); | ||||
1584 | |||||
1585 | // [SU][MIN|MAX] are available for all NEON types apart from i64. | ||||
1586 | if (!VT.isFloatingPoint() && VT != MVT::v2i64 && VT != MVT::v1i64) | ||||
1587 | for (unsigned Opcode : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}) | ||||
1588 | setOperationAction(Opcode, VT, Legal); | ||||
1589 | |||||
1590 | // F[MIN|MAX][NUM|NAN] and simple strict operations are available for all FP | ||||
1591 | // NEON types. | ||||
1592 | if (VT.isFloatingPoint() && | ||||
1593 | VT.getVectorElementType() != MVT::bf16 && | ||||
1594 | (VT.getVectorElementType() != MVT::f16 || Subtarget->hasFullFP16())) | ||||
1595 | for (unsigned Opcode : | ||||
1596 | {ISD::FMINIMUM, ISD::FMAXIMUM, ISD::FMINNUM, ISD::FMAXNUM, | ||||
1597 | ISD::STRICT_FMINIMUM, ISD::STRICT_FMAXIMUM, ISD::STRICT_FMINNUM, | ||||
1598 | ISD::STRICT_FMAXNUM, ISD::STRICT_FADD, ISD::STRICT_FSUB, | ||||
1599 | ISD::STRICT_FMUL, ISD::STRICT_FDIV, ISD::STRICT_FMA, | ||||
1600 | ISD::STRICT_FSQRT}) | ||||
1601 | setOperationAction(Opcode, VT, Legal); | ||||
1602 | |||||
1603 | // Strict fp extend and trunc are legal | ||||
1604 | if (VT.isFloatingPoint() && VT.getScalarSizeInBits() != 16) | ||||
1605 | setOperationAction(ISD::STRICT_FP_EXTEND, VT, Legal); | ||||
1606 | if (VT.isFloatingPoint() && VT.getScalarSizeInBits() != 64) | ||||
1607 | setOperationAction(ISD::STRICT_FP_ROUND, VT, Legal); | ||||
1608 | |||||
1609 | // FIXME: We could potentially make use of the vector comparison instructions | ||||
1610 | // for STRICT_FSETCC and STRICT_FSETCCS, but there are a number of | ||||
1611 | // complications: | ||||
1612 | // * FCMPEQ/NE are quiet comparisons, the rest are signalling comparisons, | ||||
1613 | // so we would need to expand when the condition code doesn't match the | ||||
1614 | // kind of comparison. | ||||
1615 | // * Some kinds of comparison require more than one FCMXY instruction so | ||||
1616 | // would need to be expanded instead. | ||||
1617 | // * The lowering of the non-strict versions involves target-specific ISD | ||||
1618 | // nodes so we would likely need to add strict versions of all of them and | ||||
1619 | // handle them appropriately. | ||||
1620 | setOperationAction(ISD::STRICT_FSETCC, VT, Expand); | ||||
1621 | setOperationAction(ISD::STRICT_FSETCCS, VT, Expand); | ||||
1622 | |||||
1623 | if (Subtarget->isLittleEndian()) { | ||||
1624 | for (unsigned im = (unsigned)ISD::PRE_INC; | ||||
1625 | im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) { | ||||
1626 | setIndexedLoadAction(im, VT, Legal); | ||||
1627 | setIndexedStoreAction(im, VT, Legal); | ||||
1628 | } | ||||
1629 | } | ||||
1630 | |||||
1631 | if (Subtarget->hasD128()) { | ||||
1632 | setOperationAction(ISD::READ_REGISTER, MVT::i128, Custom); | ||||
1633 | setOperationAction(ISD::WRITE_REGISTER, MVT::i128, Custom); | ||||
1634 | } | ||||
1635 | } | ||||
1636 | |||||
1637 | bool AArch64TargetLowering::shouldExpandGetActiveLaneMask(EVT ResVT, | ||||
1638 | EVT OpVT) const { | ||||
1639 | // Only SVE has a 1:1 mapping from intrinsic -> instruction (whilelo). | ||||
1640 | if (!Subtarget->hasSVE()) | ||||
1641 | return true; | ||||
1642 | |||||
1643 | // We can only support legal predicate result types. We can use the SVE | ||||
1644 | // whilelo instruction for generating fixed-width predicates too. | ||||
1645 | if (ResVT != MVT::nxv2i1 && ResVT != MVT::nxv4i1 && ResVT != MVT::nxv8i1 && | ||||
1646 | ResVT != MVT::nxv16i1 && ResVT != MVT::v2i1 && ResVT != MVT::v4i1 && | ||||
1647 | ResVT != MVT::v8i1 && ResVT != MVT::v16i1) | ||||
1648 | return true; | ||||
1649 | |||||
1650 | // The whilelo instruction only works with i32 or i64 scalar inputs. | ||||
1651 | if (OpVT != MVT::i32 && OpVT != MVT::i64) | ||||
1652 | return true; | ||||
1653 | |||||
1654 | return false; | ||||
1655 | } | ||||
1656 | |||||
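| // In streaming(-compatible) SVE mode NEON is unavailable, so these | ||||
| // otherwise NEON-sized types are routed through the SVE lowering paths | ||||
| // marked Custom below. | ||||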
1657 | void AArch64TargetLowering::addTypeForStreamingSVE(MVT VT) { | ||||
1658 | setOperationAction(ISD::ANY_EXTEND, VT, Custom); | ||||
1659 | setOperationAction(ISD::ZERO_EXTEND, VT, Custom); | ||||
1660 | setOperationAction(ISD::SIGN_EXTEND, VT, Custom); | ||||
1661 | setOperationAction(ISD::SPLAT_VECTOR, VT, Custom); | ||||
1662 | setOperationAction(ISD::CONCAT_VECTORS, VT, Custom); | ||||
1663 | setOperationAction(ISD::AND, VT, Custom); | ||||
1664 | setOperationAction(ISD::ADD, VT, Custom); | ||||
1665 | setOperationAction(ISD::SUB, VT, Custom); | ||||
1666 | setOperationAction(ISD::MUL, VT, Custom); | ||||
1667 | setOperationAction(ISD::MULHS, VT, Custom); | ||||
1668 | setOperationAction(ISD::MULHU, VT, Custom); | ||||
1669 | setOperationAction(ISD::ABS, VT, Custom); | ||||
1670 | setOperationAction(ISD::XOR, VT, Custom); | ||||
1671 | setOperationAction(ISD::TRUNCATE, VT, Custom); | ||||
1672 | setOperationAction(ISD::FMUL, VT, Custom); | ||||
1673 | setOperationAction(ISD::FADD, VT, Custom); | ||||
1674 | setOperationAction(ISD::FDIV, VT, Custom); | ||||
1675 | setOperationAction(ISD::FMA, VT, Custom); | ||||
1676 | setOperationAction(ISD::FNEG, VT, Custom); | ||||
1677 | setOperationAction(ISD::FSQRT, VT, Custom); | ||||
1678 | setOperationAction(ISD::FSUB, VT, Custom); | ||||
1679 | setOperationAction(ISD::FABS, VT, Custom); | ||||
1680 | setOperationAction(ISD::SMIN, VT, Custom); | ||||
1681 | setOperationAction(ISD::SMAX, VT, Custom); | ||||
1682 | setOperationAction(ISD::UMIN, VT, Custom); | ||||
1683 | setOperationAction(ISD::UMAX, VT, Custom); | ||||
1684 | setOperationAction(ISD::FMAXNUM, VT, Custom); | ||||
1685 | setOperationAction(ISD::FMINNUM, VT, Custom); | ||||
1686 | setOperationAction(ISD::FMAXIMUM, VT, Custom); | ||||
1687 | setOperationAction(ISD::FMINIMUM, VT, Custom); | ||||
1688 | setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom); | ||||
1689 | setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom); | ||||
1690 | setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom); | ||||
1691 | setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom); | ||||
1692 | setOperationAction(ISD::VECREDUCE_ADD, VT, Custom); | ||||
1693 | setOperationAction(ISD::VECREDUCE_FADD, VT, Custom); | ||||
1694 | setOperationAction(ISD::FP_ROUND, VT, Custom); | ||||
1695 | setOperationAction(ISD::FCEIL, VT, Custom); | ||||
1696 | setOperationAction(ISD::FFLOOR, VT, Custom); | ||||
1697 | setOperationAction(ISD::FNEARBYINT, VT, Custom); | ||||
1698 | setOperationAction(ISD::FRINT, VT, Custom); | ||||
1699 | setOperationAction(ISD::FROUND, VT, Custom); | ||||
1700 | setOperationAction(ISD::FROUNDEVEN, VT, Custom); | ||||
1701 | setOperationAction(ISD::FTRUNC, VT, Custom); | ||||
1702 | setOperationAction(ISD::CTLZ, VT, Custom); | ||||
1703 | setOperationAction(ISD::CTPOP, VT, Custom); | ||||
1704 | if (VT.isFloatingPoint()) { | ||||
1705 | setCondCodeAction(ISD::SETO, VT, Expand); | ||||
1706 | setCondCodeAction(ISD::SETOLT, VT, Expand); | ||||
1707 | setCondCodeAction(ISD::SETOLE, VT, Expand); | ||||
1708 | setCondCodeAction(ISD::SETULT, VT, Expand); | ||||
1709 | setCondCodeAction(ISD::SETULE, VT, Expand); | ||||
1710 | setCondCodeAction(ISD::SETUGE, VT, Expand); | ||||
1711 | setCondCodeAction(ISD::SETUGT, VT, Expand); | ||||
1712 | setCondCodeAction(ISD::SETUEQ, VT, Expand); | ||||
1713 | setCondCodeAction(ISD::SETONE, VT, Expand); | ||||
1714 | } | ||||
1715 | } | ||||
1716 | |||||
1717 | void AArch64TargetLowering::addTypeForFixedLengthSVE(MVT VT) { | ||||
1718 | assert(VT.isFixedLengthVector() && "Expected fixed length vector type!"); | ||||
1719 | |||||
1720 | // By default everything must be expanded. | ||||
1721 | for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) | ||||
1722 | setOperationAction(Op, VT, Expand); | ||||
1723 | |||||
1724 | // We use EXTRACT_SUBVECTOR to "cast" a scalable vector to a fixed length one. | ||||
1725 | setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); | ||||
1726 | |||||
1727 | if (VT.isFloatingPoint()) { | ||||
1728 | setCondCodeAction(ISD::SETO, VT, Expand); | ||||
1729 | setCondCodeAction(ISD::SETOLT, VT, Expand); | ||||
1730 | setCondCodeAction(ISD::SETOLE, VT, Expand); | ||||
1731 | setCondCodeAction(ISD::SETULT, VT, Expand); | ||||
1732 | setCondCodeAction(ISD::SETULE, VT, Expand); | ||||
1733 | setCondCodeAction(ISD::SETUGE, VT, Expand); | ||||
1734 | setCondCodeAction(ISD::SETUGT, VT, Expand); | ||||
1735 | setCondCodeAction(ISD::SETUEQ, VT, Expand); | ||||
1736 | setCondCodeAction(ISD::SETONE, VT, Expand); | ||||
1737 | } | ||||
1738 | |||||
1739 | // Mark integer truncating stores/extending loads as having custom lowering | ||||
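| // (e.g. for VT == v4i32 the loop below marks the v4i8 and v4i16 pairs) | ||||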
1740 | if (VT.isInteger()) { | ||||
1741 | MVT InnerVT = VT.changeVectorElementType(MVT::i8); | ||||
1742 | while (InnerVT != VT) { | ||||
1743 | setTruncStoreAction(VT, InnerVT, Custom); | ||||
1744 | setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Custom); | ||||
1745 | setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Custom); | ||||
1746 | InnerVT = InnerVT.changeVectorElementType( | ||||
1747 | MVT::getIntegerVT(2 * InnerVT.getScalarSizeInBits())); | ||||
1748 | } | ||||
1749 | } | ||||
1750 | |||||
1751 | // Mark floating-point truncating stores/extending loads as having custom | ||||
1752 | // lowering | ||||
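| // (e.g. for VT == v4f64 the loop below marks the v4f16 and v4f32 pairs) | ||||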
1753 | if (VT.isFloatingPoint()) { | ||||
1754 | MVT InnerVT = VT.changeVectorElementType(MVT::f16); | ||||
1755 | while (InnerVT != VT) { | ||||
1756 | setTruncStoreAction(VT, InnerVT, Custom); | ||||
1757 | setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Custom); | ||||
1758 | InnerVT = InnerVT.changeVectorElementType( | ||||
1759 | MVT::getFloatingPointVT(2 * InnerVT.getScalarSizeInBits())); | ||||
1760 | } | ||||
1761 | } | ||||
1762 | |||||
1763 | // Lower fixed length vector operations to scalable equivalents. | ||||
1764 | setOperationAction(ISD::ABS, VT, Custom); | ||||
1765 | setOperationAction(ISD::ADD, VT, Custom); | ||||
1766 | setOperationAction(ISD::AND, VT, Custom); | ||||
1767 | setOperationAction(ISD::ANY_EXTEND, VT, Custom); | ||||
1768 | setOperationAction(ISD::BITCAST, VT, Custom); | ||||
1769 | setOperationAction(ISD::BITREVERSE, VT, Custom); | ||||
1770 | setOperationAction(ISD::BUILD_VECTOR, VT, Custom); | ||||
1771 | setOperationAction(ISD::BSWAP, VT, Custom); | ||||
1772 | setOperationAction(ISD::CONCAT_VECTORS, VT, Custom); | ||||
1773 | setOperationAction(ISD::CTLZ, VT, Custom); | ||||
1774 | setOperationAction(ISD::CTPOP, VT, Custom); | ||||
1775 | setOperationAction(ISD::CTTZ, VT, Custom); | ||||
1776 | setOperationAction(ISD::FABS, VT, Custom); | ||||
1777 | setOperationAction(ISD::FADD, VT, Custom); | ||||
1778 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); | ||||
1779 | setOperationAction(ISD::FCEIL, VT, Custom); | ||||
1780 | setOperationAction(ISD::FCOPYSIGN, VT, Custom); | ||||
1781 | setOperationAction(ISD::FDIV, VT, Custom); | ||||
1782 | setOperationAction(ISD::FFLOOR, VT, Custom); | ||||
1783 | setOperationAction(ISD::FMA, VT, Custom); | ||||
1784 | setOperationAction(ISD::FMAXIMUM, VT, Custom); | ||||
1785 | setOperationAction(ISD::FMAXNUM, VT, Custom); | ||||
1786 | setOperationAction(ISD::FMINIMUM, VT, Custom); | ||||
1787 | setOperationAction(ISD::FMINNUM, VT, Custom); | ||||
1788 | setOperationAction(ISD::FMUL, VT, Custom); | ||||
1789 | setOperationAction(ISD::FNEARBYINT, VT, Custom); | ||||
1790 | setOperationAction(ISD::FNEG, VT, Custom); | ||||
1791 | setOperationAction(ISD::FP_EXTEND, VT, Custom); | ||||
1792 | setOperationAction(ISD::FP_ROUND, VT, Custom); | ||||
1793 | setOperationAction(ISD::FP_TO_SINT, VT, Custom); | ||||
1794 | setOperationAction(ISD::FP_TO_UINT, VT, Custom); | ||||
1795 | setOperationAction(ISD::FRINT, VT, Custom); | ||||
1796 | setOperationAction(ISD::FROUND, VT, Custom); | ||||
1797 | setOperationAction(ISD::FROUNDEVEN, VT, Custom); | ||||
1798 | setOperationAction(ISD::FSQRT, VT, Custom); | ||||
1799 | setOperationAction(ISD::FSUB, VT, Custom); | ||||
1800 | setOperationAction(ISD::FTRUNC, VT, Custom); | ||||
1801 | setOperationAction(ISD::LOAD, VT, Custom); | ||||
1802 | setOperationAction(ISD::MGATHER, VT, Custom); | ||||
1803 | setOperationAction(ISD::MLOAD, VT, Custom); | ||||
1804 | setOperationAction(ISD::MSCATTER, VT, Custom); | ||||
1805 | setOperationAction(ISD::MSTORE, VT, Custom); | ||||
1806 | setOperationAction(ISD::MUL, VT, Custom); | ||||
1807 | setOperationAction(ISD::MULHS, VT, Custom); | ||||
1808 | setOperationAction(ISD::MULHU, VT, Custom); | ||||
1809 | setOperationAction(ISD::OR, VT, Custom); | ||||
1810 | setOperationAction(ISD::SDIV, VT, Custom); | ||||
1811 | setOperationAction(ISD::SELECT, VT, Custom); | ||||
1812 | setOperationAction(ISD::SETCC, VT, Custom); | ||||
1813 | setOperationAction(ISD::SHL, VT, Custom); | ||||
1814 | setOperationAction(ISD::SIGN_EXTEND, VT, Custom); | ||||
1815 | setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Custom); | ||||
1816 | setOperationAction(ISD::SINT_TO_FP, VT, Custom); | ||||
1817 | setOperationAction(ISD::SMAX, VT, Custom); | ||||
1818 | setOperationAction(ISD::SMIN, VT, Custom); | ||||
1819 | setOperationAction(ISD::SPLAT_VECTOR, VT, Custom); | ||||
1820 | setOperationAction(ISD::VECTOR_SPLICE, VT, Custom); | ||||
1821 | setOperationAction(ISD::SRA, VT, Custom); | ||||
1822 | setOperationAction(ISD::SRL, VT, Custom); | ||||
1823 | setOperationAction(ISD::STORE, VT, Custom); | ||||
1824 | setOperationAction(ISD::SUB, VT, Custom); | ||||
1825 | setOperationAction(ISD::TRUNCATE, VT, Custom); | ||||
1826 | setOperationAction(ISD::UDIV, VT, Custom); | ||||
1827 | setOperationAction(ISD::UINT_TO_FP, VT, Custom); | ||||
1828 | setOperationAction(ISD::UMAX, VT, Custom); | ||||
1829 | setOperationAction(ISD::UMIN, VT, Custom); | ||||
1830 | setOperationAction(ISD::VECREDUCE_ADD, VT, Custom); | ||||
1831 | setOperationAction(ISD::VECREDUCE_AND, VT, Custom); | ||||
1832 | setOperationAction(ISD::VECREDUCE_FADD, VT, Custom); | ||||
1833 | setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom); | ||||
1834 | setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom); | ||||
1835 | setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom); | ||||
1836 | setOperationAction(ISD::VECREDUCE_OR, VT, Custom); | ||||
1837 | setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); | ||||
1838 | setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom); | ||||
1839 | setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom); | ||||
1840 | setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom); | ||||
1841 | setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom); | ||||
1842 | setOperationAction(ISD::VECREDUCE_XOR, VT, Custom); | ||||
1843 | setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); | ||||
1844 | setOperationAction(ISD::VSELECT, VT, Custom); | ||||
1845 | setOperationAction(ISD::XOR, VT, Custom); | ||||
1846 | setOperationAction(ISD::ZERO_EXTEND, VT, Custom); | ||||
1847 | } | ||||
1848 | |||||
1849 | void AArch64TargetLowering::addDRTypeForNEON(MVT VT) { | ||||
1850 | addRegisterClass(VT, &AArch64::FPR64RegClass); | ||||
1851 | addTypeForNEON(VT); | ||||
1852 | } | ||||
1853 | |||||
1854 | void AArch64TargetLowering::addQRTypeForNEON(MVT VT) { | ||||
1855 | addRegisterClass(VT, &AArch64::FPR128RegClass); | ||||
1856 | addTypeForNEON(VT); | ||||
1857 | } | ||||
1858 | |||||
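| // Scalar compares produce an i32 result; scalable vectors compare into an | ||||
| // i1 predicate vector; fixed-length vectors use integer elements of the | ||||
| // operand's element width. | ||||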
1859 | EVT AArch64TargetLowering::getSetCCResultType(const DataLayout &, | ||||
1860 | LLVMContext &C, EVT VT) const { | ||||
1861 | if (!VT.isVector()) | ||||
1862 | return MVT::i32; | ||||
1863 | if (VT.isScalableVector()) | ||||
1864 | return EVT::getVectorVT(C, MVT::i1, VT.getVectorElementCount()); | ||||
1865 | return VT.changeVectorElementTypeToInteger(); | ||||
1866 | } | ||||
1867 | |||||
1868 | // isIntImmediate - This method tests to see if the node is a constant | ||||
1869 | // operand. If so, Imm will receive the value. | ||||
1870 | static bool isIntImmediate(const SDNode *N, uint64_t &Imm) { | ||||
1871 | if (const ConstantSDNode *C = dyn_cast<const ConstantSDNode>(N)) { | ||||
1872 | Imm = C->getZExtValue(); | ||||
1873 | return true; | ||||
1874 | } | ||||
1875 | return false; | ||||
1876 | } | ||||
1877 | |||||
1878 | // isOpcWithIntImmediate - This method tests to see if the node is a specific | ||||
1879 | // opcode and that it has an immediate integer right operand. | ||||
1880 | // If so, Imm will receive the value. | ||||
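| // e.g. isOpcWithIntImmediate(N, ISD::AND, Imm) matches (and X, C) and | ||||
| // stores C's value in Imm. | ||||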
1881 | static bool isOpcWithIntImmediate(const SDNode *N, unsigned Opc, | ||||
1882 | uint64_t &Imm) { | ||||
1883 | return N->getOpcode() == Opc && | ||||
1884 | isIntImmediate(N->getOperand(1).getNode(), Imm); | ||||
1885 | } | ||||
1886 | |||||
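| // Try to rewrite a logical immediate that is not directly encodable by | ||||
| // filling its non-demanded bits so that the value (or a replicated | ||||
| // smaller element of it) becomes a valid bitmask immediate. Returns true | ||||
| // and commits the replacement node on success. | ||||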
1887 | static bool optimizeLogicalImm(SDValue Op, unsigned Size, uint64_t Imm, | ||||
1888 | const APInt &Demanded, | ||||
1889 | TargetLowering::TargetLoweringOpt &TLO, | ||||
1890 | unsigned NewOpc) { | ||||
1891 | uint64_t OldImm = Imm, NewImm, Enc; | ||||
1892 | uint64_t Mask = ((uint64_t)(-1LL) >> (64 - Size)), OrigMask = Mask; | ||||
1893 | |||||
1894 | // Return if the immediate is already all zeros, all ones, a bimm32 or a | ||||
1895 | // bimm64. | ||||
1896 | if (Imm == 0 || Imm == Mask || | ||||
1897 | AArch64_AM::isLogicalImmediate(Imm & Mask, Size)) | ||||
1898 | return false; | ||||
1899 | |||||
1900 | unsigned EltSize = Size; | ||||
1901 | uint64_t DemandedBits = Demanded.getZExtValue(); | ||||
1902 | |||||
1903 | // Clear bits that are not demanded. | ||||
1904 | Imm &= DemandedBits; | ||||
1905 | |||||
1906 | while (true) { | ||||
1907 | // The goal here is to set the non-demanded bits in a way that minimizes | ||||
1908 | // the number of transitions between 0 and 1. In order to achieve this goal, | ||||
1909 | // we set the non-demanded bits to the value of the preceding demanded bits. | ||||
1910 | // For example, if we have an immediate 0bx10xx0x1 ('x' indicates a | ||||
1911 | // non-demanded bit), we copy bit0 (1) to the least significant 'x', | ||||
1912 | // bit2 (0) to 'xx', and bit6 (1) to the most significant 'x'. | ||||
1913 | // The final result is 0b11000011. | ||||
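| // Mechanically: InvertedImm marks the demanded bits that are 0; rotating | ||||
| // it left by one puts a 1 at the bottom of each run of non-demanded bits | ||||
| // whose preceding demanded bit is 0. Adding that to NonDemandedBits | ||||
| // carries through exactly those runs, so Ones keeps only the runs that | ||||
| // should be filled with 1s (Carry handles the run that wraps past the | ||||
| // top bit). | ||||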
1914 | uint64_t NonDemandedBits = ~DemandedBits; | ||||
1915 | uint64_t InvertedImm = ~Imm & DemandedBits; | ||||
1916 | uint64_t RotatedImm = | ||||
1917 | ((InvertedImm << 1) | (InvertedImm >> (EltSize - 1) & 1)) & | ||||
1918 | NonDemandedBits; | ||||
1919 | uint64_t Sum = RotatedImm + NonDemandedBits; | ||||
1920 | bool Carry = NonDemandedBits & ~Sum & (1ULL << (EltSize - 1)); | ||||
1921 | uint64_t Ones = (Sum + Carry) & NonDemandedBits; | ||||
1922 | NewImm = (Imm | Ones) & Mask; | ||||
1923 | |||||
1924 | // If NewImm or its bitwise NOT is a shifted mask, it is a bitmask immediate | ||||
1925 | // or all-ones or all-zeros, in which case we can stop searching. Otherwise, | ||||
1926 | // we halve the element size and continue the search. | ||||
1927 | if (isShiftedMask_64(NewImm) || isShiftedMask_64(~(NewImm | ~Mask))) | ||||
1928 | break; | ||||
1929 | |||||
1930 | // We cannot shrink the element size any further if it is 2 bits. | ||||
1931 | if (EltSize == 2) | ||||
1932 | return false; | ||||
1933 | |||||
1934 | EltSize /= 2; | ||||
1935 | Mask >>= EltSize; | ||||
1936 | uint64_t Hi = Imm >> EltSize, DemandedBitsHi = DemandedBits >> EltSize; | ||||
1937 | |||||
1938 | // Return if there is a mismatch in any of the demanded bits of Imm and Hi. | ||||
1939 | if (((Imm ^ Hi) & (DemandedBits & DemandedBitsHi) & Mask) != 0) | ||||
1940 | return false; | ||||
1941 | |||||
1942 | // Merge the upper and lower halves of Imm and DemandedBits. | ||||
1943 | Imm |= Hi; | ||||
1944 | DemandedBits |= DemandedBitsHi; | ||||
1945 | } | ||||
1946 | |||||
1947 | ++NumOptimizedImms; | ||||
1948 | |||||
1949 | // Replicate the element across the register width. | ||||
1950 | while (EltSize < Size) { | ||||
1951 | NewImm |= NewImm << EltSize; | ||||
1952 | EltSize *= 2; | ||||
1953 | } | ||||
1954 | |||||
1955 | (void)OldImm; | ||||
1956 | assert(((OldImm ^ NewImm) & Demanded.getZExtValue()) == 0 && | ||||
1957 | "demanded bits should never be altered"); | ||||
1958 | assert(OldImm != NewImm && "the new imm shouldn't be equal to the old imm"); | ||||
1959 | |||||
1960 | // Create the new constant immediate node. | ||||
1961 | EVT VT = Op.getValueType(); | ||||
1962 | SDLoc DL(Op); | ||||
1963 | SDValue New; | ||||
1964 | |||||
1965 | // If the new constant immediate is all-zeros or all-ones, let the target | ||||
1966 | // independent DAG combine optimize this node. | ||||
1967 | if (NewImm == 0 || NewImm == OrigMask) { | ||||
1968 | New = TLO.DAG.getNode(Op.getOpcode(), DL, VT, Op.getOperand(0), | ||||
1969 | TLO.DAG.getConstant(NewImm, DL, VT)); | ||||
1970 | // Otherwise, create a machine node so that target independent DAG combine | ||||
1971 | // doesn't undo this optimization. | ||||
1972 | } else { | ||||
1973 | Enc = AArch64_AM::encodeLogicalImmediate(NewImm, Size); | ||||
1974 | SDValue EncConst = TLO.DAG.getTargetConstant(Enc, DL, VT); | ||||
1975 | New = SDValue( | ||||
1976 | TLO.DAG.getMachineNode(NewOpc, DL, VT, Op.getOperand(0), EncConst), 0); | ||||
1977 | } | ||||
1978 | |||||
1979 | return TLO.CombineTo(Op, New); | ||||
1980 | } | ||||
1981 | |||||
1982 | bool AArch64TargetLowering::targetShrinkDemandedConstant( | ||||
1983 | SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, | ||||
1984 | TargetLoweringOpt &TLO) const { | ||||
1985 | // Delay this optimization to as late as possible. | ||||
1986 | if (!TLO.LegalOps) | ||||
1987 | return false; | ||||
1988 | |||||
1989 | if (!EnableOptimizeLogicalImm) | ||||
1990 | return false; | ||||
1991 | |||||
1992 | EVT VT = Op.getValueType(); | ||||
1993 | if (VT.isVector()) | ||||
1994 | return false; | ||||
1995 | |||||
1996 | unsigned Size = VT.getSizeInBits(); | ||||
1997 | assert((Size == 32 || Size == 64) && | ||||
1998 | "i32 or i64 is expected after legalization."); | ||||
1999 | |||||
2000 | // Exit early if we demand all bits. | ||||
2001 | if (DemandedBits.countPopulation() == Size) | ||||
2002 | return false; | ||||
2003 | |||||
2004 | unsigned NewOpc; | ||||
2005 | switch (Op.getOpcode()) { | ||||
2006 | default: | ||||
2007 | return false; | ||||
2008 | case ISD::AND: | ||||
2009 | NewOpc = Size == 32 ? AArch64::ANDWri : AArch64::ANDXri; | ||||
2010 | break; | ||||
2011 | case ISD::OR: | ||||
2012 | NewOpc = Size == 32 ? AArch64::ORRWri : AArch64::ORRXri; | ||||
2013 | break; | ||||
2014 | case ISD::XOR: | ||||
2015 | NewOpc = Size == 32 ? AArch64::EORWri : AArch64::EORXri; | ||||
2016 | break; | ||||
2017 | } | ||||
2018 | ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)); | ||||
2019 | if (!C) | ||||
2020 | return false; | ||||
2021 | uint64_t Imm = C->getZExtValue(); | ||||
2022 | return optimizeLogicalImm(Op, Size, Imm, DemandedBits, TLO, NewOpc); | ||||
2023 | } | ||||
2024 | |||||
2025 | /// computeKnownBitsForTargetNode - Determine which of the bits specified in | ||||
2026 | /// Mask are known to be either zero or one and return them in Known. | ||||
2027 | void AArch64TargetLowering::computeKnownBitsForTargetNode( | ||||
2028 | const SDValue Op, KnownBits &Known, const APInt &DemandedElts, | ||||
2029 | const SelectionDAG &DAG, unsigned Depth) const { | ||||
2030 | switch (Op.getOpcode()) { | ||||
2031 | default: | ||||
2032 | break; | ||||
2033 | case AArch64ISD::DUP: { | ||||
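| // DUP splats a scalar into every lane, so all lanes share the scalar's | ||||
| // known bits, truncated when the scalar is wider than the element type. | ||||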
2034 | SDValue SrcOp = Op.getOperand(0); | ||||
2035 | Known = DAG.computeKnownBits(SrcOp, Depth + 1); | ||||
2036 | if (SrcOp.getValueSizeInBits() != Op.getScalarValueSizeInBits()) { | ||||
2037 | assert(SrcOp.getValueSizeInBits() > Op.getScalarValueSizeInBits() && | ||||
2038 | "Expected DUP implicit truncation"); | ||||
2039 | Known = Known.trunc(Op.getScalarValueSizeInBits()); | ||||
2040 | } | ||||
2041 | break; | ||||
2042 | } | ||||
2043 | case AArch64ISD::CSEL: { | ||||
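| // CSEL selects one of its first two operands, so only the bits known | ||||
| // in both are known in the result. | ||||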
2044 | KnownBits Known2; | ||||
2045 | Known = DAG.computeKnownBits(Op->getOperand(0), Depth + 1); | ||||
2046 | Known2 = DAG.computeKnownBits(Op->getOperand(1), Depth + 1); | ||||
2047 | Known = KnownBits::commonBits(Known, Known2); | ||||
2048 | break; | ||||
2049 | } | ||||
2050 | case AArch64ISD::BICi: { | ||||
2051 | // Compute the bit cleared value. | ||||
2052 | uint64_t Mask = | ||||
2053 | ~(Op->getConstantOperandVal(1) << Op->getConstantOperandVal(2)); | ||||
2054 | Known = DAG.computeKnownBits(Op->getOperand(0), Depth + 1); | ||||
2055 | Known &= KnownBits::makeConstant(APInt(Known.getBitWidth(), Mask)); | ||||
2056 | break; | ||||
2057 | } | ||||
2058 | case AArch64ISD::VLSHR: { | ||||
2059 | KnownBits Known2; | ||||
2060 | Known = DAG.computeKnownBits(Op->getOperand(0), Depth + 1); | ||||
2061 | Known2 = DAG.computeKnownBits(Op->getOperand(1), Depth + 1); | ||||
2062 | Known = KnownBits::lshr(Known, Known2); | ||||
2063 | break; | ||||
2064 | } | ||||
2065 | case AArch64ISD::VASHR: { | ||||
2066 | KnownBits Known2; | ||||
2067 | Known = DAG.computeKnownBits(Op->getOperand(0), Depth + 1); | ||||
2068 | Known2 = DAG.computeKnownBits(Op->getOperand(1), Depth + 1); | ||||
2069 | Known = KnownBits::ashr(Known, Known2); | ||||
2070 | break; | ||||
2071 | } | ||||
2072 | case AArch64ISD::MOVI: { | ||||
2073 | ConstantSDNode *CN = cast<ConstantSDNode>(Op->getOperand(0)); | ||||
2074 | Known = | ||||
2075 | KnownBits::makeConstant(APInt(Known.getBitWidth(), CN->getZExtValue())); | ||||
2076 | break; | ||||
2077 | } | ||||
2078 | case AArch64ISD::LOADgot: | ||||
2079 | case AArch64ISD::ADDlow: { | ||||
2080 | if (!Subtarget->isTargetILP32()) | ||||
2081 | break; | ||||
2082 | // In ILP32 mode all valid pointers are in the low 4GB of the address-space. | ||||
2083 | Known.Zero = APInt::getHighBitsSet(64, 32); | ||||
2084 | break; | ||||
2085 | } | ||||
2086 | case AArch64ISD::ASSERT_ZEXT_BOOL: { | ||||
2087 | Known = DAG.computeKnownBits(Op->getOperand(0), Depth + 1); | ||||
2088 | Known.Zero |= APInt(Known.getBitWidth(), 0xFE); | ||||
2089 | break; | ||||
2090 | } | ||||
2091 | case ISD::INTRINSIC_W_CHAIN: { | ||||
2092 | ConstantSDNode *CN = cast<ConstantSDNode>(Op->getOperand(1)); | ||||
2093 | Intrinsic::ID IntID = static_cast<Intrinsic::ID>(CN->getZExtValue()); | ||||
2094 | switch (IntID) { | ||||
2095 | default: return; | ||||
2096 | case Intrinsic::aarch64_ldaxr: | ||||
2097 | case Intrinsic::aarch64_ldxr: { | ||||
2098 | unsigned BitWidth = Known.getBitWidth(); | ||||
2099 | EVT VT = cast<MemIntrinsicSDNode>(Op)->getMemoryVT(); | ||||
2100 | unsigned MemBits = VT.getScalarSizeInBits(); | ||||
2101 | Known.Zero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits); | ||||
2102 | return; | ||||
2103 | } | ||||
2104 | } | ||||
2105 | break; | ||||
2106 | } | ||||
2107 | case ISD::INTRINSIC_WO_CHAIN: | ||||
2108 | case ISD::INTRINSIC_VOID: { | ||||
2109 | unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); | ||||
2110 | switch (IntNo) { | ||||
2111 | default: | ||||
2112 | break; | ||||
2113 | case Intrinsic::aarch64_neon_umaxv: | ||||
2114 | case Intrinsic::aarch64_neon_uminv: { | ||||
2115 | // Figure out the datatype of the vector operand. The UMINV instruction | ||||
2116 | // will zero extend the result, so we can mark as known zero all the | ||||
2117 | // bits larger than the element datatype. 32-bit or larger doesn't need | ||||
2118 | // this as those are legal types and will be handled by isel directly. | ||||
2119 | MVT VT = Op.getOperand(1).getValueType().getSimpleVT(); | ||||
2120 | unsigned BitWidth = Known.getBitWidth(); | ||||
2121 | if (VT == MVT::v8i8 || VT == MVT::v16i8) { | ||||
2122 | assert(BitWidth >= 8 && "Unexpected width!"); | ||||
2123 | APInt Mask = APInt::getHighBitsSet(BitWidth, BitWidth - 8); | ||||
2124 | Known.Zero |= Mask; | ||||
2125 | } else if (VT == MVT::v4i16 || VT == MVT::v8i16) { | ||||
2126 | assert(BitWidth >= 16 && "Unexpected width!"); | ||||
2127 | APInt Mask = APInt::getHighBitsSet(BitWidth, BitWidth - 16); | ||||
2128 | Known.Zero |= Mask; | ||||
2129 | } | ||||
2130 | break; | ||||
2131 | } | ||||
2132 | } | ||||
2133 | } | ||||
2134 | } | ||||
2135 | } | ||||
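// Illustrative note (not part of the original file): in the BICi case above,
// with Op->getConstantOperandVal(1) == 0xFF and a shift of 8, the cleared-bit
// mask is ~(0xFF << 8), so bits 8..15 of each lane become known zero:
//
//   KnownBits Known = DAG.computeKnownBits(Src);
//   Known &= KnownBits::makeConstant(APInt(32, ~(0xFFull << 8)));
//   // Known.Zero now covers bits 8..15, whatever was known about Src.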
2136 | |||||
2137 | MVT AArch64TargetLowering::getScalarShiftAmountTy(const DataLayout &DL, | ||||
2138 | EVT) const { | ||||
2139 | return MVT::i64; | ||||
2140 | } | ||||
2141 | |||||
2142 | bool AArch64TargetLowering::allowsMisalignedMemoryAccesses( | ||||
2143 | EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags, | ||||
2144 | unsigned *Fast) const { | ||||
2145 | if (Subtarget->requiresStrictAlign()) | ||||
2146 | return false; | ||||
2147 | |||||
2148 | if (Fast) { | ||||
2149 | // Some CPUs are fine with unaligned stores except for 128-bit ones. | ||||
2150 | *Fast = !Subtarget->isMisaligned128StoreSlow() || VT.getStoreSize() != 16 || | ||||
2151 | // See comments in performSTORECombine() for more details about | ||||
2152 | // these conditions. | ||||
2153 | |||||
2154 | // Code that uses clang vector extensions can mark that it | ||||
2155 | // wants unaligned accesses to be treated as fast by | ||||
2156 | // underspecifying alignment to be 1 or 2. | ||||
2157 | Alignment <= 2 || | ||||
2158 | |||||
2159 | // Disregard v2i64. Memcpy lowering produces those and splitting | ||||
2160 | // them regresses performance on micro-benchmarks and olden/bh. | ||||
2161 | VT == MVT::v2i64; | ||||
2162 | } | ||||
2163 | return true; | ||||
2164 | } | ||||
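// Worked example for the predicate above (illustrative, not in the original
// file): on a core where isMisaligned128StoreSlow() returns true, a 16-byte
// store with Alignment == 1 still sets *Fast (the clang vector-extension
// case), a v2i64 store sets *Fast regardless of alignment, and any other
// 16-byte store with Alignment >= 4 leaves *Fast == 0.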
2165 | |||||
2166 | // Same as above but handling LLTs instead. | ||||
2167 | bool AArch64TargetLowering::allowsMisalignedMemoryAccesses( | ||||
2168 | LLT Ty, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags, | ||||
2169 | unsigned *Fast) const { | ||||
2170 | if (Subtarget->requiresStrictAlign()) | ||||
2171 | return false; | ||||
2172 | |||||
2173 | if (Fast) { | ||||
2174 | // Some CPUs are fine with unaligned stores except for 128-bit ones. | ||||
2175 | *Fast = !Subtarget->isMisaligned128StoreSlow() || | ||||
2176 | Ty.getSizeInBytes() != 16 || | ||||
2177 | // See comments in performSTORECombine() for more details about | ||||
2178 | // these conditions. | ||||
2179 | |||||
2180 | // Code that uses clang vector extensions can mark that it | ||||
2181 | // wants unaligned accesses to be treated as fast by | ||||
2182 | // underspecifying alignment to be 1 or 2. | ||||
2183 | Alignment <= 2 || | ||||
2184 | |||||
2185 | // Disregard v2i64. Memcpy lowering produces those and splitting | ||||
2186 | // them regresses performance on micro-benchmarks and olden/bh. | ||||
2187 | Ty == LLT::fixed_vector(2, 64); | ||||
2188 | } | ||||
2189 | return true; | ||||
2190 | } | ||||
2191 | |||||
2192 | FastISel * | ||||
2193 | AArch64TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo, | ||||
2194 | const TargetLibraryInfo *libInfo) const { | ||||
2195 | return AArch64::createFastISel(funcInfo, libInfo); | ||||
2196 | } | ||||
2197 | |||||
2198 | const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const { | ||||
2199 | #define MAKE_CASE(V) \ | ||||
2200 | case V: \ | ||||
2201 | return #V; | ||||
2202 | switch ((AArch64ISD::NodeType)Opcode) { | ||||
2203 | case AArch64ISD::FIRST_NUMBER: | ||||
2204 | break; | ||||
2205 | MAKE_CASE(AArch64ISD::OBSCURE_COPY) | ||||
2206 | MAKE_CASE(AArch64ISD::SMSTART) | ||||
2207 | MAKE_CASE(AArch64ISD::SMSTOP) | ||||
2208 | MAKE_CASE(AArch64ISD::RESTORE_ZA) | ||||
2209 | MAKE_CASE(AArch64ISD::CALL) | ||||
2210 | MAKE_CASE(AArch64ISD::ADRP) | ||||
2211 | MAKE_CASE(AArch64ISD::ADR) | ||||
2212 | MAKE_CASE(AArch64ISD::ADDlow) | ||||
2213 | MAKE_CASE(AArch64ISD::LOADgot) | ||||
2214 | MAKE_CASE(AArch64ISD::RET_FLAG) | ||||
2215 | MAKE_CASE(AArch64ISD::BRCOND) | ||||
2216 | MAKE_CASE(AArch64ISD::CSEL) | ||||
2217 | MAKE_CASE(AArch64ISD::CSINV) | ||||
2218 | MAKE_CASE(AArch64ISD::CSNEG) | ||||
2219 | MAKE_CASE(AArch64ISD::CSINC) | ||||
2220 | MAKE_CASE(AArch64ISD::THREAD_POINTER) | ||||
2221 | MAKE_CASE(AArch64ISD::TLSDESC_CALLSEQ) | ||||
2222 | MAKE_CASE(AArch64ISD::ABDS_PRED) | ||||
2223 | MAKE_CASE(AArch64ISD::ABDU_PRED) | ||||
2224 | MAKE_CASE(AArch64ISD::MUL_PRED) | ||||
2225 | MAKE_CASE(AArch64ISD::MULHS_PRED) | ||||
2226 | MAKE_CASE(AArch64ISD::MULHU_PRED) | ||||
2227 | MAKE_CASE(AArch64ISD::SDIV_PRED) | ||||
2228 | MAKE_CASE(AArch64ISD::SHL_PRED) | ||||
2229 | MAKE_CASE(AArch64ISD::SMAX_PRED) | ||||
2230 | MAKE_CASE(AArch64ISD::SMIN_PRED) | ||||
2231 | MAKE_CASE(AArch64ISD::SRA_PRED) | ||||
2232 | MAKE_CASE(AArch64ISD::SRL_PRED) | ||||
2233 | MAKE_CASE(AArch64ISD::UDIV_PRED) | ||||
2234 | MAKE_CASE(AArch64ISD::UMAX_PRED) | ||||
2235 | MAKE_CASE(AArch64ISD::UMIN_PRED) | ||||
2236 | MAKE_CASE(AArch64ISD::SRAD_MERGE_OP1) | ||||
2237 | MAKE_CASE(AArch64ISD::FNEG_MERGE_PASSTHRU) | ||||
2238 | MAKE_CASE(AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU) | ||||
2239 | MAKE_CASE(AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU) | ||||
2240 | MAKE_CASE(AArch64ISD::FCEIL_MERGE_PASSTHRU) | ||||
2241 | MAKE_CASE(AArch64ISD::FFLOOR_MERGE_PASSTHRU) | ||||
2242 | MAKE_CASE(AArch64ISD::FNEARBYINT_MERGE_PASSTHRU) | ||||
2243 | MAKE_CASE(AArch64ISD::FRINT_MERGE_PASSTHRU) | ||||
2244 | MAKE_CASE(AArch64ISD::FROUND_MERGE_PASSTHRU) | ||||
2245 | MAKE_CASE(AArch64ISD::FROUNDEVEN_MERGE_PASSTHRU) | ||||
2246 | MAKE_CASE(AArch64ISD::FTRUNC_MERGE_PASSTHRU) | ||||
2247 | MAKE_CASE(AArch64ISD::FP_ROUND_MERGE_PASSTHRU) | ||||
2248 | MAKE_CASE(AArch64ISD::FP_EXTEND_MERGE_PASSTHRU) | ||||
2249 | MAKE_CASE(AArch64ISD::SINT_TO_FP_MERGE_PASSTHRU) | ||||
2250 | MAKE_CASE(AArch64ISD::UINT_TO_FP_MERGE_PASSTHRU) | ||||
2251 | MAKE_CASE(AArch64ISD::FCVTZU_MERGE_PASSTHRU) | ||||
2252 | MAKE_CASE(AArch64ISD::FCVTZS_MERGE_PASSTHRU) | ||||
2253 | MAKE_CASE(AArch64ISD::FSQRT_MERGE_PASSTHRU) | ||||
2254 | MAKE_CASE(AArch64ISD::FRECPX_MERGE_PASSTHRU) | ||||
2255 | MAKE_CASE(AArch64ISD::FABS_MERGE_PASSTHRU) | ||||
2256 | MAKE_CASE(AArch64ISD::ABS_MERGE_PASSTHRU) | ||||
2257 | MAKE_CASE(AArch64ISD::NEG_MERGE_PASSTHRU) | ||||
2258 | MAKE_CASE(AArch64ISD::SETCC_MERGE_ZERO) | ||||
2259 | MAKE_CASE(AArch64ISD::ADC) | ||||
2260 | MAKE_CASE(AArch64ISD::SBC) | ||||
2261 | MAKE_CASE(AArch64ISD::ADDS) | ||||
2262 | MAKE_CASE(AArch64ISD::SUBS) | ||||
2263 | MAKE_CASE(AArch64ISD::ADCS) | ||||
2264 | MAKE_CASE(AArch64ISD::SBCS) | ||||
2265 | MAKE_CASE(AArch64ISD::ANDS) | ||||
2266 | MAKE_CASE(AArch64ISD::CCMP) | ||||
2267 | MAKE_CASE(AArch64ISD::CCMN) | ||||
2268 | MAKE_CASE(AArch64ISD::FCCMP) | ||||
2269 | MAKE_CASE(AArch64ISD::FCMP) | ||||
2270 | MAKE_CASE(AArch64ISD::STRICT_FCMP) | ||||
2271 | MAKE_CASE(AArch64ISD::STRICT_FCMPE) | ||||
2272 | MAKE_CASE(AArch64ISD::DUP) | ||||
2273 | MAKE_CASE(AArch64ISD::DUPLANE8) | ||||
2274 | MAKE_CASE(AArch64ISD::DUPLANE16) | ||||
2275 | MAKE_CASE(AArch64ISD::DUPLANE32) | ||||
2276 | MAKE_CASE(AArch64ISD::DUPLANE64) | ||||
2277 | MAKE_CASE(AArch64ISD::DUPLANE128) | ||||
2278 | MAKE_CASE(AArch64ISD::MOVI) | ||||
2279 | MAKE_CASE(AArch64ISD::MOVIshift) | ||||
2280 | MAKE_CASE(AArch64ISD::MOVIedit) | ||||
2281 | MAKE_CASE(AArch64ISD::MOVImsl) | ||||
2282 | MAKE_CASE(AArch64ISD::FMOV) | ||||
2283 | MAKE_CASE(AArch64ISD::MVNIshift) | ||||
2284 | MAKE_CASE(AArch64ISD::MVNImsl) | ||||
2285 | MAKE_CASE(AArch64ISD::BICi) | ||||
2286 | MAKE_CASE(AArch64ISD::ORRi) | ||||
2287 | MAKE_CASE(AArch64ISD::BSP) | ||||
2288 | MAKE_CASE(AArch64ISD::EXTR) | ||||
2289 | MAKE_CASE(AArch64ISD::ZIP1) | ||||
2290 | MAKE_CASE(AArch64ISD::ZIP2) | ||||
2291 | MAKE_CASE(AArch64ISD::UZP1) | ||||
2292 | MAKE_CASE(AArch64ISD::UZP2) | ||||
2293 | MAKE_CASE(AArch64ISD::TRN1) | ||||
2294 | MAKE_CASE(AArch64ISD::TRN2) | ||||
2295 | MAKE_CASE(AArch64ISD::REV16) | ||||
2296 | MAKE_CASE(AArch64ISD::REV32) | ||||
2297 | MAKE_CASE(AArch64ISD::REV64) | ||||
2298 | MAKE_CASE(AArch64ISD::EXT) | ||||
2299 | MAKE_CASE(AArch64ISD::SPLICE) | ||||
2300 | MAKE_CASE(AArch64ISD::VSHL) | ||||
2301 | MAKE_CASE(AArch64ISD::VLSHR) | ||||
2302 | MAKE_CASE(AArch64ISD::VASHR) | ||||
2303 | MAKE_CASE(AArch64ISD::VSLI) | ||||
2304 | MAKE_CASE(AArch64ISD::VSRI) | ||||
2305 | MAKE_CASE(AArch64ISD::CMEQ) | ||||
2306 | MAKE_CASE(AArch64ISD::CMGE) | ||||
2307 | MAKE_CASE(AArch64ISD::CMGT) | ||||
2308 | MAKE_CASE(AArch64ISD::CMHI) | ||||
2309 | MAKE_CASE(AArch64ISD::CMHS) | ||||
2310 | MAKE_CASE(AArch64ISD::FCMEQ) | ||||
2311 | MAKE_CASE(AArch64ISD::FCMGE) | ||||
2312 | MAKE_CASE(AArch64ISD::FCMGT) | ||||
2313 | MAKE_CASE(AArch64ISD::CMEQz) | ||||
2314 | MAKE_CASE(AArch64ISD::CMGEz) | ||||
2315 | MAKE_CASE(AArch64ISD::CMGTz) | ||||
2316 | MAKE_CASE(AArch64ISD::CMLEz) | ||||
2317 | MAKE_CASE(AArch64ISD::CMLTz) | ||||
2318 | MAKE_CASE(AArch64ISD::FCMEQz) | ||||
2319 | MAKE_CASE(AArch64ISD::FCMGEz) | ||||
2320 | MAKE_CASE(AArch64ISD::FCMGTz) | ||||
2321 | MAKE_CASE(AArch64ISD::FCMLEz) | ||||
2322 | MAKE_CASE(AArch64ISD::FCMLTz) | ||||
2323 | MAKE_CASE(AArch64ISD::SADDV) | ||||
2324 | MAKE_CASE(AArch64ISD::UADDV) | ||||
2325 | MAKE_CASE(AArch64ISD::SDOT) | ||||
2326 | MAKE_CASE(AArch64ISD::UDOT) | ||||
2327 | MAKE_CASE(AArch64ISD::SMINV) | ||||
2328 | MAKE_CASE(AArch64ISD::UMINV) | ||||
2329 | MAKE_CASE(AArch64ISD::SMAXV) | ||||
2330 | MAKE_CASE(AArch64ISD::UMAXV) | ||||
2331 | MAKE_CASE(AArch64ISD::SADDV_PRED) | ||||
2332 | MAKE_CASE(AArch64ISD::UADDV_PRED) | ||||
2333 | MAKE_CASE(AArch64ISD::SMAXV_PRED) | ||||
2334 | MAKE_CASE(AArch64ISD::UMAXV_PRED) | ||||
2335 | MAKE_CASE(AArch64ISD::SMINV_PRED) | ||||
2336 | MAKE_CASE(AArch64ISD::UMINV_PRED) | ||||
2337 | MAKE_CASE(AArch64ISD::ORV_PRED) | ||||
2338 | MAKE_CASE(AArch64ISD::EORV_PRED) | ||||
2339 | MAKE_CASE(AArch64ISD::ANDV_PRED) | ||||
2340 | MAKE_CASE(AArch64ISD::CLASTA_N) | ||||
2341 | MAKE_CASE(AArch64ISD::CLASTB_N) | ||||
2342 | MAKE_CASE(AArch64ISD::LASTA) | ||||
2343 | MAKE_CASE(AArch64ISD::LASTB) | ||||
2344 | MAKE_CASE(AArch64ISD::REINTERPRET_CAST) | ||||
2345 | MAKE_CASE(AArch64ISD::LS64_BUILD) | ||||
2346 | MAKE_CASE(AArch64ISD::LS64_EXTRACT) | ||||
2347 | MAKE_CASE(AArch64ISD::TBL) | ||||
2348 | MAKE_CASE(AArch64ISD::FADD_PRED) | ||||
2349 | MAKE_CASE(AArch64ISD::FADDA_PRED) | ||||
2350 | MAKE_CASE(AArch64ISD::FADDV_PRED) | ||||
2351 | MAKE_CASE(AArch64ISD::FDIV_PRED) | ||||
2352 | MAKE_CASE(AArch64ISD::FMA_PRED) | ||||
2353 | MAKE_CASE(AArch64ISD::FMAX_PRED) | ||||
2354 | MAKE_CASE(AArch64ISD::FMAXV_PRED) | ||||
2355 | MAKE_CASE(AArch64ISD::FMAXNM_PRED) | ||||
2356 | MAKE_CASE(AArch64ISD::FMAXNMV_PRED) | ||||
2357 | MAKE_CASE(AArch64ISD::FMIN_PRED) | ||||
2358 | MAKE_CASE(AArch64ISD::FMINV_PRED) | ||||
2359 | MAKE_CASE(AArch64ISD::FMINNM_PRED) | ||||
2360 | MAKE_CASE(AArch64ISD::FMINNMV_PRED) | ||||
2361 | MAKE_CASE(AArch64ISD::FMUL_PRED) | ||||
2362 | MAKE_CASE(AArch64ISD::FSUB_PRED) | ||||
2363 | MAKE_CASE(AArch64ISD::RDSVL) | ||||
2364 | MAKE_CASE(AArch64ISD::BIC) | ||||
2365 | MAKE_CASE(AArch64ISD::BIT) | ||||
2366 | MAKE_CASE(AArch64ISD::CBZ) | ||||
2367 | MAKE_CASE(AArch64ISD::CBNZ) | ||||
2368 | MAKE_CASE(AArch64ISD::TBZ) | ||||
2369 | MAKE_CASE(AArch64ISD::TBNZ) | ||||
2370 | MAKE_CASE(AArch64ISD::TC_RETURN) | ||||
2371 | MAKE_CASE(AArch64ISD::PREFETCH) | ||||
2372 | MAKE_CASE(AArch64ISD::SITOF) | ||||
2373 | MAKE_CASE(AArch64ISD::UITOF) | ||||
2374 | MAKE_CASE(AArch64ISD::NVCAST) | ||||
2375 | MAKE_CASE(AArch64ISD::MRS) | ||||
2376 | MAKE_CASE(AArch64ISD::SQSHL_I) | ||||
2377 | MAKE_CASE(AArch64ISD::UQSHL_I) | ||||
2378 | MAKE_CASE(AArch64ISD::SRSHR_I) | ||||
2379 | MAKE_CASE(AArch64ISD::URSHR_I) | ||||
2380 | MAKE_CASE(AArch64ISD::SQSHLU_I) | ||||
2381 | MAKE_CASE(AArch64ISD::WrapperLarge) | ||||
2382 | MAKE_CASE(AArch64ISD::LD2post) | ||||
2383 | MAKE_CASE(AArch64ISD::LD3post) | ||||
2384 | MAKE_CASE(AArch64ISD::LD4post) | ||||
2385 | MAKE_CASE(AArch64ISD::ST2post) | ||||
2386 | MAKE_CASE(AArch64ISD::ST3post) | ||||
2387 | MAKE_CASE(AArch64ISD::ST4post) | ||||
2388 | MAKE_CASE(AArch64ISD::LD1x2post) | ||||
2389 | MAKE_CASE(AArch64ISD::LD1x3post) | ||||
2390 | MAKE_CASE(AArch64ISD::LD1x4post) | ||||
2391 | MAKE_CASE(AArch64ISD::ST1x2post) | ||||
2392 | MAKE_CASE(AArch64ISD::ST1x3post) | ||||
2393 | MAKE_CASE(AArch64ISD::ST1x4post) | ||||
2394 | MAKE_CASE(AArch64ISD::LD1DUPpost) | ||||
2395 | MAKE_CASE(AArch64ISD::LD2DUPpost) | ||||
2396 | MAKE_CASE(AArch64ISD::LD3DUPpost) | ||||
2397 | MAKE_CASE(AArch64ISD::LD4DUPpost) | ||||
2398 | MAKE_CASE(AArch64ISD::LD1LANEpost) | ||||
2399 | MAKE_CASE(AArch64ISD::LD2LANEpost) | ||||
2400 | MAKE_CASE(AArch64ISD::LD3LANEpost) | ||||
2401 | MAKE_CASE(AArch64ISD::LD4LANEpost) | ||||
2402 | MAKE_CASE(AArch64ISD::ST2LANEpost) | ||||
2403 | MAKE_CASE(AArch64ISD::ST3LANEpost) | ||||
2404 | MAKE_CASE(AArch64ISD::ST4LANEpost) | ||||
2405 | MAKE_CASE(AArch64ISD::SMULL) | ||||
2406 | MAKE_CASE(AArch64ISD::UMULL) | ||||
2407 | MAKE_CASE(AArch64ISD::PMULL) | ||||
2408 | MAKE_CASE(AArch64ISD::FRECPE) | ||||
2409 | MAKE_CASE(AArch64ISD::FRECPS) | ||||
2410 | MAKE_CASE(AArch64ISD::FRSQRTE) | ||||
2411 | MAKE_CASE(AArch64ISD::FRSQRTS) | ||||
2412 | MAKE_CASE(AArch64ISD::STG) | ||||
2413 | MAKE_CASE(AArch64ISD::STZG) | ||||
2414 | MAKE_CASE(AArch64ISD::ST2G) | ||||
2415 | MAKE_CASE(AArch64ISD::STZ2G) | ||||
2416 | MAKE_CASE(AArch64ISD::SUNPKHI) | ||||
2417 | MAKE_CASE(AArch64ISD::SUNPKLO) | ||||
2418 | MAKE_CASE(AArch64ISD::UUNPKHI) | ||||
2419 | MAKE_CASE(AArch64ISD::UUNPKLO) | ||||
2420 | MAKE_CASE(AArch64ISD::INSR) | ||||
2421 | MAKE_CASE(AArch64ISD::PTEST) | ||||
2422 | MAKE_CASE(AArch64ISD::PTEST_ANY) | ||||
2423 | MAKE_CASE(AArch64ISD::PTRUE) | ||||
2424 | MAKE_CASE(AArch64ISD::LD1_MERGE_ZERO) | ||||
2425 | MAKE_CASE(AArch64ISD::LD1S_MERGE_ZERO) | ||||
2426 | MAKE_CASE(AArch64ISD::LDNF1_MERGE_ZERO) | ||||
2427 | MAKE_CASE(AArch64ISD::LDNF1S_MERGE_ZERO) | ||||
2428 | MAKE_CASE(AArch64ISD::LDFF1_MERGE_ZERO) | ||||
2429 | MAKE_CASE(AArch64ISD::LDFF1S_MERGE_ZERO) | ||||
2430 | MAKE_CASE(AArch64ISD::LD1RQ_MERGE_ZERO) | ||||
2431 | MAKE_CASE(AArch64ISD::LD1RO_MERGE_ZERO) | ||||
2432 | MAKE_CASE(AArch64ISD::SVE_LD2_MERGE_ZERO) | ||||
2433 | MAKE_CASE(AArch64ISD::SVE_LD3_MERGE_ZERO) | ||||
2434 | MAKE_CASE(AArch64ISD::SVE_LD4_MERGE_ZERO) | ||||
2435 | MAKE_CASE(AArch64ISD::GLD1_MERGE_ZERO) | ||||
2436 | MAKE_CASE(AArch64ISD::GLD1_SCALED_MERGE_ZERO) | ||||
2437 | MAKE_CASE(AArch64ISD::GLD1_SXTW_MERGE_ZERO) | ||||
2438 | MAKE_CASE(AArch64ISD::GLD1_UXTW_MERGE_ZERO) | ||||
2439 | MAKE_CASE(AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO) | ||||
2440 | MAKE_CASE(AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO) | ||||
2441 | MAKE_CASE(AArch64ISD::GLD1_IMM_MERGE_ZERO) | ||||
2442 | MAKE_CASE(AArch64ISD::GLD1S_MERGE_ZERO) | ||||
2443 | MAKE_CASE(AArch64ISD::GLD1S_SCALED_MERGE_ZERO) | ||||
2444 | MAKE_CASE(AArch64ISD::GLD1S_SXTW_MERGE_ZERO) | ||||
2445 | MAKE_CASE(AArch64ISD::GLD1S_UXTW_MERGE_ZERO) | ||||
2446 | MAKE_CASE(AArch64ISD::GLD1S_SXTW_SCALED_MERGE_ZERO) | ||||
2447 | MAKE_CASE(AArch64ISD::GLD1S_UXTW_SCALED_MERGE_ZERO) | ||||
2448 | MAKE_CASE(AArch64ISD::GLD1S_IMM_MERGE_ZERO) | ||||
2449 | MAKE_CASE(AArch64ISD::GLDFF1_MERGE_ZERO) | ||||
2450 | MAKE_CASE(AArch64ISD::GLDFF1_SCALED_MERGE_ZERO) | ||||
2451 | MAKE_CASE(AArch64ISD::GLDFF1_SXTW_MERGE_ZERO) | ||||
2452 | MAKE_CASE(AArch64ISD::GLDFF1_UXTW_MERGE_ZERO) | ||||
2453 | MAKE_CASE(AArch64ISD::GLDFF1_SXTW_SCALED_MERGE_ZERO) | ||||
2454 | MAKE_CASE(AArch64ISD::GLDFF1_UXTW_SCALED_MERGE_ZERO) | ||||
2455 | MAKE_CASE(AArch64ISD::GLDFF1_IMM_MERGE_ZERO) | ||||
2456 | MAKE_CASE(AArch64ISD::GLDFF1S_MERGE_ZERO) | ||||
2457 | MAKE_CASE(AArch64ISD::GLDFF1S_SCALED_MERGE_ZERO) | ||||
2458 | MAKE_CASE(AArch64ISD::GLDFF1S_SXTW_MERGE_ZERO) | ||||
2459 | MAKE_CASE(AArch64ISD::GLDFF1S_UXTW_MERGE_ZERO) | ||||
2460 | MAKE_CASE(AArch64ISD::GLDFF1S_SXTW_SCALED_MERGE_ZERO) | ||||
2461 | MAKE_CASE(AArch64ISD::GLDFF1S_UXTW_SCALED_MERGE_ZERO) | ||||
2462 | MAKE_CASE(AArch64ISD::GLDFF1S_IMM_MERGE_ZERO) | ||||
2463 | MAKE_CASE(AArch64ISD::GLDNT1_MERGE_ZERO) | ||||
2464 | MAKE_CASE(AArch64ISD::GLDNT1_INDEX_MERGE_ZERO) | ||||
2465 | MAKE_CASE(AArch64ISD::GLDNT1S_MERGE_ZERO) | ||||
2466 | MAKE_CASE(AArch64ISD::ST1_PRED) | ||||
2467 | MAKE_CASE(AArch64ISD::SST1_PRED) | ||||
2468 | MAKE_CASE(AArch64ISD::SST1_SCALED_PRED) | ||||
2469 | MAKE_CASE(AArch64ISD::SST1_SXTW_PRED) | ||||
2470 | MAKE_CASE(AArch64ISD::SST1_UXTW_PRED) | ||||
2471 | MAKE_CASE(AArch64ISD::SST1_SXTW_SCALED_PRED) | ||||
2472 | MAKE_CASE(AArch64ISD::SST1_UXTW_SCALED_PRED) | ||||
2473 | MAKE_CASE(AArch64ISD::SST1_IMM_PRED) | ||||
2474 | MAKE_CASE(AArch64ISD::SSTNT1_PRED) | ||||
2475 | MAKE_CASE(AArch64ISD::SSTNT1_INDEX_PRED) | ||||
2476 | MAKE_CASE(AArch64ISD::LDP) | ||||
2477 | MAKE_CASE(AArch64ISD::LDNP) | ||||
2478 | MAKE_CASE(AArch64ISD::STP) | ||||
2479 | MAKE_CASE(AArch64ISD::STNP) | ||||
2480 | MAKE_CASE(AArch64ISD::BITREVERSE_MERGE_PASSTHRU) | ||||
2481 | MAKE_CASE(AArch64ISD::BSWAP_MERGE_PASSTHRU) | ||||
2482 | MAKE_CASE(AArch64ISD::REVH_MERGE_PASSTHRU) | ||||
2483 | MAKE_CASE(AArch64ISD::REVW_MERGE_PASSTHRU) | ||||
2484 | MAKE_CASE(AArch64ISD::REVD_MERGE_PASSTHRU) | ||||
2485 | MAKE_CASE(AArch64ISD::CTLZ_MERGE_PASSTHRU) | ||||
2486 | MAKE_CASE(AArch64ISD::CTPOP_MERGE_PASSTHRU) | ||||
2487 | MAKE_CASE(AArch64ISD::DUP_MERGE_PASSTHRU) | ||||
2488 | MAKE_CASE(AArch64ISD::INDEX_VECTOR) | ||||
2489 | MAKE_CASE(AArch64ISD::ADDP) | ||||
2490 | MAKE_CASE(AArch64ISD::SADDLP) | ||||
2491 | MAKE_CASE(AArch64ISD::UADDLP) | ||||
2492 | MAKE_CASE(AArch64ISD::CALL_RVMARKER) | ||||
2493 | MAKE_CASE(AArch64ISD::ASSERT_ZEXT_BOOL) | ||||
2494 | MAKE_CASE(AArch64ISD::MOPS_MEMSET) | ||||
2495 | MAKE_CASE(AArch64ISD::MOPS_MEMSET_TAGGING) | ||||
2496 | MAKE_CASE(AArch64ISD::MOPS_MEMCOPY) | ||||
2497 | MAKE_CASE(AArch64ISD::MOPS_MEMMOVE) | ||||
2498 | MAKE_CASE(AArch64ISD::CALL_BTI) | ||||
2499 | MAKE_CASE(AArch64ISD::MRRS) | ||||
2500 | MAKE_CASE(AArch64ISD::MSRR) | ||||
2501 | } | ||||
2502 | #undef MAKE_CASE | ||||
2503 | return nullptr; | ||||
2504 | } | ||||
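// The MAKE_CASE macro above is ordinary preprocessor stringification.  A
// minimal standalone sketch of the same pattern (Color, Red, Green and name()
// are hypothetical, for illustration only):
//
//   #define MAKE_CASE(V) case V: return #V;
//   enum Color { Red, Green };
//   const char *name(Color C) {
//     switch (C) { MAKE_CASE(Red) MAKE_CASE(Green) }
//     return nullptr;
//   }
//   #undef MAKE_CASE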
2505 | |||||
2506 | MachineBasicBlock * | ||||
2507 | AArch64TargetLowering::EmitF128CSEL(MachineInstr &MI, | ||||
2508 | MachineBasicBlock *MBB) const { | ||||
2509 | // We materialise the F128CSEL pseudo-instruction as some control flow and a | ||||
2510 | // phi node: | ||||
2511 | |||||
2512 | // OrigBB: | ||||
2513 | // [... previous instrs leading to comparison ...] | ||||
2514 | // b.ne TrueBB | ||||
2515 | // b EndBB | ||||
2516 | // TrueBB: | ||||
2517 | // ; Fallthrough | ||||
2518 | // EndBB: | ||||
2519 | // Dest = PHI [IfTrue, TrueBB], [IfFalse, OrigBB] | ||||
2520 | |||||
2521 | MachineFunction *MF = MBB->getParent(); | ||||
2522 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); | ||||
2523 | const BasicBlock *LLVM_BB = MBB->getBasicBlock(); | ||||
2524 | DebugLoc DL = MI.getDebugLoc(); | ||||
2525 | MachineFunction::iterator It = ++MBB->getIterator(); | ||||
2526 | |||||
2527 | Register DestReg = MI.getOperand(0).getReg(); | ||||
2528 | Register IfTrueReg = MI.getOperand(1).getReg(); | ||||
2529 | Register IfFalseReg = MI.getOperand(2).getReg(); | ||||
2530 | unsigned CondCode = MI.getOperand(3).getImm(); | ||||
2531 | bool NZCVKilled = MI.getOperand(4).isKill(); | ||||
2532 | |||||
2533 | MachineBasicBlock *TrueBB = MF->CreateMachineBasicBlock(LLVM_BB); | ||||
2534 | MachineBasicBlock *EndBB = MF->CreateMachineBasicBlock(LLVM_BB); | ||||
2535 | MF->insert(It, TrueBB); | ||||
2536 | MF->insert(It, EndBB); | ||||
2537 | |||||
2538 | // Transfer the rest of the current basic block to EndBB. | ||||
2539 | EndBB->splice(EndBB->begin(), MBB, std::next(MachineBasicBlock::iterator(MI)), | ||||
2540 | MBB->end()); | ||||
2541 | EndBB->transferSuccessorsAndUpdatePHIs(MBB); | ||||
2542 | |||||
2543 | BuildMI(MBB, DL, TII->get(AArch64::Bcc)).addImm(CondCode).addMBB(TrueBB); | ||||
2544 | BuildMI(MBB, DL, TII->get(AArch64::B)).addMBB(EndBB); | ||||
2545 | MBB->addSuccessor(TrueBB); | ||||
2546 | MBB->addSuccessor(EndBB); | ||||
2547 | |||||
2548 | // TrueBB falls through to the end. | ||||
2549 | TrueBB->addSuccessor(EndBB); | ||||
2550 | |||||
2551 | if (!NZCVKilled) { | ||||
2552 | TrueBB->addLiveIn(AArch64::NZCV); | ||||
2553 | EndBB->addLiveIn(AArch64::NZCV); | ||||
2554 | } | ||||
2555 | |||||
2556 | BuildMI(*EndBB, EndBB->begin(), DL, TII->get(AArch64::PHI), DestReg) | ||||
2557 | .addReg(IfTrueReg) | ||||
2558 | .addMBB(TrueBB) | ||||
2559 | .addReg(IfFalseReg) | ||||
2560 | .addMBB(MBB); | ||||
2561 | |||||
2562 | MI.eraseFromParent(); | ||||
2563 | return EndBB; | ||||
2564 | } | ||||
2565 | |||||
2566 | MachineBasicBlock *AArch64TargetLowering::EmitLoweredCatchRet( | ||||
2567 | MachineInstr &MI, MachineBasicBlock *BB) const { | ||||
2568 | assert(!isAsynchronousEHPersonality(classifyEHPersonality( | ||||
2569 | BB->getParent()->getFunction().getPersonalityFn())) && | ||||
2570 | "SEH does not use catchret!"); | ||||
2571 | return BB; | ||||
2572 | } | ||||
2573 | |||||
2574 | MachineBasicBlock * | ||||
2575 | AArch64TargetLowering::EmitTileLoad(unsigned Opc, unsigned BaseReg, | ||||
2576 | MachineInstr &MI, | ||||
2577 | MachineBasicBlock *BB) const { | ||||
2578 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); | ||||
2579 | MachineInstrBuilder MIB = BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(Opc)); | ||||
2580 | |||||
2581 | MIB.addReg(BaseReg + MI.getOperand(0).getImm(), RegState::Define); | ||||
2582 | MIB.add(MI.getOperand(1)); // slice index register | ||||
2583 | MIB.add(MI.getOperand(2)); // slice index offset | ||||
2584 | MIB.add(MI.getOperand(3)); // pg | ||||
2585 | MIB.add(MI.getOperand(4)); // base | ||||
2586 | MIB.add(MI.getOperand(5)); // offset | ||||
2587 | |||||
2588 | MI.eraseFromParent(); // The pseudo is gone now. | ||||
2589 | return BB; | ||||
2590 | } | ||||
2591 | |||||
2592 | MachineBasicBlock * | ||||
2593 | AArch64TargetLowering::EmitFill(MachineInstr &MI, MachineBasicBlock *BB) const { | ||||
2594 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); | ||||
2595 | MachineInstrBuilder MIB = | ||||
2596 | BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(AArch64::LDR_ZA)); | ||||
2597 | |||||
2598 | MIB.addReg(AArch64::ZA, RegState::Define); | ||||
2599 | MIB.add(MI.getOperand(0)); // Vector select register | ||||
2600 | MIB.add(MI.getOperand(1)); // Vector select offset | ||||
2601 | MIB.add(MI.getOperand(2)); // Base | ||||
2602 | MIB.add(MI.getOperand(1)); // Offset, same as vector select offset | ||||
2603 | |||||
2604 | MI.eraseFromParent(); // The pseudo is gone now. | ||||
2605 | return BB; | ||||
2606 | } | ||||
2607 | |||||
2608 | MachineBasicBlock * | ||||
2609 | AArch64TargetLowering::EmitMopa(unsigned Opc, unsigned BaseReg, | ||||
2610 | MachineInstr &MI, MachineBasicBlock *BB) const { | ||||
2611 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); | ||||
2612 | MachineInstrBuilder MIB = BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(Opc)); | ||||
2613 | |||||
2614 | MIB.addReg(BaseReg + MI.getOperand(0).getImm(), RegState::Define); | ||||
2615 | MIB.addReg(BaseReg + MI.getOperand(0).getImm()); | ||||
2616 | MIB.add(MI.getOperand(1)); // pn | ||||
2617 | MIB.add(MI.getOperand(2)); // pm | ||||
2618 | MIB.add(MI.getOperand(3)); // zn | ||||
2619 | MIB.add(MI.getOperand(4)); // zm | ||||
2620 | |||||
2621 | MI.eraseFromParent(); // The pseudo is gone now. | ||||
2622 | return BB; | ||||
2623 | } | ||||
2624 | |||||
2625 | MachineBasicBlock * | ||||
2626 | AArch64TargetLowering::EmitInsertVectorToTile(unsigned Opc, unsigned BaseReg, | ||||
2627 | MachineInstr &MI, | ||||
2628 | MachineBasicBlock *BB) const { | ||||
2629 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); | ||||
2630 | MachineInstrBuilder MIB = BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(Opc)); | ||||
2631 | |||||
2632 | MIB.addReg(BaseReg + MI.getOperand(0).getImm(), RegState::Define); | ||||
2633 | MIB.addReg(BaseReg + MI.getOperand(0).getImm()); | ||||
2634 | MIB.add(MI.getOperand(1)); // Slice index register | ||||
2635 | MIB.add(MI.getOperand(2)); // Slice index offset | ||||
2636 | MIB.add(MI.getOperand(3)); // pg | ||||
2637 | MIB.add(MI.getOperand(4)); // zn | ||||
2638 | |||||
2639 | MI.eraseFromParent(); // The pseudo is gone now. | ||||
2640 | return BB; | ||||
2641 | } | ||||
2642 | |||||
2643 | MachineBasicBlock * | ||||
2644 | AArch64TargetLowering::EmitZero(MachineInstr &MI, MachineBasicBlock *BB) const { | ||||
2645 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); | ||||
2646 | MachineInstrBuilder MIB = | ||||
2647 | BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(AArch64::ZERO_M)); | ||||
2648 | MIB.add(MI.getOperand(0)); // Mask | ||||
2649 | |||||
2650 | unsigned Mask = MI.getOperand(0).getImm(); | ||||
2651 | for (unsigned I = 0; I < 8; I++) { | ||||
2652 | if (Mask & (1 << I)) | ||||
2653 | MIB.addDef(AArch64::ZAD0 + I, RegState::ImplicitDefine); | ||||
2654 | } | ||||
2655 | |||||
2656 | MI.eraseFromParent(); // The pseudo is gone now. | ||||
2657 | return BB; | ||||
2658 | } | ||||
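// Example (illustrative): a ZERO_M_PSEUDO with Mask == 0b00000101 makes the
// loop above add AArch64::ZAD0 and AArch64::ZAD2 as implicit defs, one per
// set bit of the mask.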
2659 | |||||
2660 | MachineBasicBlock * | ||||
2661 | AArch64TargetLowering::EmitAddVectorToTile(unsigned Opc, unsigned BaseReg, | ||||
2662 | MachineInstr &MI, | ||||
2663 | MachineBasicBlock *BB) const { | ||||
2664 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); | ||||
2665 | MachineInstrBuilder MIB = BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(Opc)); | ||||
2666 | |||||
2667 | MIB.addReg(BaseReg + MI.getOperand(0).getImm(), RegState::Define); | ||||
2668 | MIB.addReg(BaseReg + MI.getOperand(0).getImm()); | ||||
2669 | MIB.add(MI.getOperand(1)); // pn | ||||
2670 | MIB.add(MI.getOperand(2)); // pm | ||||
2671 | MIB.add(MI.getOperand(3)); // zn | ||||
2672 | |||||
2673 | MI.eraseFromParent(); // The pseudo is gone now. | ||||
2674 | return BB; | ||||
2675 | } | ||||
2676 | |||||
2677 | MachineBasicBlock *AArch64TargetLowering::EmitInstrWithCustomInserter( | ||||
2678 | MachineInstr &MI, MachineBasicBlock *BB) const { | ||||
2679 | switch (MI.getOpcode()) { | ||||
2680 | default: | ||||
2681 | #ifndef NDEBUG | ||||
2682 | MI.dump(); | ||||
2683 | #endif | ||||
2684 | llvm_unreachable("Unexpected instruction for custom inserter!")::llvm::llvm_unreachable_internal("Unexpected instruction for custom inserter!" , "llvm/lib/Target/AArch64/AArch64ISelLowering.cpp", 2684); | ||||
2685 | |||||
2686 | case AArch64::F128CSEL: | ||||
2687 | return EmitF128CSEL(MI, BB); | ||||
2688 | case TargetOpcode::STATEPOINT: | ||||
2689 | // STATEPOINT is a pseudo instruction which has no implicit defs/uses | ||||
2690 | // while the bl call instruction (where the statepoint will be lowered at | ||||
2691 | // the end) has an implicit def. This def is early-clobber as it will be set | ||||
2692 | // at the moment of the call, earlier than any use is read. | ||||
2693 | // Add this implicit dead def here as a workaround. | ||||
2694 | MI.addOperand(*MI.getMF(), | ||||
2695 | MachineOperand::CreateReg( | ||||
2696 | AArch64::LR, /*isDef*/ true, | ||||
2697 | /*isImp*/ true, /*isKill*/ false, /*isDead*/ true, | ||||
2698 | /*isUndef*/ false, /*isEarlyClobber*/ true)); | ||||
2699 | [[fallthrough]]; | ||||
2700 | case TargetOpcode::STACKMAP: | ||||
2701 | case TargetOpcode::PATCHPOINT: | ||||
2702 | return emitPatchPoint(MI, BB); | ||||
2703 | |||||
2704 | case AArch64::CATCHRET: | ||||
2705 | return EmitLoweredCatchRet(MI, BB); | ||||
2706 | case AArch64::LD1_MXIPXX_H_PSEUDO_B: | ||||
2707 | return EmitTileLoad(AArch64::LD1_MXIPXX_H_B, AArch64::ZAB0, MI, BB); | ||||
2708 | case AArch64::LD1_MXIPXX_H_PSEUDO_H: | ||||
2709 | return EmitTileLoad(AArch64::LD1_MXIPXX_H_H, AArch64::ZAH0, MI, BB); | ||||
2710 | case AArch64::LD1_MXIPXX_H_PSEUDO_S: | ||||
2711 | return EmitTileLoad(AArch64::LD1_MXIPXX_H_S, AArch64::ZAS0, MI, BB); | ||||
2712 | case AArch64::LD1_MXIPXX_H_PSEUDO_D: | ||||
2713 | return EmitTileLoad(AArch64::LD1_MXIPXX_H_D, AArch64::ZAD0, MI, BB); | ||||
2714 | case AArch64::LD1_MXIPXX_H_PSEUDO_Q: | ||||
2715 | return EmitTileLoad(AArch64::LD1_MXIPXX_H_Q, AArch64::ZAQ0, MI, BB); | ||||
2716 | case AArch64::LD1_MXIPXX_V_PSEUDO_B: | ||||
2717 | return EmitTileLoad(AArch64::LD1_MXIPXX_V_B, AArch64::ZAB0, MI, BB); | ||||
2718 | case AArch64::LD1_MXIPXX_V_PSEUDO_H: | ||||
2719 | return EmitTileLoad(AArch64::LD1_MXIPXX_V_H, AArch64::ZAH0, MI, BB); | ||||
2720 | case AArch64::LD1_MXIPXX_V_PSEUDO_S: | ||||
2721 | return EmitTileLoad(AArch64::LD1_MXIPXX_V_S, AArch64::ZAS0, MI, BB); | ||||
2722 | case AArch64::LD1_MXIPXX_V_PSEUDO_D: | ||||
2723 | return EmitTileLoad(AArch64::LD1_MXIPXX_V_D, AArch64::ZAD0, MI, BB); | ||||
2724 | case AArch64::LD1_MXIPXX_V_PSEUDO_Q: | ||||
2725 | return EmitTileLoad(AArch64::LD1_MXIPXX_V_Q, AArch64::ZAQ0, MI, BB); | ||||
2726 | case AArch64::LDR_ZA_PSEUDO: | ||||
2727 | return EmitFill(MI, BB); | ||||
2728 | case AArch64::BFMOPA_MPPZZ_PSEUDO: | ||||
2729 | return EmitMopa(AArch64::BFMOPA_MPPZZ, AArch64::ZAS0, MI, BB); | ||||
2730 | case AArch64::BFMOPS_MPPZZ_PSEUDO: | ||||
2731 | return EmitMopa(AArch64::BFMOPS_MPPZZ, AArch64::ZAS0, MI, BB); | ||||
2732 | case AArch64::FMOPAL_MPPZZ_PSEUDO: | ||||
2733 | return EmitMopa(AArch64::FMOPAL_MPPZZ, AArch64::ZAS0, MI, BB); | ||||
2734 | case AArch64::FMOPSL_MPPZZ_PSEUDO: | ||||
2735 | return EmitMopa(AArch64::FMOPSL_MPPZZ, AArch64::ZAS0, MI, BB); | ||||
2736 | case AArch64::FMOPA_MPPZZ_S_PSEUDO: | ||||
2737 | return EmitMopa(AArch64::FMOPA_MPPZZ_S, AArch64::ZAS0, MI, BB); | ||||
2738 | case AArch64::FMOPS_MPPZZ_S_PSEUDO: | ||||
2739 | return EmitMopa(AArch64::FMOPS_MPPZZ_S, AArch64::ZAS0, MI, BB); | ||||
2740 | case AArch64::FMOPA_MPPZZ_D_PSEUDO: | ||||
2741 | return EmitMopa(AArch64::FMOPA_MPPZZ_D, AArch64::ZAD0, MI, BB); | ||||
2742 | case AArch64::FMOPS_MPPZZ_D_PSEUDO: | ||||
2743 | return EmitMopa(AArch64::FMOPS_MPPZZ_D, AArch64::ZAD0, MI, BB); | ||||
2744 | case AArch64::SMOPA_MPPZZ_S_PSEUDO: | ||||
2745 | return EmitMopa(AArch64::SMOPA_MPPZZ_S, AArch64::ZAS0, MI, BB); | ||||
2746 | case AArch64::SMOPS_MPPZZ_S_PSEUDO: | ||||
2747 | return EmitMopa(AArch64::SMOPS_MPPZZ_S, AArch64::ZAS0, MI, BB); | ||||
2748 | case AArch64::UMOPA_MPPZZ_S_PSEUDO: | ||||
2749 | return EmitMopa(AArch64::UMOPA_MPPZZ_S, AArch64::ZAS0, MI, BB); | ||||
2750 | case AArch64::UMOPS_MPPZZ_S_PSEUDO: | ||||
2751 | return EmitMopa(AArch64::UMOPS_MPPZZ_S, AArch64::ZAS0, MI, BB); | ||||
2752 | case AArch64::SUMOPA_MPPZZ_S_PSEUDO: | ||||
2753 | return EmitMopa(AArch64::SUMOPA_MPPZZ_S, AArch64::ZAS0, MI, BB); | ||||
2754 | case AArch64::SUMOPS_MPPZZ_S_PSEUDO: | ||||
2755 | return EmitMopa(AArch64::SUMOPS_MPPZZ_S, AArch64::ZAS0, MI, BB); | ||||
2756 | case AArch64::USMOPA_MPPZZ_S_PSEUDO: | ||||
2757 | return EmitMopa(AArch64::USMOPA_MPPZZ_S, AArch64::ZAS0, MI, BB); | ||||
2758 | case AArch64::USMOPS_MPPZZ_S_PSEUDO: | ||||
2759 | return EmitMopa(AArch64::USMOPS_MPPZZ_S, AArch64::ZAS0, MI, BB); | ||||
2760 | case AArch64::SMOPA_MPPZZ_D_PSEUDO: | ||||
2761 | return EmitMopa(AArch64::SMOPA_MPPZZ_D, AArch64::ZAD0, MI, BB); | ||||
2762 | case AArch64::SMOPS_MPPZZ_D_PSEUDO: | ||||
2763 | return EmitMopa(AArch64::SMOPS_MPPZZ_D, AArch64::ZAD0, MI, BB); | ||||
2764 | case AArch64::UMOPA_MPPZZ_D_PSEUDO: | ||||
2765 | return EmitMopa(AArch64::UMOPA_MPPZZ_D, AArch64::ZAD0, MI, BB); | ||||
2766 | case AArch64::UMOPS_MPPZZ_D_PSEUDO: | ||||
2767 | return EmitMopa(AArch64::UMOPS_MPPZZ_D, AArch64::ZAD0, MI, BB); | ||||
2768 | case AArch64::SUMOPA_MPPZZ_D_PSEUDO: | ||||
2769 | return EmitMopa(AArch64::SUMOPA_MPPZZ_D, AArch64::ZAD0, MI, BB); | ||||
2770 | case AArch64::SUMOPS_MPPZZ_D_PSEUDO: | ||||
2771 | return EmitMopa(AArch64::SUMOPS_MPPZZ_D, AArch64::ZAD0, MI, BB); | ||||
2772 | case AArch64::USMOPA_MPPZZ_D_PSEUDO: | ||||
2773 | return EmitMopa(AArch64::USMOPA_MPPZZ_D, AArch64::ZAD0, MI, BB); | ||||
2774 | case AArch64::USMOPS_MPPZZ_D_PSEUDO: | ||||
2775 | return EmitMopa(AArch64::USMOPS_MPPZZ_D, AArch64::ZAD0, MI, BB); | ||||
2776 | case AArch64::INSERT_MXIPZ_H_PSEUDO_B: | ||||
2777 | return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_H_B, AArch64::ZAB0, MI, | ||||
2778 | BB); | ||||
2779 | case AArch64::INSERT_MXIPZ_H_PSEUDO_H: | ||||
2780 | return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_H_H, AArch64::ZAH0, MI, | ||||
2781 | BB); | ||||
2782 | case AArch64::INSERT_MXIPZ_H_PSEUDO_S: | ||||
2783 | return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_H_S, AArch64::ZAS0, MI, | ||||
2784 | BB); | ||||
2785 | case AArch64::INSERT_MXIPZ_H_PSEUDO_D: | ||||
2786 | return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_H_D, AArch64::ZAD0, MI, | ||||
2787 | BB); | ||||
2788 | case AArch64::INSERT_MXIPZ_H_PSEUDO_Q: | ||||
2789 | return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_H_Q, AArch64::ZAQ0, MI, | ||||
2790 | BB); | ||||
2791 | case AArch64::INSERT_MXIPZ_V_PSEUDO_B: | ||||
2792 | return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_V_B, AArch64::ZAB0, MI, | ||||
2793 | BB); | ||||
2794 | case AArch64::INSERT_MXIPZ_V_PSEUDO_H: | ||||
2795 | return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_V_H, AArch64::ZAH0, MI, | ||||
2796 | BB); | ||||
2797 | case AArch64::INSERT_MXIPZ_V_PSEUDO_S: | ||||
2798 | return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_V_S, AArch64::ZAS0, MI, | ||||
2799 | BB); | ||||
2800 | case AArch64::INSERT_MXIPZ_V_PSEUDO_D: | ||||
2801 | return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_V_D, AArch64::ZAD0, MI, | ||||
2802 | BB); | ||||
2803 | case AArch64::INSERT_MXIPZ_V_PSEUDO_Q: | ||||
2804 | return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_V_Q, AArch64::ZAQ0, MI, | ||||
2805 | BB); | ||||
2806 | case AArch64::ZERO_M_PSEUDO: | ||||
2807 | return EmitZero(MI, BB); | ||||
2808 | case AArch64::ADDHA_MPPZ_PSEUDO_S: | ||||
2809 | return EmitAddVectorToTile(AArch64::ADDHA_MPPZ_S, AArch64::ZAS0, MI, BB); | ||||
2810 | case AArch64::ADDVA_MPPZ_PSEUDO_S: | ||||
2811 | return EmitAddVectorToTile(AArch64::ADDVA_MPPZ_S, AArch64::ZAS0, MI, BB); | ||||
2812 | case AArch64::ADDHA_MPPZ_PSEUDO_D: | ||||
2813 | return EmitAddVectorToTile(AArch64::ADDHA_MPPZ_D, AArch64::ZAD0, MI, BB); | ||||
2814 | case AArch64::ADDVA_MPPZ_PSEUDO_D: | ||||
2815 | return EmitAddVectorToTile(AArch64::ADDVA_MPPZ_D, AArch64::ZAD0, MI, BB); | ||||
2816 | } | ||||
2817 | } | ||||
2818 | |||||
2819 | //===----------------------------------------------------------------------===// | ||||
2820 | // AArch64 Lowering private implementation. | ||||
2821 | //===----------------------------------------------------------------------===// | ||||
2822 | |||||
2823 | //===----------------------------------------------------------------------===// | ||||
2824 | // Lowering Code | ||||
2825 | //===----------------------------------------------------------------------===// | ||||
2826 | |||||
2827 | // Forward declarations of SVE fixed length lowering helpers | ||||
2828 | static EVT getContainerForFixedLengthVector(SelectionDAG &DAG, EVT VT); | ||||
2829 | static SDValue convertToScalableVector(SelectionDAG &DAG, EVT VT, SDValue V); | ||||
2830 | static SDValue convertFromScalableVector(SelectionDAG &DAG, EVT VT, SDValue V); | ||||
2831 | static SDValue convertFixedMaskToScalableVector(SDValue Mask, | ||||
2832 | SelectionDAG &DAG); | ||||
2833 | static SDValue getPredicateForScalableVector(SelectionDAG &DAG, SDLoc &DL, | ||||
2834 | EVT VT); | ||||
2835 | |||||
2836 | /// isZerosVector - Check whether SDNode N is a zero-filled vector. | ||||
2837 | static bool isZerosVector(const SDNode *N) { | ||||
2838 | // Look through a bit convert. | ||||
2839 | while (N->getOpcode() == ISD::BITCAST) | ||||
2840 | N = N->getOperand(0).getNode(); | ||||
2841 | |||||
2842 | if (ISD::isConstantSplatVectorAllZeros(N)) | ||||
2843 | return true; | ||||
2844 | |||||
2845 | if (N->getOpcode() != AArch64ISD::DUP) | ||||
2846 | return false; | ||||
2847 | |||||
2848 | auto Opnd0 = N->getOperand(0); | ||||
2849 | return isNullConstant(Opnd0) || isNullFPConstant(Opnd0); | ||||
2850 | } | ||||
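// Example (illustrative): both a constant all-zero splat and a
// (BITCAST (AArch64ISD::DUP 0)) are recognized; the while-loop above strips
// any number of BITCASTs before the two tests run.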
2851 | |||||
2852 | /// changeIntCCToAArch64CC - Convert a DAG integer condition code to an AArch64 | ||||
2853 | /// CC | ||||
2854 | static AArch64CC::CondCode changeIntCCToAArch64CC(ISD::CondCode CC) { | ||||
2855 | switch (CC) { | ||||
2856 | default: | ||||
2857 | llvm_unreachable("Unknown condition code!")::llvm::llvm_unreachable_internal("Unknown condition code!", "llvm/lib/Target/AArch64/AArch64ISelLowering.cpp" , 2857); | ||||
2858 | case ISD::SETNE: | ||||
2859 | return AArch64CC::NE; | ||||
2860 | case ISD::SETEQ: | ||||
2861 | return AArch64CC::EQ; | ||||
2862 | case ISD::SETGT: | ||||
2863 | return AArch64CC::GT; | ||||
2864 | case ISD::SETGE: | ||||
2865 | return AArch64CC::GE; | ||||
2866 | case ISD::SETLT: | ||||
2867 | return AArch64CC::LT; | ||||
2868 | case ISD::SETLE: | ||||
2869 | return AArch64CC::LE; | ||||
2870 | case ISD::SETUGT: | ||||
2871 | return AArch64CC::HI; | ||||
2872 | case ISD::SETUGE: | ||||
2873 | return AArch64CC::HS; | ||||
2874 | case ISD::SETULT: | ||||
2875 | return AArch64CC::LO; | ||||
2876 | case ISD::SETULE: | ||||
2877 | return AArch64CC::LS; | ||||
2878 | } | ||||
2879 | } | ||||
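// Sanity examples for the mapping above (illustrative, not in the original
// file): the unsigned cases follow the AArch64 carry-flag semantics, e.g.
//   changeIntCCToAArch64CC(ISD::SETULT) == AArch64CC::LO  // carry clear
//   changeIntCCToAArch64CC(ISD::SETUGE) == AArch64CC::HS  // carry set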
2880 | |||||
2881 | /// changeFPCCToAArch64CC - Convert a DAG fp condition code to an AArch64 CC. | ||||
2882 | static void changeFPCCToAArch64CC(ISD::CondCode CC, | ||||
2883 | AArch64CC::CondCode &CondCode, | ||||
2884 | AArch64CC::CondCode &CondCode2) { | ||||
2885 | CondCode2 = AArch64CC::AL; | ||||
2886 | switch (CC) { | ||||
2887 | default: | ||||
2888 | llvm_unreachable("Unknown FP condition!")::llvm::llvm_unreachable_internal("Unknown FP condition!", "llvm/lib/Target/AArch64/AArch64ISelLowering.cpp" , 2888); | ||||
2889 | case ISD::SETEQ: | ||||
2890 | case ISD::SETOEQ: | ||||
2891 | CondCode = AArch64CC::EQ; | ||||
2892 | break; | ||||
2893 | case ISD::SETGT: | ||||
2894 | case ISD::SETOGT: | ||||
2895 | CondCode = AArch64CC::GT; | ||||
2896 | break; | ||||
2897 | case ISD::SETGE: | ||||
2898 | case ISD::SETOGE: | ||||
2899 | CondCode = AArch64CC::GE; | ||||
2900 | break; | ||||
2901 | case ISD::SETOLT: | ||||
2902 | CondCode = AArch64CC::MI; | ||||
2903 | break; | ||||
2904 | case ISD::SETOLE: | ||||
2905 | CondCode = AArch64CC::LS; | ||||
2906 | break; | ||||
2907 | case ISD::SETONE: | ||||
2908 | CondCode = AArch64CC::MI; | ||||
2909 | CondCode2 = AArch64CC::GT; | ||||
2910 | break; | ||||
2911 | case ISD::SETO: | ||||
2912 | CondCode = AArch64CC::VC; | ||||
2913 | break; | ||||
2914 | case ISD::SETUO: | ||||
2915 | CondCode = AArch64CC::VS; | ||||
2916 | break; | ||||
2917 | case ISD::SETUEQ: | ||||
2918 | CondCode = AArch64CC::EQ; | ||||
2919 | CondCode2 = AArch64CC::VS; | ||||
2920 | break; | ||||
2921 | case ISD::SETUGT: | ||||
2922 | CondCode = AArch64CC::HI; | ||||
2923 | break; | ||||
2924 | case ISD::SETUGE: | ||||
2925 | CondCode = AArch64CC::PL; | ||||
2926 | break; | ||||
2927 | case ISD::SETLT: | ||||
2928 | case ISD::SETULT: | ||||
2929 | CondCode = AArch64CC::LT; | ||||
2930 | break; | ||||
2931 | case ISD::SETLE: | ||||
2932 | case ISD::SETULE: | ||||
2933 | CondCode = AArch64CC::LE; | ||||
2934 | break; | ||||
2935 | case ISD::SETNE: | ||||
2936 | case ISD::SETUNE: | ||||
2937 | CondCode = AArch64CC::NE; | ||||
2938 | break; | ||||
2939 | } | ||||
2940 | } | ||||
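// Example (illustrative): SETONE has no single AArch64 condition, so the
// switch above returns the pair (MI, GT): "a one b" == (a olt b) || (a ogt b),
// and the caller ORs the two tests.  changeFPCCToANDAArch64CC() below hands
// out the AND-friendly pair (VC, NE) for the same predicate instead.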
2941 | |||||
2942 | /// Convert a DAG fp condition code to an AArch64 CC. | ||||
2943 | /// This differs from changeFPCCToAArch64CC in that it returns cond codes that | ||||
2944 | /// should be AND'ed instead of OR'ed. | ||||
2945 | static void changeFPCCToANDAArch64CC(ISD::CondCode CC, | ||||
2946 | AArch64CC::CondCode &CondCode, | ||||
2947 | AArch64CC::CondCode &CondCode2) { | ||||
2948 | CondCode2 = AArch64CC::AL; | ||||
2949 | switch (CC) { | ||||
2950 | default: | ||||
2951 | changeFPCCToAArch64CC(CC, CondCode, CondCode2); | ||||
2952 | assert(CondCode2 == AArch64CC::AL); | ||||
2953 | break; | ||||
2954 | case ISD::SETONE: | ||||
2955 | // (a one b) | ||||
2956 | // == ((a olt b) || (a ogt b)) | ||||
2957 | // == ((a ord b) && (a une b)) | ||||
2958 | CondCode = AArch64CC::VC; | ||||
2959 | CondCode2 = AArch64CC::NE; | ||||
2960 | break; | ||||
2961 | case ISD::SETUEQ: | ||||
2962 | // (a ueq b) | ||||
2963 | // == ((a uno b) || (a oeq b)) | ||||
2964 | // == ((a ule b) && (a uge b)) | ||||
2965 | CondCode = AArch64CC::PL; | ||||
2966 | CondCode2 = AArch64CC::LE; | ||||
2967 | break; | ||||
2968 | } | ||||
2969 | } | ||||
2970 | |||||
2971 | /// changeVectorFPCCToAArch64CC - Convert a DAG fp condition code to an AArch64 | ||||
2972 | /// CC usable with the vector instructions. Fewer operations are available | ||||
2973 | /// without a real NZCV register, so we have to use less efficient combinations | ||||
2974 | /// to get the same effect. | ||||
2975 | static void changeVectorFPCCToAArch64CC(ISD::CondCode CC, | ||||
2976 | AArch64CC::CondCode &CondCode, | ||||
2977 | AArch64CC::CondCode &CondCode2, | ||||
2978 | bool &Invert) { | ||||
2979 | Invert = false; | ||||
2980 | switch (CC) { | ||||
2981 | default: | ||||
2982 | // Mostly the scalar mappings work fine. | ||||
2983 | changeFPCCToAArch64CC(CC, CondCode, CondCode2); | ||||
2984 | break; | ||||
2985 | case ISD::SETUO: | ||||
2986 | Invert = true; | ||||
2987 | [[fallthrough]]; | ||||
2988 | case ISD::SETO: | ||||
2989 | CondCode = AArch64CC::MI; | ||||
2990 | CondCode2 = AArch64CC::GE; | ||||
2991 | break; | ||||
2992 | case ISD::SETUEQ: | ||||
2993 | case ISD::SETULT: | ||||
2994 | case ISD::SETULE: | ||||
2995 | case ISD::SETUGT: | ||||
2996 | case ISD::SETUGE: | ||||
2997 | // All of the compare-mask comparisons are ordered, but we can switch | ||||
2998 | // between the two by a double inversion. E.g. ULE == !OGT. | ||||
2999 | Invert = true; | ||||
3000 | changeFPCCToAArch64CC(getSetCCInverse(CC, /* FP inverse */ MVT::f32), | ||||
3001 | CondCode, CondCode2); | ||||
3002 | break; | ||||
3003 | } | ||||
3004 | } | ||||
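// Example (illustrative): a vector SETULT lands in the inverting case above:
// getSetCCInverse(SETULT) == SETOGE, which maps to GE, and Invert tells the
// caller to NOT the resulting compare mask, since ULT == !OGE per lane
// (NaN lanes included).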
3005 | |||||
3006 | static bool isLegalArithImmed(uint64_t C) { | ||||
3007 | // Matches AArch64DAGToDAGISel::SelectArithImmed(). | ||||
3008 | bool IsLegal = (C >> 12 == 0) || ((C & 0xFFFULL) == 0 && C >> 24 == 0); | ||||
3009 | LLVM_DEBUG(dbgs() << "Is imm " << C | ||||
3010 | << " legal: " << (IsLegal ? "yes\n" : "no\n")); | ||||
3011 | return IsLegal; | ||||
3012 | } | ||||
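// Worked values for the predicate above (illustrative):
//   isLegalArithImmed(0xFFF)    -> true   (fits in 12 bits)
//   isLegalArithImmed(0xABC000) -> true   (12 bits shifted left by 12)
//   isLegalArithImmed(0x1001)   -> false  (needs 13 significant bits)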
3013 | |||||
3014 | // Can a (CMP op1, (sub 0, op2)) be turned into a CMN instruction on | ||||
3015 | // the grounds that "op1 - (-op2) == op1 + op2" ? Not always, the C and V flags | ||||
3016 | // can be set differently by this operation. It comes down to whether | ||||
3017 | // "SInt(~op2)+1 == SInt(~op2+1)" (and the same for UInt). If they are then | ||||
3018 | // everything is fine. If not then the optimization is wrong. Thus general | ||||
3019 | // comparisons are only valid if op2 != 0. | ||||
3020 | // | ||||
3021 | // So, finally, the only LLVM-native comparisons that don't mention C and V | ||||
3022 | // are SETEQ and SETNE. They're the only ones we can safely use CMN for in | ||||
3023 | // the absence of information about op2. | ||||
3024 | static bool isCMN(SDValue Op, ISD::CondCode CC) { | ||||
3025 | return Op.getOpcode() == ISD::SUB && isNullConstant(Op.getOperand(0)) && | ||||
3026 | (CC == ISD::SETEQ || CC == ISD::SETNE); | ||||
3027 | } | ||||
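// Example (illustrative): for "x == -y" the DAG is (setcc eq x, (sub 0, y)).
// isCMN() accepts it, and emitComparison() below rewrites SUBS(x, (0 - y))
// into ADDS(x, y), i.e. a single CMN.  For, say, SETULT the rewrite would be
// unsound because C and V can differ, hence the SETEQ/SETNE restriction.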
3028 | |||||
3029 | static SDValue emitStrictFPComparison(SDValue LHS, SDValue RHS, const SDLoc &dl, | ||||
3030 | SelectionDAG &DAG, SDValue Chain, | ||||
3031 | bool IsSignaling) { | ||||
3032 | EVT VT = LHS.getValueType(); | ||||
3033 | assert(VT != MVT::f128); | ||||
3034 | |||||
3035 | const bool FullFP16 = DAG.getSubtarget<AArch64Subtarget>().hasFullFP16(); | ||||
3036 | |||||
3037 | if (VT == MVT::f16 && !FullFP16) { | ||||
3038 | LHS = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {MVT::f32, MVT::Other}, | ||||
3039 | {Chain, LHS}); | ||||
3040 | RHS = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {MVT::f32, MVT::Other}, | ||||
3041 | {LHS.getValue(1), RHS}); | ||||
3042 | Chain = RHS.getValue(1); | ||||
3043 | VT = MVT::f32; | ||||
3044 | } | ||||
3045 | unsigned Opcode = | ||||
3046 | IsSignaling ? AArch64ISD::STRICT_FCMPE : AArch64ISD::STRICT_FCMP; | ||||
3047 | return DAG.getNode(Opcode, dl, {VT, MVT::Other}, {Chain, LHS, RHS}); | ||||
3048 | } | ||||
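// Illustrative note: without full FP16 support an f16 strict compare is
// promoted first; the two STRICT_FP_EXTEND nodes above are threaded through
// the chain operand so the extensions stay ordered with the compare itself.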
3049 | |||||
3050 | static SDValue emitComparison(SDValue LHS, SDValue RHS, ISD::CondCode CC, | ||||
3051 | const SDLoc &dl, SelectionDAG &DAG) { | ||||
3052 | EVT VT = LHS.getValueType(); | ||||
3053 | const bool FullFP16 = DAG.getSubtarget<AArch64Subtarget>().hasFullFP16(); | ||||
3054 | |||||
3055 | if (VT.isFloatingPoint()) { | ||||
3056 | assert(VT != MVT::f128); | ||||
3057 | if (VT == MVT::f16 && !FullFP16) { | ||||
3058 | LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, LHS); | ||||
3059 | RHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, RHS); | ||||
3060 | VT = MVT::f32; | ||||
3061 | } | ||||
3062 | return DAG.getNode(AArch64ISD::FCMP, dl, VT, LHS, RHS); | ||||
3063 | } | ||||
3064 | |||||
3065 | // The CMP instruction is just an alias for SUBS, and representing it as | ||||
3066 | // SUBS means that it's possible to get CSE with subtract operations. | ||||
3067 | // A later phase can perform the optimization of setting the destination | ||||
3068 | // register to WZR/XZR if it ends up being unused. | ||||
3069 | unsigned Opcode = AArch64ISD::SUBS; | ||||
3070 | |||||
3071 | if (isCMN(RHS, CC)) { | ||||
3072 | // Can we combine a (CMP op1, (sub 0, op2)) into a CMN instruction? | ||||
3073 | Opcode = AArch64ISD::ADDS; | ||||
3074 | RHS = RHS.getOperand(1); | ||||
3075 | } else if (isCMN(LHS, CC)) { | ||||
3076 | // As we are looking for EQ/NE compares, the operands can be commuted; can | ||||
3077 | // we combine a (CMP (sub 0, op1), op2) into a CMN instruction? | ||||
3078 | Opcode = AArch64ISD::ADDS; | ||||
3079 | LHS = LHS.getOperand(1); | ||||
3080 | } else if (isNullConstant(RHS) && !isUnsignedIntSetCC(CC)) { | ||||
3081 | if (LHS.getOpcode() == ISD::AND) { | ||||
3082 | // Similarly, (CMP (and X, Y), 0) can be implemented with a TST | ||||
3083 | // (a.k.a. ANDS) except that the flags are only guaranteed to work for one | ||||
3084 | // of the signed comparisons. | ||||
3085 | const SDValue ANDSNode = DAG.getNode(AArch64ISD::ANDS, dl, | ||||
3086 | DAG.getVTList(VT, MVT_CC), | ||||
3087 | LHS.getOperand(0), | ||||
3088 | LHS.getOperand(1)); | ||||
3089 | // Replace all users of (and X, Y) with newly generated (ands X, Y) | ||||
3090 | DAG.ReplaceAllUsesWith(LHS, ANDSNode); | ||||
3091 | return ANDSNode.getValue(1); | ||||
3092 | } else if (LHS.getOpcode() == AArch64ISD::ANDS) { | ||||
3093 | // Use result of ANDS | ||||
3094 | return LHS.getValue(1); | ||||
3095 | } | ||||
3096 | } | ||||
3097 | |||||
3098 | return DAG.getNode(Opcode, dl, DAG.getVTList(VT, MVT_CC), LHS, RHS) | ||||
3099 | .getValue(1); | ||||
3100 | } | ||||
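// Example (illustrative): (CMP (and X, Y), 0) under a non-unsigned condition
// is emitted as ANDS and the flag result reused, i.e. "tst w0, w1" instead of
// "and w8, w0, w1; cmp w8, #0"; all other users of the AND are rewired to the
// ANDS value.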
3101 | |||||
3102 | /// \defgroup AArch64CCMP CMP;CCMP matching | ||||
3103 | /// | ||||
3104 | /// These functions deal with the formation of CMP;CCMP;... sequences. | ||||
3105 | /// The CCMP/CCMN/FCCMP/FCCMPE instructions allow the conditional execution of | ||||
3106 | /// a comparison. They set the NZCV flags to a predefined value if their | ||||
3107 | /// predicate is false. This allows us to express arbitrary conjunctions, for | ||||
3108 | /// example "cmp 0 (and (setCA (cmp A)) (setCB (cmp B)))" | ||||
3109 | /// expressed as: | ||||
3110 | /// cmp A | ||||
3111 | /// ccmp B, inv(CB), CA | ||||
3112 | /// check for CB flags | ||||
3113 | /// | ||||
3114 | /// This naturally lets us implement chains of AND operations with SETCC | ||||
3115 | /// operands. And we can even implement some other situations by transforming | ||||
3116 | /// them: | ||||
3117 | /// - We can implement (NEG SETCC) i.e. negating a single comparison by | ||||
3118 | ///   negating the flags used in a CCMP/FCCMP operation. | ||||
3119 | /// - We can negate the result of a whole chain of CMP/CCMP/FCCMP operations | ||||
3120 | /// by negating the flags we test for afterwards. i.e. | ||||
3121 | ///   NEG (CMP CCMP CCMP ...) can be implemented. | ||||
3122 | /// - Note that we can only ever negate all previously processed results. | ||||
3123 | /// What we can not implement by flipping the flags to test is a negation | ||||
3124 | /// of two sub-trees (because the negation affects all sub-trees emitted so | ||||
3125 | /// far, so the 2nd sub-tree we emit would also affect the first). | ||||
3126 | /// With those tools we can implement some OR operations: | ||||
3127 | /// - (OR (SETCC A) (SETCC B)) can be implemented via: | ||||
3128 | /// NEG (AND (NEG (SETCC A)) (NEG (SETCC B))) | ||||
3129 | /// - After transforming OR to NEG/AND combinations we may be able to use NEG | ||||
3130 | /// elimination rules from earlier to implement the whole thing as a | ||||
3131 | /// CCMP/FCCMP chain. | ||||
3132 | /// | ||||
3133 | /// As complete example: | ||||
3134 | /// or (or (setCA (cmp A)) (setCB (cmp B))) | ||||
3135 | ///      (and (setCC (cmp C)) (setCD (cmp D))) | ||||
3136 | /// can be reassociated to: | ||||
3137 | ///   or (and (setCC (cmp C)) (setCD (cmp D))) | ||||
3138 | ///      (or (setCA (cmp A)) (setCB (cmp B))) | ||||
3139 | /// can be transformed to: | ||||
3140 | ///   not (and (not (and (setCC (cmp C)) (setCD (cmp D)))) | ||||
3141 | ///            (and (not (setCA (cmp A))) (not (setCB (cmp B))))) | ||||
3142 | /// which can be implemented as: | ||||
3143 | /// cmp C | ||||
3144 | /// ccmp D, inv(CD), CC | ||||
3145 | /// ccmp A, CA, inv(CD) | ||||
3146 | /// ccmp B, CB, inv(CA) | ||||
3147 | /// check for CB flags | ||||
3148 | /// | ||||
3149 | /// A counterexample is "or (and A B) (and C D)" which translates to | ||||
3150 | /// not (and (not (and (not A) (not B))) (not (and (not C) (not D)))), we | ||||
3151 | /// can only implement 1 of the inner (not) operations, but not both! | ||||
3152 | /// @{ | ||||
3153 | |||||
3154 | /// Create a conditional comparison; Use CCMP, CCMN or FCCMP as appropriate. | ||||
3155 | static SDValue emitConditionalComparison(SDValue LHS, SDValue RHS, | ||||
3156 | ISD::CondCode CC, SDValue CCOp, | ||||
3157 | AArch64CC::CondCode Predicate, | ||||
3158 | AArch64CC::CondCode OutCC, | ||||
3159 | const SDLoc &DL, SelectionDAG &DAG) { | ||||
3160 | unsigned Opcode = 0; | ||||
3161 | const bool FullFP16 = DAG.getSubtarget<AArch64Subtarget>().hasFullFP16(); | ||||
3162 | |||||
3163 | if (LHS.getValueType().isFloatingPoint()) { | ||||
3164 | assert(LHS.getValueType() != MVT::f128); | ||||
3165 | if (LHS.getValueType() == MVT::f16 && !FullFP16) { | ||||
3166 | LHS = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, LHS); | ||||
3167 | RHS = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, RHS); | ||||
3168 | } | ||||
3169 | Opcode = AArch64ISD::FCCMP; | ||||
3170 | } else if (RHS.getOpcode() == ISD::SUB) { | ||||
3171 | SDValue SubOp0 = RHS.getOperand(0); | ||||
3172 | if (isNullConstant(SubOp0) && (CC == ISD::SETEQ || CC == ISD::SETNE)) { | ||||
3173 | // See emitComparison() on why we can only do this for SETEQ and SETNE. | ||||
3174 | Opcode = AArch64ISD::CCMN; | ||||
3175 | RHS = RHS.getOperand(1); | ||||
3176 | } | ||||
3177 | } | ||||
3178 | if (Opcode == 0) | ||||
3179 | Opcode = AArch64ISD::CCMP; | ||||
3180 | |||||
3181 | SDValue Condition = DAG.getConstant(Predicate, DL, MVT_CC); | ||||
3182 | AArch64CC::CondCode InvOutCC = AArch64CC::getInvertedCondCode(OutCC); | ||||
3183 | unsigned NZCV = AArch64CC::getNZCVToSatisfyCondCode(InvOutCC); | ||||
3184 | SDValue NZCVOp = DAG.getConstant(NZCV, DL, MVT::i32); | ||||
3185 | return DAG.getNode(Opcode, DL, MVT_CC, LHS, RHS, NZCVOp, Condition, CCOp); | ||||
3186 | } | ||||
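// Illustrative note (not in the original source): for a leaf like
//   setcc eq A, (sub 0, B)
// the code above picks CCMN, i.e. a conditional "cmn A, B"; adding B rather
// than subtracting (0 - B) avoids materializing the negated value first.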
3187 | |||||
3188 | /// Returns true if @p Val is a tree of AND/OR/SETCC operations that can be | ||||
3189 | /// expressed as a conjunction. See \ref AArch64CCMP. | ||||
3190 | /// \param CanNegate Set to true if we can negate the whole sub-tree just by | ||||
3191 | /// changing the conditions on the SETCC tests. | ||||
3192 | /// (this means we can call emitConjunctionRec() with | ||||
3193 | /// Negate==true on this sub-tree) | ||||
3194 | /// \param MustBeFirst Set to true if this subtree needs to be negated and we | ||||
3195 | /// cannot do the negation naturally. We are required to | ||||
3196 | /// emit the subtree first in this case. | ||||
3197 | /// \param WillNegate Is true if we are called when the result of this | ||||
3198 | /// subexpression must be negated. This happens when the | ||||
3199 | /// outer expression is an OR. We can use this fact to know | ||||
3200 | /// that we have a double negation (or (or ...) ...) that | ||||
3201 | /// can be implemented for free. | ||||
3202 | static bool canEmitConjunction(const SDValue Val, bool &CanNegate, | ||||
3203 | bool &MustBeFirst, bool WillNegate, | ||||
3204 | unsigned Depth = 0) { | ||||
3205 | if (!Val.hasOneUse()) | ||||
3206 | return false; | ||||
3207 | unsigned Opcode = Val->getOpcode(); | ||||
3208 | if (Opcode == ISD::SETCC) { | ||||
3209 | if (Val->getOperand(0).getValueType() == MVT::f128) | ||||
3210 | return false; | ||||
3211 | CanNegate = true; | ||||
3212 | MustBeFirst = false; | ||||
3213 | return true; | ||||
3214 | } | ||||
3215 | // Protect against exponential runtime and stack overflow. | ||||
3216 | if (Depth > 6) | ||||
3217 | return false; | ||||
3218 | if (Opcode == ISD::AND || Opcode == ISD::OR) { | ||||
3219 | bool IsOR = Opcode == ISD::OR; | ||||
3220 | SDValue O0 = Val->getOperand(0); | ||||
3221 | SDValue O1 = Val->getOperand(1); | ||||
3222 | bool CanNegateL; | ||||
3223 | bool MustBeFirstL; | ||||
3224 | if (!canEmitConjunction(O0, CanNegateL, MustBeFirstL, IsOR, Depth+1)) | ||||
3225 | return false; | ||||
3226 | bool CanNegateR; | ||||
3227 | bool MustBeFirstR; | ||||
3228 | if (!canEmitConjunction(O1, CanNegateR, MustBeFirstR, IsOR, Depth+1)) | ||||
3229 | return false; | ||||
3230 | |||||
3231 | if (MustBeFirstL && MustBeFirstR) | ||||
3232 | return false; | ||||
3233 | |||||
3234 | if (IsOR) { | ||||
3235 | // For an OR expression we need to be able to naturally negate at least | ||||
3236 | // one side or we cannot do the transformation at all. | ||||
3237 | if (!CanNegateL && !CanNegateR) | ||||
3238 | return false; | ||||
3239 | // If the result of the OR will be negated and we can naturally negate | ||||
3240 | // the leaves, then this sub-tree as a whole negates naturally. | ||||
3241 | CanNegate = WillNegate && CanNegateL && CanNegateR; | ||||
3242 | // If we cannot naturally negate the whole sub-tree, then this must be | ||||
3243 | // emitted first. | ||||
3244 | MustBeFirst = !CanNegate; | ||||
3245 | } else { | ||||
3246 | assert(Opcode == ISD::AND && "Must be OR or AND"); | ||||
3247 | // We cannot naturally negate an AND operation. | ||||
3248 | CanNegate = false; | ||||
3249 | MustBeFirst = MustBeFirstL || MustBeFirstR; | ||||
3250 | } | ||||
3251 | return true; | ||||
3252 | } | ||||
3253 | return false; | ||||
3254 | } | ||||
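// Worked example for the predicate above (illustrative, not from the
// source): for
//   or (and (setcc A) (setcc B)) (setcc C)
// the AND sub-tree reports CanNegate = false, so the OR must rely on
// negating the (setcc C) side; because not every leaf is negatable the OR
// reports CanNegate = false and MustBeFirst = true. By contrast,
//   or (setcc A) (setcc B)
// seen under an outer OR (WillNegate = true) negates for free, so it
// reports CanNegate = true and MustBeFirst = false.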
3255 | |||||
3256 | /// Emit conjunction or disjunction tree with the CMP/FCMP followed by a chain | ||||
3257 | /// of CCMP/FCCMP ops. See @ref AArch64CCMP. | ||||
3258 | /// Tries to transform the given i1 producing node @p Val to a series of | ||||
3259 | /// compare and conditional compare operations. @returns an NZCV flags | ||||
3260 | /// producing node and sets @p OutCC to the flags that should be tested or | ||||
3261 | /// returns SDValue() if the transformation was not possible. | ||||
3262 | /// \p Negate is true if we want this sub-tree to be negated just by changing | ||||
3263 | /// SETCC conditions. | ||||
3264 | static SDValue emitConjunctionRec(SelectionDAG &DAG, SDValue Val, | ||||
3265 | AArch64CC::CondCode &OutCC, bool Negate, SDValue CCOp, | ||||
3266 | AArch64CC::CondCode Predicate) { | ||||
3267 | // We're at a tree leaf, produce a conditional comparison operation. | ||||
3268 | unsigned Opcode = Val->getOpcode(); | ||||
3269 | if (Opcode == ISD::SETCC) { | ||||
3270 | SDValue LHS = Val->getOperand(0); | ||||
3271 | SDValue RHS = Val->getOperand(1); | ||||
3272 | ISD::CondCode CC = cast<CondCodeSDNode>(Val->getOperand(2))->get(); | ||||
3273 | bool isInteger = LHS.getValueType().isInteger(); | ||||
3274 | if (Negate) | ||||
3275 | CC = getSetCCInverse(CC, LHS.getValueType()); | ||||
3276 | SDLoc DL(Val); | ||||
3277 | // Determine OutCC and handle FP special case. | ||||
3278 | if (isInteger) { | ||||
3279 | OutCC = changeIntCCToAArch64CC(CC); | ||||
3280 | } else { | ||||
3281 | assert(LHS.getValueType().isFloatingPoint()); | ||||
3282 | AArch64CC::CondCode ExtraCC; | ||||
3283 | changeFPCCToANDAArch64CC(CC, OutCC, ExtraCC); | ||||
3284 | // Some floating point conditions can't be tested with a single condition | ||||
3285 | // code. Construct an additional comparison in this case. | ||||
3286 | if (ExtraCC != AArch64CC::AL) { | ||||
3287 | SDValue ExtraCmp; | ||||
3288 | if (!CCOp.getNode()) | ||||
3289 | ExtraCmp = emitComparison(LHS, RHS, CC, DL, DAG); | ||||
3290 | else | ||||
3291 | ExtraCmp = emitConditionalComparison(LHS, RHS, CC, CCOp, Predicate, | ||||
3292 | ExtraCC, DL, DAG); | ||||
3293 | CCOp = ExtraCmp; | ||||
3294 | Predicate = ExtraCC; | ||||
3295 | } | ||||
3296 | } | ||||
3297 | |||||
3298 | // Produce a normal comparison if we are first in the chain | ||||
3299 | if (!CCOp) | ||||
3300 | return emitComparison(LHS, RHS, CC, DL, DAG); | ||||
3301 | // Otherwise produce a ccmp. | ||||
3302 | return emitConditionalComparison(LHS, RHS, CC, CCOp, Predicate, OutCC, DL, | ||||
3303 | DAG); | ||||
3304 | } | ||||
3305 | assert(Val->hasOneUse() && "Valid conjunction/disjunction tree"); | ||||
3306 | |||||
3307 | bool IsOR = Opcode == ISD::OR; | ||||
3308 | |||||
3309 | SDValue LHS = Val->getOperand(0); | ||||
3310 | bool CanNegateL; | ||||
3311 | bool MustBeFirstL; | ||||
3312 | bool ValidL = canEmitConjunction(LHS, CanNegateL, MustBeFirstL, IsOR); | ||||
3313 | assert(ValidL && "Valid conjunction/disjunction tree"); | ||||
3314 | (void)ValidL; | ||||
3315 | |||||
3316 | SDValue RHS = Val->getOperand(1); | ||||
3317 | bool CanNegateR; | ||||
3318 | bool MustBeFirstR; | ||||
3319 | bool ValidR = canEmitConjunction(RHS, CanNegateR, MustBeFirstR, IsOR); | ||||
3320 | assert(ValidR && "Valid conjunction/disjunction tree"); | ||||
3321 | (void)ValidR; | ||||
3322 | |||||
3323 | // Swap sub-tree that must come first to the right side. | ||||
3324 | if (MustBeFirstL) { | ||||
3325 | assert(!MustBeFirstR && "Valid conjunction/disjunction tree"); | ||||
3326 | std::swap(LHS, RHS); | ||||
3327 | std::swap(CanNegateL, CanNegateR); | ||||
3328 | std::swap(MustBeFirstL, MustBeFirstR); | ||||
3329 | } | ||||
3330 | |||||
3331 | bool NegateR; | ||||
3332 | bool NegateAfterR; | ||||
3333 | bool NegateL; | ||||
3334 | bool NegateAfterAll; | ||||
3335 | if (Opcode == ISD::OR) { | ||||
3336 | // Swap the sub-tree that we can negate naturally to the left. | ||||
3337 | if (!CanNegateL) { | ||||
3338 | assert(CanNegateR && "at least one side must be negatable"); | ||||
3339 | assert(!MustBeFirstR && "invalid conjunction/disjunction tree"); | ||||
3340 | assert(!Negate); | ||||
3341 | std::swap(LHS, RHS); | ||||
3342 | NegateR = false; | ||||
3343 | NegateAfterR = true; | ||||
3344 | } else { | ||||
3345 | // Negate the left sub-tree if possible, otherwise negate the result. | ||||
3346 | NegateR = CanNegateR; | ||||
3347 | NegateAfterR = !CanNegateR; | ||||
3348 | } | ||||
3349 | NegateL = true; | ||||
3350 | NegateAfterAll = !Negate; | ||||
3351 | } else { | ||||
3352 | assert(Opcode == ISD::AND && "Valid conjunction/disjunction tree"); | ||||
3353 | assert(!Negate && "Valid conjunction/disjunction tree"); | ||||
3354 | |||||
3355 | NegateL = false; | ||||
3356 | NegateR = false; | ||||
3357 | NegateAfterR = false; | ||||
3358 | NegateAfterAll = false; | ||||
3359 | } | ||||
3360 | |||||
3361 | // Emit sub-trees. | ||||
3362 | AArch64CC::CondCode RHSCC; | ||||
3363 | SDValue CmpR = emitConjunctionRec(DAG, RHS, RHSCC, NegateR, CCOp, Predicate); | ||||
3364 | if (NegateAfterR) | ||||
3365 | RHSCC = AArch64CC::getInvertedCondCode(RHSCC); | ||||
3366 | SDValue CmpL = emitConjunctionRec(DAG, LHS, OutCC, NegateL, CmpR, RHSCC); | ||||
3367 | if (NegateAfterAll) | ||||
3368 | OutCC = AArch64CC::getInvertedCondCode(OutCC); | ||||
3369 | return CmpL; | ||||
3370 | } | ||||
3371 | |||||
3372 | /// Emit expression as a conjunction (a series of CCMP/FCCMP ops). | ||||
3373 | /// In some cases this is even possible with OR operations in the expression. | ||||
3374 | /// See \ref AArch64CCMP. | ||||
3375 | /// \see emitConjunctionRec(). | ||||
3376 | static SDValue emitConjunction(SelectionDAG &DAG, SDValue Val, | ||||
3377 | AArch64CC::CondCode &OutCC) { | ||||
3378 | bool DummyCanNegate; | ||||
3379 | bool DummyMustBeFirst; | ||||
3380 | if (!canEmitConjunction(Val, DummyCanNegate, DummyMustBeFirst, false)) | ||||
3381 | return SDValue(); | ||||
3382 | |||||
3383 | return emitConjunctionRec(DAG, Val, OutCC, false, SDValue(), AArch64CC::AL); | ||||
3384 | } | ||||
3385 | |||||
3386 | /// @} | ||||
3387 | |||||
3388 | /// Returns how profitable it is to fold a comparison's operand's shift and/or | ||||
3389 | /// extension operations. | ||||
3390 | static unsigned getCmpOperandFoldingProfit(SDValue Op) { | ||||
3391 | auto isSupportedExtend = [&](SDValue V) { | ||||
3392 | if (V.getOpcode() == ISD::SIGN_EXTEND_INREG) | ||||
3393 | return true; | ||||
3394 | |||||
3395 | if (V.getOpcode() == ISD::AND) | ||||
3396 | if (ConstantSDNode *MaskCst = dyn_cast<ConstantSDNode>(V.getOperand(1))) { | ||||
3397 | uint64_t Mask = MaskCst->getZExtValue(); | ||||
3398 | return (Mask == 0xFF || Mask == 0xFFFF || Mask == 0xFFFFFFFF); | ||||
3399 | } | ||||
3400 | |||||
3401 | return false; | ||||
3402 | }; | ||||
3403 | |||||
3404 | if (!Op.hasOneUse()) | ||||
3405 | return 0; | ||||
3406 | |||||
3407 | if (isSupportedExtend(Op)) | ||||
3408 | return 1; | ||||
3409 | |||||
3410 | unsigned Opc = Op.getOpcode(); | ||||
3411 | if (Opc == ISD::SHL || Opc == ISD::SRL || Opc == ISD::SRA) | ||||
3412 | if (ConstantSDNode *ShiftCst = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { | ||||
3413 | uint64_t Shift = ShiftCst->getZExtValue(); | ||||
3414 | if (isSupportedExtend(Op.getOperand(0))) | ||||
3415 | return (Shift <= 4) ? 2 : 1; | ||||
3416 | EVT VT = Op.getValueType(); | ||||
3417 | if ((VT == MVT::i32 && Shift <= 31) || (VT == MVT::i64 && Shift <= 63)) | ||||
3418 | return 1; | ||||
3419 | } | ||||
3420 | |||||
3421 | return 0; | ||||
3422 | } | ||||
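// Worked scoring example (illustrative, not from the source), for an i32 x:
//   (shl (and x, 0xFF), 2)  -> supported extend under a shift <= 4: profit 2
//   (shl x, 5)              -> plain in-range shift: profit 1
//   any multi-use operand   -> profit 0 (the hasOneUse check above)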
3423 | |||||
3424 | static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, | ||||
3425 | SDValue &AArch64cc, SelectionDAG &DAG, | ||||
3426 | const SDLoc &dl) { | ||||
3427 | if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) { | ||||
3428 | EVT VT = RHS.getValueType(); | ||||
3429 | uint64_t C = RHSC->getZExtValue(); | ||||
3430 | if (!isLegalArithImmed(C)) { | ||||
3431 | // Constant does not fit, try adjusting it by one? | ||||
3432 | switch (CC) { | ||||
3433 | default: | ||||
3434 | break; | ||||
3435 | case ISD::SETLT: | ||||
3436 | case ISD::SETGE: | ||||
3437 | if ((VT == MVT::i32 && C != 0x80000000 && | ||||
3438 | isLegalArithImmed((uint32_t)(C - 1))) || | ||||
3439 | (VT == MVT::i64 && C != 0x80000000ULL && | ||||
3440 | isLegalArithImmed(C - 1ULL))) { | ||||
3441 | CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT; | ||||
3442 | C = (VT == MVT::i32) ? (uint32_t)(C - 1) : C - 1; | ||||
3443 | RHS = DAG.getConstant(C, dl, VT); | ||||
3444 | } | ||||
3445 | break; | ||||
3446 | case ISD::SETULT: | ||||
3447 | case ISD::SETUGE: | ||||
3448 | if ((VT == MVT::i32 && C != 0 && | ||||
3449 | isLegalArithImmed((uint32_t)(C - 1))) || | ||||
3450 | (VT == MVT::i64 && C != 0ULL && isLegalArithImmed(C - 1ULL))) { | ||||
3451 | CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT; | ||||
3452 | C = (VT == MVT::i32) ? (uint32_t)(C - 1) : C - 1; | ||||
3453 | RHS = DAG.getConstant(C, dl, VT); | ||||
3454 | } | ||||
3455 | break; | ||||
3456 | case ISD::SETLE: | ||||
3457 | case ISD::SETGT: | ||||
3458 | if ((VT == MVT::i32 && C != INT32_MAX && | ||||
3459 | isLegalArithImmed((uint32_t)(C + 1))) || | ||||
3460 | (VT == MVT::i64 && C != INT64_MAX && | ||||
3461 | isLegalArithImmed(C + 1ULL))) { | ||||
3462 | CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE; | ||||
3463 | C = (VT == MVT::i32) ? (uint32_t)(C + 1) : C + 1; | ||||
3464 | RHS = DAG.getConstant(C, dl, VT); | ||||
3465 | } | ||||
3466 | break; | ||||
3467 | case ISD::SETULE: | ||||
3468 | case ISD::SETUGT: | ||||
3469 | if ((VT == MVT::i32 && C != UINT32_MAX && | ||||
3470 | isLegalArithImmed((uint32_t)(C + 1))) || | ||||
3471 | (VT == MVT::i64 && C != UINT64_MAX && | ||||
3472 | isLegalArithImmed(C + 1ULL))) { | ||||
3473 | CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE; | ||||
3474 | C = (VT == MVT::i32) ? (uint32_t)(C + 1) : C + 1; | ||||
3475 | RHS = DAG.getConstant(C, dl, VT); | ||||
3476 | } | ||||
3477 | break; | ||||
3478 | } | ||||
3479 | } | ||||
3480 | } | ||||
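// Illustrative instance of the adjustment above (the constant is an assumed
// example): "x s< 0x400001" is not a legal arith immediate, but C - 1 ==
// 0x400000 == (0x400 << 12) is, so the compare is rewritten as
// "x s<= 0x400000", i.e. "cmp x0, #0x400, lsl #12" with an le condition.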
3481 | |||||
3482 | // Comparisons are canonicalized so that the RHS operand is simpler than the | ||||
3483 | // LHS one, the extreme case being when RHS is an immediate. However, AArch64 | ||||
3484 | // can fold some shift+extend operations on the RHS operand, so swap the | ||||
3485 | // operands if that can be done. | ||||
3486 | // | ||||
3487 | // For example: | ||||
3488 | // lsl w13, w11, #1 | ||||
3489 | // cmp w13, w12 | ||||
3490 | // can be turned into: | ||||
3491 | // cmp w12, w11, lsl #1 | ||||
3492 | if (!isa<ConstantSDNode>(RHS) || | ||||
3493 | !isLegalArithImmed(cast<ConstantSDNode>(RHS)->getZExtValue())) { | ||||
3494 | SDValue TheLHS = isCMN(LHS, CC) ? LHS.getOperand(1) : LHS; | ||||
3495 | |||||
3496 | if (getCmpOperandFoldingProfit(TheLHS) > getCmpOperandFoldingProfit(RHS)) { | ||||
3497 | std::swap(LHS, RHS); | ||||
3498 | CC = ISD::getSetCCSwappedOperands(CC); | ||||
3499 | } | ||||
3500 | } | ||||
3501 | |||||
3502 | SDValue Cmp; | ||||
3503 | AArch64CC::CondCode AArch64CC; | ||||
3504 | if ((CC == ISD::SETEQ || CC == ISD::SETNE) && isa<ConstantSDNode>(RHS)) { | ||||
3505 | const ConstantSDNode *RHSC = cast<ConstantSDNode>(RHS); | ||||
3506 | |||||
3507 | // The imm operand of ADDS is an unsigned immediate, in the range 0 to 4095. | ||||
3508 | // For the i8 operand, the largest immediate is 255, so this can be easily | ||||
3509 | // encoded in the compare instruction. For the i16 operand, however, the | ||||
3510 | // largest immediate cannot be encoded in the compare. | ||||
3511 | // Therefore, use a sign extending load and cmn to avoid materializing the | ||||
3512 | // -1 constant. For example, | ||||
3513 | // movz w1, #65535 | ||||
3514 | // ldrh w0, [x0, #0] | ||||
3515 | // cmp w0, w1 | ||||
3516 | // > | ||||
3517 | // ldrsh w0, [x0, #0] | ||||
3518 | // cmn w0, #1 | ||||
3519 | // Fundamentally, we're relying on the property that (zext LHS) == (zext RHS) | ||||
3520 | // if and only if (sext LHS) == (sext RHS). The checks are in place to | ||||
3521 | // ensure both the LHS and RHS are truly zero extended and to make sure the | ||||
3522 | // transformation is profitable. | ||||
3523 | if ((RHSC->getZExtValue() >> 16 == 0) && isa<LoadSDNode>(LHS) && | ||||
3524 | cast<LoadSDNode>(LHS)->getExtensionType() == ISD::ZEXTLOAD && | ||||
3525 | cast<LoadSDNode>(LHS)->getMemoryVT() == MVT::i16 && | ||||
3526 | LHS.getNode()->hasNUsesOfValue(1, 0)) { | ||||
3527 | int16_t ValueofRHS = cast<ConstantSDNode>(RHS)->getZExtValue(); | ||||
3528 | if (ValueofRHS < 0 && isLegalArithImmed(-ValueofRHS)) { | ||||
3529 | SDValue SExt = | ||||
3530 | DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, LHS.getValueType(), LHS, | ||||
3531 | DAG.getValueType(MVT::i16)); | ||||
3532 | Cmp = emitComparison(SExt, DAG.getConstant(ValueofRHS, dl, | ||||
3533 | RHS.getValueType()), | ||||
3534 | CC, dl, DAG); | ||||
3535 | AArch64CC = changeIntCCToAArch64CC(CC); | ||||
3536 | } | ||||
3537 | } | ||||
3538 | |||||
3539 | if (!Cmp && (RHSC->isZero() || RHSC->isOne())) { | ||||
3540 | if ((Cmp = emitConjunction(DAG, LHS, AArch64CC))) { | ||||
3541 | if ((CC == ISD::SETNE) ^ RHSC->isZero()) | ||||
3542 | AArch64CC = AArch64CC::getInvertedCondCode(AArch64CC); | ||||
3543 | } | ||||
3544 | } | ||||
3545 | } | ||||
3546 | |||||
3547 | if (!Cmp) { | ||||
3548 | Cmp = emitComparison(LHS, RHS, CC, dl, DAG); | ||||
3549 | AArch64CC = changeIntCCToAArch64CC(CC); | ||||
3550 | } | ||||
3551 | AArch64cc = DAG.getConstant(AArch64CC, dl, MVT_CC); | ||||
3552 | return Cmp; | ||||
3553 | } | ||||
3554 | |||||
3555 | static std::pair<SDValue, SDValue> | ||||
3556 | getAArch64XALUOOp(AArch64CC::CondCode &CC, SDValue Op, SelectionDAG &DAG) { | ||||
3557 | assert((Op.getValueType() == MVT::i32 || Op.getValueType() == MVT::i64) && | ||||
3558 | "Unsupported value type"); | ||||
3559 | SDValue Value, Overflow; | ||||
3560 | SDLoc DL(Op); | ||||
3561 | SDValue LHS = Op.getOperand(0); | ||||
3562 | SDValue RHS = Op.getOperand(1); | ||||
3563 | unsigned Opc = 0; | ||||
3564 | switch (Op.getOpcode()) { | ||||
3565 | default: | ||||
3566 | llvm_unreachable("Unknown overflow instruction!"); | ||||
3567 | case ISD::SADDO: | ||||
3568 | Opc = AArch64ISD::ADDS; | ||||
3569 | CC = AArch64CC::VS; | ||||
3570 | break; | ||||
3571 | case ISD::UADDO: | ||||
3572 | Opc = AArch64ISD::ADDS; | ||||
3573 | CC = AArch64CC::HS; | ||||
3574 | break; | ||||
3575 | case ISD::SSUBO: | ||||
3576 | Opc = AArch64ISD::SUBS; | ||||
3577 | CC = AArch64CC::VS; | ||||
3578 | break; | ||||
3579 | case ISD::USUBO: | ||||
3580 | Opc = AArch64ISD::SUBS; | ||||
3581 | CC = AArch64CC::LO; | ||||
3582 | break; | ||||
3583 | // Multiply needs a little bit extra work. | ||||
3584 | case ISD::SMULO: | ||||
3585 | case ISD::UMULO: { | ||||
3586 | CC = AArch64CC::NE; | ||||
3587 | bool IsSigned = Op.getOpcode() == ISD::SMULO; | ||||
3588 | if (Op.getValueType() == MVT::i32) { | ||||
3589 | // Extend to 64-bits, then perform a 64-bit multiply. | ||||
3590 | unsigned ExtendOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; | ||||
3591 | LHS = DAG.getNode(ExtendOpc, DL, MVT::i64, LHS); | ||||
3592 | RHS = DAG.getNode(ExtendOpc, DL, MVT::i64, RHS); | ||||
3593 | SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, LHS, RHS); | ||||
3594 | Value = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mul); | ||||
3595 | |||||
3596 | // Check that the result fits into a 32-bit integer. | ||||
3597 | SDVTList VTs = DAG.getVTList(MVT::i64, MVT_CC); | ||||
3598 | if (IsSigned) { | ||||
3599 | // cmp xreg, wreg, sxtw | ||||
3600 | SDValue SExtMul = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Value); | ||||
3601 | Overflow = | ||||
3602 | DAG.getNode(AArch64ISD::SUBS, DL, VTs, Mul, SExtMul).getValue(1); | ||||
3603 | } else { | ||||
3604 | // tst xreg, #0xffffffff00000000 | ||||
3605 | SDValue UpperBits = DAG.getConstant(0xFFFFFFFF00000000, DL, MVT::i64); | ||||
3606 | Overflow = | ||||
3607 | DAG.getNode(AArch64ISD::ANDS, DL, VTs, Mul, UpperBits).getValue(1); | ||||
3608 | } | ||||
3609 | break; | ||||
3610 | } | ||||
3611 | assert(Op.getValueType() == MVT::i64 && "Expected an i64 value type"); | ||||
3612 | // For the 64-bit multiply. | ||||
3613 | Value = DAG.getNode(ISD::MUL, DL, MVT::i64, LHS, RHS); | ||||
3614 | if (IsSigned) { | ||||
3615 | SDValue UpperBits = DAG.getNode(ISD::MULHS, DL, MVT::i64, LHS, RHS); | ||||
3616 | SDValue LowerBits = DAG.getNode(ISD::SRA, DL, MVT::i64, Value, | ||||
3617 | DAG.getConstant(63, DL, MVT::i64)); | ||||
3618 | // It is important that LowerBits is last, otherwise the arithmetic | ||||
3619 | // shift will not be folded into the compare (SUBS). | ||||
3620 | SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i32); | ||||
3621 | Overflow = DAG.getNode(AArch64ISD::SUBS, DL, VTs, UpperBits, LowerBits) | ||||
3622 | .getValue(1); | ||||
3623 | } else { | ||||
3624 | SDValue UpperBits = DAG.getNode(ISD::MULHU, DL, MVT::i64, LHS, RHS); | ||||
3625 | SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i32); | ||||
3626 | Overflow = | ||||
3627 | DAG.getNode(AArch64ISD::SUBS, DL, VTs, | ||||
3628 | DAG.getConstant(0, DL, MVT::i64), | ||||
3629 | UpperBits).getValue(1); | ||||
3630 | } | ||||
3631 | break; | ||||
3632 | } | ||||
3633 | } // switch (...) | ||||
3634 | |||||
3635 | if (Opc) { | ||||
3636 | SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::i32); | ||||
3637 | |||||
3638 | // Emit the AArch64 operation with overflow check. | ||||
3639 | Value = DAG.getNode(Opc, DL, VTs, LHS, RHS); | ||||
3640 | Overflow = Value.getValue(1); | ||||
3641 | } | ||||
3642 | return std::make_pair(Value, Overflow); | ||||
3643 | } | ||||
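// Illustrative expected output (a sketch, not guaranteed codegen): for i32
// llvm.smul.with.overflow the SMULO path above corresponds to roughly
//   smull x8, w0, w1      // 64-bit product of the sign-extended inputs
//   cmp   x8, w8, sxtw    // does the product equal sext of its low 32 bits?
//   cset  w9, ne          // overflow bit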
3644 | |||||
3645 | SDValue AArch64TargetLowering::LowerXOR(SDValue Op, SelectionDAG &DAG) const { | ||||
3646 | if (useSVEForFixedLengthVectorVT(Op.getValueType(), | ||||
3647 | Subtarget->forceStreamingCompatibleSVE())) | ||||
3648 | return LowerToScalableOp(Op, DAG); | ||||
3649 | |||||
3650 | SDValue Sel = Op.getOperand(0); | ||||
3651 | SDValue Other = Op.getOperand(1); | ||||
3652 | SDLoc dl(Sel); | ||||
3653 | |||||
3654 | // If the operand is an overflow checking operation, invert the condition | ||||
3655 | // code and kill the Not operation. I.e., transform: | ||||
3656 | // (xor overflow_op_bool, 1) | ||||
3657 | // --> | ||||
3658 | // (csel 1, 0, invert(cc), overflow_op_bool) | ||||
3659 | // ... which later gets transformed to just a cset instruction with an | ||||
3660 | // inverted condition code, rather than a cset + eor sequence. | ||||
3661 | if (isOneConstant(Other) && ISD::isOverflowIntrOpRes(Sel)) { | ||||
3662 | // Only lower legal XALUO ops. | ||||
3663 | if (!DAG.getTargetLoweringInfo().isTypeLegal(Sel->getValueType(0))) | ||||
3664 | return SDValue(); | ||||
3665 | |||||
3666 | SDValue TVal = DAG.getConstant(1, dl, MVT::i32); | ||||
3667 | SDValue FVal = DAG.getConstant(0, dl, MVT::i32); | ||||
3668 | AArch64CC::CondCode CC; | ||||
3669 | SDValue Value, Overflow; | ||||
3670 | std::tie(Value, Overflow) = getAArch64XALUOOp(CC, Sel.getValue(0), DAG); | ||||
3671 | SDValue CCVal = DAG.getConstant(getInvertedCondCode(CC), dl, MVT::i32); | ||||
3672 | return DAG.getNode(AArch64ISD::CSEL, dl, Op.getValueType(), TVal, FVal, | ||||
3673 | CCVal, Overflow); | ||||
3674 | } | ||||
3675 | // If neither operand is a SELECT_CC, give up. | ||||
3676 | if (Sel.getOpcode() != ISD::SELECT_CC) | ||||
3677 | std::swap(Sel, Other); | ||||
3678 | if (Sel.getOpcode() != ISD::SELECT_CC) | ||||
3679 | return Op; | ||||
3680 | |||||
3681 | // The folding we want to perform is: | ||||
3682 | // (xor x, (select_cc a, b, cc, 0, -1) ) | ||||
3683 | // --> | ||||
3684 | // (csel x, (xor x, -1), cc ...) | ||||
3685 | // | ||||
3686 | // The latter will get matched to a CSINV instruction. | ||||
3687 | |||||
3688 | ISD::CondCode CC = cast<CondCodeSDNode>(Sel.getOperand(4))->get(); | ||||
3689 | SDValue LHS = Sel.getOperand(0); | ||||
3690 | SDValue RHS = Sel.getOperand(1); | ||||
3691 | SDValue TVal = Sel.getOperand(2); | ||||
3692 | SDValue FVal = Sel.getOperand(3); | ||||
3693 | |||||
3694 | // FIXME: This could be generalized to non-integer comparisons. | ||||
3695 | if (LHS.getValueType() != MVT::i32 && LHS.getValueType() != MVT::i64) | ||||
3696 | return Op; | ||||
3697 | |||||
3698 | ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(FVal); | ||||
3699 | ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(TVal); | ||||
3700 | |||||
3701 | // The values aren't constants, this isn't the pattern we're looking for. | ||||
3702 | if (!CFVal || !CTVal) | ||||
3703 | return Op; | ||||
3704 | |||||
3705 | // We can commute the SELECT_CC by inverting the condition. This | ||||
3706 | // might be needed to make this fit into a CSINV pattern. | ||||
3707 | if (CTVal->isAllOnes() && CFVal->isZero()) { | ||||
3708 | std::swap(TVal, FVal); | ||||
3709 | std::swap(CTVal, CFVal); | ||||
3710 | CC = ISD::getSetCCInverse(CC, LHS.getValueType()); | ||||
3711 | } | ||||
3712 | |||||
3713 | // If the constants line up, perform the transform! | ||||
3714 | if (CTVal->isZero() && CFVal->isAllOnes()) { | ||||
3715 | SDValue CCVal; | ||||
3716 | SDValue Cmp = getAArch64Cmp(LHS, RHS, CC, CCVal, DAG, dl); | ||||
3717 | |||||
3718 | FVal = Other; | ||||
3719 | TVal = DAG.getNode(ISD::XOR, dl, Other.getValueType(), Other, | ||||
3720 | DAG.getConstant(-1ULL, dl, Other.getValueType())); | ||||
3721 | |||||
3722 | return DAG.getNode(AArch64ISD::CSEL, dl, Sel.getValueType(), FVal, TVal, | ||||
3723 | CCVal, Cmp); | ||||
3724 | } | ||||
3725 | |||||
3726 | return Op; | ||||
3727 | } | ||||
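// Sketch of the fold above (illustrative): given
//   t = select_cc a, b, lt, 0, -1
//   r = xor x, t
// the result is "lt ? x : ~x", emitted as CSEL(x, xor(x, -1), lt, cmp) and
// matched by instruction selection as
//   cmp   a, b
//   csinv w0, wX, wX, lt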
3728 | |||||
3729 | // If Invert is false, sets 'C' bit of NZCV to 0 if value is 0, else sets 'C' | ||||
3730 | // bit to 1. If Invert is true, sets 'C' bit of NZCV to 1 if value is 0, else | ||||
3731 | // sets 'C' bit to 0. | ||||
3732 | static SDValue valueToCarryFlag(SDValue Value, SelectionDAG &DAG, bool Invert) { | ||||
3733 | SDLoc DL(Value); | ||||
3734 | EVT VT = Value.getValueType(); | ||||
3735 | SDValue Op0 = Invert ? DAG.getConstant(0, DL, VT) : Value; | ||||
3736 | SDValue Op1 = Invert ? Value : DAG.getConstant(1, DL, VT); | ||||
3737 | SDValue Cmp = | ||||
3738 | DAG.getNode(AArch64ISD::SUBS, DL, DAG.getVTList(VT, MVT::Glue), Op0, Op1); | ||||
3739 | return Cmp.getValue(1); | ||||
3740 | } | ||||
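// Spot-check of the mapping above (illustrative): with Invert == false the
// node is SUBS(Value, 1), so Value == 0 borrows and clears 'C' while any
// nonzero Value sets it; with Invert == true the node is SUBS(0, Value), so
// only Value == 0 leaves 'C' set.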
3741 | |||||
3742 | // If Invert is false, value is 1 if 'C' bit of NZCV is 1, else 0. | ||||
3743 | // If Invert is true, value is 0 if 'C' bit of NZCV is 1, else 1. | ||||
3744 | static SDValue carryFlagToValue(SDValue Flag, EVT VT, SelectionDAG &DAG, | ||||
3745 | bool Invert) { | ||||
3746 | assert(Flag.getResNo() == 1); | ||||
3747 | SDLoc DL(Flag); | ||||
3748 | SDValue Zero = DAG.getConstant(0, DL, VT); | ||||
3749 | SDValue One = DAG.getConstant(1, DL, VT); | ||||
3750 | unsigned Cond = Invert ? AArch64CC::LO : AArch64CC::HS; | ||||
3751 | SDValue CC = DAG.getConstant(Cond, DL, MVT::i32); | ||||
3752 | return DAG.getNode(AArch64ISD::CSEL, DL, VT, One, Zero, CC, Flag); | ||||
3753 | } | ||||
3754 | |||||
3755 | // Value is 1 if 'V' bit of NZCV is 1, else 0 | ||||
3756 | static SDValue overflowFlagToValue(SDValue Flag, EVT VT, SelectionDAG &DAG) { | ||||
3757 | assert(Flag.getResNo() == 1); | ||||
3758 | SDLoc DL(Flag); | ||||
3759 | SDValue Zero = DAG.getConstant(0, DL, VT); | ||||
3760 | SDValue One = DAG.getConstant(1, DL, VT); | ||||
3761 | SDValue CC = DAG.getConstant(AArch64CC::VS, DL, MVT::i32); | ||||
3762 | return DAG.getNode(AArch64ISD::CSEL, DL, VT, One, Zero, CC, Flag); | ||||
3763 | } | ||||
3764 | |||||
3765 | // This lowering is inefficient, but it will get cleaned up by | ||||
3766 | // `foldOverflowCheck` | ||||
3767 | static SDValue lowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG, unsigned Opcode, | ||||
3768 | bool IsSigned) { | ||||
3769 | EVT VT0 = Op.getValue(0).getValueType(); | ||||
3770 | EVT VT1 = Op.getValue(1).getValueType(); | ||||
3771 | |||||
3772 | if (VT0 != MVT::i32 && VT0 != MVT::i64) | ||||
3773 | return SDValue(); | ||||
3774 | |||||
3775 | bool InvertCarry = Opcode == AArch64ISD::SBCS; | ||||
3776 | SDValue OpLHS = Op.getOperand(0); | ||||
3777 | SDValue OpRHS = Op.getOperand(1); | ||||
3778 | SDValue OpCarryIn = valueToCarryFlag(Op.getOperand(2), DAG, InvertCarry); | ||||
3779 | |||||
3780 | SDLoc DL(Op); | ||||
3781 | SDVTList VTs = DAG.getVTList(VT0, VT1); | ||||
3782 | |||||
3783 | SDValue Sum = DAG.getNode(Opcode, DL, DAG.getVTList(VT0, MVT::Glue), OpLHS, | ||||
3784 | OpRHS, OpCarryIn); | ||||
3785 | |||||
3786 | SDValue OutFlag = | ||||
3787 | IsSigned ? overflowFlagToValue(Sum.getValue(1), VT1, DAG) | ||||
3788 | : carryFlagToValue(Sum.getValue(1), VT1, DAG, InvertCarry); | ||||
3789 | |||||
3790 | return DAG.getNode(ISD::MERGE_VALUES, DL, VTs, Sum, OutFlag); | ||||
3791 | } | ||||
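// Rough shape of the result (illustrative; register names are placeholders
// and foldOverflowCheck normally cleans this up): i64 ISD::ADDCARRY(a, b,
// cin) corresponds to
//   cmp  cin, #1         // valueToCarryFlag: C := (cin != 0)
//   adcs x0, xa, xb      // add with carry-in, setting NZCV
//   cset x1, hs          // carryFlagToValue: carry-out back to a value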
3792 | |||||
3793 | static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) { | ||||
3794 | // Let legalize expand this if it isn't a legal type yet. | ||||
3795 | if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType())) | ||||
3796 | return SDValue(); | ||||
3797 | |||||
3798 | SDLoc dl(Op); | ||||
3799 | AArch64CC::CondCode CC; | ||||
3800 | // The actual operation that sets the overflow or carry flag. | ||||
3801 | SDValue Value, Overflow; | ||||
3802 | std::tie(Value, Overflow) = getAArch64XALUOOp(CC, Op, DAG); | ||||
3803 | |||||
3804 | // We use 0 and 1 as false and true values. | ||||
3805 | SDValue TVal = DAG.getConstant(1, dl, MVT::i32); | ||||
3806 | SDValue FVal = DAG.getConstant(0, dl, MVT::i32); | ||||
3807 | |||||
3808 | // We use an inverted condition, because the conditional select is inverted | ||||
3809 | // too. This will allow it to be selected to a single instruction: | ||||
3810 | // CSINC Wd, WZR, WZR, invert(cond). | ||||
3811 | SDValue CCVal = DAG.getConstant(getInvertedCondCode(CC), dl, MVT::i32); | ||||
3812 | Overflow = DAG.getNode(AArch64ISD::CSEL, dl, MVT::i32, FVal, TVal, | ||||
3813 | CCVal, Overflow); | ||||
3814 | |||||
3815 | SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32); | ||||
3816 | return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow); | ||||
3817 | } | ||||
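// Illustrative end result (a sketch): i32 llvm.uadd.with.overflow lowers
// through the code above to roughly
//   adds w0, w0, w1
//   cset w1, hs           // the inverted CSEL(0, 1, lo) == csinc ..., lo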
3818 | |||||
3819 | // Prefetch operands are: | ||||
3820 | // 1: Address to prefetch | ||||
3821 | // 2: bool isWrite | ||||
3822 | // 3: int locality (0 = no locality ... 3 = extreme locality) | ||||
3823 | // 4: bool isDataCache | ||||
3824 | static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG) { | ||||
3825 | SDLoc DL(Op); | ||||
3826 | unsigned IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); | ||||
3827 | unsigned Locality = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue(); | ||||
3828 | unsigned IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue(); | ||||
3829 | |||||
3830 | bool IsStream = !Locality; | ||||
3831 | // When the locality number is set | ||||
3832 | if (Locality) { | ||||
3833 | // The front-end should have filtered out the out-of-range values | ||||
3834 | assert(Locality <= 3 && "Prefetch locality out-of-range"); | ||||
3835 | // The locality degree is the opposite of the cache speed. | ||||
3836 | // Put the number the other way around. | ||||
3837 | // The encoding starts at 0 for level 1 | ||||
3838 | Locality = 3 - Locality; | ||||
3839 | } | ||||
3840 | |||||
3841 | // Build the mask value encoding the expected behavior. | ||||
3842 | unsigned PrfOp = (IsWrite << 4) | // Load/Store bit | ||||
3843 | (!IsData << 3) | // IsDataCache bit | ||||
3844 | (Locality << 1) | // Cache level bits | ||||
3845 | (unsigned)IsStream; // Stream bit | ||||
3846 | return DAG.getNode(AArch64ISD::PREFETCH, DL, MVT::Other, Op.getOperand(0), | ||||
3847 | DAG.getTargetConstant(PrfOp, DL, MVT::i32), | ||||
3848 | Op.getOperand(1)); | ||||
3849 | } | ||||
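// Worked encoding example (illustrative): __builtin_prefetch(p, /*rw=*/0,
// /*locality=*/3) arrives here as IsWrite = 0, Locality = 3, IsData = 1;
// then IsStream = 0 and Locality becomes 3 - 3 = 0 (L1 cache), so
//   PrfOp = (0 << 4) | (0 << 3) | (0 << 1) | 0 = 0b00000
// which is the PLDL1KEEP operand of PRFM.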
3850 | |||||
3851 | SDValue AArch64TargetLowering::LowerFP_EXTEND(SDValue Op, | ||||
3852 | SelectionDAG &DAG) const { | ||||
3853 | EVT VT = Op.getValueType(); | ||||
3854 | if (VT.isScalableVector()) | ||||
3855 | return LowerToPredicatedOp(Op, DAG, AArch64ISD::FP_EXTEND_MERGE_PASSTHRU); | ||||
3856 | |||||
3857 | if (useSVEForFixedLengthVectorVT(VT)) | ||||
3858 | return LowerFixedLengthFPExtendToSVE(Op, DAG); | ||||
3859 | |||||
3860 | assert(Op.getValueType() == MVT::f128 && "Unexpected lowering"); | ||||
3861 | return SDValue(); | ||||
3862 | } | ||||
3863 | |||||
3864 | SDValue AArch64TargetLowering::LowerFP_ROUND(SDValue Op, | ||||
3865 | SelectionDAG &DAG) const { | ||||
3866 | if (Op.getValueType().isScalableVector()) | ||||
3867 | return LowerToPredicatedOp(Op, DAG, AArch64ISD::FP_ROUND_MERGE_PASSTHRU); | ||||
3868 | |||||
3869 | bool IsStrict = Op->isStrictFPOpcode(); | ||||
3870 | SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0); | ||||
3871 | EVT SrcVT = SrcVal.getValueType(); | ||||
3872 | |||||
3873 | if (useSVEForFixedLengthVectorVT(SrcVT, | ||||
3874 | Subtarget->forceStreamingCompatibleSVE())) | ||||
3875 | return LowerFixedLengthFPRoundToSVE(Op, DAG); | ||||
3876 | |||||
3877 | if (SrcVT != MVT::f128) { | ||||
3878 | // Expand cases where the input is a vector bigger than NEON. | ||||
3879 | if (useSVEForFixedLengthVectorVT(SrcVT)) | ||||
3880 | return SDValue(); | ||||
3881 | |||||
3882 | // It's legal except when f128 is involved | ||||
3883 | return Op; | ||||
3884 | } | ||||
3885 | |||||
3886 | return SDValue(); | ||||
3887 | } | ||||
3888 | |||||
3889 | SDValue AArch64TargetLowering::LowerVectorFP_TO_INT(SDValue Op, | ||||
3890 | SelectionDAG &DAG) const { | ||||
3891 | // Warning: We maintain cost tables in AArch64TargetTransformInfo.cpp. | ||||
3892 | // Any additional optimization in this function should be recorded | ||||
3893 | // in the cost tables. | ||||
3894 | bool IsStrict = Op->isStrictFPOpcode(); | ||||
3895 | EVT InVT = Op.getOperand(IsStrict ? 1 : 0).getValueType(); | ||||
3896 | EVT VT = Op.getValueType(); | ||||
3897 | |||||
3898 | if (VT.isScalableVector()) { | ||||
3899 | unsigned Opcode = Op.getOpcode() == ISD::FP_TO_UINT | ||||
3900 | ? AArch64ISD::FCVTZU_MERGE_PASSTHRU | ||||
3901 | : AArch64ISD::FCVTZS_MERGE_PASSTHRU; | ||||
3902 | return LowerToPredicatedOp(Op, DAG, Opcode); | ||||
3903 | } | ||||
3904 | |||||
3905 | if (useSVEForFixedLengthVectorVT(VT, | ||||
3906 | Subtarget->forceStreamingCompatibleSVE()) || | ||||
3907 | useSVEForFixedLengthVectorVT(InVT, | ||||
3908 | Subtarget->forceStreamingCompatibleSVE())) | ||||
3909 | return LowerFixedLengthFPToIntToSVE(Op, DAG); | ||||
3910 | |||||
3911 | unsigned NumElts = InVT.getVectorNumElements(); | ||||
3912 | |||||
3913 | // f16 conversions are promoted to f32 when full fp16 is not supported. | ||||
3914 | if (InVT.getVectorElementType() == MVT::f16 && | ||||
3915 | !Subtarget->hasFullFP16()) { | ||||
3916 | MVT NewVT = MVT::getVectorVT(MVT::f32, NumElts); | ||||
3917 | SDLoc dl(Op); | ||||
3918 | if (IsStrict) { | ||||
3919 | SDValue Ext = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {NewVT, MVT::Other}, | ||||
3920 | {Op.getOperand(0), Op.getOperand(1)}); | ||||
3921 | return DAG.getNode(Op.getOpcode(), dl, {VT, MVT::Other}, | ||||
3922 | {Ext.getValue(1), Ext.getValue(0)}); | ||||
3923 | } | ||||
3924 | return DAG.getNode( | ||||
3925 | Op.getOpcode(), dl, Op.getValueType(), | ||||
3926 | DAG.getNode(ISD::FP_EXTEND, dl, NewVT, Op.getOperand(0))); | ||||
3927 | } | ||||
3928 | |||||
3929 | uint64_t VTSize = VT.getFixedSizeInBits(); | ||||
3930 | uint64_t InVTSize = InVT.getFixedSizeInBits(); | ||||
3931 | if (VTSize < InVTSize) { | ||||
3932 | SDLoc dl(Op); | ||||
3933 | if (IsStrict) { | ||||
3934 | InVT = InVT.changeVectorElementTypeToInteger(); | ||||
3935 | SDValue Cv = DAG.getNode(Op.getOpcode(), dl, {InVT, MVT::Other}, | ||||
3936 | {Op.getOperand(0), Op.getOperand(1)}); | ||||
3937 | SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, VT, Cv); | ||||
3938 | return DAG.getMergeValues({Trunc, Cv.getValue(1)}, dl); | ||||
3939 | } | ||||
3940 | SDValue Cv = | ||||
3941 | DAG.getNode(Op.getOpcode(), dl, InVT.changeVectorElementTypeToInteger(), | ||||
3942 | Op.getOperand(0)); | ||||
3943 | return DAG.getNode(ISD::TRUNCATE, dl, VT, Cv); | ||||
3944 | } | ||||
3945 | |||||
3946 | if (VTSize > InVTSize) { | ||||
3947 | SDLoc dl(Op); | ||||
3948 | MVT ExtVT = | ||||
3949 | MVT::getVectorVT(MVT::getFloatingPointVT(VT.getScalarSizeInBits()), | ||||
3950 | VT.getVectorNumElements()); | ||||
3951 | if (IsStrict) { | ||||
3952 | SDValue Ext = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {ExtVT, MVT::Other}, | ||||
3953 | {Op.getOperand(0), Op.getOperand(1)}); | ||||
3954 | return DAG.getNode(Op.getOpcode(), dl, {VT, MVT::Other}, | ||||
3955 | {Ext.getValue(1), Ext.getValue(0)}); | ||||
3956 | } | ||||
3957 | SDValue Ext = DAG.getNode(ISD::FP_EXTEND, dl, ExtVT, Op.getOperand(0)); | ||||
3958 | return DAG.getNode(Op.getOpcode(), dl, VT, Ext); | ||||
3959 | } | ||||
3960 | |||||
3961 | // Use a scalar operation for conversions between single-element vectors of | ||||
3962 | // the same size. | ||||
3963 | if (NumElts == 1) { | ||||
3964 | SDLoc dl(Op); | ||||
3965 | SDValue Extract = DAG.getNode( | ||||
3966 | ISD::EXTRACT_VECTOR_ELT, dl, InVT.getScalarType(), | ||||
3967 | Op.getOperand(IsStrict ? 1 : 0), DAG.getConstant(0, dl, MVT::i64)); | ||||
3968 | EVT ScalarVT = VT.getScalarType(); | ||||
3969 | if (IsStrict) | ||||
3970 | return DAG.getNode(Op.getOpcode(), dl, {ScalarVT, MVT::Other}, | ||||
3971 | {Op.getOperand(0), Extract}); | ||||
3972 | return DAG.getNode(Op.getOpcode(), dl, ScalarVT, Extract); | ||||
3973 | } | ||||
3974 | |||||
3975 | // Type changing conversions are illegal. | ||||
3976 | return Op; | ||||
3977 | } | ||||
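// Illustrative instance of the narrowing path above (a sketch): fptosi
// v4f32 -> v4i16 has VTSize (64) < InVTSize (128), so the conversion is
// done at v4i32 and then truncated:
//   fcvtzs v0.4s, v0.4s
//   xtn    v0.4h, v0.4s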
3978 | |||||
3979 | SDValue AArch64TargetLowering::LowerFP_TO_INT(SDValue Op, | ||||
3980 | SelectionDAG &DAG) const { | ||||
3981 | bool IsStrict = Op->isStrictFPOpcode(); | ||||
3982 | SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0); | ||||
3983 | |||||
3984 | if (SrcVal.getValueType().isVector()) | ||||
3985 | return LowerVectorFP_TO_INT(Op, DAG); | ||||
3986 | |||||
3987 | // f16 conversions are promoted to f32 when full fp16 is not supported. | ||||
3988 | if (SrcVal.getValueType() == MVT::f16 && !Subtarget->hasFullFP16()) { | ||||
3989 | SDLoc dl(Op); | ||||
3990 | if (IsStrict) { | ||||
3991 | SDValue Ext = | ||||
3992 | DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {MVT::f32, MVT::Other}, | ||||
3993 | {Op.getOperand(0), SrcVal}); | ||||
3994 | return DAG.getNode(Op.getOpcode(), dl, {Op.getValueType(), MVT::Other}, | ||||
3995 | {Ext.getValue(1), Ext.getValue(0)}); | ||||
3996 | } | ||||
3997 | return DAG.getNode( | ||||
3998 | Op.getOpcode(), dl, Op.getValueType(), | ||||
3999 | DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, SrcVal)); | ||||
4000 | } | ||||
4001 | |||||
4002 | if (SrcVal.getValueType() != MVT::f128) { | ||||
4003 | // It's legal except when f128 is involved | ||||
4004 | return Op; | ||||
4005 | } | ||||
4006 | |||||
4007 | return SDValue(); | ||||
4008 | } | ||||
4009 | |||||
4010 | SDValue | ||||
4011 | AArch64TargetLowering::LowerVectorFP_TO_INT_SAT(SDValue Op, | ||||
4012 | SelectionDAG &DAG) const { | ||||
4013 | // AArch64 FP-to-int conversions saturate to the destination element size, so | ||||
4014 | // we can lower common saturating conversions to simple instructions. | ||||
4015 | SDValue SrcVal = Op.getOperand(0); | ||||
4016 | EVT SrcVT = SrcVal.getValueType(); | ||||
4017 | EVT DstVT = Op.getValueType(); | ||||
4018 | EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); | ||||
4019 | |||||
4020 | uint64_t SrcElementWidth = SrcVT.getScalarSizeInBits(); | ||||
4021 | uint64_t DstElementWidth = DstVT.getScalarSizeInBits(); | ||||
4022 | uint64_t SatWidth = SatVT.getScalarSizeInBits(); | ||||
4023 | assert(SatWidth <= DstElementWidth && | ||||
4024 | "Saturation width cannot exceed result width"); | ||||
4025 | |||||
4026 | // TODO: Consider lowering to SVE operations, as in LowerVectorFP_TO_INT. | ||||
4027 | // Currently, the `llvm.fpto[su]i.sat.*` intrinsics don't accept scalable | ||||
4028 | // types, so this is hard to reach. | ||||
4029 | if (DstVT.isScalableVector()) | ||||
4030 | return SDValue(); | ||||
4031 | |||||
4032 | EVT SrcElementVT = SrcVT.getVectorElementType(); | ||||
4033 | |||||
4034 | // In the absence of FP16 support, promote f16 to f32 and saturate the result. | ||||
4035 | if (SrcElementVT == MVT::f16 && | ||||
4036 | (!Subtarget->hasFullFP16() || DstElementWidth > 16)) { | ||||
4037 | MVT F32VT = MVT::getVectorVT(MVT::f32, SrcVT.getVectorNumElements()); | ||||
4038 | SrcVal = DAG.getNode(ISD::FP_EXTEND, SDLoc(Op), F32VT, SrcVal); | ||||
4039 | SrcVT = F32VT; | ||||
4040 | SrcElementVT = MVT::f32; | ||||
4041 | SrcElementWidth = 32; | ||||
4042 | } else if (SrcElementVT != MVT::f64 && SrcElementVT != MVT::f32 && | ||||
4043 | SrcElementVT != MVT::f16) | ||||
4044 | return SDValue(); | ||||
4045 | |||||
4046 | SDLoc DL(Op); | ||||
4047 | // Cases that we can emit directly. | ||||
4048 | if (SrcElementWidth == DstElementWidth && SrcElementWidth == SatWidth) | ||||
4049 | return DAG.getNode(Op.getOpcode(), DL, DstVT, SrcVal, | ||||
4050 | DAG.getValueType(DstVT.getScalarType())); | ||||
4051 | |||||
4052 | // Otherwise we emit a cvt that saturates to a higher BW, and saturate the | ||||
4053 | // result. This is only valid if the legal cvt is larger than the saturate | ||||
4054 | // width. For double, as we don't have MIN/MAX, it can be simpler to scalarize | ||||
4055 | // (at least until sqxtn is selected). | ||||
4056 | if (SrcElementWidth < SatWidth || SrcElementVT == MVT::f64) | ||||
4057 | return SDValue(); | ||||
4058 | |||||
4059 | EVT IntVT = SrcVT.changeVectorElementTypeToInteger(); | ||||
4060 | SDValue NativeCvt = DAG.getNode(Op.getOpcode(), DL, IntVT, SrcVal, | ||||
4061 | DAG.getValueType(IntVT.getScalarType())); | ||||
4062 | SDValue Sat; | ||||
4063 | if (Op.getOpcode() == ISD::FP_TO_SINT_SAT) { | ||||
4064 | SDValue MinC = DAG.getConstant( | ||||
4065 | APInt::getSignedMaxValue(SatWidth).sext(SrcElementWidth), DL, IntVT); | ||||
4066 | SDValue Min = DAG.getNode(ISD::SMIN, DL, IntVT, NativeCvt, MinC); | ||||
4067 | SDValue MaxC = DAG.getConstant( | ||||
4068 | APInt::getSignedMinValue(SatWidth).sext(SrcElementWidth), DL, IntVT); | ||||
4069 | Sat = DAG.getNode(ISD::SMAX, DL, IntVT, Min, MaxC); | ||||
4070 | } else { | ||||
4071 | SDValue MinC = DAG.getConstant( | ||||
4072 | APInt::getAllOnesValue(SatWidth).zext(SrcElementWidth), DL, IntVT); | ||||
4073 | Sat = DAG.getNode(ISD::UMIN, DL, IntVT, NativeCvt, MinC); | ||||
4074 | } | ||||
4075 | |||||
4076 | return DAG.getNode(ISD::TRUNCATE, DL, DstVT, Sat); | ||||
4077 | } | ||||
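// Illustrative instance of the clamp above (a sketch): fptosi.sat of v4f32
// with an i8 saturation width converts natively to v4i32 and then clamps
// with splat constants before the final truncate:
//   Min = smin(NativeCvt, splat(127))
//   Sat = smax(Min, splat(-128))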
4078 | |||||
4079 | SDValue AArch64TargetLowering::LowerFP_TO_INT_SAT(SDValue Op, | ||||
4080 | SelectionDAG &DAG) const { | ||||
4081 | // AArch64 FP-to-int conversions saturate to the destination register size, so | ||||
4082 | // we can lower common saturating conversions to simple instructions. | ||||
4083 | SDValue SrcVal = Op.getOperand(0); | ||||
4084 | EVT SrcVT = SrcVal.getValueType(); | ||||
4085 | |||||
4086 | if (SrcVT.isVector()) | ||||
4087 | return LowerVectorFP_TO_INT_SAT(Op, DAG); | ||||
4088 | |||||
4089 | EVT DstVT = Op.getValueType(); | ||||
4090 | EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); | ||||
4091 | uint64_t SatWidth = SatVT.getScalarSizeInBits(); | ||||
4092 | uint64_t DstWidth = DstVT.getScalarSizeInBits(); | ||||
4093 | assert(SatWidth <= DstWidth && "Saturation width cannot exceed result width"); | ||||
4094 | |||||
4095 | // In the absence of FP16 support, promote f16 to f32 and saturate the result. | ||||
4096 | if (SrcVT == MVT::f16 && !Subtarget->hasFullFP16()) { | ||||
4097 | SrcVal = DAG.getNode(ISD::FP_EXTEND, SDLoc(Op), MVT::f32, SrcVal); | ||||
4098 | SrcVT = MVT::f32; | ||||
4099 | } else if (SrcVT != MVT::f64 && SrcVT != MVT::f32 && SrcVT != MVT::f16) | ||||
4100 | return SDValue(); | ||||
4101 | |||||
4102 | SDLoc DL(Op); | ||||
4103 | // Cases that we can emit directly. | ||||
4104 | if ((SrcVT == MVT::f64 || SrcVT == MVT::f32 || | ||||
4105 | (SrcVT == MVT::f16 && Subtarget->hasFullFP16())) && | ||||
4106 | DstVT == SatVT && (DstVT == MVT::i64 || DstVT == MVT::i32)) | ||||
4107 | return DAG.getNode(Op.getOpcode(), DL, DstVT, SrcVal, | ||||
4108 | DAG.getValueType(DstVT)); | ||||
4109 | |||||
4110 | // Otherwise we emit a cvt that saturates to a higher BW, and saturate the | ||||
4111 | // result. This is only valid if the legal cvt is larger than the saturate | ||||
4112 | // width. | ||||
4113 | if (DstWidth < SatWidth) | ||||
4114 | return SDValue(); | ||||
4115 | |||||
4116 | SDValue NativeCvt = | ||||
4117 | DAG.getNode(Op.getOpcode(), DL, DstVT, SrcVal, DAG.getValueType(DstVT)); | ||||
4118 | SDValue Sat; | ||||
4119 | if (Op.getOpcode() == ISD::FP_TO_SINT_SAT) { | ||||
4120 | SDValue MinC = DAG.getConstant( | ||||
4121 | APInt::getSignedMaxValue(SatWidth).sext(DstWidth), DL, DstVT); | ||||
4122 | SDValue Min = DAG.getNode(ISD::SMIN, DL, DstVT, NativeCvt, MinC); | ||||
4123 | SDValue MaxC = DAG.getConstant( | ||||
4124 | APInt::getSignedMinValue(SatWidth).sext(DstWidth), DL, DstVT); | ||||
4125 | Sat = DAG.getNode(ISD::SMAX, DL, DstVT, Min, MaxC); | ||||
4126 | } else { | ||||
4127 | SDValue MinC = DAG.getConstant( | ||||
4128 | APInt::getAllOnesValue(SatWidth).zext(DstWidth), DL, DstVT); | ||||
4129 | Sat = DAG.getNode(ISD::UMIN, DL, DstVT, NativeCvt, MinC); | ||||
4130 | } | ||||
4131 | |||||
4132 | return DAG.getNode(ISD::TRUNCATE, DL, DstVT, Sat); | ||||
4133 | } | ||||
4134 | |||||
4135 | SDValue AArch64TargetLowering::LowerVectorINT_TO_FP(SDValue Op, | ||||
4136 | SelectionDAG &DAG) const { | ||||
4137 | // Warning: We maintain cost tables in AArch64TargetTransformInfo.cpp. | ||||
4138 | // Any additional optimization in this function should be recorded | ||||
4139 | // in the cost tables. | ||||
4140 | bool IsStrict = Op->isStrictFPOpcode(); | ||||
4141 | EVT VT = Op.getValueType(); | ||||
4142 | SDLoc dl(Op); | ||||
4143 | SDValue In = Op.getOperand(IsStrict ? 1 : 0); | ||||
4144 | EVT InVT = In.getValueType(); | ||||
4145 | unsigned Opc = Op.getOpcode(); | ||||
4146 | bool IsSigned = Opc == ISD::SINT_TO_FP || Opc == ISD::STRICT_SINT_TO_FP; | ||||
4147 | |||||
4148 | if (VT.isScalableVector()) { | ||||
4149 | if (InVT.getVectorElementType() == MVT::i1) { | ||||
4150 | // We can't directly extend an SVE predicate; extend it first. | ||||
4151 | unsigned CastOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; | ||||
4152 | EVT CastVT = getPromotedVTForPredicate(InVT); | ||||
4153 | In = DAG.getNode(CastOpc, dl, CastVT, In); | ||||
4154 | return DAG.getNode(Opc, dl, VT, In); | ||||
4155 | } | ||||
4156 | |||||
4157 | unsigned Opcode = IsSigned ? AArch64ISD::SINT_TO_FP_MERGE_PASSTHRU | ||||
4158 | : AArch64ISD::UINT_TO_FP_MERGE_PASSTHRU; | ||||
4159 | return LowerToPredicatedOp(Op, DAG, Opcode); | ||||
4160 | } | ||||
4161 | |||||
4162 | if (useSVEForFixedLengthVectorVT(VT, | ||||
4163 | Subtarget->forceStreamingCompatibleSVE()) || | ||||
4164 | useSVEForFixedLengthVectorVT(InVT, | ||||
4165 | Subtarget->forceStreamingCompatibleSVE())) | ||||
4166 | return LowerFixedLengthIntToFPToSVE(Op, DAG); | ||||
4167 | |||||
4168 | uint64_t VTSize = VT.getFixedSizeInBits(); | ||||
4169 | uint64_t InVTSize = InVT.getFixedSizeInBits(); | ||||
4170 | if (VTSize < InVTSize) { | ||||
4171 | MVT CastVT = | ||||
4172 | MVT::getVectorVT(MVT::getFloatingPointVT(InVT.getScalarSizeInBits()), | ||||
4173 | InVT.getVectorNumElements()); | ||||
4174 | if (IsStrict) { | ||||
4175 | In = DAG.getNode(Opc, dl, {CastVT, MVT::Other}, | ||||
4176 | {Op.getOperand(0), In}); | ||||
4177 | return DAG.getNode( | ||||
4178 | ISD::STRICT_FP_ROUND, dl, {VT, MVT::Other}, | ||||
4179 | {In.getValue(1), In.getValue(0), DAG.getIntPtrConstant(0, dl)}); | ||||
4180 | } | ||||
4181 | In = DAG.getNode(Opc, dl, CastVT, In); | ||||
4182 | return DAG.getNode(ISD::FP_ROUND, dl, VT, In, | ||||
4183 | DAG.getIntPtrConstant(0, dl, /*isTarget=*/true)); | ||||
4184 | } | ||||
4185 | |||||
4186 | if (VTSize > InVTSize) { | ||||
4187 | unsigned CastOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; | ||||
4188 | EVT CastVT = VT.changeVectorElementTypeToInteger(); | ||||
4189 | In = DAG.getNode(CastOpc, dl, CastVT, In); | ||||
4190 | if (IsStrict) | ||||
4191 | return DAG.getNode(Opc, dl, {VT, MVT::Other}, {Op.getOperand(0), In}); | ||||
4192 | return DAG.getNode(Opc, dl, VT, In); | ||||
4193 | } | ||||
4194 | |||||
4195 | // Use a scalar operation for conversions between single-element vectors of | ||||
4196 | // the same size. | ||||
4197 | if (VT.getVectorNumElements() == 1) { | ||||
4198 | SDValue Extract = DAG.getNode( | ||||
4199 | ISD::EXTRACT_VECTOR_ELT, dl, InVT.getScalarType(), | ||||
4200 | In, DAG.getConstant(0, dl, MVT::i64)); | ||||
4201 | EVT ScalarVT = VT.getScalarType(); | ||||
4202 | if (IsStrict) | ||||
4203 | return DAG.getNode(Op.getOpcode(), dl, {ScalarVT, MVT::Other}, | ||||
4204 | {Op.getOperand(0), Extract}); | ||||
4205 | return DAG.getNode(Op.getOpcode(), dl, ScalarVT, Extract); | ||||
4206 | } | ||||
4207 | |||||
4208 | return Op; | ||||
4209 | } | ||||
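// Illustrative instance of the widening path above (a sketch): sitofp
// v4i16 -> v4f32 has VTSize (128) > InVTSize (64), so the input is
// sign-extended to v4i32 and then converted:
//   sshll v0.4s, v0.4h, #0
//   scvtf v0.4s, v0.4s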
4210 | |||||
4211 | SDValue AArch64TargetLowering::LowerINT_TO_FP(SDValue Op, | ||||
4212 | SelectionDAG &DAG) const { | ||||
4213 | if (Op.getValueType().isVector()) | ||||
4214 | return LowerVectorINT_TO_FP(Op, DAG); | ||||
4215 | |||||
4216 | bool IsStrict = Op->isStrictFPOpcode(); | ||||
4217 | SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0); | ||||
4218 | |||||
4219 | // f16 conversions are promoted to f32 when full fp16 is not supported. | ||||
4220 | if (Op.getValueType() == MVT::f16 && !Subtarget->hasFullFP16()) { | ||||
4221 | SDLoc dl(Op); | ||||
4222 | if (IsStrict) { | ||||
4223 | SDValue Val = DAG.getNode(Op.getOpcode(), dl, {MVT::f32, MVT::Other}, | ||||
4224 | {Op.getOperand(0), SrcVal}); | ||||
4225 | return DAG.getNode( | ||||
4226 | ISD::STRICT_FP_ROUND, dl, {MVT::f16, MVT::Other}, | ||||
4227 | {Val.getValue(1), Val.getValue(0), DAG.getIntPtrConstant(0, dl)}); | ||||
4228 | } | ||||
4229 | return DAG.getNode( | ||||
4230 | ISD::FP_ROUND, dl, MVT::f16, | ||||
4231 | DAG.getNode(Op.getOpcode(), dl, MVT::f32, SrcVal), | ||||
4232 | DAG.getIntPtrConstant(0, dl)); | ||||
4233 | } | ||||
4234 | |||||
4235 | // i128 conversions are libcalls. | ||||
4236 | if (SrcVal.getValueType() == MVT::i128) | ||||
4237 | return SDValue(); | ||||
4238 | |||||
4239 | // Other conversions are legal, unless it's to the completely software-based | ||||
4240 | // fp128. | ||||
4241 | if (Op.getValueType() != MVT::f128) | ||||
4242 | return Op; | ||||
4243 | return SDValue(); | ||||
4244 | } | ||||
4245 | |||||
4246 | SDValue AArch64TargetLowering::LowerFSINCOS(SDValue Op, | ||||
4247 | SelectionDAG &DAG) const { | ||||
4248 | // For iOS, we want to call an alternative entry point: __sincos_stret, | ||||
4249 | // which returns the values in two S / D registers. | ||||
  SDLoc dl(Op);
  SDValue Arg = Op.getOperand(0);
  EVT ArgVT = Arg.getValueType();
  Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());

  ArgListTy Args;
  ArgListEntry Entry;

  Entry.Node = Arg;
  Entry.Ty = ArgTy;
  Entry.IsSExt = false;
  Entry.IsZExt = false;
  Args.push_back(Entry);

  RTLIB::Libcall LC = ArgVT == MVT::f64 ? RTLIB::SINCOS_STRET_F64
                                        : RTLIB::SINCOS_STRET_F32;
  const char *LibcallName = getLibcallName(LC);
  SDValue Callee =
      DAG.getExternalSymbol(LibcallName, getPointerTy(DAG.getDataLayout()));

  StructType *RetTy = StructType::get(ArgTy, ArgTy);
  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl)
      .setChain(DAG.getEntryNode())
      .setLibCallee(CallingConv::Fast, RetTy, Callee, std::move(Args));

  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  return CallResult.first;
}

static MVT getSVEContainerType(EVT ContentTy);

SDValue AArch64TargetLowering::LowerBITCAST(SDValue Op,
                                            SelectionDAG &DAG) const {
  EVT OpVT = Op.getValueType();
  EVT ArgVT = Op.getOperand(0).getValueType();

  if (useSVEForFixedLengthVectorVT(OpVT))
    return LowerFixedLengthBitcastToSVE(Op, DAG);

  if (OpVT.isScalableVector()) {
    // Bitcasting between unpacked vector types of different element counts is
    // not a NOP because the live elements are laid out differently.
    //                01234567
    // e.g. nxv2i32 = XX??XX??
    //      nxv4f16 = X?X?X?X?
    if (OpVT.getVectorElementCount() != ArgVT.getVectorElementCount())
      return SDValue();

    if (isTypeLegal(OpVT) && !isTypeLegal(ArgVT)) {
      assert(OpVT.isFloatingPoint() && !ArgVT.isFloatingPoint() &&
             "Expected int->fp bitcast!");
      SDValue ExtResult =
          DAG.getNode(ISD::ANY_EXTEND, SDLoc(Op), getSVEContainerType(ArgVT),
                      Op.getOperand(0));
      return getSVESafeBitCast(OpVT, ExtResult, DAG);
    }
    return getSVESafeBitCast(OpVT, Op.getOperand(0), DAG);
  }

  if (OpVT != MVT::f16 && OpVT != MVT::bf16)
    return SDValue();

  // Bitcasts between f16 and bf16 are legal.
  if (ArgVT == MVT::f16 || ArgVT == MVT::bf16)
    return Op;

  assert(ArgVT == MVT::i16);
  SDLoc DL(Op);

  Op = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op.getOperand(0));
  Op = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Op);
  return SDValue(
      DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, OpVT, Op,
                         DAG.getTargetConstant(AArch64::hsub, DL, MVT::i32)),
      0);
}

static EVT getExtensionTo64Bits(const EVT &OrigVT) {
  if (OrigVT.getSizeInBits() >= 64)
    return OrigVT;

  assert(OrigVT.isSimple() && "Expecting a simple value type");

  MVT::SimpleValueType OrigSimpleTy = OrigVT.getSimpleVT().SimpleTy;
  switch (OrigSimpleTy) {
  default: llvm_unreachable("Unexpected Vector Type");
  case MVT::v2i8:
  case MVT::v2i16:
    return MVT::v2i32;
  case MVT::v4i8:
    return MVT::v4i16;
  }
}

static SDValue addRequiredExtensionForVectorMULL(SDValue N, SelectionDAG &DAG,
                                                 const EVT &OrigTy,
                                                 const EVT &ExtTy,
                                                 unsigned ExtOpcode) {
  // The vector originally had a size of OrigTy. It was then extended to ExtTy.
  // We expect the ExtTy to be 128-bits total. If the OrigTy is less than
  // 64-bits we need to insert a new extension so that it will be 64-bits.
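  // For example (an illustrative case, not from the original comment): a
  // v4i8 operand feeding a v4i32 VMULL is first extended to v4i16, producing
  // the legal 64-bit operand the instruction can consume.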
  assert(ExtTy.is128BitVector() && "Unexpected extension size");
  if (OrigTy.getSizeInBits() >= 64)
    return N;

  // Must extend size to at least 64 bits to be used as an operand for VMULL.
  EVT NewVT = getExtensionTo64Bits(OrigTy);

  return DAG.getNode(ExtOpcode, SDLoc(N), NewVT, N);
}

// Returns lane if Op extracts from a two-element vector and lane is constant
// (i.e., extractelt(<2 x Ty> %v, ConstantLane)), and std::nullopt otherwise.
static std::optional<uint64_t>
getConstantLaneNumOfExtractHalfOperand(SDValue &Op) {
  SDNode *OpNode = Op.getNode();
  if (OpNode->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
    return std::nullopt;

  EVT VT = OpNode->getOperand(0).getValueType();
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(OpNode->getOperand(1));
  if (!VT.isFixedLengthVector() || VT.getVectorNumElements() != 2 || !C)
    return std::nullopt;

  return C->getZExtValue();
}

static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG,
                                   bool isSigned) {
  EVT VT = N->getValueType(0);

  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Elt : N->op_values()) {
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) {
      unsigned EltSize = VT.getScalarSizeInBits();
      unsigned HalfSize = EltSize / 2;
      if (isSigned) {
        if (!isIntN(HalfSize, C->getSExtValue()))
          return false;
      } else {
        if (!isUIntN(HalfSize, C->getZExtValue()))
          return false;
      }
      continue;
    }
    return false;
  }

  return true;
}

static SDValue skipExtensionForVectorMULL(SDNode *N, SelectionDAG &DAG) {
  if (N->getOpcode() == ISD::SIGN_EXTEND ||
      N->getOpcode() == ISD::ZERO_EXTEND || N->getOpcode() == ISD::ANY_EXTEND)
    return addRequiredExtensionForVectorMULL(N->getOperand(0), DAG,
                                             N->getOperand(0)->getValueType(0),
                                             N->getValueType(0),
                                             N->getOpcode());

  assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR");
  EVT VT = N->getValueType(0);
  SDLoc dl(N);
  unsigned EltSize = VT.getScalarSizeInBits() / 2;
  unsigned NumElts = VT.getVectorNumElements();
  MVT TruncVT = MVT::getIntegerVT(EltSize);
  SmallVector<SDValue, 8> Ops;
  for (unsigned i = 0; i != NumElts; ++i) {
    ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i));
    const APInt &CInt = C->getAPIntValue();
    // Element types smaller than 32 bits are not legal, so use i32 elements.
    // The values are implicitly truncated so sext vs. zext doesn't matter.
    Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), dl, MVT::i32));
  }
  return DAG.getBuildVector(MVT::getVectorVT(TruncVT, NumElts), dl, Ops);
}

static bool isSignExtended(SDNode *N, SelectionDAG &DAG) {
  return N->getOpcode() == ISD::SIGN_EXTEND ||
         N->getOpcode() == ISD::ANY_EXTEND ||
         isExtendedBUILD_VECTOR(N, DAG, true);
}

static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) {
  return N->getOpcode() == ISD::ZERO_EXTEND ||
         N->getOpcode() == ISD::ANY_EXTEND ||
         isExtendedBUILD_VECTOR(N, DAG, false);
}

static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) {
  unsigned Opcode = N->getOpcode();
  if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
    SDNode *N0 = N->getOperand(0).getNode();
    SDNode *N1 = N->getOperand(1).getNode();
    return N0->hasOneUse() && N1->hasOneUse() &&
           isSignExtended(N0, DAG) && isSignExtended(N1, DAG);
  }
  return false;
}

static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) {
  unsigned Opcode = N->getOpcode();
  if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
    SDNode *N0 = N->getOperand(0).getNode();
    SDNode *N1 = N->getOperand(1).getNode();
    return N0->hasOneUse() && N1->hasOneUse() &&
           isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG);
  }
  return false;
}

SDValue AArch64TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
                                                SelectionDAG &DAG) const {
  // The rounding mode is in bits 23:22 of the FPCR.
  // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0.
  // The formula we use to implement this is ((FPCR + (1 << 22)) >> 22) & 3,
  // so that the shift + and get folded into a bitfield extract.
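  // Worked example (illustrative, not from the original comment): FPCR[23:22]
  // == 0b11 (round toward zero) gives (0b11 + 0b01) & 0b11 == 0, the
  // FLT_ROUNDS value for "toward zero"; 0b00 (to nearest) gives 1.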
  SDLoc dl(Op);

  SDValue Chain = Op.getOperand(0);
  SDValue FPCR_64 = DAG.getNode(
      ISD::INTRINSIC_W_CHAIN, dl, {MVT::i64, MVT::Other},
      {Chain, DAG.getConstant(Intrinsic::aarch64_get_fpcr, dl, MVT::i64)});
  Chain = FPCR_64.getValue(1);
  SDValue FPCR_32 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, FPCR_64);
  SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPCR_32,
                                  DAG.getConstant(1U << 22, dl, MVT::i32));
  SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds,
                              DAG.getConstant(22, dl, MVT::i32));
  SDValue AND = DAG.getNode(ISD::AND, dl, MVT::i32, RMODE,
                            DAG.getConstant(3, dl, MVT::i32));
  return DAG.getMergeValues({AND, Chain}, dl);
}

SDValue AArch64TargetLowering::LowerSET_ROUNDING(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Chain = Op->getOperand(0);
  SDValue RMValue = Op->getOperand(1);

  // The rounding mode is in bits 23:22 of the FPCR.
  // The llvm.set.rounding argument value to the rounding mode in FPCR mapping
  // is 0->3, 1->0, 2->1, 3->2. The formula we use to implement this is
  // ((arg - 1) & 3) << 22.
  //
  // The argument of llvm.set.rounding must be within the segment [0, 3], so
  // NearestTiesToAway (4) is not handled here. It is the responsibility of
  // the code that generates llvm.set.rounding to ensure this condition.
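  // Worked example (illustrative, not from the original comment): an argument
  // of 0 (toward zero) gives ((0 - 1) & 3) == 3, so FPCR[23:22] is set to
  // 0b11, the toward-zero encoding.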

  // Calculate new value of FPCR[23:22].
  RMValue = DAG.getNode(ISD::SUB, DL, MVT::i32, RMValue,
                        DAG.getConstant(1, DL, MVT::i32));
  RMValue = DAG.getNode(ISD::AND, DL, MVT::i32, RMValue,
                        DAG.getConstant(0x3, DL, MVT::i32));
  RMValue =
      DAG.getNode(ISD::SHL, DL, MVT::i32, RMValue,
                  DAG.getConstant(AArch64::RoundingBitsPos, DL, MVT::i32));
  RMValue = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, RMValue);

  // Get current value of FPCR.
  SDValue Ops[] = {
      Chain, DAG.getTargetConstant(Intrinsic::aarch64_get_fpcr, DL, MVT::i64)};
  SDValue FPCR =
      DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, {MVT::i64, MVT::Other}, Ops);
  Chain = FPCR.getValue(1);
  FPCR = FPCR.getValue(0);

  // Put new rounding mode into FPCR[23:22].
  const int RMMask = ~(AArch64::Rounding::rmMask << AArch64::RoundingBitsPos);
  FPCR = DAG.getNode(ISD::AND, DL, MVT::i64, FPCR,
                     DAG.getConstant(RMMask, DL, MVT::i64));
  FPCR = DAG.getNode(ISD::OR, DL, MVT::i64, FPCR, RMValue);
  SDValue Ops2[] = {
      Chain, DAG.getTargetConstant(Intrinsic::aarch64_set_fpcr, DL, MVT::i64),
      FPCR};
  return DAG.getNode(ISD::INTRINSIC_VOID, DL, MVT::Other, Ops2);
}

static unsigned selectUmullSmull(SDNode *&N0, SDNode *&N1, SelectionDAG &DAG,
                                 bool &IsMLA) {
  bool IsN0SExt = isSignExtended(N0, DAG);
  bool IsN1SExt = isSignExtended(N1, DAG);
  if (IsN0SExt && IsN1SExt)
    return AArch64ISD::SMULL;

  bool IsN0ZExt = isZeroExtended(N0, DAG);
  bool IsN1ZExt = isZeroExtended(N1, DAG);

  if (IsN0ZExt && IsN1ZExt)
    return AArch64ISD::UMULL;

  if (!IsN1SExt && !IsN1ZExt)
    return 0;
  // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these
  // into (s/zext A * s/zext C) + (s/zext B * s/zext C).
  if (IsN1SExt && isAddSubSExt(N0, DAG)) {
    IsMLA = true;
    return AArch64ISD::SMULL;
  }
  if (IsN1ZExt && isAddSubZExt(N0, DAG)) {
    IsMLA = true;
    return AArch64ISD::UMULL;
  }
  if (IsN0ZExt && isAddSubZExt(N1, DAG)) {
    std::swap(N0, N1);
    IsMLA = true;
    return AArch64ISD::UMULL;
  }
  return 0;
}

SDValue AArch64TargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();

  // If SVE is available then i64 vector multiplications can also be made legal.
  bool OverrideNEON = VT == MVT::v2i64 || VT == MVT::v1i64 ||
                      Subtarget->forceStreamingCompatibleSVE();

  if (VT.isScalableVector() || useSVEForFixedLengthVectorVT(VT, OverrideNEON))
    return LowerToPredicatedOp(Op, DAG, AArch64ISD::MUL_PRED);

  // Multiplications are only custom-lowered for 128-bit vectors so that
  // VMULL can be detected. Otherwise v2i64 multiplications are not legal.
  assert(VT.is128BitVector() && VT.isInteger() &&
         "unexpected type for custom-lowering ISD::MUL");
  SDNode *N0 = Op.getOperand(0).getNode();
  SDNode *N1 = Op.getOperand(1).getNode();
  bool isMLA = false;
  unsigned NewOpc = selectUmullSmull(N0, N1, DAG, isMLA);

  if (!NewOpc) {
    if (VT == MVT::v2i64)
      // Fall through to expand this. It is not legal.
      return SDValue();
    else
      // Other vector multiplications are legal.
      return Op;
  }

  // Legalize to a S/UMULL instruction.
  SDLoc DL(Op);
  SDValue Op0;
  SDValue Op1 = skipExtensionForVectorMULL(N1, DAG);
  if (!isMLA) {
    Op0 = skipExtensionForVectorMULL(N0, DAG);
    assert(Op0.getValueType().is64BitVector() &&
           Op1.getValueType().is64BitVector() &&
           "unexpected types for extended operands to VMULL");
    return DAG.getNode(NewOpc, DL, VT, Op0, Op1);
  }
  // Optimize (zext A + zext B) * C to (S/UMULL A, C) + (S/UMULL B, C) during
  // isel lowering to take advantage of no-stall back-to-back s/umul + s/umla.
  // This is true for CPUs with accumulate forwarding such as Cortex-A53/A57.
  SDValue N00 = skipExtensionForVectorMULL(N0->getOperand(0).getNode(), DAG);
  SDValue N01 = skipExtensionForVectorMULL(N0->getOperand(1).getNode(), DAG);
  EVT Op1VT = Op1.getValueType();
  return DAG.getNode(N0->getOpcode(), DL, VT,
                     DAG.getNode(NewOpc, DL, VT,
                                 DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1),
                     DAG.getNode(NewOpc, DL, VT,
                                 DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1));
}

static inline SDValue getPTrue(SelectionDAG &DAG, SDLoc DL, EVT VT,
                               int Pattern) {
  if (VT == MVT::nxv1i1 && Pattern == AArch64SVEPredPattern::all)
    return DAG.getConstant(1, DL, MVT::nxv1i1);
  return DAG.getNode(AArch64ISD::PTRUE, DL, VT,
                     DAG.getTargetConstant(Pattern, DL, MVT::i32));
}

// Returns a safe bitcast between two scalable vector predicates, where
// any newly created lanes from a widening bitcast are defined as zero.
static SDValue getSVEPredicateBitCast(EVT VT, SDValue Op, SelectionDAG &DAG) {
  SDLoc DL(Op);
  EVT InVT = Op.getValueType();

  assert(InVT.getVectorElementType() == MVT::i1 &&
         VT.getVectorElementType() == MVT::i1 &&
         "Expected a predicate-to-predicate bitcast");
  assert(VT.isScalableVector() && DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
         InVT.isScalableVector() &&
         DAG.getTargetLoweringInfo().isTypeLegal(InVT) &&
         "Only expect to cast between legal scalable predicate types!");

  // Return the operand if the cast isn't changing type,
  // e.g. <n x 16 x i1> -> <n x 16 x i1>
  if (InVT == VT)
    return Op;

  SDValue Reinterpret = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, VT, Op);

  // We only have to zero the lanes if new lanes are being defined, e.g. when
  // casting from <vscale x 2 x i1> to <vscale x 16 x i1>. If this is not the
  // case (e.g. when casting from <vscale x 16 x i1> -> <vscale x 2 x i1>) then
  // we can return here.
  if (InVT.bitsGT(VT))
    return Reinterpret;

  // Check if the other lanes are already known to be zeroed by
  // construction.
  if (isZeroingInactiveLanes(Op))
    return Reinterpret;

  // Zero the newly introduced lanes.
  SDValue Mask = DAG.getConstant(1, DL, InVT);
  Mask = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, VT, Mask);
  return DAG.getNode(ISD::AND, DL, VT, Reinterpret, Mask);
}

SDValue AArch64TargetLowering::getPStateSM(SelectionDAG &DAG, SDValue Chain,
                                           SMEAttrs Attrs, SDLoc DL,
                                           EVT VT) const {
  if (Attrs.hasStreamingInterfaceOrBody())
    return DAG.getConstant(1, DL, VT);

  if (Attrs.hasNonStreamingInterfaceAndBody())
    return DAG.getConstant(0, DL, VT);

  assert(Attrs.hasStreamingCompatibleInterface() && "Unexpected interface");

  SDValue Callee = DAG.getExternalSymbol("__arm_sme_state",
                                         getPointerTy(DAG.getDataLayout()));
  Type *Int64Ty = Type::getInt64Ty(*DAG.getContext());
  Type *RetTy = StructType::get(Int64Ty, Int64Ty);
  TargetLowering::CallLoweringInfo CLI(DAG);
  ArgListTy Args;
  CLI.setDebugLoc(DL).setChain(Chain).setLibCallee(
      CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2,
      RetTy, Callee, std::move(Args));
  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  SDValue Mask = DAG.getConstant(/*PSTATE.SM*/ 1, DL, MVT::i64);
  return DAG.getNode(ISD::AND, DL, MVT::i64, CallResult.first.getOperand(0),
                     Mask);
}

static std::optional<SMEAttrs> getCalleeAttrsFromExternalFunction(SDValue V) {
  if (auto *ES = dyn_cast<ExternalSymbolSDNode>(V)) {
    StringRef S(ES->getSymbol());
    if (S == "__arm_sme_state" || S == "__arm_tpidr2_save")
      return SMEAttrs(SMEAttrs::SM_Compatible | SMEAttrs::ZA_Preserved);
    if (S == "__arm_tpidr2_restore")
      return SMEAttrs(SMEAttrs::SM_Compatible | SMEAttrs::ZA_Shared);
  }
  return std::nullopt;
}

SDValue AArch64TargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
                                                      SelectionDAG &DAG) const {
  unsigned IntNo = Op.getConstantOperandVal(1);
  SDLoc DL(Op);
  switch (IntNo) {
  default:
    return SDValue(); // Don't custom lower most intrinsics.
  case Intrinsic::aarch64_mops_memset_tag: {
    auto Node = cast<MemIntrinsicSDNode>(Op.getNode());
    SDValue Chain = Node->getChain();
    SDValue Dst = Op.getOperand(2);
    SDValue Val = Op.getOperand(3);
    Val = DAG.getAnyExtOrTrunc(Val, DL, MVT::i64);
    SDValue Size = Op.getOperand(4);
    auto Alignment = Node->getMemOperand()->getAlign();
    bool IsVol = Node->isVolatile();
    auto DstPtrInfo = Node->getPointerInfo();

    const auto &SDI =
        static_cast<const AArch64SelectionDAGInfo &>(DAG.getSelectionDAGInfo());
    SDValue MS =
        SDI.EmitMOPS(AArch64ISD::MOPS_MEMSET_TAGGING, DAG, DL, Chain, Dst, Val,
                     Size, Alignment, IsVol, DstPtrInfo, MachinePointerInfo{});

    // MOPS_MEMSET_TAGGING has 3 results (DstWb, SizeWb, Chain) whereas the
    // intrinsic has 2. So hide SizeWb using MERGE_VALUES. Otherwise
    // LowerOperationWrapper will complain that the number of results has
    // changed.
    return DAG.getMergeValues({MS.getValue(0), MS.getValue(2)}, DL);
  }
  case Intrinsic::aarch64_sme_za_enable:
    return DAG.getNode(
        AArch64ISD::SMSTART, DL, MVT::Other,
        Op->getOperand(0), // Chain
        DAG.getTargetConstant((int32_t)(AArch64SVCR::SVCRZA), DL, MVT::i32),
        DAG.getConstant(0, DL, MVT::i64), DAG.getConstant(1, DL, MVT::i64));
  case Intrinsic::aarch64_sme_za_disable:
    return DAG.getNode(
        AArch64ISD::SMSTOP, DL, MVT::Other,
        Op->getOperand(0), // Chain
        DAG.getTargetConstant((int32_t)(AArch64SVCR::SVCRZA), DL, MVT::i32),
        DAG.getConstant(0, DL, MVT::i64), DAG.getConstant(1, DL, MVT::i64));
  }
}

SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                       SelectionDAG &DAG) const {
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc dl(Op);
  switch (IntNo) {
  default: return SDValue(); // Don't custom lower most intrinsics.
  case Intrinsic::thread_pointer: {
    EVT PtrVT = getPointerTy(DAG.getDataLayout());
    return DAG.getNode(AArch64ISD::THREAD_POINTER, dl, PtrVT);
  }
  case Intrinsic::aarch64_neon_abs: {
    EVT Ty = Op.getValueType();
    if (Ty == MVT::i64) {
      SDValue Result = DAG.getNode(ISD::BITCAST, dl, MVT::v1i64,
                                   Op.getOperand(1));
      Result = DAG.getNode(ISD::ABS, dl, MVT::v1i64, Result);
      return DAG.getNode(ISD::BITCAST, dl, MVT::i64, Result);
    } else if (Ty.isVector() && Ty.isInteger() && isTypeLegal(Ty)) {
      return DAG.getNode(ISD::ABS, dl, Ty, Op.getOperand(1));
    } else {
      report_fatal_error("Unexpected type for AArch64 NEON intrinsic");
    }
  }
  case Intrinsic::aarch64_neon_pmull64: {
    SDValue LHS = Op.getOperand(1);
    SDValue RHS = Op.getOperand(2);

    std::optional<uint64_t> LHSLane =
        getConstantLaneNumOfExtractHalfOperand(LHS);
    std::optional<uint64_t> RHSLane =
        getConstantLaneNumOfExtractHalfOperand(RHS);

    assert((!LHSLane || *LHSLane < 2) && "Expect lane to be None or 0 or 1");
    assert((!RHSLane || *RHSLane < 2) && "Expect lane to be None or 0 or 1");

    // 'aarch64_neon_pmull64' takes i64 parameters, while pmull/pmull2
    // instructions execute on SIMD registers. So canonicalize i64 to v1i64,
    // which ISel recognizes better. For example, generate a ldr into d*
    // registers as opposed to a GPR load followed by a fmov.
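    // e.g. (an illustrative sketch of the intended codegen, not from the
    // original comment):
    //   ldr d0, [x0]        // instead of: ldr x8, [x0]
    //                       //             fmov d0, x8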
    auto TryVectorizeOperand = [](SDValue N, std::optional<uint64_t> NLane,
                                  std::optional<uint64_t> OtherLane,
                                  const SDLoc &dl,
                                  SelectionDAG &DAG) -> SDValue {
      // If the operand is a higher half itself, rewrite it to
      // extract_high_v2i64; this way aarch64_neon_pmull64 could
      // re-use the dag-combiner function with aarch64_neon_{pmull,smull,umull}.
      if (NLane && *NLane == 1)
        return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v1i64,
                           N.getOperand(0), DAG.getConstant(1, dl, MVT::i64));

      // Operand N is not a higher half but the other operand is.
      if (OtherLane && *OtherLane == 1) {
        // If this operand is a lower half, rewrite it to
        // extract_high_v2i64(duplane(<2 x Ty>, 0)). This saves a roundtrip to
        // align lanes of two operands. A roundtrip sequence (to move from lane
        // 1 to lane 0) is like this:
        //   mov x8, v0.d[1]
        //   fmov d0, x8
        if (NLane && *NLane == 0)
          return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v1i64,
                             DAG.getNode(AArch64ISD::DUPLANE64, dl, MVT::v2i64,
                                         N.getOperand(0),
                                         DAG.getConstant(0, dl, MVT::i64)),
                             DAG.getConstant(1, dl, MVT::i64));

        // Otherwise just dup from main to all lanes.
        return DAG.getNode(AArch64ISD::DUP, dl, MVT::v1i64, N);
      }

      // Neither operand is an extract of higher half, so codegen may just use
      // the non-high version of PMULL instruction. Use v1i64 to represent i64.
      assert(N.getValueType() == MVT::i64 &&
             "Intrinsic aarch64_neon_pmull64 requires i64 parameters");
      return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, N);
    };

    LHS = TryVectorizeOperand(LHS, LHSLane, RHSLane, dl, DAG);
    RHS = TryVectorizeOperand(RHS, RHSLane, LHSLane, dl, DAG);

    return DAG.getNode(AArch64ISD::PMULL, dl, Op.getValueType(), LHS, RHS);
  }
  case Intrinsic::aarch64_neon_smax:
    return DAG.getNode(ISD::SMAX, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::aarch64_neon_umax:
    return DAG.getNode(ISD::UMAX, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::aarch64_neon_smin:
    return DAG.getNode(ISD::SMIN, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::aarch64_neon_umin:
    return DAG.getNode(ISD::UMIN, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::aarch64_neon_scalar_sqxtn:
  case Intrinsic::aarch64_neon_scalar_sqxtun:
  case Intrinsic::aarch64_neon_scalar_uqxtn: {
    assert(Op.getValueType() == MVT::i32 || Op.getValueType() == MVT::f32);
    if (Op.getValueType() == MVT::i32)
      return DAG.getNode(ISD::BITCAST, dl, MVT::i32,
                         DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::f32,
                                     Op.getOperand(0),
                                     DAG.getNode(ISD::BITCAST, dl, MVT::f64,
                                                 Op.getOperand(1))));
    return SDValue();
  }
  case Intrinsic::aarch64_sve_whilelo: {
    if (isa<ConstantSDNode>(Op.getOperand(1)) &&
        isa<ConstantSDNode>(Op.getOperand(2))) {
      unsigned MinSVEVectorSize =
          std::max(Subtarget->getMinSVEVectorSizeInBits(), 128u);
      unsigned ElementSize = 128 / Op.getValueType().getVectorMinNumElements();
      unsigned NumActiveElems =
          Op.getConstantOperandVal(2) - Op.getConstantOperandVal(1);
      std::optional<unsigned> PredPattern =
          getSVEPredPatternFromNumElements(NumActiveElems);
      if ((PredPattern != std::nullopt) &&
          NumActiveElems <= (MinSVEVectorSize / ElementSize))
        return getPTrue(DAG, dl, Op.getValueType(), *PredPattern);
    }
    return SDValue();
  }
  case Intrinsic::aarch64_sve_sunpkhi:
    return DAG.getNode(AArch64ISD::SUNPKHI, dl, Op.getValueType(),
                       Op.getOperand(1));
  case Intrinsic::aarch64_sve_sunpklo:
    return DAG.getNode(AArch64ISD::SUNPKLO, dl, Op.getValueType(),
                       Op.getOperand(1));
  case Intrinsic::aarch64_sve_uunpkhi:
    return DAG.getNode(AArch64ISD::UUNPKHI, dl, Op.getValueType(),
                       Op.getOperand(1));
  case Intrinsic::aarch64_sve_uunpklo:
    return DAG.getNode(AArch64ISD::UUNPKLO, dl, Op.getValueType(),
                       Op.getOperand(1));
  case Intrinsic::aarch64_sve_clasta_n:
    return DAG.getNode(AArch64ISD::CLASTA_N, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::aarch64_sve_clastb_n:
    return DAG.getNode(AArch64ISD::CLASTB_N, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::aarch64_sve_lasta:
    return DAG.getNode(AArch64ISD::LASTA, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::aarch64_sve_lastb:
    return DAG.getNode(AArch64ISD::LASTB, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::aarch64_sve_rev:
    return DAG.getNode(ISD::VECTOR_REVERSE, dl, Op.getValueType(),
                       Op.getOperand(1));
  case Intrinsic::aarch64_sve_tbl:
    return DAG.getNode(AArch64ISD::TBL, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::aarch64_sve_trn1:
    return DAG.getNode(AArch64ISD::TRN1, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::aarch64_sve_trn2:
    return DAG.getNode(AArch64ISD::TRN2, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::aarch64_sve_uzp1:
    return DAG.getNode(AArch64ISD::UZP1, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::aarch64_sve_uzp2:
    return DAG.getNode(AArch64ISD::UZP2, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::aarch64_sve_zip1:
    return DAG.getNode(AArch64ISD::ZIP1, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::aarch64_sve_zip2:
    return DAG.getNode(AArch64ISD::ZIP2, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::aarch64_sve_splice:
    return DAG.getNode(AArch64ISD::SPLICE, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::aarch64_sve_ptrue:
    return getPTrue(DAG, dl, Op.getValueType(),
                    cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
  case Intrinsic::aarch64_sve_clz:
    return DAG.getNode(AArch64ISD::CTLZ_MERGE_PASSTHRU, dl, Op.getValueType(),
                       Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
  case Intrinsic::aarch64_sme_cntsb:
    return DAG.getNode(AArch64ISD::RDSVL, dl, Op.getValueType(),
                       DAG.getConstant(1, dl, MVT::i32));
  case Intrinsic::aarch64_sme_cntsh: {
    SDValue One = DAG.getConstant(1, dl, MVT::i32);
    SDValue Bytes = DAG.getNode(AArch64ISD::RDSVL, dl, Op.getValueType(), One);
    return DAG.getNode(ISD::SRL, dl, Op.getValueType(), Bytes, One);
  }
  case Intrinsic::aarch64_sme_cntsw: {
    SDValue Bytes = DAG.getNode(AArch64ISD::RDSVL, dl, Op.getValueType(),
                                DAG.getConstant(1, dl, MVT::i32));
    return DAG.getNode(ISD::SRL, dl, Op.getValueType(), Bytes,
                       DAG.getConstant(2, dl, MVT::i32));
  }
  case Intrinsic::aarch64_sme_cntsd: {
    SDValue Bytes = DAG.getNode(AArch64ISD::RDSVL, dl, Op.getValueType(),
                                DAG.getConstant(1, dl, MVT::i32));
    return DAG.getNode(ISD::SRL, dl, Op.getValueType(), Bytes,
                       DAG.getConstant(3, dl, MVT::i32));
  }
  case Intrinsic::aarch64_sve_cnt: {
    SDValue Data = Op.getOperand(3);
    // CTPOP only supports integer operands.
    if (Data.getValueType().isFloatingPoint())
      Data = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Data);
    return DAG.getNode(AArch64ISD::CTPOP_MERGE_PASSTHRU, dl, Op.getValueType(),
                       Op.getOperand(2), Data, Op.getOperand(1));
  }
  case Intrinsic::aarch64_sve_dupq_lane:
    return LowerDUPQLane(Op, DAG);
  case Intrinsic::aarch64_sve_convert_from_svbool:
    return getSVEPredicateBitCast(Op.getValueType(), Op.getOperand(1), DAG);
  case Intrinsic::aarch64_sve_convert_to_svbool:
    return getSVEPredicateBitCast(MVT::nxv16i1, Op.getOperand(1), DAG);
  case Intrinsic::aarch64_sve_fneg:
    return DAG.getNode(AArch64ISD::FNEG_MERGE_PASSTHRU, dl, Op.getValueType(),
                       Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
  case Intrinsic::aarch64_sve_frintp:
    return DAG.getNode(AArch64ISD::FCEIL_MERGE_PASSTHRU, dl, Op.getValueType(),
                       Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
  case Intrinsic::aarch64_sve_frintm:
    return DAG.getNode(AArch64ISD::FFLOOR_MERGE_PASSTHRU, dl, Op.getValueType(),
                       Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
  case Intrinsic::aarch64_sve_frinti:
    return DAG.getNode(AArch64ISD::FNEARBYINT_MERGE_PASSTHRU, dl, Op.getValueType(),
                       Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
  case Intrinsic::aarch64_sve_frintx:
    return DAG.getNode(AArch64ISD::FRINT_MERGE_PASSTHRU, dl, Op.getValueType(),
                       Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
  case Intrinsic::aarch64_sve_frinta:
    return DAG.getNode(AArch64ISD::FROUND_MERGE_PASSTHRU, dl, Op.getValueType(),
                       Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
  case Intrinsic::aarch64_sve_frintn:
    return DAG.getNode(AArch64ISD::FROUNDEVEN_MERGE_PASSTHRU, dl, Op.getValueType(),
                       Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
  case Intrinsic::aarch64_sve_frintz:
    return DAG.getNode(AArch64ISD::FTRUNC_MERGE_PASSTHRU, dl, Op.getValueType(),
                       Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
  case Intrinsic::aarch64_sve_ucvtf:
    return DAG.getNode(AArch64ISD::UINT_TO_FP_MERGE_PASSTHRU, dl,
                       Op.getValueType(), Op.getOperand(2), Op.getOperand(3),
                       Op.getOperand(1));
  case Intrinsic::aarch64_sve_scvtf:
    return DAG.getNode(AArch64ISD::SINT_TO_FP_MERGE_PASSTHRU, dl,
                       Op.getValueType(), Op.getOperand(2), Op.getOperand(3),
                       Op.getOperand(1));
  case Intrinsic::aarch64_sve_fcvtzu:
    return DAG.getNode(AArch64ISD::FCVTZU_MERGE_PASSTHRU, dl,
                       Op.getValueType(), Op.getOperand(2), Op.getOperand(3),
                       Op.getOperand(1));
  case Intrinsic::aarch64_sve_fcvtzs:
    return DAG.getNode(AArch64ISD::FCVTZS_MERGE_PASSTHRU, dl,
                       Op.getValueType(), Op.getOperand(2), Op.getOperand(3),
                       Op.getOperand(1));
  case Intrinsic::aarch64_sve_fsqrt:
    return DAG.getNode(AArch64ISD::FSQRT_MERGE_PASSTHRU, dl, Op.getValueType(),
                       Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
  case Intrinsic::aarch64_sve_frecpx:
    return DAG.getNode(AArch64ISD::FRECPX_MERGE_PASSTHRU, dl, Op.getValueType(),
                       Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
  case Intrinsic::aarch64_sve_frecpe_x:
    return DAG.getNode(AArch64ISD::FRECPE, dl, Op.getValueType(),
                       Op.getOperand(1));
  case Intrinsic::aarch64_sve_frecps_x:
    return DAG.getNode(AArch64ISD::FRECPS, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::aarch64_sve_frsqrte_x:
    return DAG.getNode(AArch64ISD::FRSQRTE, dl, Op.getValueType(),
                       Op.getOperand(1));
  case Intrinsic::aarch64_sve_frsqrts_x:
    return DAG.getNode(AArch64ISD::FRSQRTS, dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::aarch64_sve_fabs:
    return DAG.getNode(AArch64ISD::FABS_MERGE_PASSTHRU, dl, Op.getValueType(),
                       Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
  case Intrinsic::aarch64_sve_abs:
    return DAG.getNode(AArch64ISD::ABS_MERGE_PASSTHRU, dl, Op.getValueType(),
                       Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
  case Intrinsic::aarch64_sve_neg:
    return DAG.getNode(AArch64ISD::NEG_MERGE_PASSTHRU, dl, Op.getValueType(),
                       Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
  case Intrinsic::aarch64_sve_insr: {
    SDValue Scalar = Op.getOperand(2);
    EVT ScalarTy = Scalar.getValueType();
    if ((ScalarTy == MVT::i8) || (ScalarTy == MVT::i16))
      Scalar = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Scalar);

    return DAG.getNode(AArch64ISD::INSR, dl, Op.getValueType(),
                       Op.getOperand(1), Scalar);
  }
  case Intrinsic::aarch64_sve_rbit:
    return DAG.getNode(AArch64ISD::BITREVERSE_MERGE_PASSTHRU, dl,
                       Op.getValueType(), Op.getOperand(2), Op.getOperand(3),
                       Op.getOperand(1));
  case Intrinsic::aarch64_sve_revb:
    return DAG.getNode(AArch64ISD::BSWAP_MERGE_PASSTHRU, dl, Op.getValueType(),
                       Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
  case Intrinsic::aarch64_sve_revh:
    return DAG.getNode(AArch64ISD::REVH_MERGE_PASSTHRU, dl, Op.getValueType(),
                       Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
  case Intrinsic::aarch64_sve_revw:
    return DAG.getNode(AArch64ISD::REVW_MERGE_PASSTHRU, dl, Op.getValueType(),
                       Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
  case Intrinsic::aarch64_sve_revd:
    return DAG.getNode(AArch64ISD::REVD_MERGE_PASSTHRU, dl, Op.getValueType(),
                       Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
  case Intrinsic::aarch64_sve_sxtb:
    return DAG.getNode(
        AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU, dl, Op.getValueType(),
        Op.getOperand(2), Op.getOperand(3),
        DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i8)),
        Op.getOperand(1));
  case Intrinsic::aarch64_sve_sxth:
    return DAG.getNode(
        AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU, dl, Op.getValueType(),
        Op.getOperand(2), Op.getOperand(3),
        DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i16)),
        Op.getOperand(1));
  case Intrinsic::aarch64_sve_sxtw:
    return DAG.getNode(
        AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU, dl, Op.getValueType(),
        Op.getOperand(2), Op.getOperand(3),
        DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i32)),
        Op.getOperand(1));
  case Intrinsic::aarch64_sve_uxtb:
    return DAG.getNode(
        AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU, dl, Op.getValueType(),
        Op.getOperand(2), Op.getOperand(3),
        DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i8)),
        Op.getOperand(1));
  case Intrinsic::aarch64_sve_uxth:
    return DAG.getNode(
        AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU, dl, Op.getValueType(),
        Op.getOperand(2), Op.getOperand(3),
        DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i16)),
        Op.getOperand(1));
  case Intrinsic::aarch64_sve_uxtw:
    return DAG.getNode(
        AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU, dl, Op.getValueType(),
        Op.getOperand(2), Op.getOperand(3),
        DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i32)),
        Op.getOperand(1));
  case Intrinsic::localaddress: {
    const auto &MF = DAG.getMachineFunction();
    const auto *RegInfo = Subtarget->getRegisterInfo();
    unsigned Reg = RegInfo->getLocalAddressRegister(MF);
    return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg,
                              Op.getSimpleValueType());
  }

  case Intrinsic::eh_recoverfp: {
    // FIXME: This needs to be implemented to correctly handle highly aligned
    // stack objects. For now we simply return the incoming FP. Refer to
    // D53541 for more details.
    SDValue FnOp = Op.getOperand(1);
    SDValue IncomingFPOp = Op.getOperand(2);
    GlobalAddressSDNode *GSD = dyn_cast<GlobalAddressSDNode>(FnOp);
    auto *Fn = dyn_cast_or_null<Function>(GSD ? GSD->getGlobal() : nullptr);
    if (!Fn)
      report_fatal_error(
          "llvm.eh.recoverfp must take a function as the first argument");
    return IncomingFPOp;
  }

  case Intrinsic::aarch64_neon_vsri:
  case Intrinsic::aarch64_neon_vsli: {
    EVT Ty = Op.getValueType();

    if (!Ty.isVector())
      report_fatal_error("Unexpected type for aarch64_neon_vsli");

    assert(Op.getConstantOperandVal(3) <= Ty.getScalarSizeInBits());

    bool IsShiftRight = IntNo == Intrinsic::aarch64_neon_vsri;
    unsigned Opcode = IsShiftRight ? AArch64ISD::VSRI : AArch64ISD::VSLI;
    return DAG.getNode(Opcode, dl, Ty, Op.getOperand(1), Op.getOperand(2),
                       Op.getOperand(3));
  }

  case Intrinsic::aarch64_neon_srhadd:
  case Intrinsic::aarch64_neon_urhadd:
  case Intrinsic::aarch64_neon_shadd:
  case Intrinsic::aarch64_neon_uhadd: {
    bool IsSignedAdd = (IntNo == Intrinsic::aarch64_neon_srhadd ||
                        IntNo == Intrinsic::aarch64_neon_shadd);
    bool IsRoundingAdd = (IntNo == Intrinsic::aarch64_neon_srhadd ||
                          IntNo == Intrinsic::aarch64_neon_urhadd);
    unsigned Opcode = IsSignedAdd
                          ? (IsRoundingAdd ? ISD::AVGCEILS : ISD::AVGFLOORS)
                          : (IsRoundingAdd ? ISD::AVGCEILU : ISD::AVGFLOORU);
    return DAG.getNode(Opcode, dl, Op.getValueType(), Op.getOperand(1),
                       Op.getOperand(2));
  }
  case Intrinsic::aarch64_neon_sabd:
  case Intrinsic::aarch64_neon_uabd: {
    unsigned Opcode = IntNo == Intrinsic::aarch64_neon_uabd ? ISD::ABDU
                                                            : ISD::ABDS;
    return DAG.getNode(Opcode, dl, Op.getValueType(), Op.getOperand(1),
                       Op.getOperand(2));
  }
  case Intrinsic::aarch64_neon_saddlp:
  case Intrinsic::aarch64_neon_uaddlp: {
    unsigned Opcode = IntNo == Intrinsic::aarch64_neon_uaddlp
                          ? AArch64ISD::UADDLP
                          : AArch64ISD::SADDLP;
    return DAG.getNode(Opcode, dl, Op.getValueType(), Op.getOperand(1));
  }
  case Intrinsic::aarch64_neon_sdot:
  case Intrinsic::aarch64_neon_udot:
  case Intrinsic::aarch64_sve_sdot:
  case Intrinsic::aarch64_sve_udot: {
    unsigned Opcode = (IntNo == Intrinsic::aarch64_neon_udot ||
                       IntNo == Intrinsic::aarch64_sve_udot)
                          ? AArch64ISD::UDOT
                          : AArch64ISD::SDOT;
    return DAG.getNode(Opcode, dl, Op.getValueType(), Op.getOperand(1),
                       Op.getOperand(2), Op.getOperand(3));
  }
  case Intrinsic::get_active_lane_mask: {
    SDValue ID =
        DAG.getTargetConstant(Intrinsic::aarch64_sve_whilelo, dl, MVT::i64);
    return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, Op.getValueType(), ID,
                       Op.getOperand(1), Op.getOperand(2));
  }
  }
}

bool AArch64TargetLowering::shouldExtendGSIndex(EVT VT, EVT &EltTy) const {
  if (VT.getVectorElementType() == MVT::i8 ||
      VT.getVectorElementType() == MVT::i16) {
    EltTy = MVT::i32;
    return true;
  }
  return false;
}

bool AArch64TargetLowering::shouldRemoveExtendFromGSIndex(EVT IndexVT,
                                                          EVT DataVT) const {
  // SVE only supports implicit extension of 32-bit indices.
  if (!Subtarget->hasSVE() || IndexVT.getVectorElementType() != MVT::i32)
    return false;

  // Indices cannot be smaller than the main data type.
  if (IndexVT.getScalarSizeInBits() < DataVT.getScalarSizeInBits())
    return false;

  // Scalable vectors with "vscale * 2" or fewer elements sit within a 64-bit
  // element container type, which would violate the previous clause.
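  // e.g. (an illustrative reading of the clause above, not from the original
  // comment): nxv2f32 data is held in 64-bit element containers, so a 32-bit
  // index would be smaller than the data it addresses and the extension must
  // stay.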
  return DataVT.isFixedLengthVector() || DataVT.getVectorMinNumElements() > 2;
}

bool AArch64TargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
  return ExtVal.getValueType().isScalableVector() ||
         useSVEForFixedLengthVectorVT(
             ExtVal.getValueType(),
             /*OverrideNEON=*/Subtarget->useSVEForFixedLengthVectors());
}
5199 | |||||
5200 | unsigned getGatherVecOpcode(bool IsScaled, bool IsSigned, bool NeedsExtend) { | ||||
5201 | std::map<std::tuple<bool, bool, bool>, unsigned> AddrModes = { | ||||
5202 | {std::make_tuple(/*Scaled*/ false, /*Signed*/ false, /*Extend*/ false), | ||||
5203 | AArch64ISD::GLD1_MERGE_ZERO}, | ||||
5204 | {std::make_tuple(/*Scaled*/ false, /*Signed*/ false, /*Extend*/ true), | ||||
5205 | AArch64ISD::GLD1_UXTW_MERGE_ZERO}, | ||||
5206 | {std::make_tuple(/*Scaled*/ false, /*Signed*/ true, /*Extend*/ false), | ||||
5207 | AArch64ISD::GLD1_MERGE_ZERO}, | ||||
5208 | {std::make_tuple(/*Scaled*/ false, /*Signed*/ true, /*Extend*/ true), | ||||
5209 | AArch64ISD::GLD1_SXTW_MERGE_ZERO}, | ||||
5210 | {std::make_tuple(/*Scaled*/ true, /*Signed*/ false, /*Extend*/ false), | ||||
5211 | AArch64ISD::GLD1_SCALED_MERGE_ZERO}, | ||||
5212 | {std::make_tuple(/*Scaled*/ true, /*Signed*/ false, /*Extend*/ true), | ||||
5213 | AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO}, | ||||
5214 | {std::make_tuple(/*Scaled*/ true, /*Signed*/ true, /*Extend*/ false), | ||||
5215 | AArch64ISD::GLD1_SCALED_MERGE_ZERO}, | ||||
5216 | {std::make_tuple(/*Scaled*/ true, /*Signed*/ true, /*Extend*/ true), | ||||
5217 | AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO}, | ||||
5218 | }; | ||||
5219 | auto Key = std::make_tuple(IsScaled, IsSigned, NeedsExtend); | ||||
5220 | return AddrModes.find(Key)->second; | ||||
5221 | } | ||||
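// Worked example: getGatherVecOpcode(/*IsScaled=*/true, /*IsSigned=*/true,
// /*NeedsExtend=*/true) yields GLD1_SXTW_SCALED_MERGE_ZERO. All eight
// (Scaled, Signed, Extend) combinations are enumerated, so the find() above
// can never return end(). Signedness only matters when an extension is
// needed, which is why the two non-extending "signed" entries alias their
// unsigned counterparts.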
5222 | |||||
5223 | unsigned getSignExtendedGatherOpcode(unsigned Opcode) { | ||||
5224 | switch (Opcode) { | ||||
5225 | default: | ||||
5226 | llvm_unreachable("unimplemented opcode")::llvm::llvm_unreachable_internal("unimplemented opcode", "llvm/lib/Target/AArch64/AArch64ISelLowering.cpp" , 5226); | ||||
5227 | return Opcode; | ||||
5228 | case AArch64ISD::GLD1_MERGE_ZERO: | ||||
5229 | return AArch64ISD::GLD1S_MERGE_ZERO; | ||||
5230 | case AArch64ISD::GLD1_IMM_MERGE_ZERO: | ||||
5231 | return AArch64ISD::GLD1S_IMM_MERGE_ZERO; | ||||
5232 | case AArch64ISD::GLD1_UXTW_MERGE_ZERO: | ||||
5233 | return AArch64ISD::GLD1S_UXTW_MERGE_ZERO; | ||||
5234 | case AArch64ISD::GLD1_SXTW_MERGE_ZERO: | ||||
5235 | return AArch64ISD::GLD1S_SXTW_MERGE_ZERO; | ||||
5236 | case AArch64ISD::GLD1_SCALED_MERGE_ZERO: | ||||
5237 | return AArch64ISD::GLD1S_SCALED_MERGE_ZERO; | ||||
5238 | case AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO: | ||||
5239 | return AArch64ISD::GLD1S_UXTW_SCALED_MERGE_ZERO; | ||||
5240 | case AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO: | ||||
5241 | return AArch64ISD::GLD1S_SXTW_SCALED_MERGE_ZERO; | ||||
5242 | } | ||||
5243 | } | ||||
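// Every GLD1* opcode zero-extends its loaded elements; this helper swaps in
// the GLD1S* sibling, which sign-extends instead, e.g. GLD1_SXTW_MERGE_ZERO
// becomes GLD1S_SXTW_MERGE_ZERO when the gather result is known to be
// sign-extended.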
5244 | |||||
5245 | SDValue AArch64TargetLowering::LowerMGATHER(SDValue Op, | ||||
5246 | SelectionDAG &DAG) const { | ||||
5247 | MaskedGatherSDNode *MGT = cast<MaskedGatherSDNode>(Op); | ||||
5248 | |||||
5249 | SDLoc DL(Op); | ||||
5250 | SDValue Chain = MGT->getChain(); | ||||
5251 | SDValue PassThru = MGT->getPassThru(); | ||||
5252 | SDValue Mask = MGT->getMask(); | ||||
5253 | SDValue BasePtr = MGT->getBasePtr(); | ||||
5254 | SDValue Index = MGT->getIndex(); | ||||
5255 | SDValue Scale = MGT->getScale(); | ||||
5256 | EVT VT = Op.getValueType(); | ||||
5257 | EVT MemVT = MGT->getMemoryVT(); | ||||
5258 | ISD::LoadExtType ExtType = MGT->getExtensionType(); | ||||
5259 | ISD::MemIndexType IndexType = MGT->getIndexType(); | ||||
5260 | |||||
5261 | // SVE supports zero (and so undef) passthrough values only; everything else | ||||
5262 | // must be handled manually by an explicit select on the load's output. | ||||
5263 | if (!PassThru->isUndef() && !isZerosVector(PassThru.getNode())) { | ||||
5264 | SDValue Ops[] = {Chain, DAG.getUNDEF(VT), Mask, BasePtr, Index, Scale}; | ||||
5265 | SDValue Load = | ||||
5266 | DAG.getMaskedGather(MGT->getVTList(), MemVT, DL, Ops, | ||||
5267 | MGT->getMemOperand(), IndexType, ExtType); | ||||
5268 | SDValue Select = DAG.getSelect(DL, VT, Mask, Load, PassThru); | ||||
5269 | return DAG.getMergeValues({Select, Load.getValue(1)}, DL); | ||||
5270 | } | ||||
5271 | |||||
5272 | bool IsScaled = MGT->isIndexScaled(); | ||||
5273 | bool IsSigned = MGT->isIndexSigned(); | ||||
5274 | |||||
5275 | // SVE supports an index scaled by sizeof(MemVT.elt) only; everything else | ||||
5276 | // must be calculated beforehand. | ||||
5277 | uint64_t ScaleVal = cast<ConstantSDNode>(Scale)->getZExtValue(); | ||||
5278 | if (IsScaled && ScaleVal != MemVT.getScalarStoreSize()) { | ||||
5279 | assert(isPowerOf2_64(ScaleVal) && "Expecting power-of-two types"); | ||||
5280 | EVT IndexVT = Index.getValueType(); | ||||
5281 | Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index, | ||||
5282 | DAG.getConstant(Log2_32(ScaleVal), DL, IndexVT)); | ||||
5283 | Scale = DAG.getTargetConstant(1, DL, Scale.getValueType()); | ||||
5284 | |||||
5285 | SDValue Ops[] = {Chain, PassThru, Mask, BasePtr, Index, Scale}; | ||||
5286 | return DAG.getMaskedGather(MGT->getVTList(), MemVT, DL, Ops, | ||||
5287 | MGT->getMemOperand(), IndexType, ExtType); | ||||
5288 | } | ||||
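// Worked example of the normalization above: gathering i32 elements with a
// constant Scale of 8 cannot use SVE's "index * sizeof(element)" addressing,
// so each index is shifted left by Log2(8) == 3 and the node is re-emitted
// with Scale == 1, i.e. [base + idx * 8] becomes [base + (idx << 3) * 1].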
5289 | |||||
5290 | // Lower fixed length gather to a scalable equivalent. | ||||
5291 | if (VT.isFixedLengthVector()) { | ||||
5292 | assert(Subtarget->useSVEForFixedLengthVectors() && | ||||
5293 | "Cannot lower when not using SVE for fixed vectors!"); | ||||
5294 | |||||
5295 | // NOTE: Handle floating-point as if integer then bitcast the result. | ||||
5296 | EVT DataVT = VT.changeVectorElementTypeToInteger(); | ||||
5297 | MemVT = MemVT.changeVectorElementTypeToInteger(); | ||||
5298 | |||||
5299 | // Find the smallest integer fixed length vector we can use for the gather. | ||||
5300 | EVT PromotedVT = VT.changeVectorElementType(MVT::i32); | ||||
5301 | if (DataVT.getVectorElementType() == MVT::i64 || | ||||
5302 | Index.getValueType().getVectorElementType() == MVT::i64 || | ||||
5303 | Mask.getValueType().getVectorElementType() == MVT::i64) | ||||
5304 | PromotedVT = VT.changeVectorElementType(MVT::i64); | ||||
5305 | |||||
5306 | // Promote vector operands except for passthrough, which we know is either | ||||
5307 | // undef or zero, and thus best constructed directly. | ||||
5308 | unsigned ExtOpcode = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; | ||||
5309 | Index = DAG.getNode(ExtOpcode, DL, PromotedVT, Index); | ||||
5310 | Mask = DAG.getNode(ISD::SIGN_EXTEND, DL, PromotedVT, Mask); | ||||
5311 | |||||
5312 | // A promoted result type forces the need for an extending load. | ||||
5313 | if (PromotedVT != DataVT && ExtType == ISD::NON_EXTLOAD) | ||||
5314 | ExtType = ISD::EXTLOAD; | ||||
5315 | |||||
5316 | EVT ContainerVT = getContainerForFixedLengthVector(DAG, PromotedVT); | ||||
5317 | |||||
5318 | // Convert fixed length vector operands to scalable. | ||||
5319 | MemVT = ContainerVT.changeVectorElementType(MemVT.getVectorElementType()); | ||||
5320 | Index = convertToScalableVector(DAG, ContainerVT, Index); | ||||
5321 | Mask = convertFixedMaskToScalableVector(Mask, DAG); | ||||
5322 | PassThru = PassThru->isUndef() ? DAG.getUNDEF(ContainerVT) | ||||
5323 | : DAG.getConstant(0, DL, ContainerVT); | ||||
5324 | |||||
5325 | // Emit equivalent scalable vector gather. | ||||
5326 | SDValue Ops[] = {Chain, PassThru, Mask, BasePtr, Index, Scale}; | ||||
5327 | SDValue Load = | ||||
5328 | DAG.getMaskedGather(DAG.getVTList(ContainerVT, MVT::Other), MemVT, DL, | ||||
5329 | Ops, MGT->getMemOperand(), IndexType, ExtType); | ||||
5330 | |||||
5331 | // Extract fixed length data then convert to the required result type. | ||||
5332 | SDValue Result = convertFromScalableVector(DAG, PromotedVT, Load); | ||||
5333 | Result = DAG.getNode(ISD::TRUNCATE, DL, DataVT, Result); | ||||
5334 | if (VT.isFloatingPoint()) | ||||
5335 | Result = DAG.getNode(ISD::BITCAST, DL, VT, Result); | ||||
5336 | |||||
5337 | return DAG.getMergeValues({Result, Load.getValue(1)}, DL); | ||||
5338 | } | ||||
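// Worked example of the fixed-length path above: a v4i16 gather with v4i32
// indices promotes the data type to v4i32, moves all operands into scalable
// containers (nxv4i32 on a 128-bit SVE implementation), performs an
// extending scalable gather, then truncates the extracted fixed-length
// result back to v4i16, bitcasting afterwards if the original element type
// was floating point.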
5339 | |||||
5340 | // Everything else is legal. | ||||
5341 | return Op; | ||||
5342 | } | ||||
5343 | |||||
5344 | SDValue AArch64TargetLowering::LowerMSCATTER(SDValue Op, | ||||
5345 | SelectionDAG &DAG) const { | ||||
5346 | MaskedScatterSDNode *MSC = cast<MaskedScatterSDNode>(Op); | ||||
5347 | |||||
5348 | SDLoc DL(Op); | ||||
5349 | SDValue Chain = MSC->getChain(); | ||||
5350 | SDValue StoreVal = MSC->getValue(); | ||||
5351 | SDValue Mask = MSC->getMask(); | ||||
5352 | SDValue BasePtr = MSC->getBasePtr(); | ||||
5353 | SDValue Index = MSC->getIndex(); | ||||
5354 | SDValue Scale = MSC->getScale(); | ||||
5355 | EVT VT = StoreVal.getValueType(); | ||||
5356 | EVT MemVT = MSC->getMemoryVT(); | ||||
5357 | ISD::MemIndexType IndexType = MSC->getIndexType(); | ||||
5358 | bool Truncating = MSC->isTruncatingStore(); | ||||
5359 | |||||
5360 | bool IsScaled = MSC->isIndexScaled(); | ||||
5361 | bool IsSigned = MSC->isIndexSigned(); | ||||
5362 | |||||
5363 | // SVE supports an index scaled by sizeof(MemVT.elt) only; everything else | ||||
5364 | // must be calculated beforehand. | ||||
5365 | uint64_t ScaleVal = cast<ConstantSDNode>(Scale)->getZExtValue(); | ||||
5366 | if (IsScaled && ScaleVal != MemVT.getScalarStoreSize()) { | ||||
5367 | assert(isPowerOf2_64(ScaleVal) && "Expecting power-of-two types"); | ||||
5368 | EVT IndexVT = Index.getValueType(); | ||||
5369 | Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index, | ||||
5370 | DAG.getConstant(Log2_32(ScaleVal), DL, IndexVT)); | ||||
5371 | Scale = DAG.getTargetConstant(1, DL, Scale.getValueType()); | ||||
5372 | |||||
5373 | SDValue Ops[] = {Chain, StoreVal, Mask, BasePtr, Index, Scale}; | ||||
5374 | return DAG.getMaskedScatter(MSC->getVTList(), MemVT, DL, Ops, | ||||
5375 | MSC->getMemOperand(), IndexType, Truncating); | ||||
5376 | } | ||||
5377 | |||||
5378 | // Lower fixed length scatter to a scalable equivalent. | ||||
5379 | if (VT.isFixedLengthVector()) { | ||||
5380 | assert(Subtarget->useSVEForFixedLengthVectors() && | ||||
5381 | "Cannot lower when not using SVE for fixed vectors!"); | ||||
5382 | |||||
5383 | // Once bitcast we treat floating-point scatters as if integer. | ||||
5384 | if (VT.isFloatingPoint()) { | ||||
5385 | VT = VT.changeVectorElementTypeToInteger(); | ||||
5386 | MemVT = MemVT.changeVectorElementTypeToInteger(); | ||||
5387 | StoreVal = DAG.getNode(ISD::BITCAST, DL, VT, StoreVal); | ||||
5388 | } | ||||
5389 | |||||
5390 | // Find the smallest integer fixed length vector we can use for the scatter. | ||||
5391 | EVT PromotedVT = VT.changeVectorElementType(MVT::i32); | ||||
5392 | if (VT.getVectorElementType() == MVT::i64 || | ||||
5393 | Index.getValueType().getVectorElementType() == MVT::i64 || | ||||
5394 | Mask.getValueType().getVectorElementType() == MVT::i64) | ||||
5395 | PromotedVT = VT.changeVectorElementType(MVT::i64); | ||||
5396 | |||||
5397 | // Promote vector operands. | ||||
5398 | unsigned ExtOpcode = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; | ||||
5399 | Index = DAG.getNode(ExtOpcode, DL, PromotedVT, Index); | ||||
5400 | Mask = DAG.getNode(ISD::SIGN_EXTEND, DL, PromotedVT, Mask); | ||||
5401 | StoreVal = DAG.getNode(ISD::ANY_EXTEND, DL, PromotedVT, StoreVal); | ||||
5402 | |||||
5403 | // A promoted value type forces the need for a truncating store. | ||||
5404 | if (PromotedVT != VT) | ||||
5405 | Truncating = true; | ||||
5406 | |||||
5407 | EVT ContainerVT = getContainerForFixedLengthVector(DAG, PromotedVT); | ||||
5408 | |||||
5409 | // Convert fixed length vector operands to scalable. | ||||
5410 | MemVT = ContainerVT.changeVectorElementType(MemVT.getVectorElementType()); | ||||
5411 | Index = convertToScalableVector(DAG, ContainerVT, Index); | ||||
5412 | Mask = convertFixedMaskToScalableVector(Mask, DAG); | ||||
5413 | StoreVal = convertToScalableVector(DAG, ContainerVT, StoreVal); | ||||
5414 | |||||
5415 | // Emit equivalent scalable vector scatter. | ||||
5416 | SDValue Ops[] = {Chain, StoreVal, Mask, BasePtr, Index, Scale}; | ||||
5417 | return DAG.getMaskedScatter(MSC->getVTList(), MemVT, DL, Ops, | ||||
5418 | MSC->getMemOperand(), IndexType, Truncating); | ||||
5419 | } | ||||
5420 | |||||
5421 | // Everything else is legal. | ||||
5422 | return Op; | ||||
5423 | } | ||||
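// The scatter lowering mirrors LowerMGATHER: over-scaled indices are
// pre-shifted, and fixed-length operands are promoted (ANY_EXTEND on the
// stored value, which forces a truncating scatter) and moved into scalable
// containers before the node is re-emitted.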
5424 | |||||
5425 | SDValue AArch64TargetLowering::LowerMLOAD(SDValue Op, SelectionDAG &DAG) const { | ||||
5426 | SDLoc DL(Op); | ||||
5427 | MaskedLoadSDNode *LoadNode = cast<MaskedLoadSDNode>(Op); | ||||
5428 | assert(LoadNode && "Expected custom lowering of a masked load node"); | ||||
5429 | EVT VT = Op->getValueType(0); | ||||
5430 | |||||
5431 | if (useSVEForFixedLengthVectorVT( | ||||
5432 | VT, | ||||
5433 | /*OverrideNEON=*/Subtarget->useSVEForFixedLengthVectors())) | ||||
5434 | return LowerFixedLengthVectorMLoadToSVE(Op, DAG); | ||||
5435 | |||||
5436 | SDValue PassThru = LoadNode->getPassThru(); | ||||
5437 | SDValue Mask = LoadNode->getMask(); | ||||
5438 | |||||
5439 | if (PassThru->isUndef() || isZerosVector(PassThru.getNode())) | ||||
5440 | return Op; | ||||
5441 | |||||
5442 | SDValue Load = DAG.getMaskedLoad( | ||||
5443 | VT, DL, LoadNode->getChain(), LoadNode->getBasePtr(), | ||||
5444 | LoadNode->getOffset(), Mask, DAG.getUNDEF(VT), LoadNode->getMemoryVT(), | ||||
5445 | LoadNode->getMemOperand(), LoadNode->getAddressingMode(), | ||||
5446 | LoadNode->getExtensionType()); | ||||
5447 | |||||
5448 | SDValue Result = DAG.getSelect(DL, VT, Mask, Load, PassThru); | ||||
5449 | |||||
5450 | return DAG.getMergeValues({Result, Load.getValue(1)}, DL); | ||||
5451 | } | ||||
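// The decomposition above turns a masked load with an arbitrary passthru
// into SVE's zeroing form:
//   %l = masked.load(..., passthru = undef)
//   %r = select %mask, %l, %passthru
// so inactive lanes are produced by the explicit select rather than by the
// load itself.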
5452 | |||||
5453 | // Custom lower trunc store for v4i8 vectors, since it is promoted to v4i16. | ||||
5454 | static SDValue LowerTruncateVectorStore(SDLoc DL, StoreSDNode *ST, | ||||
5455 | EVT VT, EVT MemVT, | ||||
5456 | SelectionDAG &DAG) { | ||||
5457 | assert(VT.isVector() && "VT should be a vector type"); | ||||
5458 | assert(MemVT == MVT::v4i8 && VT == MVT::v4i16); | ||||
5459 | |||||
5460 | SDValue Value = ST->getValue(); | ||||
5461 | |||||
5462 | // We first extend the promoted v4i16 to v8i16, truncate it to v8i8, and | ||||
5463 | // extract the word lane that represents the v4i8 subvector, optimizing the | ||||
5464 | // store to: | ||||
5465 | // | ||||
5466 | // xtn v0.8b, v0.8h | ||||
5467 | // str s0, [x0] | ||||
5468 | |||||
5469 | SDValue Undef = DAG.getUNDEF(MVT::i16); | ||||
5470 | SDValue UndefVec = DAG.getBuildVector(MVT::v4i16, DL, | ||||
5471 | {Undef, Undef, Undef, Undef}); | ||||
5472 | |||||
5473 | SDValue TruncExt = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i16, | ||||
5474 | Value, UndefVec); | ||||
5475 | SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i8, TruncExt); | ||||
5476 | |||||
5477 | Trunc = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Trunc); | ||||
5478 | SDValue ExtractTrunc = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, | ||||
5479 | Trunc, DAG.getConstant(0, DL, MVT::i64)); | ||||
5480 | |||||
5481 | return DAG.getStore(ST->getChain(), DL, ExtractTrunc, | ||||
5482 | ST->getBasePtr(), ST->getMemOperand()); | ||||
5483 | } | ||||
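// The CONCAT_VECTORS with undef only exists to make the v8i16 -> v8i8
// truncate (the single "xtn" above) legal; the bitcast to v2i32 plus the
// lane-0 extract then recovers exactly the four meaningful bytes as one
// i32, matching the "str s0" shown above.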
5484 | |||||
5485 | // Custom lowering for any store, vector or scalar, default or truncating. | ||||
5486 | // Currently we only custom lower truncating stores from v4i16 to v4i8 and | ||||
5487 | // volatile stores of i128. | ||||
5488 | SDValue AArch64TargetLowering::LowerSTORE(SDValue Op, | ||||
5489 | SelectionDAG &DAG) const { | ||||
5490 | SDLoc Dl(Op); | ||||
5491 | StoreSDNode *StoreNode = cast<StoreSDNode>(Op); | ||||
5492 | assert(StoreNode && "Can only custom lower store nodes"); | ||||
5493 | |||||
5494 | SDValue Value = StoreNode->getValue(); | ||||
5495 | |||||
5496 | EVT VT = Value.getValueType(); | ||||
5497 | EVT MemVT = StoreNode->getMemoryVT(); | ||||
5498 | |||||
5499 | if (VT.isVector()) { | ||||
5500 | if (useSVEForFixedLengthVectorVT( | ||||
5501 | VT, | ||||
5502 | /*OverrideNEON=*/Subtarget->useSVEForFixedLengthVectors())) | ||||
5503 | return LowerFixedLengthVectorStoreToSVE(Op, DAG); | ||||
5504 | |||||
5505 | unsigned AS = StoreNode->getAddressSpace(); | ||||
5506 | Align Alignment = StoreNode->getAlign(); | ||||
5507 | if (Alignment < MemVT.getStoreSize() && | ||||
5508 | !allowsMisalignedMemoryAccesses(MemVT, AS, Alignment, | ||||
5509 | StoreNode->getMemOperand()->getFlags(), | ||||
5510 | nullptr)) { | ||||
5511 | return scalarizeVectorStore(StoreNode, DAG); | ||||
5512 | } | ||||
5513 | |||||
5514 | if (StoreNode->isTruncatingStore() && VT == MVT::v4i16 && | ||||
5515 | MemVT == MVT::v4i8) { | ||||
5516 | return LowerTruncateVectorStore(Dl, StoreNode, VT, MemVT, DAG); | ||||
5517 | } | ||||
5518 | // 256-bit non-temporal stores can be lowered to STNP. Do this as part of | ||||
5519 | // the custom lowering, as there are no unpaired non-temporal stores and | ||||
5520 | // legalization will break up 256-bit inputs. | ||||
5521 | ElementCount EC = MemVT.getVectorElementCount(); | ||||
5522 | if (StoreNode->isNonTemporal() && MemVT.getSizeInBits() == 256u && | ||||
5523 | EC.isKnownEven() && | ||||
5524 | ((MemVT.getScalarSizeInBits() == 8u || | ||||
5525 | MemVT.getScalarSizeInBits() == 16u || | ||||
5526 | MemVT.getScalarSizeInBits() == 32u || | ||||
5527 | MemVT.getScalarSizeInBits() == 64u))) { | ||||
5528 | SDValue Lo = | ||||
5529 | DAG.getNode(ISD::EXTRACT_SUBVECTOR, Dl, | ||||
5530 | MemVT.getHalfNumVectorElementsVT(*DAG.getContext()), | ||||
5531 | StoreNode->getValue(), DAG.getConstant(0, Dl, MVT::i64)); | ||||
5532 | SDValue Hi = | ||||
5533 | DAG.getNode(ISD::EXTRACT_SUBVECTOR, Dl, | ||||
5534 | MemVT.getHalfNumVectorElementsVT(*DAG.getContext()), | ||||
5535 | StoreNode->getValue(), | ||||
5536 | DAG.getConstant(EC.getKnownMinValue() / 2, Dl, MVT::i64)); | ||||
5537 | SDValue Result = DAG.getMemIntrinsicNode( | ||||
5538 | AArch64ISD::STNP, Dl, DAG.getVTList(MVT::Other), | ||||
5539 | {StoreNode->getChain(), Lo, Hi, StoreNode->getBasePtr()}, | ||||
5540 | StoreNode->getMemoryVT(), StoreNode->getMemOperand()); | ||||
5541 | return Result; | ||||
5542 | } | ||||
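// Worked example: a non-temporal store of v8f32 (256 bits) is split into
// two v4f32 halves (lanes 0-3 and 4-7) and emitted as a single STNP. This
// must happen during custom lowering because type legalization would
// otherwise split the 256-bit value before it could be paired.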
5543 | } else if (MemVT == MVT::i128 && StoreNode->isVolatile()) { | ||||
5544 | return LowerStore128(Op, DAG); | ||||
5545 | } else if (MemVT == MVT::i64x8) { | ||||
5546 | SDValue Value = StoreNode->getValue(); | ||||
5547 | assert(Value->getValueType(0) == MVT::i64x8); | ||||
5548 | SDValue Chain = StoreNode->getChain(); | ||||
5549 | SDValue Base = StoreNode->getBasePtr(); | ||||
5550 | EVT PtrVT = Base.getValueType(); | ||||
5551 | for (unsigned i = 0; i < 8; i++) { | ||||
5552 | SDValue Part = DAG.getNode(AArch64ISD::LS64_EXTRACT, Dl, MVT::i64, | ||||
5553 | Value, DAG.getConstant(i, Dl, MVT::i32)); | ||||
5554 | SDValue Ptr = DAG.getNode(ISD::ADD, Dl, PtrVT, Base, | ||||
5555 | DAG.getConstant(i * 8, Dl, PtrVT)); | ||||
5556 | Chain = DAG.getStore(Chain, Dl, Part, Ptr, StoreNode->getPointerInfo(), | ||||
5557 | StoreNode->getOriginalAlign()); | ||||
5558 | } | ||||
5559 | return Chain; | ||||
5560 | } | ||||
5561 | |||||
5562 | return SDValue(); | ||||
5563 | } | ||||
5564 | |||||
5565 | /// Lower atomic or volatile 128-bit stores to a single STP instruction. | ||||
5566 | SDValue AArch64TargetLowering::LowerStore128(SDValue Op, | ||||
5567 | SelectionDAG &DAG) const { | ||||
5568 | MemSDNode *StoreNode = cast<MemSDNode>(Op); | ||||
5569 | assert(StoreNode->getMemoryVT() == MVT::i128); | ||||
5570 | assert(StoreNode->isVolatile() || StoreNode->isAtomic()); | ||||
5571 | assert(!StoreNode->isAtomic() || | ||||
5572 | StoreNode->getMergedOrdering() == AtomicOrdering::Unordered || | ||||
5573 | StoreNode->getMergedOrdering() == AtomicOrdering::Monotonic); | ||||
5574 | |||||
5575 | SDValue Value = StoreNode->getOpcode() == ISD::STORE | ||||
5576 | ? StoreNode->getOperand(1) | ||||
5577 | : StoreNode->getOperand(2); | ||||
5578 | SDLoc DL(Op); | ||||
5579 | SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, Value, | ||||
5580 | DAG.getConstant(0, DL, MVT::i64)); | ||||
5581 | SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, Value, | ||||
5582 | DAG.getConstant(1, DL, MVT::i64)); | ||||
5583 | SDValue Result = DAG.getMemIntrinsicNode( | ||||
5584 | AArch64ISD::STP, DL, DAG.getVTList(MVT::Other), | ||||
5585 | {StoreNode->getChain(), Lo, Hi, StoreNode->getBasePtr()}, | ||||
5586 | StoreNode->getMemoryVT(), StoreNode->getMemOperand()); | ||||
5587 | return Result; | ||||
5588 | } | ||||
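// The ordering restriction asserted above matters because STP has no
// acquire/release semantics: only unordered or monotonic atomic stores may
// take this path, and their atomicity relies on FEAT_LSE2 making aligned
// paired accesses single-copy atomic (see the ATOMIC_STORE case in
// LowerOperation, which checks hasLSE2()).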
5589 | |||||
5590 | SDValue AArch64TargetLowering::LowerLOAD(SDValue Op, | ||||
5591 | SelectionDAG &DAG) const { | ||||
5592 | SDLoc DL(Op); | ||||
5593 | LoadSDNode *LoadNode = cast<LoadSDNode>(Op); | ||||
5594 | assert(LoadNode && "Expected custom lowering of a load node"); | ||||
5595 | |||||
5596 | if (LoadNode->getMemoryVT() == MVT::i64x8) { | ||||
5597 | SmallVector<SDValue, 8> Ops; | ||||
5598 | SDValue Base = LoadNode->getBasePtr(); | ||||
5599 | SDValue Chain = LoadNode->getChain(); | ||||
5600 | EVT PtrVT = Base.getValueType(); | ||||
5601 | for (unsigned i = 0; i < 8; i++) { | ||||
5602 | SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Base, | ||||
5603 | DAG.getConstant(i * 8, DL, PtrVT)); | ||||
5604 | SDValue Part = DAG.getLoad(MVT::i64, DL, Chain, Ptr, | ||||
5605 | LoadNode->getPointerInfo(), | ||||
5606 | LoadNode->getOriginalAlign()); | ||||
5607 | Ops.push_back(Part); | ||||
5608 | Chain = SDValue(Part.getNode(), 1); | ||||
5609 | } | ||||
5610 | SDValue Loaded = DAG.getNode(AArch64ISD::LS64_BUILD, DL, MVT::i64x8, Ops); | ||||
5611 | return DAG.getMergeValues({Loaded, Chain}, DL); | ||||
5612 | } | ||||
5613 | |||||
5614 | // Custom lowering for extending v4i8 vector loads. | ||||
5615 | EVT VT = Op->getValueType(0); | ||||
5616 | assert((VT == MVT::v4i16 || VT == MVT::v4i32) && "Expected v4i16 or v4i32"); | ||||
5617 | |||||
5618 | if (LoadNode->getMemoryVT() != MVT::v4i8) | ||||
5619 | return SDValue(); | ||||
5620 | |||||
5621 | unsigned ExtType; | ||||
5622 | if (LoadNode->getExtensionType() == ISD::SEXTLOAD) | ||||
5623 | ExtType = ISD::SIGN_EXTEND; | ||||
5624 | else if (LoadNode->getExtensionType() == ISD::ZEXTLOAD || | ||||
5625 | LoadNode->getExtensionType() == ISD::EXTLOAD) | ||||
5626 | ExtType = ISD::ZERO_EXTEND; | ||||
5627 | else | ||||
5628 | return SDValue(); | ||||
5629 | |||||
5630 | SDValue Load = DAG.getLoad(MVT::f32, DL, LoadNode->getChain(), | ||||
5631 | LoadNode->getBasePtr(), MachinePointerInfo()); | ||||
5632 | SDValue Chain = Load.getValue(1); | ||||
5633 | SDValue Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f32, Load); | ||||
5634 | SDValue BC = DAG.getNode(ISD::BITCAST, DL, MVT::v8i8, Vec); | ||||
5635 | SDValue Ext = DAG.getNode(ExtType, DL, MVT::v8i16, BC); | ||||
5636 | Ext = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i16, Ext, | ||||
5637 | DAG.getConstant(0, DL, MVT::i64)); | ||||
5638 | if (VT == MVT::v4i32) | ||||
5639 | Ext = DAG.getNode(ExtType, DL, MVT::v4i32, Ext); | ||||
5640 | return DAG.getMergeValues({Ext, Chain}, DL); | ||||
5641 | } | ||||
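// The extending v4i8 load above avoids four scalar byte loads: the four
// bytes are loaded as one f32 ("ldr s0"), placed in lane 0 of a v2f32,
// bitcast to v8i8, extended to v8i16, and the low v4i16 half extracted.
// An EXTLOAD is handled as ZERO_EXTEND since its high bits are unspecified
// either way.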
5642 | |||||
5643 | // Generate SUBS and CSEL for integer abs. | ||||
5644 | SDValue AArch64TargetLowering::LowerABS(SDValue Op, SelectionDAG &DAG) const { | ||||
5645 | MVT VT = Op.getSimpleValueType(); | ||||
5646 | |||||
5647 | if (VT.isVector()) | ||||
5648 | return LowerToPredicatedOp(Op, DAG, AArch64ISD::ABS_MERGE_PASSTHRU); | ||||
5649 | |||||
5650 | SDLoc DL(Op); | ||||
5651 | SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), | ||||
5652 | Op.getOperand(0)); | ||||
5653 | // Generate SUBS & CSEL. | ||||
5654 | SDValue Cmp = | ||||
5655 | DAG.getNode(AArch64ISD::SUBS, DL, DAG.getVTList(VT, MVT::i32), | ||||
5656 | Op.getOperand(0), DAG.getConstant(0, DL, VT)); | ||||
5657 | return DAG.getNode(AArch64ISD::CSEL, DL, VT, Op.getOperand(0), Neg, | ||||
5658 | DAG.getConstant(AArch64CC::PL, DL, MVT::i32), | ||||
5659 | Cmp.getValue(1)); | ||||
5660 | } | ||||
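// For scalar types the sequence above is branchless: Neg = 0 - x, then
// SUBS sets flags from x - 0, and CSEL with the PL (non-negative)
// condition picks x when x >= 0 and Neg otherwise.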
5661 | |||||
5662 | static SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) { | ||||
5663 | SDValue Chain = Op.getOperand(0); | ||||
5664 | SDValue Cond = Op.getOperand(1); | ||||
5665 | SDValue Dest = Op.getOperand(2); | ||||
5666 | |||||
5667 | AArch64CC::CondCode CC; | ||||
5668 | if (SDValue Cmp = emitConjunction(DAG, Cond, CC)) { | ||||
5669 | SDLoc dl(Op); | ||||
5670 | SDValue CCVal = DAG.getConstant(CC, dl, MVT::i32); | ||||
5671 | return DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal, | ||||
5672 | Cmp); | ||||
5673 | } | ||||
5674 | |||||
5675 | return SDValue(); | ||||
5676 | } | ||||
5677 | |||||
5678 | SDValue AArch64TargetLowering::LowerOperation(SDValue Op, | ||||
5679 | SelectionDAG &DAG) const { | ||||
5680 | LLVM_DEBUG(dbgs() << "Custom lowering: "); | ||||
5681 | LLVM_DEBUG(Op.dump()); | ||||
5682 | |||||
5683 | switch (Op.getOpcode()) { | ||||
5684 | default: | ||||
5685 | llvm_unreachable("unimplemented operand")::llvm::llvm_unreachable_internal("unimplemented operand", "llvm/lib/Target/AArch64/AArch64ISelLowering.cpp" , 5685); | ||||
5686 | return SDValue(); | ||||
5687 | case ISD::BITCAST: | ||||
5688 | return LowerBITCAST(Op, DAG); | ||||
5689 | case ISD::GlobalAddress: | ||||
5690 | return LowerGlobalAddress(Op, DAG); | ||||
5691 | case ISD::GlobalTLSAddress: | ||||
5692 | return LowerGlobalTLSAddress(Op, DAG); | ||||
5693 | case ISD::SETCC: | ||||
5694 | case ISD::STRICT_FSETCC: | ||||
5695 | case ISD::STRICT_FSETCCS: | ||||
5696 | return LowerSETCC(Op, DAG); | ||||
5697 | case ISD::SETCCCARRY: | ||||
5698 | return LowerSETCCCARRY(Op, DAG); | ||||
5699 | case ISD::BRCOND: | ||||
5700 | return LowerBRCOND(Op, DAG); | ||||
5701 | case ISD::BR_CC: | ||||
5702 | return LowerBR_CC(Op, DAG); | ||||
5703 | case ISD::SELECT: | ||||
5704 | return LowerSELECT(Op, DAG); | ||||
5705 | case ISD::SELECT_CC: | ||||
5706 | return LowerSELECT_CC(Op, DAG); | ||||
5707 | case ISD::JumpTable: | ||||
5708 | return LowerJumpTable(Op, DAG); | ||||
5709 | case ISD::BR_JT: | ||||
5710 | return LowerBR_JT(Op, DAG); | ||||
5711 | case ISD::ConstantPool: | ||||
5712 | return LowerConstantPool(Op, DAG); | ||||
5713 | case ISD::BlockAddress: | ||||
5714 | return LowerBlockAddress(Op, DAG); | ||||
5715 | case ISD::VASTART: | ||||
5716 | return LowerVASTART(Op, DAG); | ||||
5717 | case ISD::VACOPY: | ||||
5718 | return LowerVACOPY(Op, DAG); | ||||
5719 | case ISD::VAARG: | ||||
5720 | return LowerVAARG(Op, DAG); | ||||
5721 | case ISD::ADDCARRY: | ||||
5722 | return lowerADDSUBCARRY(Op, DAG, AArch64ISD::ADCS, false /*unsigned*/); | ||||
5723 | case ISD::SUBCARRY: | ||||
5724 | return lowerADDSUBCARRY(Op, DAG, AArch64ISD::SBCS, false /*unsigned*/); | ||||
5725 | case ISD::SADDO_CARRY: | ||||
5726 | return lowerADDSUBCARRY(Op, DAG, AArch64ISD::ADCS, true /*signed*/); | ||||
5727 | case ISD::SSUBO_CARRY: | ||||
5728 | return lowerADDSUBCARRY(Op, DAG, AArch64ISD::SBCS, true /*signed*/); | ||||
5729 | case ISD::SADDO: | ||||
5730 | case ISD::UADDO: | ||||
5731 | case ISD::SSUBO: | ||||
5732 | case ISD::USUBO: | ||||
5733 | case ISD::SMULO: | ||||
5734 | case ISD::UMULO: | ||||
5735 | return LowerXALUO(Op, DAG); | ||||
5736 | case ISD::FADD: | ||||
5737 | return LowerToPredicatedOp(Op, DAG, AArch64ISD::FADD_PRED); | ||||
5738 | case ISD::FSUB: | ||||
5739 | return LowerToPredicatedOp(Op, DAG, AArch64ISD::FSUB_PRED); | ||||
5740 | case ISD::FMUL: | ||||
5741 | return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMUL_PRED); | ||||
5742 | case ISD::FMA: | ||||
5743 | return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMA_PRED); | ||||
5744 | case ISD::FDIV: | ||||
5745 | return LowerToPredicatedOp(Op, DAG, AArch64ISD::FDIV_PRED); | ||||
5746 | case ISD::FNEG: | ||||
5747 | return LowerToPredicatedOp(Op, DAG, AArch64ISD::FNEG_MERGE_PASSTHRU); | ||||
5748 | case ISD::FCEIL: | ||||
5749 | return LowerToPredicatedOp(Op, DAG, AArch64ISD::FCEIL_MERGE_PASSTHRU); | ||||
5750 | case ISD::FFLOOR: | ||||
5751 | return LowerToPredicatedOp(Op, DAG, AArch64ISD::FFLOOR_MERGE_PASSTHRU); | ||||
5752 | case ISD::FNEARBYINT: | ||||
5753 | return LowerToPredicatedOp(Op, DAG, AArch64ISD::FNEARBYINT_MERGE_PASSTHRU); | ||||
5754 | case ISD::FRINT: | ||||
5755 | return LowerToPredicatedOp(Op, DAG, AArch64ISD::FRINT_MERGE_PASSTHRU); | ||||
5756 | case ISD::FROUND: | ||||
5757 | return LowerToPredicatedOp(Op, DAG, AArch64ISD::FROUND_MERGE_PASSTHRU); | ||||
5758 | case ISD::FROUNDEVEN: | ||||
5759 | return LowerToPredicatedOp(Op, DAG, AArch64ISD::FROUNDEVEN_MERGE_PASSTHRU); | ||||
5760 | case ISD::FTRUNC: | ||||
5761 | return LowerToPredicatedOp(Op, DAG, AArch64ISD::FTRUNC_MERGE_PASSTHRU); | ||||
5762 | case ISD::FSQRT: | ||||
5763 | return LowerToPredicatedOp(Op, DAG, AArch64ISD::FSQRT_MERGE_PASSTHRU); | ||||
5764 | case ISD::FABS: | ||||
5765 | return LowerToPredicatedOp(Op, DAG, AArch64ISD::FABS_MERGE_PASSTHRU); | ||||
5766 | case ISD::FP_ROUND: | ||||
5767 | case ISD::STRICT_FP_ROUND: | ||||
5768 | return LowerFP_ROUND(Op, DAG); | ||||
5769 | case ISD::FP_EXTEND: | ||||
5770 | return LowerFP_EXTEND(Op, DAG); | ||||
5771 | case ISD::FRAMEADDR: | ||||
5772 | return LowerFRAMEADDR(Op, DAG); | ||||
5773 | case ISD::SPONENTRY: | ||||
5774 | return LowerSPONENTRY(Op, DAG); | ||||
5775 | case ISD::RETURNADDR: | ||||
5776 | return LowerRETURNADDR(Op, DAG); | ||||
5777 | case ISD::ADDROFRETURNADDR: | ||||
5778 | return LowerADDROFRETURNADDR(Op, DAG); | ||||
5779 | case ISD::CONCAT_VECTORS: | ||||
5780 | return LowerCONCAT_VECTORS(Op, DAG); | ||||
5781 | case ISD::INSERT_VECTOR_ELT: | ||||
5782 | return LowerINSERT_VECTOR_ELT(Op, DAG); | ||||
5783 | case ISD::EXTRACT_VECTOR_ELT: | ||||
5784 | return LowerEXTRACT_VECTOR_ELT(Op, DAG); | ||||
5785 | case ISD::BUILD_VECTOR: | ||||
5786 | return LowerBUILD_VECTOR(Op, DAG); | ||||
5787 | case ISD::VECTOR_SHUFFLE: | ||||
5788 | return LowerVECTOR_SHUFFLE(Op, DAG); | ||||
5789 | case ISD::SPLAT_VECTOR: | ||||
5790 | return LowerSPLAT_VECTOR(Op, DAG); | ||||
5791 | case ISD::EXTRACT_SUBVECTOR: | ||||
5792 | return LowerEXTRACT_SUBVECTOR(Op, DAG); | ||||
5793 | case ISD::INSERT_SUBVECTOR: | ||||
5794 | return LowerINSERT_SUBVECTOR(Op, DAG); | ||||
5795 | case ISD::SDIV: | ||||
5796 | case ISD::UDIV: | ||||
5797 | return LowerDIV(Op, DAG); | ||||
5798 | case ISD::SMIN: | ||||
5799 | case ISD::UMIN: | ||||
5800 | case ISD::SMAX: | ||||
5801 | case ISD::UMAX: | ||||
5802 | return LowerMinMax(Op, DAG); | ||||
5803 | case ISD::SRA: | ||||
5804 | case ISD::SRL: | ||||
5805 | case ISD::SHL: | ||||
5806 | return LowerVectorSRA_SRL_SHL(Op, DAG); | ||||
5807 | case ISD::SHL_PARTS: | ||||
5808 | case ISD::SRL_PARTS: | ||||
5809 | case ISD::SRA_PARTS: | ||||
5810 | return LowerShiftParts(Op, DAG); | ||||
5811 | case ISD::CTPOP: | ||||
5812 | case ISD::PARITY: | ||||
5813 | return LowerCTPOP_PARITY(Op, DAG); | ||||
5814 | case ISD::FCOPYSIGN: | ||||
5815 | return LowerFCOPYSIGN(Op, DAG); | ||||
5816 | case ISD::OR: | ||||
5817 | return LowerVectorOR(Op, DAG); | ||||
5818 | case ISD::XOR: | ||||
5819 | return LowerXOR(Op, DAG); | ||||
5820 | case ISD::PREFETCH: | ||||
5821 | return LowerPREFETCH(Op, DAG); | ||||
5822 | case ISD::SINT_TO_FP: | ||||
5823 | case ISD::UINT_TO_FP: | ||||
5824 | case ISD::STRICT_SINT_TO_FP: | ||||
5825 | case ISD::STRICT_UINT_TO_FP: | ||||
5826 | return LowerINT_TO_FP(Op, DAG); | ||||
5827 | case ISD::FP_TO_SINT: | ||||
5828 | case ISD::FP_TO_UINT: | ||||
5829 | case ISD::STRICT_FP_TO_SINT: | ||||
5830 | case ISD::STRICT_FP_TO_UINT: | ||||
5831 | return LowerFP_TO_INT(Op, DAG); | ||||
5832 | case ISD::FP_TO_SINT_SAT: | ||||
5833 | case ISD::FP_TO_UINT_SAT: | ||||
5834 | return LowerFP_TO_INT_SAT(Op, DAG); | ||||
5835 | case ISD::FSINCOS: | ||||
5836 | return LowerFSINCOS(Op, DAG); | ||||
5837 | case ISD::FLT_ROUNDS_: | ||||
5838 | return LowerFLT_ROUNDS_(Op, DAG); | ||||
5839 | case ISD::SET_ROUNDING: | ||||
5840 | return LowerSET_ROUNDING(Op, DAG); | ||||
5841 | case ISD::MUL: | ||||
5842 | return LowerMUL(Op, DAG); | ||||
5843 | case ISD::MULHS: | ||||
5844 | return LowerToPredicatedOp(Op, DAG, AArch64ISD::MULHS_PRED); | ||||
5845 | case ISD::MULHU: | ||||
5846 | return LowerToPredicatedOp(Op, DAG, AArch64ISD::MULHU_PRED); | ||||
5847 | case ISD::INTRINSIC_VOID: | ||||
5848 | case ISD::INTRINSIC_W_CHAIN: | ||||
5849 | return LowerINTRINSIC_W_CHAIN(Op, DAG); | ||||
5850 | case ISD::INTRINSIC_WO_CHAIN: | ||||
5851 | return LowerINTRINSIC_WO_CHAIN(Op, DAG); | ||||
5852 | case ISD::ATOMIC_STORE: | ||||
5853 | if (cast<MemSDNode>(Op)->getMemoryVT() == MVT::i128) { | ||||
5854 | assert(Subtarget->hasLSE2()); | ||||
5855 | return LowerStore128(Op, DAG); | ||||
5856 | } | ||||
5857 | return SDValue(); | ||||
5858 | case ISD::STORE: | ||||
5859 | return LowerSTORE(Op, DAG); | ||||
5860 | case ISD::MSTORE: | ||||
5861 | return LowerFixedLengthVectorMStoreToSVE(Op, DAG); | ||||
5862 | case ISD::MGATHER: | ||||
5863 | return LowerMGATHER(Op, DAG); | ||||
5864 | case ISD::MSCATTER: | ||||
5865 | return LowerMSCATTER(Op, DAG); | ||||
5866 | case ISD::VECREDUCE_SEQ_FADD: | ||||
5867 | return LowerVECREDUCE_SEQ_FADD(Op, DAG); | ||||
5868 | case ISD::VECREDUCE_ADD: | ||||
5869 | case ISD::VECREDUCE_AND: | ||||
5870 | case ISD::VECREDUCE_OR: | ||||
5871 | case ISD::VECREDUCE_XOR: | ||||
5872 | case ISD::VECREDUCE_SMAX: | ||||
5873 | case ISD::VECREDUCE_SMIN: | ||||
5874 | case ISD::VECREDUCE_UMAX: | ||||
5875 | case ISD::VECREDUCE_UMIN: | ||||
5876 | case ISD::VECREDUCE_FADD: | ||||
5877 | case ISD::VECREDUCE_FMAX: | ||||
5878 | case ISD::VECREDUCE_FMIN: | ||||
5879 | return LowerVECREDUCE(Op, DAG); | ||||
5880 | case ISD::ATOMIC_LOAD_SUB: | ||||
5881 | return LowerATOMIC_LOAD_SUB(Op, DAG); | ||||
5882 | case ISD::ATOMIC_LOAD_AND: | ||||
5883 | return LowerATOMIC_LOAD_AND(Op, DAG); | ||||
5884 | case ISD::DYNAMIC_STACKALLOC: | ||||
5885 | return LowerDYNAMIC_STACKALLOC(Op, DAG); | ||||
5886 | case ISD::VSCALE: | ||||
5887 | return LowerVSCALE(Op, DAG); | ||||
5888 | case ISD::ANY_EXTEND: | ||||
5889 | case ISD::SIGN_EXTEND: | ||||
5890 | case ISD::ZERO_EXTEND: | ||||
5891 | return LowerFixedLengthVectorIntExtendToSVE(Op, DAG); | ||||
5892 | case ISD::SIGN_EXTEND_INREG: { | ||||
5893 | // Only custom lower when ExtraVT has a legal byte based element type. | ||||
5894 | EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); | ||||
5895 | EVT ExtraEltVT = ExtraVT.getVectorElementType(); | ||||
5896 | if ((ExtraEltVT != MVT::i8) && (ExtraEltVT != MVT::i16) && | ||||
5897 | (ExtraEltVT != MVT::i32) && (ExtraEltVT != MVT::i64)) | ||||
5898 | return SDValue(); | ||||
5899 | |||||
5900 | return LowerToPredicatedOp(Op, DAG, | ||||
5901 | AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU); | ||||
5902 | } | ||||
5903 | case ISD::TRUNCATE: | ||||
5904 | return LowerTRUNCATE(Op, DAG); | ||||
5905 | case ISD::MLOAD: | ||||
5906 | return LowerMLOAD(Op, DAG); | ||||
5907 | case ISD::LOAD: | ||||
5908 | if (useSVEForFixedLengthVectorVT(Op.getValueType())) | ||||
5909 | return LowerFixedLengthVectorLoadToSVE(Op, DAG); | ||||
5910 | return LowerLOAD(Op, DAG); | ||||
5911 | case ISD::ADD: | ||||
5912 | case ISD::AND: | ||||
5913 | case ISD::SUB: | ||||
5914 | return LowerToScalableOp(Op, DAG); | ||||
5915 | case ISD::FMAXIMUM: | ||||
5916 | return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMAX_PRED); | ||||
5917 | case ISD::FMAXNUM: | ||||
5918 | return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMAXNM_PRED); | ||||
5919 | case ISD::FMINIMUM: | ||||
5920 | return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMIN_PRED); | ||||
5921 | case ISD::FMINNUM: | ||||
5922 | return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMINNM_PRED); | ||||
5923 | case ISD::VSELECT: | ||||
5924 | return LowerFixedLengthVectorSelectToSVE(Op, DAG); | ||||
5925 | case ISD::ABS: | ||||
5926 | return LowerABS(Op, DAG); | ||||
5927 | case ISD::ABDS: | ||||
5928 | return LowerToPredicatedOp(Op, DAG, AArch64ISD::ABDS_PRED); | ||||
5929 | case ISD::ABDU: | ||||
5930 | return LowerToPredicatedOp(Op, DAG, AArch64ISD::ABDU_PRED); | ||||
5931 | case ISD::BITREVERSE: | ||||
5932 | return LowerBitreverse(Op, DAG); | ||||
5933 | case ISD::BSWAP: | ||||
5934 | return LowerToPredicatedOp(Op, DAG, AArch64ISD::BSWAP_MERGE_PASSTHRU); | ||||
5935 | case ISD::CTLZ: | ||||
5936 | return LowerToPredicatedOp(Op, DAG, AArch64ISD::CTLZ_MERGE_PASSTHRU); | ||||
5937 | case ISD::CTTZ: | ||||
5938 | return LowerCTTZ(Op, DAG); | ||||
5939 | case ISD::VECTOR_SPLICE: | ||||
5940 | return LowerVECTOR_SPLICE(Op, DAG); | ||||
5941 | case ISD::STRICT_LROUND: | ||||
5942 | case ISD::STRICT_LLROUND: | ||||
5943 | case ISD::STRICT_LRINT: | ||||
5944 | case ISD::STRICT_LLRINT: { | ||||
5945 | assert(Op.getOperand(1).getValueType() == MVT::f16 && | ||||
5946 | "Expected custom lowering of rounding operations only for f16"); | ||||
5947 | SDLoc DL(Op); | ||||
5948 | SDValue Ext = DAG.getNode(ISD::STRICT_FP_EXTEND, DL, {MVT::f32, MVT::Other}, | ||||
5949 | {Op.getOperand(0), Op.getOperand(1)}); | ||||
5950 | return DAG.getNode(Op.getOpcode(), DL, {Op.getValueType(), MVT::Other}, | ||||
5951 | {Ext.getValue(1), Ext.getValue(0)}); | ||||
5952 | } | ||||
5953 | case ISD::WRITE_REGISTER: { | ||||
5954 | assert(Op.getOperand(2).getValueType() == MVT::i128 && | ||||
5955 | "WRITE_REGISTER custom lowering is only for 128-bit sysregs"); | ||||
5956 | SDLoc DL(Op); | ||||
5957 | |||||
5958 | SDValue Chain = Op.getOperand(0); | ||||
5959 | SDValue SysRegName = Op.getOperand(1); | ||||
5960 | SDValue Pair = Op.getOperand(2); | ||||
5961 | |||||
5962 | SDValue PairLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, Pair, | ||||
5963 | DAG.getConstant(0, DL, MVT::i32)); | ||||
5964 | SDValue PairHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, Pair, | ||||
5965 | DAG.getConstant(1, DL, MVT::i32)); | ||||
5966 | |||||
5967 | // chain = MSRR(chain, sysregname, lo, hi) | ||||
5968 | SDValue Result = DAG.getNode(AArch64ISD::MSRR, DL, MVT::Other, Chain, | ||||
5969 | SysRegName, PairLo, PairHi); | ||||
5970 | |||||
5971 | return Result; | ||||
5972 | } | ||||
5973 | } | ||||
5974 | } | ||||
5975 | |||||
5976 | bool AArch64TargetLowering::mergeStoresAfterLegalization(EVT VT) const { | ||||
5977 | return !Subtarget->useSVEForFixedLengthVectors(); | ||||
5978 | } | ||||
5979 | |||||
5980 | bool AArch64TargetLowering::useSVEForFixedLengthVectorVT( | ||||
5981 | EVT VT, bool OverrideNEON) const { | ||||
5982 | if (!VT.isFixedLengthVector() || !VT.isSimple()) | ||||
5983 | return false; | ||||
5984 | |||||
5985 | // Don't use SVE for vectors we cannot scalarize if required. | ||||
5986 | switch (VT.getVectorElementType().getSimpleVT().SimpleTy) { | ||||
5987 | // Fixed length predicates should be promoted to i8. | ||||
5988 | // NOTE: This is consistent with how NEON (and thus 64/128-bit vectors) work. | ||||
5989 | case MVT::i1: | ||||
5990 | default: | ||||
5991 | return false; | ||||
5992 | case MVT::i8: | ||||
5993 | case MVT::i16: | ||||
5994 | case MVT::i32: | ||||
5995 | case MVT::i64: | ||||
5996 | case MVT::f16: | ||||
5997 | case MVT::f32: | ||||
5998 | case MVT::f64: | ||||
5999 | break; | ||||
6000 | } | ||||
6001 | |||||
6002 | // All SVE implementations support NEON sized vectors. | ||||
6003 | if (OverrideNEON && (VT.is128BitVector() || VT.is64BitVector())) | ||||
6004 | return Subtarget->hasSVE(); | ||||
6005 | |||||
6006 | // Ensure NEON MVTs only belong to a single register class. | ||||
6007 | if (VT.getFixedSizeInBits() <= 128) | ||||
6008 | return false; | ||||
6009 | |||||
6010 | // Ensure wider than NEON code generation is enabled. | ||||
6011 | if (!Subtarget->useSVEForFixedLengthVectors()) | ||||
6012 | return false; | ||||
6013 | |||||
6014 | // Don't use SVE for types that don't fit. | ||||
6015 | if (VT.getFixedSizeInBits() > Subtarget->getMinSVEVectorSizeInBits()) | ||||
6016 | return false; | ||||
6017 | |||||
6018 | // TODO: Perhaps an artificial restriction, but worth having whilst getting | ||||
6019 | // the base fixed length SVE support in place. | ||||
6020 | if (!VT.isPow2VectorType()) | ||||
6021 | return false; | ||||
6022 | |||||
6023 | return true; | ||||
6024 | } | ||||
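// In summary, a fixed-length vector uses SVE iff its element type is one of
// the seven listed above, it is wider than 128 bits (or OverrideNEON is set
// for NEON-sized vectors), it fits within the guaranteed minimum SVE
// register size, and its element count is a power of two.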
6025 | |||||
6026 | //===----------------------------------------------------------------------===// | ||||
6027 | // Calling Convention Implementation | ||||
6028 | //===----------------------------------------------------------------------===// | ||||
6029 | |||||
6030 | static unsigned getIntrinsicID(const SDNode *N) { | ||||
6031 | unsigned Opcode = N->getOpcode(); | ||||
6032 | switch (Opcode) { | ||||
6033 | default: | ||||
6034 | return Intrinsic::not_intrinsic; | ||||
6035 | case ISD::INTRINSIC_WO_CHAIN: { | ||||
6036 | unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); | ||||
6037 | if (IID < Intrinsic::num_intrinsics) | ||||
6038 | return IID; | ||||
6039 | return Intrinsic::not_intrinsic; | ||||
6040 | } | ||||
6041 | } | ||||
6042 | } | ||||
6043 | |||||
6044 | bool AArch64TargetLowering::isReassocProfitable(SelectionDAG &DAG, SDValue N0, | ||||
6045 | SDValue N1) const { | ||||
6046 | if (!N0.hasOneUse()) | ||||
6047 | return false; | ||||
6048 | |||||
6049 | unsigned IID = getIntrinsicID(N1.getNode()); | ||||
6050 | // Avoid reassociating expressions that can be lowered to smlal/umlal. | ||||
6051 | if (IID == Intrinsic::aarch64_neon_umull || | ||||
6052 | N1.getOpcode() == AArch64ISD::UMULL || | ||||
6053 | IID == Intrinsic::aarch64_neon_smull || | ||||
6054 | N1.getOpcode() == AArch64ISD::SMULL) | ||||
6055 | return N0.getOpcode() != ISD::ADD; | ||||
6056 | |||||
6057 | return true; | ||||
6058 | } | ||||
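// Example of the guard above: for add(add(a, b), smull(c, d)), the multiply
// can fuse with the outer add into smlal, so reporting reassociation as
// profitable would break the multiply-accumulate pairing; hence the check
// that N0 is not an ADD when N1 is a (u|s)mull.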
6059 | |||||
6060 | /// Selects the correct CCAssignFn for a given CallingConvention value. | ||||
6061 | CCAssignFn *AArch64TargetLowering::CCAssignFnForCall(CallingConv::ID CC, | ||||
6062 | bool IsVarArg) const { | ||||
6063 | switch (CC) { | ||||
6064 | default: | ||||
6065 | report_fatal_error("Unsupported calling convention."); | ||||
6066 | case CallingConv::WebKit_JS: | ||||
6067 | return CC_AArch64_WebKit_JS; | ||||
6068 | case CallingConv::GHC: | ||||
6069 | return CC_AArch64_GHC; | ||||
6070 | case CallingConv::C: | ||||
6071 | case CallingConv::Fast: | ||||
6072 | case CallingConv::PreserveMost: | ||||
6073 | case CallingConv::CXX_FAST_TLS: | ||||
6074 | case CallingConv::Swift: | ||||
6075 | case CallingConv::SwiftTail: | ||||
6076 | case CallingConv::Tail: | ||||
6077 | if (Subtarget->isTargetWindows() && IsVarArg) { | ||||
6078 | if (Subtarget->isWindowsArm64EC()) | ||||
6079 | return CC_AArch64_Arm64EC_VarArg; | ||||
6080 | return CC_AArch64_Win64_VarArg; | ||||
6081 | } | ||||
6082 | if (!Subtarget->isTargetDarwin()) | ||||
6083 | return CC_AArch64_AAPCS; | ||||
6084 | if (!IsVarArg) | ||||
6085 | return CC_AArch64_DarwinPCS; | ||||
6086 | return Subtarget->isTargetILP32() ? CC_AArch64_DarwinPCS_ILP32_VarArg | ||||
6087 | : CC_AArch64_DarwinPCS_VarArg; | ||||
6088 | case CallingConv::Win64: | ||||
6089 | if (IsVarArg) { | ||||
6090 | if (Subtarget->isWindowsArm64EC()) | ||||
6091 | return CC_AArch64_Arm64EC_VarArg; | ||||
6092 | return CC_AArch64_Win64_VarArg; | ||||
6093 | } | ||||
6094 | return CC_AArch64_AAPCS; | ||||
6095 | case CallingConv::CFGuard_Check: | ||||
6096 | return CC_AArch64_Win64_CFGuard_Check; | ||||
6097 | case CallingConv::AArch64_VectorCall: | ||||
6098 | case CallingConv::AArch64_SVE_VectorCall: | ||||
6099 | case CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0: | ||||
6100 | case CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2: | ||||
6101 | return CC_AArch64_AAPCS; | ||||
6102 | } | ||||
6103 | } | ||||
6104 | |||||
6105 | CCAssignFn * | ||||
6106 | AArch64TargetLowering::CCAssignFnForReturn(CallingConv::ID CC) const { | ||||
6107 | return CC == CallingConv::WebKit_JS ? RetCC_AArch64_WebKit_JS | ||||
6108 | : RetCC_AArch64_AAPCS; | ||||
6109 | } | ||||
6110 | |||||
6111 | |||||
6112 | unsigned | ||||
6113 | AArch64TargetLowering::allocateLazySaveBuffer(SDValue &Chain, const SDLoc &DL, | ||||
6114 | SelectionDAG &DAG) const { | ||||
6115 | MachineFunction &MF = DAG.getMachineFunction(); | ||||
6116 | MachineFrameInfo &MFI = MF.getFrameInfo(); | ||||
6117 | |||||
6118 | // Allocate a lazy-save buffer object of size SVL.B * SVL.B (worst-case) | ||||
6119 | SDValue N = DAG.getNode(AArch64ISD::RDSVL, DL, MVT::i64, | ||||
6120 | DAG.getConstant(1, DL, MVT::i32)); | ||||
6121 | SDValue NN = DAG.getNode(ISD::MUL, DL, MVT::i64, N, N); | ||||
6122 | SDValue Ops[] = {Chain, NN, DAG.getConstant(1, DL, MVT::i64)}; | ||||
6123 | SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other); | ||||
6124 | SDValue Buffer = DAG.getNode(ISD::DYNAMIC_STACKALLOC, DL, VTs, Ops); | ||||
6125 | Chain = Buffer.getValue(1); | ||||
6126 | MFI.CreateVariableSizedObject(Align(1), nullptr); | ||||
6127 | |||||
6128 | // Allocate an additional TPIDR2 object on the stack (16 bytes) | ||||
6129 | unsigned TPIDR2Obj = MFI.CreateStackObject(16, Align(16), false); | ||||
6130 | |||||
6131 | // Store the buffer pointer to the TPIDR2 stack object. | ||||
6132 | MachinePointerInfo MPI = MachinePointerInfo::getStack(MF, TPIDR2Obj); | ||||
6133 | SDValue Ptr = DAG.getFrameIndex( | ||||
6134 | TPIDR2Obj, | ||||
6135 | DAG.getTargetLoweringInfo().getFrameIndexTy(DAG.getDataLayout())); | ||||
6136 | Chain = DAG.getStore(Chain, DL, Buffer, Ptr, MPI); | ||||
6137 | |||||
6138 | return TPIDR2Obj; | ||||
6139 | } | ||||
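// Lazy-save sizing: RDSVL #1 returns the streaming vector length in bytes
// (SVL.B), and a full ZA save needs SVL.B rows of SVL.B bytes, hence the
// N * N allocation. The first 8 bytes of the 16-byte TPIDR2 block hold the
// buffer pointer, matching the SME ABI's lazy-save scheme (the remaining
// bytes are assumed reserved for the slice count and padding).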
6140 | |||||
6141 | SDValue AArch64TargetLowering::LowerFormalArguments( | ||||
6142 | SDValue Chain, CallingConv::ID CallConv, bool isVarArg, | ||||
6143 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL, | ||||
6144 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { | ||||
6145 | MachineFunction &MF = DAG.getMachineFunction(); | ||||
6146 | const Function &F = MF.getFunction(); | ||||
6147 | MachineFrameInfo &MFI = MF.getFrameInfo(); | ||||
6148 | bool IsWin64 = Subtarget->isCallingConvWin64(F.getCallingConv()); | ||||
6149 | AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>(); | ||||
6150 | |||||
6151 | SmallVector<ISD::OutputArg, 4> Outs; | ||||
6152 | GetReturnInfo(CallConv, F.getReturnType(), F.getAttributes(), Outs, | ||||
6153 | DAG.getTargetLoweringInfo(), MF.getDataLayout()); | ||||
6154 | if (any_of(Outs, [](ISD::OutputArg &Out){ return Out.VT.isScalableVector(); })) | ||||
6155 | FuncInfo->setIsSVECC(true); | ||||
6156 | |||||
6157 | // Assign locations to all of the incoming arguments. | ||||
6158 | SmallVector<CCValAssign, 16> ArgLocs; | ||||
6159 | DenseMap<unsigned, SDValue> CopiedRegs; | ||||
6160 | CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext()); | ||||
6161 | |||||
6162 | // At this point, Ins[].VT may already be promoted to i32. To correctly | ||||
6163 | // handle passing i8 as i8 instead of i32 on stack, we pass in both i32 and | ||||
6164 | // i8 to CC_AArch64_AAPCS with i32 being ValVT and i8 being LocVT. | ||||
6165 | // Since AnalyzeFormalArguments uses Ins[].VT for both ValVT and LocVT, here | ||||
6166 | // we use a special version of AnalyzeFormalArguments to pass in ValVT and | ||||
6167 | // LocVT. | ||||
6168 | unsigned NumArgs = Ins.size(); | ||||
6169 | Function::const_arg_iterator CurOrigArg = F.arg_begin(); | ||||
6170 | unsigned CurArgIdx = 0; | ||||
6171 | for (unsigned i = 0; i != NumArgs; ++i) { | ||||
6172 | MVT ValVT = Ins[i].VT; | ||||
6173 | if (Ins[i].isOrigArg()) { | ||||
6174 | std::advance(CurOrigArg, Ins[i].getOrigArgIndex() - CurArgIdx); | ||||
6175 | CurArgIdx = Ins[i].getOrigArgIndex(); | ||||
6176 | |||||
6177 | // Get type of the original argument. | ||||
6178 | EVT ActualVT = getValueType(DAG.getDataLayout(), CurOrigArg->getType(), | ||||
6179 | /*AllowUnknown*/ true); | ||||
6180 | MVT ActualMVT = ActualVT.isSimple() ? ActualVT.getSimpleVT() : MVT::Other; | ||||
6181 | // If ActualMVT is i1/i8/i16, we should set LocVT to i8/i8/i16. | ||||
6182 | if (ActualMVT == MVT::i1 || ActualMVT == MVT::i8) | ||||
6183 | ValVT = MVT::i8; | ||||
6184 | else if (ActualMVT == MVT::i16) | ||||
6185 | ValVT = MVT::i16; | ||||
6186 | } | ||||
6187 | bool UseVarArgCC = false; | ||||
6188 | if (IsWin64) | ||||
6189 | UseVarArgCC = isVarArg; | ||||
6190 | CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, UseVarArgCC); | ||||
6191 | bool Res = | ||||
6192 | AssignFn(i, ValVT, ValVT, CCValAssign::Full, Ins[i].Flags, CCInfo); | ||||
6193 | assert(!Res && "Call operand has unhandled type"); | ||||
6194 | (void)Res; | ||||
6195 | } | ||||
6196 | |||||
6197 | SMEAttrs Attrs(MF.getFunction()); | ||||
6198 | bool IsLocallyStreaming = | ||||
6199 | !Attrs.hasStreamingInterface() && Attrs.hasStreamingBody(); | ||||
6200 | assert(Chain.getOpcode() == ISD::EntryToken && "Unexpected Chain value"); | ||||
6201 | SDValue Glue = Chain.getValue(1); | ||||
6202 | |||||
6203 | SmallVector<SDValue, 16> ArgValues; | ||||
6204 | unsigned ExtraArgLocs = 0; | ||||
6205 | for (unsigned i = 0, e = Ins.size(); i != e; ++i) { | ||||
6206 | CCValAssign &VA = ArgLocs[i - ExtraArgLocs]; | ||||
6207 | |||||
6208 | if (Ins[i].Flags.isByVal()) { | ||||
6209 | // Byval is used for HFAs in the PCS, but the system should work in a | ||||
6210 | // non-compliant manner for larger structs. | ||||
6211 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); | ||||
6212 | int Size = Ins[i].Flags.getByValSize(); | ||||
6213 | unsigned NumRegs = (Size + 7) / 8; | ||||
6214 | |||||
6215 | // FIXME: This works on big-endian for composite byvals, which are the common | ||||
6216 | // case. It should also work for fundamental types. | ||||
6217 | unsigned FrameIdx = | ||||
6218 | MFI.CreateFixedObject(8 * NumRegs, VA.getLocMemOffset(), false); | ||||
6219 | SDValue FrameIdxN = DAG.getFrameIndex(FrameIdx, PtrVT); | ||||
6220 | InVals.push_back(FrameIdxN); | ||||
6221 | |||||
6222 | continue; | ||||
6223 | } | ||||
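// [Editorial sketch, illustrative only] E.g. a byval HFA such as
//   struct HFA { double d0, d1, d2, d3; };   // 32 bytes -> NumRegs == 4
// that lands in memory becomes a single 32-byte fixed stack object here, and
// the formal argument the function body sees is just its address (FrameIdxN).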
6224 | |||||
6225 | if (Ins[i].Flags.isSwiftAsync()) | ||||
6226 | MF.getInfo<AArch64FunctionInfo>()->setHasSwiftAsyncContext(true); | ||||
6227 | |||||
6228 | SDValue ArgValue; | ||||
6229 | if (VA.isRegLoc()) { | ||||
6230 | // Arguments stored in registers. | ||||
6231 | EVT RegVT = VA.getLocVT(); | ||||
6232 | const TargetRegisterClass *RC; | ||||
6233 | |||||
6234 | if (RegVT == MVT::i32) | ||||
6235 | RC = &AArch64::GPR32RegClass; | ||||
6236 | else if (RegVT == MVT::i64) | ||||
6237 | RC = &AArch64::GPR64RegClass; | ||||
6238 | else if (RegVT == MVT::f16 || RegVT == MVT::bf16) | ||||
6239 | RC = &AArch64::FPR16RegClass; | ||||
6240 | else if (RegVT == MVT::f32) | ||||
6241 | RC = &AArch64::FPR32RegClass; | ||||
6242 | else if (RegVT == MVT::f64 || RegVT.is64BitVector()) | ||||
6243 | RC = &AArch64::FPR64RegClass; | ||||
6244 | else if (RegVT == MVT::f128 || RegVT.is128BitVector()) | ||||
6245 | RC = &AArch64::FPR128RegClass; | ||||
6246 | else if (RegVT.isScalableVector() && | ||||
6247 | RegVT.getVectorElementType() == MVT::i1) { | ||||
6248 | FuncInfo->setIsSVECC(true); | ||||
6249 | RC = &AArch64::PPRRegClass; | ||||
6250 | } else if (RegVT.isScalableVector()) { | ||||
6251 | FuncInfo->setIsSVECC(true); | ||||
6252 | RC = &AArch64::ZPRRegClass; | ||||
6253 | } else | ||||
6254 | llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering"); | ||||
6255 | |||||
6256 | // Transform the arguments in physical registers into virtual ones. | ||||
6257 | Register Reg = MF.addLiveIn(VA.getLocReg(), RC); | ||||
6258 | |||||
6259 | if (IsLocallyStreaming) { | ||||
6260 | // LocallyStreamingFunctions must insert the SMSTART in the correct | ||||
6261 | // position, so we use Glue to ensure no instructions can be scheduled | ||||
6262 | // between the chain of: | ||||
6263 | // t0: ch,glue = EntryNode | ||||
6264 | // t1: res,ch,glue = CopyFromReg | ||||
6265 | // ... | ||||
6266 | // tn: res,ch,glue = CopyFromReg t(n-1), .. | ||||
6267 | // t(n+1): ch, glue = SMSTART t0:0, ...., tn:2 | ||||
6268 | // ^^^^^^ | ||||
6269 | // This will be the new Chain/Root node. | ||||
6270 | ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegVT, Glue); | ||||
6271 | Glue = ArgValue.getValue(2); | ||||
6272 | } else | ||||
6273 | ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegVT); | ||||
6274 | |||||
6275 | // If this is an 8, 16 or 32-bit value, it is really passed promoted | ||||
6276 | // to 64 bits. Insert an assert[sz]ext to capture this, then | ||||
6277 | // truncate to the right size. | ||||
6278 | switch (VA.getLocInfo()) { | ||||
6279 | default: | ||||
6280 | llvm_unreachable("Unknown loc info!"); | ||||
6281 | case CCValAssign::Full: | ||||
6282 | break; | ||||
6283 | case CCValAssign::Indirect: | ||||
6284 | assert((VA.getValVT().isScalableVector() || | ||||
6285 | Subtarget->isWindowsArm64EC()) && | ||||
6286 | "Indirect arguments should be scalable on most subtargets"); | ||||
6287 | break; | ||||
6288 | case CCValAssign::BCvt: | ||||
6289 | ArgValue = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), ArgValue); | ||||
6290 | break; | ||||
6291 | case CCValAssign::AExt: | ||||
6292 | case CCValAssign::SExt: | ||||
6293 | case CCValAssign::ZExt: | ||||
6294 | break; | ||||
6295 | case CCValAssign::AExtUpper: | ||||
6296 | ArgValue = DAG.getNode(ISD::SRL, DL, RegVT, ArgValue, | ||||
6297 | DAG.getConstant(32, DL, RegVT)); | ||||
6298 | ArgValue = DAG.getZExtOrTrunc(ArgValue, DL, VA.getValVT()); | ||||
6299 | break; | ||||
6300 | } | ||||
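// [Editorial note] The AExtUpper case above recovers a value carried in the
// upper half of its 64-bit location: e.g. an i32 held in bits [63:32] of x0
// becomes (trunc (srl x0, 32)), exactly the SRL + getZExtOrTrunc pair built
// in that case.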
6301 | } else { // VA.isRegLoc() | ||||
6302 | assert(VA.isMemLoc() && "CCValAssign is neither reg nor mem"); | ||||
6303 | unsigned ArgOffset = VA.getLocMemOffset(); | ||||
6304 | unsigned ArgSize = (VA.getLocInfo() == CCValAssign::Indirect | ||||
6305 | ? VA.getLocVT().getSizeInBits() | ||||
6306 | : VA.getValVT().getSizeInBits()) / 8; | ||||
6307 | |||||
6308 | uint32_t BEAlign = 0; | ||||
6309 | if (!Subtarget->isLittleEndian() && ArgSize < 8 && | ||||
6310 | !Ins[i].Flags.isInConsecutiveRegs()) | ||||
6311 | BEAlign = 8 - ArgSize; | ||||
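// [Editorial note] On big-endian targets a small value sits in the high
// bytes of its 8-byte slot, so e.g. ArgSize == 2 yields BEAlign == 6 and the
// load below is moved 6 bytes forward onto the actual payload.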
6312 | |||||
6313 | SDValue FIN; | ||||
6314 | MachinePointerInfo PtrInfo; | ||||
6315 | if (isVarArg && Subtarget->isWindowsArm64EC()) { | ||||
6316 | // In the ARM64EC varargs convention, fixed arguments on the stack are | ||||
6317 | // accessed relative to x4, not sp. | ||||
6318 | unsigned ObjOffset = ArgOffset + BEAlign; | ||||
6319 | Register VReg = MF.addLiveIn(AArch64::X4, &AArch64::GPR64RegClass); | ||||
6320 | SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64); | ||||
6321 | FIN = DAG.getNode(ISD::ADD, DL, MVT::i64, Val, | ||||
6322 | DAG.getConstant(ObjOffset, DL, MVT::i64)); | ||||
6323 | PtrInfo = MachinePointerInfo::getUnknownStack(MF); | ||||
6324 | } else { | ||||
6325 | int FI = MFI.CreateFixedObject(ArgSize, ArgOffset + BEAlign, true); | ||||
6326 | |||||
6327 | // Create load nodes to retrieve arguments from the stack. | ||||
6328 | FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout())); | ||||
6329 | PtrInfo = MachinePointerInfo::getFixedStack(MF, FI); | ||||
6330 | } | ||||
6331 | |||||
6332 | // For NON_EXTLOAD, generic code in getLoad asserts that ValVT == MemVT. | ||||
6333 | ISD::LoadExtType ExtType = ISD::NON_EXTLOAD; | ||||
6334 | MVT MemVT = VA.getValVT(); | ||||
6335 | |||||
6336 | switch (VA.getLocInfo()) { | ||||
6337 | default: | ||||
6338 | break; | ||||
6339 | case CCValAssign::Trunc: | ||||
6340 | case CCValAssign::BCvt: | ||||
6341 | MemVT = VA.getLocVT(); | ||||
6342 | break; | ||||
6343 | case CCValAssign::Indirect: | ||||
6344 | assert((VA.getValVT().isScalableVector() || | ||||
6345 | Subtarget->isWindowsArm64EC()) && | ||||
6346 | "Indirect arguments should be scalable on most subtargets"); | ||||
6347 | MemVT = VA.getLocVT(); | ||||
6348 | break; | ||||
6349 | case CCValAssign::SExt: | ||||
6350 | ExtType = ISD::SEXTLOAD; | ||||
6351 | break; | ||||
6352 | case CCValAssign::ZExt: | ||||
6353 | ExtType = ISD::ZEXTLOAD; | ||||
6354 | break; | ||||
6355 | case CCValAssign::AExt: | ||||
6356 | ExtType = ISD::EXTLOAD; | ||||
6357 | break; | ||||
6358 | } | ||||
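// [Editorial sketch, types chosen for illustration] A signed i8 passed on
// the stack arrives with LocInfo SExt, so the code below builds roughly
//   ArgValue = DAG.getExtLoad(ISD::SEXTLOAD, DL, /*LocVT=*/MVT::i32, Chain,
//                             FIN, PtrInfo, /*MemVT=*/MVT::i8);
// i.e. a 1-byte memory access sign-extended into the full location type.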
6359 | |||||
6360 | ArgValue = DAG.getExtLoad(ExtType, DL, VA.getLocVT(), Chain, FIN, PtrInfo, | ||||
6361 | MemVT); | ||||
6362 | } | ||||
6363 | |||||
6364 | if (VA.getLocInfo() == CCValAssign::Indirect) { | ||||
6365 | assert( | ||||
6366 | (VA.getValVT().isScalableVector() || Subtarget->isWindowsArm64EC()) && | ||||
6367 | "Indirect arguments should be scalable on most subtargets"); | ||||
6368 | |||||
6369 | uint64_t PartSize = VA.getValVT().getStoreSize().getKnownMinSize(); | ||||
6370 | unsigned NumParts = 1; | ||||
6371 | if (Ins[i].Flags.isInConsecutiveRegs()) { | ||||
6372 | assert(!Ins[i].Flags.isInConsecutiveRegsLast()); | ||||
6373 | while (!Ins[i + NumParts - 1].Flags.isInConsecutiveRegsLast()) | ||||
6374 | ++NumParts; | ||||
6375 | } | ||||
6376 | |||||
6377 | MVT PartLoad = VA.getValVT(); | ||||
6378 | SDValue Ptr = ArgValue; | ||||
6379 | |||||
6380 | // Ensure we generate all loads for each tuple part, whilst updating the | ||||
6381 | // pointer after each load correctly using vscale. | ||||
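// [Editorial note] E.g. an indirectly passed svfloat32x2_t tuple gives
// NumParts == 2 loads of nxv4f32; between them the pointer is advanced by
// vscale * 16 bytes (the known-minimum store size scaled at runtime) via an
// ISD::ADD flagged no-unsigned-wrap, as built below.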
6382 | while (NumParts > 0) { | ||||
6383 | ArgValue = DAG.getLoad(PartLoad, DL, Chain, Ptr, MachinePointerInfo()); | ||||
6384 | InVals.push_back(ArgValue); | ||||
6385 | NumParts--; | ||||
6386 | if (NumParts > 0) { | ||||
6387 | SDValue BytesIncrement; | ||||
6388 | if (PartLoad.isScalableVector()) { | ||||
6389 | BytesIncrement = DAG.getVScale( | ||||
6390 | DL, Ptr.getValueType(), | ||||
6391 | APInt(Ptr.getValueSizeInBits().getFixedSize(), PartSize)); | ||||
6392 | } else { | ||||
6393 | BytesIncrement = DAG.getConstant( | ||||
6394 | APInt(Ptr.getValueSizeInBits().getFixedSize(), PartSize), DL, | ||||
6395 | Ptr.getValueType()); | ||||
6396 | } | ||||
6397 | SDNodeFlags Flags; | ||||
6398 | Flags.setNoUnsignedWrap(true); | ||||
6399 | Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr, | ||||
6400 | BytesIncrement, Flags); | ||||
6401 | ExtraArgLocs++; | ||||
6402 | i++; | ||||
6403 | } | ||||
6404 | } | ||||
6405 | } else { | ||||
6406 | if (Subtarget->isTargetILP32() && Ins[i].Flags.isPointer()) | ||||
6407 | ArgValue = DAG.getNode(ISD::AssertZext, DL, ArgValue.getValueType(), | ||||
6408 | ArgValue, DAG.getValueType(MVT::i32)); | ||||
6409 | |||||
6410 | // i1 arguments are zero-extended to i8 by the caller. Emit a | ||||
6411 | // hint to reflect this. | ||||
6412 | if (Ins[i].isOrigArg()) { | ||||
6413 | Argument *OrigArg = F.getArg(Ins[i].getOrigArgIndex()); | ||||
6414 | if (OrigArg->getType()->isIntegerTy(1)) { | ||||
6415 | if (!Ins[i].Flags.isZExt()) { | ||||
6416 | ArgValue = DAG.getNode(AArch64ISD::ASSERT_ZEXT_BOOL, DL, | ||||
6417 | ArgValue.getValueType(), ArgValue); | ||||
6418 | } | ||||
6419 | } | ||||
6420 | } | ||||
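// [Editorial sketch] For a C-level `void f(bool b)`, the AAPCS obliges the
// caller to zero-extend b to 8 bits; ASSERT_ZEXT_BOOL records that guarantee
// so later combines may drop redundant masking such as `and w0, w0, #0x1`.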
6421 | |||||
6422 | InVals.push_back(ArgValue); | ||||
6423 | } | ||||
6424 | } | ||||
6425 | assert((ArgLocs.size() + ExtraArgLocs) == Ins.size()); | ||||
6426 | |||||
6427 | // Insert the SMSTART if this is a locally streaming function and | ||||
6428 | // make sure it is Glued to the last CopyFromReg value. | ||||
6429 | if (IsLocallyStreaming) { | ||||
6430 | const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo(); | ||||
6431 | Chain = DAG.getNode( | ||||
6432 | AArch64ISD::SMSTART, DL, DAG.getVTList(MVT::Other, MVT::Glue), | ||||
6433 | {DAG.getRoot(), | ||||
6434 | DAG.getTargetConstant((int32_t)AArch64SVCR::SVCRSM, DL, MVT::i32), | ||||
6435 | DAG.getConstant(0, DL, MVT::i64), DAG.getConstant(1, DL, MVT::i64), | ||||
6436 | DAG.getRegisterMask(TRI->getSMStartStopCallPreservedMask()), Glue}); | ||||
6437 | // Ensure that the SMSTART is ordered after the argument copies and that | ||||
6438 | // its chain result is used, by threading each value through a vreg. | ||||
6439 | for (unsigned I = 0; I < InVals.size(); ++I) { | ||||
6440 | Register Reg = MF.getRegInfo().createVirtualRegister( | ||||
6441 | getRegClassFor(InVals[I].getValueType().getSimpleVT())); | ||||
6442 | Chain = DAG.getCopyToReg(Chain, DL, Reg, InVals[I]); | ||||
6443 | InVals[I] = DAG.getCopyFromReg(Chain, DL, Reg, | ||||
6444 | InVals[I].getValueType()); | ||||
6445 | } | ||||
6446 | } | ||||
6447 | |||||
6448 | // varargs | ||||
6449 | if (isVarArg) { | ||||
6450 | if (!Subtarget->isTargetDarwin() || IsWin64) { | ||||
6451 | // The AAPCS variadic function ABI is identical to the non-variadic | ||||
6452 | // one. As a result there may be more arguments in registers and we should | ||||
6453 | // save them for future reference. | ||||
6454 | // Win64 variadic functions also pass arguments in registers, but all float | ||||
6455 | // arguments are passed in integer registers. | ||||
6456 | saveVarArgRegisters(CCInfo, DAG, DL, Chain); | ||||
6457 | } | ||||
6458 | |||||
6459 | // This will point to the next argument passed via stack. | ||||
6460 | unsigned StackOffset = CCInfo.getNextStackOffset(); | ||||
6461 | // We currently pass all varargs at 8-byte alignment, or 4 for ILP32 | ||||
6462 | StackOffset = alignTo(StackOffset, Subtarget->isTargetILP32() ? 4 : 8); | ||||
6463 | FuncInfo->setVarArgsStackOffset(StackOffset); | ||||
6464 | FuncInfo->setVarArgsStackIndex(MFI.CreateFixedObject(4, StackOffset, true)); | ||||
6465 | |||||
6466 | if (MFI.hasMustTailInVarArgFunc()) { | ||||
6467 | SmallVector<MVT, 2> RegParmTypes; | ||||
6468 | RegParmTypes.push_back(MVT::i64); | ||||
6469 | RegParmTypes.push_back(MVT::f128); | ||||
6470 | // Compute the set of forwarded registers. The rest are scratch. | ||||
6471 | SmallVectorImpl<ForwardedRegister> &Forwards = | ||||
6472 | FuncInfo->getForwardedMustTailRegParms(); | ||||
6473 | CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, | ||||
6474 | CC_AArch64_AAPCS); | ||||
6475 | |||||
6476 | // Conservatively forward X8, since it might be used for aggregate return. | ||||
6477 | if (!CCInfo.isAllocated(AArch64::X8)) { | ||||
6478 | Register X8VReg = MF.addLiveIn(AArch64::X8, &AArch64::GPR64RegClass); | ||||
6479 | Forwards.push_back(ForwardedRegister(X8VReg, AArch64::X8, MVT::i64)); | ||||
6480 | } | ||||
6481 | } | ||||
6482 | } | ||||
6483 | |||||
6484 | // On Windows, InReg pointers must be returned, so record the pointer in a | ||||
6485 | // virtual register at the start of the function so it can be returned in the | ||||
6486 | // epilogue. | ||||
6487 | if (IsWin64) { | ||||
6488 | for (unsigned I = 0, E = Ins.size(); I != E; ++I) { | ||||
6489 | if (Ins[I].Flags.isInReg() && Ins[I].Flags.isSRet()) { | ||||
6490 | assert(!FuncInfo->getSRetReturnReg()); | ||||
6491 | |||||
6492 | MVT PtrTy = getPointerTy(DAG.getDataLayout()); | ||||
6493 | Register Reg = | ||||
6494 | MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy)); | ||||
6495 | FuncInfo->setSRetReturnReg(Reg); | ||||
6496 | |||||
6497 | SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), DL, Reg, InVals[I]); | ||||
6498 | Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Copy, Chain); | ||||
6499 | break; | ||||
6500 | } | ||||
6501 | } | ||||
6502 | } | ||||
6503 | |||||
6504 | unsigned StackArgSize = CCInfo.getNextStackOffset(); | ||||
6505 | bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt; | ||||
6506 | if (DoesCalleeRestoreStack(CallConv, TailCallOpt)) { | ||||
6507 | // This is a non-standard ABI so by fiat I say we're allowed to make full | ||||
6508 | // use of the stack area to be popped, which must be aligned to 16 bytes in | ||||
6509 | // any case: | ||||
6510 | StackArgSize = alignTo(StackArgSize, 16); | ||||
6511 | |||||
6512 | // If we're expected to restore the stack (e.g. fastcc) then we'll be adding | ||||
6513 | // a multiple of 16. | ||||
6514 | FuncInfo->setArgumentStackToRestore(StackArgSize); | ||||
6515 | |||||
6516 | // This realignment carries over to the available bytes below. Our own | ||||
6517 | // callers will guarantee the space is free by giving an aligned value to | ||||
6518 | // CALLSEQ_START. | ||||
6519 | } | ||||
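// [Editorial note] alignTo(StackArgSize, 16) above rounds up to the next
// multiple of 16, e.g. 20 bytes of stack arguments are recorded as 32,
// matching the requirement that the popped stack area stay 16-byte aligned.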
6520 | // Even if we're not expected to free up the space, it's useful to know how | ||||
6521 | // much is there while considering tail calls (because we can reuse it). | ||||
6522 | FuncInfo->setBytesInStackArgArea(StackArgSize); | ||||
6523 | |||||
6524 | if (Subtarget->hasCustomCallingConv()) | ||||
6525 | Subtarget->getRegisterInfo()->UpdateCustomCalleeSavedRegs(MF); | ||||
6526 | |||||
6527 | // Conservatively assume the function requires the lazy-save mechanism. | ||||
6528 | if (SMEAttrs(MF.getFunction()).hasZAState()) { | ||||
6529 | unsigned TPIDR2Obj = allocateLazySaveBuffer(Chain, DL, DAG); | ||||
6530 | FuncInfo->setLazySaveTPIDR2Obj(TPIDR2Obj); | ||||
6531 | } | ||||
6532 | |||||
6533 | return Chain; | ||||
6534 | } | ||||
6535 | |||||
6536 | void AArch64TargetLowering::saveVarArgRegisters(CCState &CCInfo, | ||||
6537 | SelectionDAG &DAG, | ||||
6538 | const SDLoc &DL, | ||||
6539 | SDValue &Chain) const { | ||||
6540 | MachineFunction &MF = DAG.getMachineFunction(); | ||||
6541 | MachineFrameInfo &MFI = MF.getFrameInfo(); | ||||
6542 | AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>(); | ||||
6543 | auto PtrVT = getPointerTy(DAG.getDataLayout()); | ||||
6544 | bool IsWin64 = Subtarget->isCallingConvWin64(MF.getFunction().getCallingConv()); | ||||
6545 | |||||
6546 | SmallVector<SDValue, 8> MemOps; | ||||
6547 | |||||
6548 | static const MCPhysReg GPRArgRegs[] = { AArch64::X0, AArch64::X1, AArch64::X2, | ||||
6549 | AArch64::X3, AArch64::X4, AArch64::X5, | ||||
6550 | AArch64::X6, AArch64::X7 }; | ||||
6551 | unsigned NumGPRArgRegs = std::size(GPRArgRegs); | ||||
6552 | if (Subtarget->isWindowsArm64EC()) { | ||||
6553 | // In the ARM64EC ABI, only x0-x3 are used to pass arguments to varargs | ||||
6554 | // functions. | ||||
6555 | NumGPRArgRegs = 4; | ||||
6556 | } | ||||
6557 | unsigned FirstVariadicGPR = CCInfo.getFirstUnallocated(GPRArgRegs); | ||||
6558 | |||||
6559 | unsigned GPRSaveSize = 8 * (NumGPRArgRegs - FirstVariadicGPR); | ||||
6560 | int GPRIdx = 0; | ||||
6561 | if (GPRSaveSize != 0) { | ||||
6562 | if (IsWin64) { | ||||
6563 | GPRIdx = MFI.CreateFixedObject(GPRSaveSize, -(int)GPRSaveSize, false); | ||||
6564 | if (GPRSaveSize & 15) | ||||
6565 | // The extra size here, if triggered, will always be 8. | ||||
6566 | MFI.CreateFixedObject(16 - (GPRSaveSize & 15), -(int)alignTo(GPRSaveSize, 16), false); | ||||
6567 | } else | ||||
6568 | GPRIdx = MFI.CreateStackObject(GPRSaveSize, Align(8), false); | ||||
6569 | |||||
6570 | SDValue FIN; | ||||
6571 | if (Subtarget->isWindowsArm64EC()) { | ||||
6572 | // With the Arm64EC ABI, we reserve the save area as usual, but we | ||||
6573 | // compute its address relative to x4. For a normal AArch64->AArch64 | ||||
6574 | // call, x4 == sp on entry, but calls from an entry thunk can pass in a | ||||
6575 | // different address. | ||||
6576 | Register VReg = MF.addLiveIn(AArch64::X4, &AArch64::GPR64RegClass); | ||||
6577 | SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64); | ||||
6578 | FIN = DAG.getNode(ISD::SUB, DL, MVT::i64, Val, | ||||
6579 | DAG.getConstant(GPRSaveSize, DL, MVT::i64)); | ||||
6580 | } else { | ||||
6581 | FIN = DAG.getFrameIndex(GPRIdx, PtrVT); | ||||
6582 | } | ||||
6583 | |||||
6584 | for (unsigned i = FirstVariadicGPR; i < NumGPRArgRegs; ++i) { | ||||
6585 | Register VReg = MF.addLiveIn(GPRArgRegs[i], &AArch64::GPR64RegClass); | ||||
6586 | SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64); | ||||
6587 | SDValue Store = | ||||
6588 | DAG.getStore(Val.getValue(1), DL, Val, FIN, | ||||
6589 | IsWin64 ? MachinePointerInfo::getFixedStack( | ||||
6590 | MF, GPRIdx, (i - FirstVariadicGPR) * 8) | ||||
6591 | : MachinePointerInfo::getStack(MF, i * 8)); | ||||
6592 | MemOps.push_back(Store); | ||||
6593 | FIN = | ||||
6594 | DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getConstant(8, DL, PtrVT)); | ||||
6595 | } | ||||
6596 | } | ||||
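// [Editorial sketch, register counts for illustration] For a Win64 variadic
// callee with two fixed integer arguments, FirstVariadicGPR == 2, so x2..x7
// are saved into a 48-byte fixed object at a negative offset, placing the
// saved registers contiguously below the caller's stack-passed arguments.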
6597 | FuncInfo->setVarArgsGPRIndex(GPRIdx); | ||||
6598 | FuncInfo->setVarArgsGPRSize(GPRSaveSize); | ||||
6599 | |||||
6600 | if (Subtarget->hasFPARMv8() && !IsWin64) { | ||||
6601 | static const MCPhysReg FPRArgRegs[] = { | ||||
6602 | AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3, | ||||
6603 | AArch64::Q4, AArch64::Q5, AArch64::Q6, AArch64::Q7}; | ||||
6604 | static const unsigned NumFPRArgRegs = std::size(FPRArgRegs); | ||||
6605 | unsigned FirstVariadicFPR = CCInfo.getFirstUnallocated(FPRArgRegs); | ||||
6606 | |||||
6607 | unsigned FPRSaveSize = 16 * (NumFPRArgRegs - FirstVariadicFPR); | ||||
6608 | int FPRIdx = 0; | ||||
6609 | if (FPRSaveSize != 0) { | ||||
6610 | FPRIdx = MFI.CreateStackObject(FPRSaveSize, Align(16), false); | ||||
6611 | |||||
6612 | SDValue FIN = DAG.getFrameIndex(FPRIdx, PtrVT); | ||||
6613 | |||||
6614 | for (unsigned i = FirstVariadicFPR; i < NumFPRArgRegs; ++i) { | ||||
6615 | Register VReg = MF.addLiveIn(FPRArgRegs[i], &AArch64::FPR128RegClass); | ||||
6616 | SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f128); | ||||
6617 | |||||
6618 | SDValue Store = DAG.getStore(Val.getValue(1), DL, Val, FIN, | ||||
6619 | MachinePointerInfo::getStack(MF, i * 16)); | ||||
6620 | MemOps.push_back(Store); | ||||
6621 | FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, | ||||
6622 | DAG.getConstant(16, DL, PtrVT)); | ||||
6623 | } | ||||
6624 | } | ||||
6625 | FuncInfo->setVarArgsFPRIndex(FPRIdx); | ||||
6626 | FuncInfo->setVarArgsFPRSize(FPRSaveSize); | ||||
6627 | } | ||||
6628 | |||||
6629 | if (!MemOps.empty()) { | ||||
6630 | Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps); | ||||
6631 | } | ||||
6632 | } | ||||
6633 | |||||
6634 | /// LowerCallResult - Lower the result values of a call into the | ||||
6635 | /// appropriate copies out of appropriate physical registers. | ||||
6636 | SDValue AArch64TargetLowering::LowerCallResult( | ||||
6637 | SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, | ||||
6638 | const SmallVectorImpl<CCValAssign> &RVLocs, const SDLoc &DL, | ||||
6639 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn, | ||||
6640 | SDValue ThisVal) const { | ||||
6641 | DenseMap<unsigned, SDValue> CopiedRegs; | ||||
6642 | // Copy all of the result registers out of their specified physreg. | ||||
6643 | for (unsigned i = 0; i != RVLocs.size(); ++i) { | ||||
6644 | CCValAssign VA = RVLocs[i]; | ||||
6645 | |||||
6646 | // Pass 'this' value directly from the argument to return value, to avoid | ||||
6647 | // reg unit interference | ||||
6648 | if (i == 0 && isThisReturn) { | ||||
6649 | assert(!VA.needsCustom() && VA.getLocVT() == MVT::i64 && | ||||
6650 | "unexpected return calling convention register assignment"); | ||||
6651 | InVals.push_back(ThisVal); | ||||
6652 | continue; | ||||
6653 | } | ||||
6654 | |||||
6655 | // Avoid copying a physreg twice since RegAllocFast is incompetent and only | ||||
6656 | // allows one use of a physreg per block. | ||||
6657 | SDValue Val = CopiedRegs.lookup(VA.getLocReg()); | ||||
6658 | if (!Val) { | ||||
6659 | Val = | ||||
6660 | DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), InFlag); | ||||
6661 | Chain = Val.getValue(1); | ||||
6662 | InFlag = Val.getValue(2); | ||||
6663 | CopiedRegs[VA.getLocReg()] = Val; | ||||
6664 | } | ||||
6665 | |||||
6666 | switch (VA.getLocInfo()) { | ||||
6667 | default: | ||||
6668 | llvm_unreachable("Unknown loc info!"); | ||||
6669 | case CCValAssign::Full: | ||||
6670 | break; | ||||
6671 | case CCValAssign::BCvt: | ||||
6672 | Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val); | ||||
6673 | break; | ||||
6674 | case CCValAssign::AExtUpper: | ||||
6675 | Val = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Val, | ||||
6676 | DAG.getConstant(32, DL, VA.getLocVT())); | ||||
6677 | [[fallthrough]]; | ||||
6678 | case CCValAssign::AExt: | ||||
6679 | [[fallthrough]]; | ||||
6680 | case CCValAssign::ZExt: | ||||
6681 | Val = DAG.getZExtOrTrunc(Val, DL, VA.getValVT()); | ||||
6682 | break; | ||||
6683 | } | ||||
6684 | |||||
6685 | InVals.push_back(Val); | ||||
6686 | } | ||||
6687 | |||||
6688 | return Chain; | ||||
6689 | } | ||||
6690 | |||||
6691 | /// Return true if the calling convention is one that we can guarantee TCO for. | ||||
6692 | static bool canGuaranteeTCO(CallingConv::ID CC, bool GuaranteeTailCalls) { | ||||
6693 | return (CC == CallingConv::Fast && GuaranteeTailCalls) || | ||||
6694 | CC == CallingConv::Tail || CC == CallingConv::SwiftTail; | ||||
6695 | } | ||||
6696 | |||||
6697 | /// Return true if we might ever do TCO for calls with this calling convention. | ||||
6698 | static bool mayTailCallThisCC(CallingConv::ID CC) { | ||||
6699 | switch (CC) { | ||||
6700 | case CallingConv::C: | ||||
6701 | case CallingConv::AArch64_SVE_VectorCall: | ||||
6702 | case CallingConv::PreserveMost: | ||||
6703 | case CallingConv::Swift: | ||||
6704 | case CallingConv::SwiftTail: | ||||
6705 | case CallingConv::Tail: | ||||
6706 | case CallingConv::Fast: | ||||
6707 | return true; | ||||
6708 | default: | ||||
6709 | return false; | ||||
6710 | } | ||||
6711 | } | ||||
6712 | |||||
6713 | static void analyzeCallOperands(const AArch64TargetLowering &TLI, | ||||
6714 | const AArch64Subtarget *Subtarget, | ||||
6715 | const TargetLowering::CallLoweringInfo &CLI, | ||||
6716 | CCState &CCInfo) { | ||||
6717 | const SelectionDAG &DAG = CLI.DAG; | ||||
6718 | CallingConv::ID CalleeCC = CLI.CallConv; | ||||
6719 | bool IsVarArg = CLI.IsVarArg; | ||||
6720 | const SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; | ||||
6721 | bool IsCalleeWin64 = Subtarget->isCallingConvWin64(CalleeCC); | ||||
6722 | |||||
6723 | unsigned NumArgs = Outs.size(); | ||||
6724 | for (unsigned i = 0; i != NumArgs; ++i) { | ||||
6725 | MVT ArgVT = Outs[i].VT; | ||||
6726 | ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; | ||||
6727 | |||||
6728 | bool UseVarArgCC = false; | ||||
6729 | if (IsVarArg) { | ||||
6730 | // On Windows, the fixed arguments in a vararg call are passed in GPRs | ||||
6731 | // too, so use the vararg CC to force them to integer registers. | ||||
6732 | if (IsCalleeWin64) { | ||||
6733 | UseVarArgCC = true; | ||||
6734 | } else { | ||||
6735 | UseVarArgCC = !Outs[i].IsFixed; | ||||
6736 | } | ||||
6737 | } else { | ||||
6738 | // Get type of the original argument. | ||||
6739 | EVT ActualVT = | ||||
6740 | TLI.getValueType(DAG.getDataLayout(), CLI.Args[Outs[i].OrigArgIndex].Ty, | ||||
6741 | /*AllowUnknown*/ true); | ||||
6742 | MVT ActualMVT = ActualVT.isSimple() ? ActualVT.getSimpleVT() : ArgVT; | ||||
6743 | // If ActualMVT is i1/i8/i16, we should set LocVT to i8/i8/i16. | ||||
6744 | if (ActualMVT == MVT::i1 || ActualMVT == MVT::i8) | ||||
6745 | ArgVT = MVT::i8; | ||||
6746 | else if (ActualMVT == MVT::i16) | ||||
6747 | ArgVT = MVT::i16; | ||||
6748 | } | ||||
6749 | |||||
6750 | CCAssignFn *AssignFn = TLI.CCAssignFnForCall(CalleeCC, UseVarArgCC); | ||||
6751 | bool Res = AssignFn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo); | ||||
6752 | assert(!Res && "Call operand has unhandled type"); | ||||
6753 | (void)Res; | ||||
6754 | } | ||||
6755 | } | ||||
6756 | |||||
6757 | bool AArch64TargetLowering::isEligibleForTailCallOptimization( | ||||
6758 | const CallLoweringInfo &CLI) const { | ||||
6759 | CallingConv::ID CalleeCC = CLI.CallConv; | ||||
6760 | if (!mayTailCallThisCC(CalleeCC)) | ||||
6761 | return false; | ||||
6762 | |||||
6763 | SDValue Callee = CLI.Callee; | ||||
6764 | bool IsVarArg = CLI.IsVarArg; | ||||
6765 | const SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; | ||||
6766 | const SmallVector<SDValue, 32> &OutVals = CLI.OutVals; | ||||
6767 | const SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; | ||||
6768 | const SelectionDAG &DAG = CLI.DAG; | ||||
6769 | MachineFunction &MF = DAG.getMachineFunction(); | ||||
6770 | const Function &CallerF = MF.getFunction(); | ||||
6771 | CallingConv::ID CallerCC = CallerF.getCallingConv(); | ||||
6772 | |||||
6773 | // SME Streaming functions are not eligible for TCO as they may require | ||||
6774 | // the streaming mode or ZA to be restored after returning from the call. | ||||
6775 | SMEAttrs CallerAttrs(MF.getFunction()); | ||||
6776 | auto CalleeAttrs = CLI.CB ? SMEAttrs(*CLI.CB) : SMEAttrs(SMEAttrs::Normal); | ||||
6777 | if (CallerAttrs.requiresSMChange(CalleeAttrs) || | ||||
6778 | CallerAttrs.requiresLazySave(CalleeAttrs)) | ||||
6779 | return false; | ||||
6780 | |||||
6781 | // Functions using the C or Fast calling convention that have an SVE signature | ||||
6782 | // preserve more registers and should assume the SVE_VectorCall CC. | ||||
6783 | // The check for matching callee-saved regs will determine whether it is | ||||
6784 | // eligible for TCO. | ||||
6785 | if ((CallerCC == CallingConv::C || CallerCC == CallingConv::Fast) && | ||||
6786 | MF.getInfo<AArch64FunctionInfo>()->isSVECC()) | ||||
6787 | CallerCC = CallingConv::AArch64_SVE_VectorCall; | ||||
6788 | |||||
6789 | bool CCMatch = CallerCC == CalleeCC; | ||||
6790 | |||||
6791 | // When using the Windows calling convention on a non-windows OS, we want | ||||
6792 | // to back up and restore X18 in such functions; we can't do a tail call | ||||
6793 | // from those functions. | ||||
6794 | if (CallerCC == CallingConv::Win64 && !Subtarget->isTargetWindows() && | ||||
6795 | CalleeCC != CallingConv::Win64) | ||||
6796 | return false; | ||||
6797 | |||||
6798 | // Byval parameters hand the function a pointer directly into the stack area | ||||
6799 | // we want to reuse during a tail call. Working around this *is* possible (see | ||||
6800 | // X86) but less efficient and uglier in LowerCall. | ||||
6801 | for (Function::const_arg_iterator i = CallerF.arg_begin(), | ||||
6802 | e = CallerF.arg_end(); | ||||
6803 | i != e; ++i) { | ||||
6804 | if (i->hasByValAttr()) | ||||
6805 | return false; | ||||
6806 | |||||
6807 | // On Windows, "inreg" attributes signify non-aggregate indirect returns. | ||||
6808 | // In this case, it is necessary to save/restore X0 in the callee. Tail | ||||
6809 | // call opt interferes with this. So we disable tail call opt when the | ||||
6810 | // caller has an argument with "inreg" attribute. | ||||
6811 | |||||
6812 | // FIXME: Check whether the callee also has an "inreg" argument. | ||||
6813 | if (i->hasInRegAttr()) | ||||
6814 | return false; | ||||
6815 | } | ||||
6816 | |||||
6817 | if (canGuaranteeTCO(CalleeCC, getTargetMachine().Options.GuaranteedTailCallOpt)) | ||||
6818 | return CCMatch; | ||||
6819 | |||||
6820 | // Externally-defined functions with weak linkage should not be | ||||
6821 | // tail-called on AArch64 when the OS does not support dynamic | ||||
6822 | // pre-emption of symbols, as the AAELF spec requires normal calls | ||||
6823 | // to undefined weak functions to be replaced with a NOP or jump to the | ||||
6824 | // next instruction. The behaviour of branch instructions in this | ||||
6825 | // situation (as used for tail calls) is implementation-defined, so we | ||||
6826 | // cannot rely on the linker replacing the tail call with a return. | ||||
6827 | if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { | ||||
6828 | const GlobalValue *GV = G->getGlobal(); | ||||
6829 | const Triple &TT = getTargetMachine().getTargetTriple(); | ||||
6830 | if (GV->hasExternalWeakLinkage() && | ||||
6831 | (!TT.isOSWindows() || TT.isOSBinFormatELF() || TT.isOSBinFormatMachO())) | ||||
6832 | return false; | ||||
6833 | } | ||||
6834 | |||||
6835 | // Now we search for cases where we can use a tail call without changing the | ||||
6836 | // ABI. Sibcall is used in some places (particularly gcc) to refer to this | ||||
6837 | // concept. | ||||
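// [Editorial sketch] The classic sibcall shape is
//   int callee(int);
//   int caller(int x) { return callee(x + 1); }
// where caller and callee share the C ABI and pass no stack arguments, so
// the call can be emitted as a plain `b callee` with no ABI change.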
6838 | |||||
6839 | // I want anyone implementing a new calling convention to think long and hard | ||||
6840 | // about this assert. | ||||
6841 | assert((!IsVarArg || CalleeCC == CallingConv::C) && | ||||
6842 | "Unexpected variadic calling convention"); | ||||
6843 | |||||
6844 | LLVMContext &C = *DAG.getContext(); | ||||
6845 | // Check that the call results are passed in the same way. | ||||
6846 | if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins, | ||||
6847 | CCAssignFnForCall(CalleeCC, IsVarArg), | ||||
6848 | CCAssignFnForCall(CallerCC, IsVarArg))) | ||||
6849 | return false; | ||||
6850 | // The callee has to preserve all registers the caller needs to preserve. | ||||
6851 | const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo(); | ||||
6852 | const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC); | ||||
6853 | if (!CCMatch) { | ||||
6854 | const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC); | ||||
6855 | if (Subtarget->hasCustomCallingConv()) { | ||||
6856 | TRI->UpdateCustomCallPreservedMask(MF, &CallerPreserved); | ||||
6857 | TRI->UpdateCustomCallPreservedMask(MF, &CalleePreserved); | ||||
6858 | } | ||||
6859 | if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved)) | ||||
6860 | return false; | ||||
6861 | } | ||||
6862 | |||||
6863 | // Nothing more to check if the callee is taking no arguments | ||||
6864 | if (Outs.empty()) | ||||
6865 | return true; | ||||
6866 | |||||
6867 | SmallVector<CCValAssign, 16> ArgLocs; | ||||
6868 | CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, C); | ||||
6869 | |||||
6870 | analyzeCallOperands(*this, Subtarget, CLI, CCInfo); | ||||
6871 | |||||
6872 | if (IsVarArg && !(CLI.CB && CLI.CB->isMustTailCall())) { | ||||
6873 | // When we are musttail, additional checks have already been done, so we can safely skip this check. | ||||
6874 | // At least two cases here: if caller is fastcc then we can't have any | ||||
6875 | // memory arguments (we'd be expected to clean up the stack afterwards). If | ||||
6876 | // caller is C then we could potentially use its argument area. | ||||
6877 | |||||
6878 | // FIXME: for now we take the most conservative of these in both cases: | ||||
6879 | // disallow all variadic memory operands. | ||||
6880 | for (const CCValAssign &ArgLoc : ArgLocs) | ||||
6881 | if (!ArgLoc.isRegLoc()) | ||||
6882 | return false; | ||||
6883 | } | ||||
6884 | |||||
6885 | const AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>(); | ||||
6886 | |||||
6887 | // If any of the arguments is passed indirectly, it must be SVE, so the | ||||
6888 | // 'getBytesInStackArgArea' is not sufficient to determine whether we need to | ||||
6889 | // allocate space on the stack. That is why we determine this explicitly here: | ||||
6890 | // in that case the call cannot be a tailcall. | ||||
6891 | if (llvm::any_of(ArgLocs, [&](CCValAssign &A) { | ||||
6892 | assert((A.getLocInfo() != CCValAssign::Indirect || | ||||
6893 | A.getValVT().isScalableVector() || | ||||
6894 | Subtarget->isWindowsArm64EC()) && | ||||
6895 | "Expected value to be scalable"); | ||||
6896 | return A.getLocInfo() == CCValAssign::Indirect; | ||||
6897 | })) | ||||
6898 | return false; | ||||
6899 | |||||
6900 | // If the stack arguments for this call do not fit into our own save area then | ||||
6901 | // the call cannot be made tail. | ||||
6902 | if (CCInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea()) | ||||
6903 | return false; | ||||
6904 | |||||
6905 | const MachineRegisterInfo &MRI = MF.getRegInfo(); | ||||
6906 | if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals)) | ||||
6907 | return false; | ||||
6908 | |||||
6909 | return true; | ||||
6910 | } | ||||
6911 | |||||
6912 | SDValue AArch64TargetLowering::addTokenForArgument(SDValue Chain, | ||||
6913 | SelectionDAG &DAG, | ||||
6914 | MachineFrameInfo &MFI, | ||||
6915 | int ClobberedFI) const { | ||||
6916 | SmallVector<SDValue, 8> ArgChains; | ||||
6917 | int64_t FirstByte = MFI.getObjectOffset(ClobberedFI); | ||||
6918 | int64_t LastByte = FirstByte + MFI.getObjectSize(ClobberedFI) - 1; | ||||
6919 | |||||
6920 | // Include the original chain at the beginning of the list. When this is | ||||
6921 | // used by target LowerCall hooks, this helps legalize find the | ||||
6922 | // CALLSEQ_BEGIN node. | ||||
6923 | ArgChains.push_back(Chain); | ||||
6924 | |||||
6925 | // Add a chain value for each incoming stack-argument load that overlaps the clobbered object. | ||||
6926 | for (SDNode *U : DAG.getEntryNode().getNode()->uses()) | ||||
6927 | if (LoadSDNode *L = dyn_cast<LoadSDNode>(U)) | ||||
6928 | if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr())) | ||||
6929 | if (FI->getIndex() < 0) { | ||||
6930 | int64_t InFirstByte = MFI.getObjectOffset(FI->getIndex()); | ||||
6931 | int64_t InLastByte = InFirstByte; | ||||
6932 | InLastByte += MFI.getObjectSize(FI->getIndex()) - 1; | ||||
6933 | |||||
6934 | if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) || | ||||
6935 | (FirstByte <= InFirstByte && InFirstByte <= LastByte)) | ||||
6936 | ArgChains.push_back(SDValue(L, 1)); | ||||
6937 | } | ||||
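// [Editorial note, offsets for illustration] The interval test above detects
// overlap: a clobbered slot covering bytes [16, 23] and an incoming-argument
// load covering [20, 27] intersect, so that load's chain, SDValue(L, 1),
// must be ordered before the stores that will overwrite the slot.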
6938 | |||||
6939 | // Build a tokenfactor for all the chains. | ||||
6940 | return DAG.getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains); | ||||
6941 | } | ||||
6942 | |||||
6943 | bool AArch64TargetLowering::DoesCalleeRestoreStack(CallingConv::ID CallCC, | ||||
6944 | bool TailCallOpt) const { | ||||
6945 | return (CallCC == CallingConv::Fast && TailCallOpt) || | ||||
6946 | CallCC == CallingConv::Tail || CallCC == CallingConv::SwiftTail; | ||||
6947 | } | ||||
6948 | |||||
6949 | // Check if the value is zero-extended from i1 to i8 | ||||
6950 | static bool checkZExtBool(SDValue Arg, const SelectionDAG &DAG) { | ||||
6951 | unsigned SizeInBits = Arg.getValueType().getSizeInBits(); | ||||
6952 | if (SizeInBits < 8) | ||||
6953 | return false; | ||||
6954 | |||||
6955 | APInt RequiredZero(SizeInBits, 0xFE); | ||||
6956 | KnownBits Bits = DAG.computeKnownBits(Arg, 4); | ||||
6957 | bool ZExtBool = (Bits.Zero & RequiredZero) == RequiredZero; | ||||
6958 | return ZExtBool; | ||||
6959 | } | ||||
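// [Editorial note] 0xFE is the mask of bits 1..7: if known-bits analysis (to
// depth 4) proves them all zero, the low byte of the value is already 0 or 1,
// so the caller-side truncate-to-i1 + zero-extend-to-i8 sequence is skipped.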
6960 | |||||
6961 | SDValue AArch64TargetLowering::changeStreamingMode( | ||||
6962 | SelectionDAG &DAG, SDLoc DL, bool Enable, | ||||
6963 | SDValue Chain, SDValue InFlag, SDValue PStateSM, bool Entry) const { | ||||
6964 | const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo(); | ||||
6965 | SDValue RegMask = DAG.getRegisterMask(TRI->getSMStartStopCallPreservedMask()); | ||||
6966 | SDValue MSROp = | ||||
6967 | DAG.getTargetConstant((int32_t)AArch64SVCR::SVCRSM, DL, MVT::i32); | ||||
6968 | |||||
6969 | SDValue ExpectedSMVal = | ||||
6970 | DAG.getTargetConstant(Entry ? Enable : !Enable, DL, MVT::i64); | ||||
6971 | SmallVector<SDValue> Ops = {Chain, MSROp, PStateSM, ExpectedSMVal, RegMask}; | ||||
6972 | |||||
6973 | if (InFlag) | ||||
6974 | Ops.push_back(InFlag); | ||||
6975 | |||||
6976 | unsigned Opcode = Enable ? AArch64ISD::SMSTART : AArch64ISD::SMSTOP; | ||||
6977 | return DAG.getNode(Opcode, DL, DAG.getVTList(MVT::Other, MVT::Glue), Ops); | ||||
6978 | } | ||||
6979 | |||||
6980 | /// LowerCall - Lower a call to a callseq_start + CALL + callseq_end chain, | ||||
6981 | /// and add input and output parameter nodes. | ||||
6982 | SDValue | ||||
6983 | AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI, | ||||
6984 | SmallVectorImpl<SDValue> &InVals) const { | ||||
6985 | SelectionDAG &DAG = CLI.DAG; | ||||
6986 | SDLoc &DL = CLI.DL; | ||||
6987 | SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; | ||||
6988 | SmallVector<SDValue, 32> &OutVals = CLI.OutVals; | ||||
6989 | SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; | ||||
6990 | SDValue Chain = CLI.Chain; | ||||
6991 | SDValue Callee = CLI.Callee; | ||||
6992 | bool &IsTailCall = CLI.IsTailCall; | ||||
6993 | CallingConv::ID &CallConv = CLI.CallConv; | ||||
6994 | bool IsVarArg = CLI.IsVarArg; | ||||
6995 | |||||
6996 | MachineFunction &MF = DAG.getMachineFunction(); | ||||
6997 | MachineFunction::CallSiteInfo CSInfo; | ||||
6998 | bool IsThisReturn = false; | ||||
6999 | |||||
7000 | AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>(); | ||||
7001 | bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt; | ||||
7002 | bool IsCFICall = CLI.CB && CLI.CB->isIndirectCall() && CLI.CFIType; | ||||
7003 | bool IsSibCall = false; | ||||
7004 | bool GuardWithBTI = false; | ||||
7005 | |||||
7006 | if (CLI.CB && CLI.CB->getAttributes().hasFnAttr(Attribute::ReturnsTwice) && | ||||
7007 | !Subtarget->noBTIAtReturnTwice()) { | ||||
7008 | GuardWithBTI = FuncInfo->branchTargetEnforcement(); | ||||
7009 | } | ||||
7010 | |||||
7011 | // Analyze operands of the call, assigning locations to each operand. | ||||
7012 | SmallVector<CCValAssign, 16> ArgLocs; | ||||
7013 | CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext()); | ||||
7014 | |||||
7015 | if (IsVarArg) { | ||||
7016 | unsigned NumArgs = Outs.size(); | ||||
7017 | |||||
7018 | for (unsigned i = 0; i != NumArgs; ++i) { | ||||
7019 | if (!Outs[i].IsFixed && Outs[i].VT.isScalableVector()) | ||||
7020 | report_fatal_error("Passing SVE types to variadic functions is " | ||||
7021 | "currently not supported"); | ||||
7022 | } | ||||
7023 | } | ||||
7024 | |||||
7025 | analyzeCallOperands(*this, Subtarget, CLI, CCInfo); | ||||
7026 | |||||
7027 | CCAssignFn *RetCC = CCAssignFnForReturn(CallConv); | ||||
7028 | // Assign locations to each value returned by this call. | ||||
7029 | SmallVector<CCValAssign, 16> RVLocs; | ||||
7030 | CCState RetCCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs, | ||||
7031 | *DAG.getContext()); | ||||
7032 | RetCCInfo.AnalyzeCallResult(Ins, RetCC); | ||||
7033 | |||||
7034 | // Check callee args/returns for SVE registers and set calling convention | ||||
7035 | // accordingly. | ||||
7036 | if (CallConv == CallingConv::C || CallConv == CallingConv::Fast) { | ||||
7037 | auto HasSVERegLoc = [](CCValAssign &Loc) { | ||||
7038 | if (!Loc.isRegLoc()) | ||||
7039 | return false; | ||||
7040 | return AArch64::ZPRRegClass.contains(Loc.getLocReg()) || | ||||
7041 | AArch64::PPRRegClass.contains(Loc.getLocReg()); | ||||
7042 | }; | ||||
7043 | if (any_of(RVLocs, HasSVERegLoc) || any_of(ArgLocs, HasSVERegLoc)) | ||||
7044 | CallConv = CallingConv::AArch64_SVE_VectorCall; | ||||
7045 | } | ||||
7046 | |||||
7047 | if (IsTailCall) { | ||||
7048 | // Check if it's really possible to do a tail call. | ||||
7049 | IsTailCall = isEligibleForTailCallOptimization(CLI); | ||||
7050 | |||||
7051 | // A sibling call is one where we're under the usual C ABI and not planning | ||||
7052 | // to change that but can still do a tail call: | ||||
7053 | if (!TailCallOpt && IsTailCall && CallConv != CallingConv::Tail && | ||||
7054 | CallConv != CallingConv::SwiftTail) | ||||
7055 | IsSibCall = true; | ||||
7056 | |||||
7057 | if (IsTailCall) | ||||
7058 | ++NumTailCalls; | ||||
7059 | } | ||||
7060 | |||||
7061 | if (!IsTailCall && CLI.CB && CLI.CB->isMustTailCall()) | ||||
7062 | report_fatal_error("failed to perform tail call elimination on a call " | ||||
7063 | "site marked musttail"); | ||||
7064 | |||||
7065 | // Get a count of how many bytes are to be pushed on the stack. | ||||
7066 | unsigned NumBytes = CCInfo.getNextStackOffset(); | ||||
7067 | |||||
7068 | if (IsSibCall) { | ||||
7069 | // Since we're not changing the ABI to make this a tail call, the memory | ||||
7070 | // operands are already available in the caller's incoming argument space. | ||||
7071 | NumBytes = 0; | ||||
7072 | } | ||||
7073 | |||||
7074 | // FPDiff is the byte offset of the call's argument area from the callee's. | ||||
7075 | // Stores to callee stack arguments will be placed in FixedStackSlots offset | ||||
7076 | // by this amount for a tail call. In a sibling call it must be 0 because the | ||||
7077 | // caller will deallocate the entire stack and the callee still expects its | ||||
7078 | // arguments to begin at SP+0. Completely unused for non-tail calls. | ||||
7079 | int FPDiff = 0; | ||||
7080 | |||||
7081 | if (IsTailCall && !IsSibCall) { | ||||
7082 | unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea(); | ||||
7083 | |||||
7084 | // Since callee will pop argument stack as a tail call, we must keep the | ||||
7085 | // popped size 16-byte aligned. | ||||
7086 | NumBytes = alignTo(NumBytes, 16); | ||||
7087 | |||||
7088 | // FPDiff will be negative if this tail call requires more space than we | ||||
7089 | // would automatically have in our incoming argument space. Positive if we | ||||
7090 | // can actually shrink the stack. | ||||
7091 | FPDiff = NumReusableBytes - NumBytes; | ||||
7092 | |||||
7093 | // Update the required reserved area if this is the tail call requiring the | ||||
7094 | // most argument stack space. | ||||
7095 | if (FPDiff < 0 && FuncInfo->getTailCallReservedStack() < (unsigned)-FPDiff) | ||||
7096 | FuncInfo->setTailCallReservedStack(-FPDiff); | ||||
7097 | |||||
7098 | // The stack pointer must be 16-byte aligned at all times it's used for a | ||||
7099 | // memory operation, which in practice means at *all* times and in | ||||
7100 | // particular across call boundaries. Therefore our own arguments started at | ||||
7101 | // a 16-byte aligned SP and the delta applied for the tail call should | ||||
7102 | // satisfy the same constraint. | ||||
7103 | assert(FPDiff % 16 == 0 && "unaligned stack on tail call"); | ||||
7104 | } | ||||
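// [Editorial sketch, numbers for illustration] If the caller owns 32 bytes
// of incoming-argument stack (NumReusableBytes) but this tail call needs 48,
// then FPDiff == -16 and 16 reserved bytes are recorded; the callee's stack
// stores are then placed at fixed stack slots offset by that delta.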
7105 | |||||
7106 | // Determine whether we need any streaming mode changes. | ||||
7107 | SMEAttrs CalleeAttrs, CallerAttrs(MF.getFunction()); | ||||
7108 | if (CLI.CB) | ||||
7109 | CalleeAttrs = SMEAttrs(*CLI.CB); | ||||
7110 | else if (std::optional<SMEAttrs> Attrs = | ||||
7111 | getCalleeAttrsFromExternalFunction(CLI.Callee)) | ||||
7112 | CalleeAttrs = *Attrs; | ||||
7113 | |||||
7114 | bool RequiresLazySave = CallerAttrs.requiresLazySave(CalleeAttrs); | ||||
7115 | |||||
7116 | MachineFrameInfo &MFI = MF.getFrameInfo(); | ||||
7117 | if (RequiresLazySave) { | ||||
7118 | // Set up a lazy save mechanism by storing the runtime live slices | ||||
7119 | // (worst-case N*N) to the TPIDR2 stack object. | ||||
7120 | SDValue N = DAG.getNode(AArch64ISD::RDSVL, DL, MVT::i64, | ||||
7121 | DAG.getConstant(1, DL, MVT::i32)); | ||||
7122 | SDValue NN = DAG.getNode(ISD::MUL, DL, MVT::i64, N, N); | ||||
7123 | unsigned TPIDR2Obj = FuncInfo->getLazySaveTPIDR2Obj(); | ||||
7124 | |||||
7125 | MachinePointerInfo MPI = MachinePointerInfo::getStack(MF, TPIDR2Obj); | ||||
7126 | SDValue TPIDR2ObjAddr = DAG.getFrameIndex(TPIDR2Obj, | ||||
7127 | DAG.getTargetLoweringInfo().getFrameIndexTy(DAG.getDataLayout())); | ||||
7128 | SDValue BufferPtrAddr = | ||||
7129 | DAG.getNode(ISD::ADD, DL, TPIDR2ObjAddr.getValueType(), TPIDR2ObjAddr, | ||||
7130 | DAG.getConstant(8, DL, TPIDR2ObjAddr.getValueType())); | ||||
7131 | Chain = DAG.getTruncStore(Chain, DL, NN, BufferPtrAddr, MPI, MVT::i16); | ||||
7132 | Chain = DAG.getNode( | ||||
7133 | ISD::INTRINSIC_VOID, DL, MVT::Other, Chain, | ||||
7134 | DAG.getConstant(Intrinsic::aarch64_sme_set_tpidr2, DL, MVT::i32), | ||||
7135 | TPIDR2ObjAddr); | ||||
7136 | } | ||||
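// [Editorial note, layout read off the code above; not authoritative] The
// TPIDR2 object appears to hold the save-buffer pointer at offset 0 and a
// 16-bit count at offset 8: RDSVL #1 materialises the streaming vector
// length in bytes (N), the worst-case value N*N is truncate-stored at
// offset 8, and the aarch64_sme_set_tpidr2 intrinsic then publishes the
// block's address.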
7137 | |||||
7138 | SDValue PStateSM; | ||||
7139 | std::optional<bool> RequiresSMChange = | ||||
7140 | CallerAttrs.requiresSMChange(CalleeAttrs); | ||||
7141 | if (RequiresSMChange) | ||||
7142 | PStateSM = getPStateSM(DAG, Chain, CallerAttrs, DL, MVT::i64); | ||||
7143 | |||||
7144 | // Adjust the stack pointer for the new arguments... | ||||
7145 | // These operations are automatically eliminated by the prolog/epilog pass | ||||
7146 | if (!IsSibCall) | ||||
7147 | Chain = DAG.getCALLSEQ_START(Chain, IsTailCall ? 0 : NumBytes, 0, DL); | ||||
7148 | |||||
7149 | SDValue StackPtr = DAG.getCopyFromReg(Chain, DL, AArch64::SP, | ||||
7150 | getPointerTy(DAG.getDataLayout())); | ||||
7151 | |||||
7152 | SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; | ||||
7153 | SmallSet<unsigned, 8> RegsUsed; | ||||
7154 | SmallVector<SDValue, 8> MemOpChains; | ||||
7155 | auto PtrVT = getPointerTy(DAG.getDataLayout()); | ||||
7156 | |||||
7157 | if (IsVarArg && CLI.CB && CLI.CB->isMustTailCall()) { | ||||
7158 | const auto &Forwards = FuncInfo->getForwardedMustTailRegParms(); | ||||
7159 | for (const auto &F : Forwards) { | ||||
7160 | SDValue Val = DAG.getCopyFromReg(Chain, DL, F.VReg, F.VT); | ||||
7161 | RegsToPass.emplace_back(F.PReg, Val); | ||||
7162 | } | ||||
7163 | } | ||||
7164 | |||||
7165 | // Walk the register/memloc assignments, inserting copies/loads. | ||||
7166 | unsigned ExtraArgLocs = 0; | ||||
7167 | for (unsigned i = 0, e = Outs.size(); i != e; ++i) { | ||||
7168 | CCValAssign &VA = ArgLocs[i - ExtraArgLocs]; | ||||
7169 | SDValue Arg = OutVals[i]; | ||||
7170 | ISD::ArgFlagsTy Flags = Outs[i].Flags; | ||||
7171 | |||||
7172 | // Promote the value if needed. | ||||
7173 | switch (VA.getLocInfo()) { | ||||
7174 | default: | ||||
7175 | llvm_unreachable("Unknown loc info!"); | ||||
7176 | case CCValAssign::Full: | ||||
7177 | break; | ||||
7178 | case CCValAssign::SExt: | ||||
7179 | Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg); | ||||
7180 | break; | ||||
7181 | case CCValAssign::ZExt: | ||||
7182 | Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg); | ||||
7183 | break; | ||||
7184 | case CCValAssign::AExt: | ||||
7185 | if (Outs[i].ArgVT == MVT::i1) { | ||||
7186 | // AAPCS requires i1 to be zero-extended to 8-bits by the caller. | ||||
7187 | // | ||||
7188 | // Check if we actually have to do this, because the value may | ||||
7189 | // already be zero-extended. | ||||
7190 | // | ||||
7191 | // We cannot just emit a (zext i8 (trunc (assert-zext i8))) | ||||
7192 | // and rely on DAGCombiner to fold this, because the following | ||||
7193 | // (anyext i32) is combined with (zext i8) in DAG.getNode: | ||||
7194 | // | ||||
7195 | // (ext (zext x)) -> (zext x) | ||||
7196 | // | ||||
7197 | // This will give us (zext i32), which we cannot remove, so | ||||
7198 | // try to check this beforehand. | ||||
7199 | if (!checkZExtBool(Arg, DAG)) { | ||||
7200 | Arg = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Arg); | ||||
7201 | Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i8, Arg); | ||||
7202 | } | ||||
7203 | } | ||||
7204 | Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg); | ||||
7205 | break; | ||||
7206 | case CCValAssign::AExtUpper: | ||||
7207 | assert(VA.getValVT() == MVT::i32 && "only expect 32 -> 64 upper bits"); | ||||
7208 | Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg); | ||||
7209 | Arg = DAG.getNode(ISD::SHL, DL, VA.getLocVT(), Arg, | ||||
7210 | DAG.getConstant(32, DL, VA.getLocVT())); | ||||
7211 | break; | ||||
7212 | case CCValAssign::BCvt: | ||||
7213 | Arg = DAG.getBitcast(VA.getLocVT(), Arg); | ||||
7214 | break; | ||||
7215 | case CCValAssign::Trunc: | ||||
7216 | Arg = DAG.getZExtOrTrunc(Arg, DL, VA.getLocVT()); | ||||
7217 | break; | ||||
7218 | case CCValAssign::FPExt: | ||||
7219 | Arg = DAG.getNode(ISD::FP_EXTEND, DL, VA.getLocVT(), Arg); | ||||
7220 | break; | ||||
7221 | case CCValAssign::Indirect: | ||||
7222 | bool isScalable = VA.getValVT().isScalableVector(); | ||||
7223 | assert((isScalable || Subtarget->isWindowsArm64EC()) && | ||||
7224 | "Indirect arguments should be scalable on most subtargets"); | ||||
7225 | |||||
7226 | uint64_t StoreSize = VA.getValVT().getStoreSize().getKnownMinSize(); | ||||
7227 | uint64_t PartSize = StoreSize; | ||||
7228 | unsigned NumParts = 1; | ||||
7229 | if (Outs[i].Flags.isInConsecutiveRegs()) { | ||||
7230 | assert(!Outs[i].Flags.isInConsecutiveRegsLast()); | ||||
7231 | while (!Outs[i + NumParts - 1].Flags.isInConsecutiveRegsLast()) | ||||
7232 | ++NumParts; | ||||
7233 | StoreSize *= NumParts; | ||||
7234 | } | ||||
7235 | |||||
7236 | Type *Ty = EVT(VA.getValVT()).getTypeForEVT(*DAG.getContext()); | ||||
7237 | Align Alignment = DAG.getDataLayout().getPrefTypeAlign(Ty); | ||||
7238 | int FI = MFI.CreateStackObject(StoreSize, Alignment, false); | ||||
7239 | if (isScalable) | ||||
7240 | MFI.setStackID(FI, TargetStackID::ScalableVector); | ||||
7241 | |||||
7242 | MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI); | ||||
7243 | SDValue Ptr = DAG.getFrameIndex( | ||||
7244 | FI, DAG.getTargetLoweringInfo().getFrameIndexTy(DAG.getDataLayout())); | ||||
7245 | SDValue SpillSlot = Ptr; | ||||
7246 | |||||
7247 | // Ensure we generate all stores for each tuple part, whilst updating the | ||||
7248 | // pointer after each store correctly using vscale. | ||||
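| // Illustrative example (assumed values): with NumParts == 2 and | ||||
| // PartSize == 16, the second store lands at SpillSlot + (vscale * 16) | ||||
| // bytes, i.e. the increment is scaled at run time by the VSCALE node | ||||
| // below rather than being a fixed constant. | ||||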
7249 | while (NumParts) { | ||||
7250 | Chain = DAG.getStore(Chain, DL, OutVals[i], Ptr, MPI); | ||||
7251 | NumParts--; | ||||
7252 | if (NumParts > 0) { | ||||
7253 | SDValue BytesIncrement; | ||||
7254 | if (isScalable) { | ||||
7255 | BytesIncrement = DAG.getVScale( | ||||
7256 | DL, Ptr.getValueType(), | ||||
7257 | APInt(Ptr.getValueSizeInBits().getFixedSize(), PartSize)); | ||||
7258 | } else { | ||||
7259 | BytesIncrement = DAG.getConstant( | ||||
7260 | APInt(Ptr.getValueSizeInBits().getFixedSize(), PartSize), DL, | ||||
7261 | Ptr.getValueType()); | ||||
7262 | } | ||||
7263 | SDNodeFlags Flags; | ||||
7264 | Flags.setNoUnsignedWrap(true); | ||||
7265 | |||||
7266 | MPI = MachinePointerInfo(MPI.getAddrSpace()); | ||||
7267 | Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr, | ||||
7268 | BytesIncrement, Flags); | ||||
7269 | ExtraArgLocs++; | ||||
7270 | i++; | ||||
7271 | } | ||||
7272 | } | ||||
7273 | |||||
7274 | Arg = SpillSlot; | ||||
7275 | break; | ||||
7276 | } | ||||
7277 | |||||
7278 | if (VA.isRegLoc()) { | ||||
7279 | if (i == 0 && Flags.isReturned() && !Flags.isSwiftSelf() && | ||||
7280 | Outs[0].VT == MVT::i64) { | ||||
7281 | assert(VA.getLocVT() == MVT::i64 && | ||||
7282 | "unexpected calling convention register assignment"); | ||||
7283 | assert(!Ins.empty() && Ins[0].VT == MVT::i64 && | ||||
7284 | "unexpected use of 'returned'"); | ||||
7285 | IsThisReturn = true; | ||||
7286 | } | ||||
7287 | if (RegsUsed.count(VA.getLocReg())) { | ||||
7288 | // If this register has already been used then we're trying to pack | ||||
7289 | // parts of an [N x i32] into an X-register. The extension type will | ||||
7290 | // take care of putting the two halves in the right place but we have to | ||||
7291 | // combine them. | ||||
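| // Sketch (assumed types): for [2 x i32] packed into one X-register, the | ||||
| // first half arrives as (zext i64 lo) and the second as | ||||
| // (shl (anyext i64 hi), 32) via AExtUpper above, so OR-ing the two | ||||
| // values reassembles the full 64-bit register. | ||||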
7292 | SDValue &Bits = | ||||
7293 | llvm::find_if(RegsToPass, | ||||
7294 | [=](const std::pair<unsigned, SDValue> &Elt) { | ||||
7295 | return Elt.first == VA.getLocReg(); | ||||
7296 | }) | ||||
7297 | ->second; | ||||
7298 | Bits = DAG.getNode(ISD::OR, DL, Bits.getValueType(), Bits, Arg); | ||||
7299 | // Call site info is used for the function's parameter entry-value | ||||
7300 | // tracking. For now we track only the simple case where the parameter | ||||
7301 | // is transferred in a whole register. | ||||
7302 | llvm::erase_if(CSInfo, [&VA](MachineFunction::ArgRegPair ArgReg) { | ||||
7303 | return ArgReg.Reg == VA.getLocReg(); | ||||
7304 | }); | ||||
7305 | } else { | ||||
7306 | // Add an extra level of indirection for streaming mode changes by | ||||
7307 | // using a pseudo copy node that cannot be rematerialised between a | ||||
7308 | // smstart/smstop and the call by the simple register coalescer. | ||||
7309 | if (RequiresSMChange && isa<FrameIndexSDNode>(Arg)) | ||||
7310 | Arg = DAG.getNode(AArch64ISD::OBSCURE_COPY, DL, MVT::i64, Arg); | ||||
7311 | RegsToPass.emplace_back(VA.getLocReg(), Arg); | ||||
7312 | RegsUsed.insert(VA.getLocReg()); | ||||
7313 | const TargetOptions &Options = DAG.getTarget().Options; | ||||
7314 | if (Options.EmitCallSiteInfo) | ||||
7315 | CSInfo.emplace_back(VA.getLocReg(), i); | ||||
7316 | } | ||||
7317 | } else { | ||||
7318 | assert(VA.isMemLoc()); | ||||
7319 | |||||
7320 | SDValue DstAddr; | ||||
7321 | MachinePointerInfo DstInfo; | ||||
7322 | |||||
7323 | // FIXME: This works on big-endian for composite byvals, which are the | ||||
7324 | // common case. It should also work for fundamental types too. | ||||
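| // E.g. (illustrative) a 2-byte scalar passed on the stack of a | ||||
| // big-endian target occupies the high bytes of its 8-byte slot, so | ||||
| // BEAlign becomes 8 - 2 == 6 and is added to the store offset below. | ||||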
7325 | uint32_t BEAlign = 0; | ||||
7326 | unsigned OpSize; | ||||
7327 | if (VA.getLocInfo() == CCValAssign::Indirect || | ||||
7328 | VA.getLocInfo() == CCValAssign::Trunc) | ||||
7329 | OpSize = VA.getLocVT().getFixedSizeInBits(); | ||||
7330 | else | ||||
7331 | OpSize = Flags.isByVal() ? Flags.getByValSize() * 8 | ||||
7332 | : VA.getValVT().getSizeInBits(); | ||||
7333 | OpSize = (OpSize + 7) / 8; | ||||
7334 | if (!Subtarget->isLittleEndian() && !Flags.isByVal() && | ||||
7335 | !Flags.isInConsecutiveRegs()) { | ||||
7336 | if (OpSize < 8) | ||||
7337 | BEAlign = 8 - OpSize; | ||||
7338 | } | ||||
7339 | unsigned LocMemOffset = VA.getLocMemOffset(); | ||||
7340 | int32_t Offset = LocMemOffset + BEAlign; | ||||
7341 | SDValue PtrOff = DAG.getIntPtrConstant(Offset, DL); | ||||
7342 | PtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff); | ||||
7343 | |||||
7344 | if (IsTailCall) { | ||||
7345 | Offset = Offset + FPDiff; | ||||
7346 | int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true); | ||||
7347 | |||||
7348 | DstAddr = DAG.getFrameIndex(FI, PtrVT); | ||||
7349 | DstInfo = MachinePointerInfo::getFixedStack(MF, FI); | ||||
7350 | |||||
7351 | // Make sure any stack arguments overlapping with where we're storing | ||||
7352 | // are loaded before this eventual operation. Otherwise they'll be | ||||
7353 | // clobbered. | ||||
7354 | Chain = addTokenForArgument(Chain, DAG, MF.getFrameInfo(), FI); | ||||
7355 | } else { | ||||
7356 | SDValue PtrOff = DAG.getIntPtrConstant(Offset, DL); | ||||
7357 | |||||
7358 | DstAddr = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff); | ||||
7359 | DstInfo = MachinePointerInfo::getStack(MF, LocMemOffset); | ||||
7360 | } | ||||
7361 | |||||
7362 | if (Outs[i].Flags.isByVal()) { | ||||
7363 | SDValue SizeNode = | ||||
7364 | DAG.getConstant(Outs[i].Flags.getByValSize(), DL, MVT::i64); | ||||
7365 | SDValue Cpy = DAG.getMemcpy( | ||||
7366 | Chain, DL, DstAddr, Arg, SizeNode, | ||||
7367 | Outs[i].Flags.getNonZeroByValAlign(), | ||||
7368 | /*isVol = */ false, /*AlwaysInline = */ false, | ||||
7369 | /*isTailCall = */ false, DstInfo, MachinePointerInfo()); | ||||
7370 | |||||
7371 | MemOpChains.push_back(Cpy); | ||||
7372 | } else { | ||||
7373 | // Since we pass i1/i8/i16 as i1/i8/i16 on stack and Arg is already | ||||
7374 | // promoted to a legal register type i32, we should truncate Arg back to | ||||
7375 | // i1/i8/i16. | ||||
7376 | if (VA.getValVT() == MVT::i1 || VA.getValVT() == MVT::i8 || | ||||
7377 | VA.getValVT() == MVT::i16) | ||||
7378 | Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg); | ||||
7379 | |||||
7380 | SDValue Store = DAG.getStore(Chain, DL, Arg, DstAddr, DstInfo); | ||||
7381 | MemOpChains.push_back(Store); | ||||
7382 | } | ||||
7383 | } | ||||
7384 | } | ||||
7385 | |||||
7386 | if (IsVarArg && Subtarget->isWindowsArm64EC()) { | ||||
7387 | // For vararg calls, the Arm64EC ABI requires values in x4 and x5 | ||||
7388 | // describing the argument list. x4 contains the address of the | ||||
7389 | // first stack parameter. x5 contains the size in bytes of all parameters | ||||
7390 | // passed on the stack. | ||||
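| // Sketch of the resulting setup (assuming 32 bytes of stack-passed | ||||
| // arguments): | ||||
| //   mov x4, sp | ||||
| //   mov x5, #32 | ||||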
7391 | RegsToPass.emplace_back(AArch64::X4, StackPtr); | ||||
7392 | RegsToPass.emplace_back(AArch64::X5, | ||||
7393 | DAG.getConstant(NumBytes, DL, MVT::i64)); | ||||
7394 | } | ||||
7395 | |||||
7396 | if (!MemOpChains.empty()) | ||||
7397 | Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains); | ||||
7398 | |||||
7399 | SDValue InFlag; | ||||
7400 | if (RequiresSMChange) { | ||||
7401 | SDValue NewChain = changeStreamingMode(DAG, DL, *RequiresSMChange, Chain, | ||||
7402 | InFlag, PStateSM, true); | ||||
7403 | Chain = NewChain.getValue(0); | ||||
7404 | InFlag = NewChain.getValue(1); | ||||
7405 | } | ||||
7406 | |||||
7407 | // Build a sequence of copy-to-reg nodes chained together with token chain | ||||
7408 | // and flag operands which copy the outgoing args into the appropriate regs. | ||||
7409 | for (auto &RegToPass : RegsToPass) { | ||||
7410 | Chain = DAG.getCopyToReg(Chain, DL, RegToPass.first, | ||||
7411 | RegToPass.second, InFlag); | ||||
7412 | InFlag = Chain.getValue(1); | ||||
7413 | } | ||||
7414 | |||||
7415 | // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every | ||||
7416 | // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol | ||||
7417 | // node so that legalize doesn't hack it. | ||||
7418 | if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) { | ||||
7419 | auto GV = G->getGlobal(); | ||||
7420 | unsigned OpFlags = | ||||
7421 | Subtarget->classifyGlobalFunctionReference(GV, getTargetMachine()); | ||||
7422 | if (OpFlags & AArch64II::MO_GOT) { | ||||
7423 | Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags); | ||||
7424 | Callee = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, Callee); | ||||
7425 | } else { | ||||
7426 | const GlobalValue *GV = G->getGlobal(); | ||||
7427 | Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, 0); | ||||
7428 | } | ||||
7429 | } else if (auto *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { | ||||
7430 | if (getTargetMachine().getCodeModel() == CodeModel::Large && | ||||
7431 | Subtarget->isTargetMachO()) { | ||||
7432 | const char *Sym = S->getSymbol(); | ||||
7433 | Callee = DAG.getTargetExternalSymbol(Sym, PtrVT, AArch64II::MO_GOT); | ||||
7434 | Callee = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, Callee); | ||||
7435 | } else { | ||||
7436 | const char *Sym = S->getSymbol(); | ||||
7437 | Callee = DAG.getTargetExternalSymbol(Sym, PtrVT, 0); | ||||
7438 | } | ||||
7439 | } | ||||
7440 | |||||
7441 | // We don't usually want to end the call-sequence here because we would tidy | ||||
7442 | // the frame up *after* the call; however, in the ABI-changing tail-call case | ||||
7443 | // we've carefully laid out the parameters so that when sp is reset they'll be | ||||
7444 | // in the correct location. | ||||
7445 | if (IsTailCall && !IsSibCall) { | ||||
7446 | Chain = DAG.getCALLSEQ_END(Chain, 0, 0, InFlag, DL); | ||||
7447 | InFlag = Chain.getValue(1); | ||||
7448 | } | ||||
7449 | |||||
7450 | std::vector<SDValue> Ops; | ||||
7451 | Ops.push_back(Chain); | ||||
7452 | Ops.push_back(Callee); | ||||
7453 | |||||
7454 | if (IsTailCall) { | ||||
7455 | // Each tail call may have to adjust the stack by a different amount, so | ||||
7456 | // this information must travel along with the operation for eventual | ||||
7457 | // consumption by emitEpilogue. | ||||
7458 | Ops.push_back(DAG.getTargetConstant(FPDiff, DL, MVT::i32)); | ||||
7459 | } | ||||
7460 | |||||
7461 | // Add argument registers to the end of the list so that they are known live | ||||
7462 | // into the call. | ||||
7463 | for (auto &RegToPass : RegsToPass) | ||||
7464 | Ops.push_back(DAG.getRegister(RegToPass.first, | ||||
7465 | RegToPass.second.getValueType())); | ||||
7466 | |||||
7467 | // Add a register mask operand representing the call-preserved registers. | ||||
7468 | const uint32_t *Mask; | ||||
7469 | const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo(); | ||||
7470 | if (IsThisReturn) { | ||||
7471 | // For 'this' returns, use the X0-preserving mask if applicable | ||||
7472 | Mask = TRI->getThisReturnPreservedMask(MF, CallConv); | ||||
7473 | if (!Mask) { | ||||
7474 | IsThisReturn = false; | ||||
7475 | Mask = TRI->getCallPreservedMask(MF, CallConv); | ||||
7476 | } | ||||
7477 | } else | ||||
7478 | Mask = TRI->getCallPreservedMask(MF, CallConv); | ||||
7479 | |||||
7480 | if (Subtarget->hasCustomCallingConv()) | ||||
7481 | TRI->UpdateCustomCallPreservedMask(MF, &Mask); | ||||
7482 | |||||
7483 | if (TRI->isAnyArgRegReserved(MF)) | ||||
7484 | TRI->emitReservedArgRegCallError(MF); | ||||
7485 | |||||
7486 | assert(Mask && "Missing call preserved mask for calling convention"); | ||||
7487 | Ops.push_back(DAG.getRegisterMask(Mask)); | ||||
7488 | |||||
7489 | if (InFlag.getNode()) | ||||
7490 | Ops.push_back(InFlag); | ||||
7491 | |||||
7492 | SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); | ||||
7493 | |||||
7494 | // If we're doing a tail call, use a TC_RETURN here rather than an | ||||
7495 | // actual call instruction. | ||||
7496 | if (IsTailCall) { | ||||
7497 | MF.getFrameInfo().setHasTailCall(); | ||||
7498 | SDValue Ret = DAG.getNode(AArch64ISD::TC_RETURN, DL, NodeTys, Ops); | ||||
7499 | |||||
7500 | if (IsCFICall) | ||||
7501 | Ret.getNode()->setCFIType(CLI.CFIType->getZExtValue()); | ||||
7502 | |||||
7503 | DAG.addCallSiteInfo(Ret.getNode(), std::move(CSInfo)); | ||||
7504 | return Ret; | ||||
7505 | } | ||||
7506 | |||||
7507 | unsigned CallOpc = AArch64ISD::CALL; | ||||
7508 | // Calls with operand bundle "clang.arc.attachedcall" are special. They should | ||||
7509 | // be expanded to the call, directly followed by a special marker sequence and | ||||
7510 | // a call to an ObjC library function. Use CALL_RVMARKER to do that. | ||||
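| // Illustrative IR (hypothetical callee and bundle target): | ||||
| //   %r = call ptr @foo() [ "clang.arc.attachedcall"( | ||||
| //                           ptr @objc_retainAutoreleasedReturnValue) ] | ||||
| // lowers via CALL_RVMARKER so the marker (e.g. 'mov x29, x29') and the | ||||
| // runtime call are emitted immediately after the call to @foo. | ||||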
7511 | if (CLI.CB && objcarc::hasAttachedCallOpBundle(CLI.CB)) { | ||||
7512 | assert(!IsTailCall && | ||||
7513 | "tail calls cannot be marked with clang.arc.attachedcall"); | ||||
7514 | CallOpc = AArch64ISD::CALL_RVMARKER; | ||||
7515 | |||||
7516 | // Add a target global address for the retainRV/claimRV runtime function | ||||
7517 | // just before the call target. | ||||
7518 | Function *ARCFn = *objcarc::getAttachedARCFunction(CLI.CB); | ||||
7519 | auto GA = DAG.getTargetGlobalAddress(ARCFn, DL, PtrVT); | ||||
7520 | Ops.insert(Ops.begin() + 1, GA); | ||||
7521 | } else if (GuardWithBTI) | ||||
7522 | CallOpc = AArch64ISD::CALL_BTI; | ||||
7523 | |||||
7524 | // Returns a chain and a flag for retval copy to use. | ||||
7525 | Chain = DAG.getNode(CallOpc, DL, NodeTys, Ops); | ||||
7526 | |||||
7527 | if (IsCFICall) | ||||
7528 | Chain.getNode()->setCFIType(CLI.CFIType->getZExtValue()); | ||||
7529 | |||||
7530 | DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge); | ||||
7531 | InFlag = Chain.getValue(1); | ||||
7532 | DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo)); | ||||
7533 | |||||
7534 | uint64_t CalleePopBytes = | ||||
7535 | DoesCalleeRestoreStack(CallConv, TailCallOpt) ? alignTo(NumBytes, 16) : 0; | ||||
7536 | |||||
7537 | Chain = DAG.getCALLSEQ_END(Chain, NumBytes, CalleePopBytes, InFlag, DL); | ||||
7538 | InFlag = Chain.getValue(1); | ||||
7539 | |||||
7540 | // Handle result values, copying them out of physregs into vregs that we | ||||
7541 | // return. | ||||
7542 | SDValue Result = LowerCallResult(Chain, InFlag, CallConv, IsVarArg, RVLocs, | ||||
7543 | DL, DAG, InVals, IsThisReturn, | ||||
7544 | IsThisReturn ? OutVals[0] : SDValue()); | ||||
7545 | |||||
7546 | if (!Ins.empty()) | ||||
7547 | InFlag = Result.getValue(Result->getNumValues() - 1); | ||||
7548 | |||||
7549 | if (RequiresSMChange) { | ||||
7550 | assert(PStateSM && "Expected a PStateSM to be set"); | ||||
7551 | Result = changeStreamingMode(DAG, DL, !*RequiresSMChange, Result, InFlag, | ||||
7552 | PStateSM, false); | ||||
7553 | } | ||||
7554 | |||||
7555 | if (RequiresLazySave) { | ||||
7556 | // Unconditionally resume ZA. | ||||
7557 | Result = DAG.getNode( | ||||
7558 | AArch64ISD::SMSTART, DL, MVT::Other, Result, | ||||
7559 | DAG.getTargetConstant((int32_t)(AArch64SVCR::SVCRZA), DL, MVT::i32), | ||||
7560 | DAG.getConstant(0, DL, MVT::i64), DAG.getConstant(1, DL, MVT::i64)); | ||||
7561 | |||||
7562 | // Conditionally restore the lazy save using a pseudo node. | ||||
7563 | unsigned FI = FuncInfo->getLazySaveTPIDR2Obj(); | ||||
7564 | SDValue RegMask = DAG.getRegisterMask( | ||||
7565 | TRI->SMEABISupportRoutinesCallPreservedMaskFromX0()); | ||||
7566 | SDValue RestoreRoutine = DAG.getTargetExternalSymbol( | ||||
7567 | "__arm_tpidr2_restore", getPointerTy(DAG.getDataLayout())); | ||||
7568 | SDValue TPIDR2_EL0 = DAG.getNode( | ||||
7569 | ISD::INTRINSIC_W_CHAIN, DL, MVT::i64, Result, | ||||
7570 | DAG.getConstant(Intrinsic::aarch64_sme_get_tpidr2, DL, MVT::i32)); | ||||
7571 | |||||
7572 | // Copy the address of the TPIDR2 block into X0 before 'calling' the | ||||
7573 | // RESTORE_ZA pseudo. | ||||
7574 | SDValue Glue; | ||||
7575 | SDValue TPIDR2Block = DAG.getFrameIndex( | ||||
7576 | FI, DAG.getTargetLoweringInfo().getFrameIndexTy(DAG.getDataLayout())); | ||||
7577 | Result = DAG.getCopyToReg(Result, DL, AArch64::X0, TPIDR2Block, Glue); | ||||
7578 | Result = DAG.getNode(AArch64ISD::RESTORE_ZA, DL, MVT::Other, | ||||
7579 | {Result, TPIDR2_EL0, | ||||
7580 | DAG.getRegister(AArch64::X0, MVT::i64), | ||||
7581 | RestoreRoutine, | ||||
7582 | RegMask, | ||||
7583 | Result.getValue(1)}); | ||||
7584 | |||||
7585 | // Finally reset the TPIDR2_EL0 register to 0. | ||||
7586 | Result = DAG.getNode( | ||||
7587 | ISD::INTRINSIC_VOID, DL, MVT::Other, Result, | ||||
7588 | DAG.getConstant(Intrinsic::aarch64_sme_set_tpidr2, DL, MVT::i32), | ||||
7589 | DAG.getConstant(0, DL, MVT::i64)); | ||||
7590 | } | ||||
7591 | |||||
7592 | if (RequiresSMChange || RequiresLazySave) { | ||||
7593 | for (unsigned I = 0; I < InVals.size(); ++I) { | ||||
7594 | // The smstart/smstop is chained as part of the call, but when the | ||||
7595 | // resulting chain is discarded (which happens when the call is not part | ||||
7596 | // of a chain, e.g. a call to @llvm.cos()), we need to ensure the | ||||
7597 | // smstart/smstop is chained to the result value. We can ensure that | ||||
7598 | // with a vreg -> vreg copy. | ||||
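| // That is, each result value becomes | ||||
| //   InVals[I] = CopyFromReg(CopyToReg(Result, vreg, InVals[I]), vreg) | ||||
| // so its chain passes through Result and the copy cannot be hoisted | ||||
| // above the smstart/smstop. | ||||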
7599 | Register Reg = MF.getRegInfo().createVirtualRegister( | ||||
7600 | getRegClassFor(InVals[I].getValueType().getSimpleVT())); | ||||
7601 | SDValue X = DAG.getCopyToReg(Result, DL, Reg, InVals[I]); | ||||
7602 | InVals[I] = DAG.getCopyFromReg(X, DL, Reg, | ||||
7603 | InVals[I].getValueType()); | ||||
7604 | } | ||||
7605 | } | ||||
7606 | |||||
7607 | return Result; | ||||
7608 | } | ||||
7609 | |||||
7610 | bool AArch64TargetLowering::CanLowerReturn( | ||||
7611 | CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg, | ||||
7612 | const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const { | ||||
7613 | CCAssignFn *RetCC = CCAssignFnForReturn(CallConv); | ||||
7614 | SmallVector<CCValAssign, 16> RVLocs; | ||||
7615 | CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context); | ||||
7616 | return CCInfo.CheckReturn(Outs, RetCC); | ||||
7617 | } | ||||
7618 | |||||
7619 | SDValue | ||||
7620 | AArch64TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, | ||||
7621 | bool isVarArg, | ||||
7622 | const SmallVectorImpl<ISD::OutputArg> &Outs, | ||||
7623 | const SmallVectorImpl<SDValue> &OutVals, | ||||
7624 | const SDLoc &DL, SelectionDAG &DAG) const { | ||||
7625 | auto &MF = DAG.getMachineFunction(); | ||||
7626 | auto *FuncInfo = MF.getInfo<AArch64FunctionInfo>(); | ||||
7627 | |||||
7628 | CCAssignFn *RetCC = CCAssignFnForReturn(CallConv); | ||||
7629 | SmallVector<CCValAssign, 16> RVLocs; | ||||
7630 | CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext()); | ||||
7631 | CCInfo.AnalyzeReturn(Outs, RetCC); | ||||
7632 | |||||
7633 | // Copy the result values into the output registers. | ||||
7634 | SDValue Flag; | ||||
7635 | SmallVector<std::pair<unsigned, SDValue>, 4> RetVals; | ||||
7636 | SmallSet<unsigned, 4> RegsUsed; | ||||
7637 | for (unsigned i = 0, realRVLocIdx = 0; i != RVLocs.size(); | ||||
7638 | ++i, ++realRVLocIdx) { | ||||
7639 | CCValAssign &VA = RVLocs[i]; | ||||
7640 | assert(VA.isRegLoc() && "Can only return in registers!"); | ||||
7641 | SDValue Arg = OutVals[realRVLocIdx]; | ||||
7642 | |||||
7643 | switch (VA.getLocInfo()) { | ||||
7644 | default: | ||||
7645 | llvm_unreachable("Unknown loc info!"); | ||||
7646 | case CCValAssign::Full: | ||||
7647 | if (Outs[i].ArgVT == MVT::i1) { | ||||
7648 | // AAPCS requires i1 to be zero-extended to i8 by the producer of the | ||||
7649 | // value. This is strictly redundant on Darwin (which uses "zeroext | ||||
7650 | // i1"), but will be optimised out before ISel. | ||||
7651 | Arg = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Arg); | ||||
7652 | Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg); | ||||
7653 | } | ||||
7654 | break; | ||||
7655 | case CCValAssign::BCvt: | ||||
7656 | Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg); | ||||
7657 | break; | ||||
7658 | case CCValAssign::AExt: | ||||
7659 | case CCValAssign::ZExt: | ||||
7660 | Arg = DAG.getZExtOrTrunc(Arg, DL, VA.getLocVT()); | ||||
7661 | break; | ||||
7662 | case CCValAssign::AExtUpper: | ||||
7663 | assert(VA.getValVT() == MVT::i32 && "only expect 32 -> 64 upper bits"); | ||||
7664 | Arg = DAG.getZExtOrTrunc(Arg, DL, VA.getLocVT()); | ||||
7665 | Arg = DAG.getNode(ISD::SHL, DL, VA.getLocVT(), Arg, | ||||
7666 | DAG.getConstant(32, DL, VA.getLocVT())); | ||||
7667 | break; | ||||
7668 | } | ||||
7669 | |||||
7670 | if (RegsUsed.count(VA.getLocReg())) { | ||||
7671 | SDValue &Bits = | ||||
7672 | llvm::find_if(RetVals, [=](const std::pair<unsigned, SDValue> &Elt) { | ||||
7673 | return Elt.first == VA.getLocReg(); | ||||
7674 | })->second; | ||||
7675 | Bits = DAG.getNode(ISD::OR, DL, Bits.getValueType(), Bits, Arg); | ||||
7676 | } else { | ||||
7677 | RetVals.emplace_back(VA.getLocReg(), Arg); | ||||
7678 | RegsUsed.insert(VA.getLocReg()); | ||||
7679 | } | ||||
7680 | } | ||||
7681 | |||||
7682 | const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo(); | ||||
7683 | |||||
7684 | // Emit SMSTOP before returning from a locally streaming function | ||||
7685 | SMEAttrs FuncAttrs(MF.getFunction()); | ||||
7686 | if (FuncAttrs.hasStreamingBody() && !FuncAttrs.hasStreamingInterface()) { | ||||
7687 | Chain = DAG.getNode( | ||||
7688 | AArch64ISD::SMSTOP, DL, DAG.getVTList(MVT::Other, MVT::Glue), Chain, | ||||
7689 | DAG.getTargetConstant((int32_t)AArch64SVCR::SVCRSM, DL, MVT::i32), | ||||
7690 | DAG.getConstant(1, DL, MVT::i64), DAG.getConstant(0, DL, MVT::i64), | ||||
7691 | DAG.getRegisterMask(TRI->getSMStartStopCallPreservedMask())); | ||||
7692 | Flag = Chain.getValue(1); | ||||
7693 | } | ||||
7694 | |||||
7695 | SmallVector<SDValue, 4> RetOps(1, Chain); | ||||
7696 | for (auto &RetVal : RetVals) { | ||||
7697 | Chain = DAG.getCopyToReg(Chain, DL, RetVal.first, RetVal.second, Flag); | ||||
7698 | Flag = Chain.getValue(1); | ||||
7699 | RetOps.push_back( | ||||
7700 | DAG.getRegister(RetVal.first, RetVal.second.getValueType())); | ||||
7701 | } | ||||
7702 | |||||
7703 | // Windows AArch64 ABIs require that for returning structs by value we copy | ||||
7704 | // the sret argument into X0 for the return. | ||||
7705 | // We saved the argument into a virtual register in the entry block, | ||||
7706 | // so now we copy the value out and into X0. | ||||
7707 | if (unsigned SRetReg = FuncInfo->getSRetReturnReg()) { | ||||
7708 | SDValue Val = DAG.getCopyFromReg(RetOps[0], DL, SRetReg, | ||||
7709 | getPointerTy(MF.getDataLayout())); | ||||
7710 | |||||
7711 | unsigned RetValReg = AArch64::X0; | ||||
7712 | Chain = DAG.getCopyToReg(Chain, DL, RetValReg, Val, Flag); | ||||
7713 | Flag = Chain.getValue(1); | ||||
7714 | |||||
7715 | RetOps.push_back( | ||||
7716 | DAG.getRegister(RetValReg, getPointerTy(DAG.getDataLayout()))); | ||||
7717 | } | ||||
7718 | |||||
7719 | const MCPhysReg *I = TRI->getCalleeSavedRegsViaCopy(&MF); | ||||
7720 | if (I) { | ||||
7721 | for (; *I; ++I) { | ||||
7722 | if (AArch64::GPR64RegClass.contains(*I)) | ||||
7723 | RetOps.push_back(DAG.getRegister(*I, MVT::i64)); | ||||
7724 | else if (AArch64::FPR64RegClass.contains(*I)) | ||||
7725 | RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64))); | ||||
7726 | else | ||||
7727 | llvm_unreachable("Unexpected register class in CSRsViaCopy!"); | ||||
7728 | } | ||||
7729 | } | ||||
7730 | |||||
7731 | RetOps[0] = Chain; // Update chain. | ||||
7732 | |||||
7733 | // Add the flag if we have it. | ||||
7734 | if (Flag.getNode()) | ||||
7735 | RetOps.push_back(Flag); | ||||
7736 | |||||
7737 | return DAG.getNode(AArch64ISD::RET_FLAG, DL, MVT::Other, RetOps); | ||||
7738 | } | ||||
7739 | |||||
7740 | //===----------------------------------------------------------------------===// | ||||
7741 | // Other Lowering Code | ||||
7742 | //===----------------------------------------------------------------------===// | ||||
7743 | |||||
7744 | SDValue AArch64TargetLowering::getTargetNode(GlobalAddressSDNode *N, EVT Ty, | ||||
7745 | SelectionDAG &DAG, | ||||
7746 | unsigned Flag) const { | ||||
7747 | return DAG.getTargetGlobalAddress(N->getGlobal(), SDLoc(N), Ty, | ||||
7748 | N->getOffset(), Flag); | ||||
7749 | } | ||||
7750 | |||||
7751 | SDValue AArch64TargetLowering::getTargetNode(JumpTableSDNode *N, EVT Ty, | ||||
7752 | SelectionDAG &DAG, | ||||
7753 | unsigned Flag) const { | ||||
7754 | return DAG.getTargetJumpTable(N->getIndex(), Ty, Flag); | ||||
7755 | } | ||||
7756 | |||||
7757 | SDValue AArch64TargetLowering::getTargetNode(ConstantPoolSDNode *N, EVT Ty, | ||||
7758 | SelectionDAG &DAG, | ||||
7759 | unsigned Flag) const { | ||||
7760 | return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(), | ||||
7761 | N->getOffset(), Flag); | ||||
7762 | } | ||||
7763 | |||||
7764 | SDValue AArch64TargetLowering::getTargetNode(BlockAddressSDNode* N, EVT Ty, | ||||
7765 | SelectionDAG &DAG, | ||||
7766 | unsigned Flag) const { | ||||
7767 | return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, 0, Flag); | ||||
7768 | } | ||||
7769 | |||||
7770 | // (loadGOT sym) | ||||
7771 | template <class NodeTy> | ||||
7772 | SDValue AArch64TargetLowering::getGOT(NodeTy *N, SelectionDAG &DAG, | ||||
7773 | unsigned Flags) const { | ||||
7774 | LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getGOT\n"); | ||||
7775 | SDLoc DL(N); | ||||
7776 | EVT Ty = getPointerTy(DAG.getDataLayout()); | ||||
7777 | SDValue GotAddr = getTargetNode(N, Ty, DAG, AArch64II::MO_GOT | Flags); | ||||
7778 | // FIXME: Once remat is capable of dealing with instructions with register | ||||
7779 | // operands, expand this into two nodes instead of using a wrapper node. | ||||
7780 | return DAG.getNode(AArch64ISD::LOADgot, DL, Ty, GotAddr); | ||||
7781 | } | ||||
7782 | |||||
7783 | // (wrapper %highest(sym), %higher(sym), %hi(sym), %lo(sym)) | ||||
7784 | template <class NodeTy> | ||||
7785 | SDValue AArch64TargetLowering::getAddrLarge(NodeTy *N, SelectionDAG &DAG, | ||||
7786 | unsigned Flags) const { | ||||
7787 | LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getAddrLarge\n"); | ||||
7788 | SDLoc DL(N); | ||||
7789 | EVT Ty = getPointerTy(DAG.getDataLayout()); | ||||
7790 | const unsigned char MO_NC = AArch64II::MO_NC; | ||||
7791 | return DAG.getNode( | ||||
7792 | AArch64ISD::WrapperLarge, DL, Ty, | ||||
7793 | getTargetNode(N, Ty, DAG, AArch64II::MO_G3 | Flags), | ||||
7794 | getTargetNode(N, Ty, DAG, AArch64II::MO_G2 | MO_NC | Flags), | ||||
7795 | getTargetNode(N, Ty, DAG, AArch64II::MO_G1 | MO_NC | Flags), | ||||
7796 | getTargetNode(N, Ty, DAG, AArch64II::MO_G0 | MO_NC | Flags)); | ||||
7797 | } | ||||
7798 | |||||
7799 | // (addlow (adrp %hi(sym)) %lo(sym)) | ||||
7800 | template <class NodeTy> | ||||
7801 | SDValue AArch64TargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG, | ||||
7802 | unsigned Flags) const { | ||||
7803 | LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getAddr\n"); | ||||
7804 | SDLoc DL(N); | ||||
7805 | EVT Ty = getPointerTy(DAG.getDataLayout()); | ||||
7806 | SDValue Hi = getTargetNode(N, Ty, DAG, AArch64II::MO_PAGE | Flags); | ||||
7807 | SDValue Lo = getTargetNode(N, Ty, DAG, | ||||
7808 | AArch64II::MO_PAGEOFF | AArch64II::MO_NC | Flags); | ||||
7809 | SDValue ADRP = DAG.getNode(AArch64ISD::ADRP, DL, Ty, Hi); | ||||
7810 | return DAG.getNode(AArch64ISD::ADDlow, DL, Ty, ADRP, Lo); | ||||
7811 | } | ||||
7812 | |||||
7813 | // (adr sym) | ||||
7814 | template <class NodeTy> | ||||
7815 | SDValue AArch64TargetLowering::getAddrTiny(NodeTy *N, SelectionDAG &DAG, | ||||
7816 | unsigned Flags) const { | ||||
7817 | LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getAddrTiny\n"); | ||||
7818 | SDLoc DL(N); | ||||
7819 | EVT Ty = getPointerTy(DAG.getDataLayout()); | ||||
7820 | SDValue Sym = getTargetNode(N, Ty, DAG, Flags); | ||||
7821 | return DAG.getNode(AArch64ISD::ADR, DL, Ty, Sym); | ||||
7822 | } | ||||
7823 | |||||
7824 | SDValue AArch64TargetLowering::LowerGlobalAddress(SDValue Op, | ||||
7825 | SelectionDAG &DAG) const { | ||||
7826 | GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op); | ||||
7827 | const GlobalValue *GV = GN->getGlobal(); | ||||
7828 | unsigned OpFlags = Subtarget->ClassifyGlobalReference(GV, getTargetMachine()); | ||||
7829 | |||||
7830 | if (OpFlags != AArch64II::MO_NO_FLAG) | ||||
7831 | assert(cast<GlobalAddressSDNode>(Op)->getOffset() == 0 && | ||||
7832 | "unexpected offset in global node"); | ||||
7833 | |||||
7834 | // This also catches the large code model case for Darwin, and tiny code | ||||
7835 | // model with got relocations. | ||||
7836 | if ((OpFlags & AArch64II::MO_GOT) != 0) { | ||||
7837 | return getGOT(GN, DAG, OpFlags); | ||||
7838 | } | ||||
7839 | |||||
7840 | SDValue Result; | ||||
7841 | if (getTargetMachine().getCodeModel() == CodeModel::Large) { | ||||
7842 | Result = getAddrLarge(GN, DAG, OpFlags); | ||||
7843 | } else if (getTargetMachine().getCodeModel() == CodeModel::Tiny) { | ||||
7844 | Result = getAddrTiny(GN, DAG, OpFlags); | ||||
7845 | } else { | ||||
7846 | Result = getAddr(GN, DAG, OpFlags); | ||||
7847 | } | ||||
7848 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); | ||||
7849 | SDLoc DL(GN); | ||||
7850 | if (OpFlags & (AArch64II::MO_DLLIMPORT | AArch64II::MO_DLLIMPORTAUX | | ||||
7851 | AArch64II::MO_COFFSTUB)) | ||||
7852 | Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result, | ||||
7853 | MachinePointerInfo::getGOT(DAG.getMachineFunction())); | ||||
7854 | return Result; | ||||
7855 | } | ||||
7856 | |||||
7857 | /// Convert a TLS address reference into the correct sequence of loads | ||||
7858 | /// and calls to compute the variable's address (for Darwin, currently) and | ||||
7859 | /// return an SDValue containing the final node. | ||||
7860 | |||||
7861 | /// Darwin only has one TLS scheme which must be capable of dealing with the | ||||
7862 | /// fully general situation, in the worst case. This means: | ||||
7863 | /// + "extern __thread" declaration. | ||||
7864 | /// + Defined in a possibly unknown dynamic library. | ||||
7865 | /// | ||||
7866 | /// The general system is that each __thread variable has a [3 x i64] descriptor | ||||
7867 | /// which contains information used by the runtime to calculate the address. The | ||||
7868 | /// only part of this the compiler needs to know about is the first xword, which | ||||
7869 | /// contains a function pointer that must be called with the address of the | ||||
7870 | /// entire descriptor in "x0". | ||||
7871 | /// | ||||
7872 | /// Since this descriptor may be in a different unit, in general even the | ||||
7873 | /// descriptor must be accessed via an indirect load. The "ideal" code sequence | ||||
7874 | /// is: | ||||
7875 | /// adrp x0, _var@TLVPPAGE | ||||
7876 | /// ldr x0, [x0, _var@TLVPPAGEOFF] ; x0 now contains address of descriptor | ||||
7877 | /// ldr x1, [x0] ; x1 contains 1st entry of descriptor, | ||||
7878 | /// ; the function pointer | ||||
7879 | /// blr x1 ; Uses descriptor address in x0 | ||||
7880 | /// ; Address of _var is now in x0. | ||||
7881 | /// | ||||
7882 | /// If the address of _var's descriptor *is* known to the linker, then it can | ||||
7883 | /// change the first "ldr" instruction to an appropriate "add x0, x0, #imm" for | ||||
7884 | /// a slight efficiency gain. | ||||
7885 | SDValue | ||||
7886 | AArch64TargetLowering::LowerDarwinGlobalTLSAddress(SDValue Op, | ||||
7887 | SelectionDAG &DAG) const { | ||||
7888 | assert(Subtarget->isTargetDarwin() && | ||||
7889 | "This function expects a Darwin target"); | ||||
7890 | |||||
7891 | SDLoc DL(Op); | ||||
7892 | MVT PtrVT = getPointerTy(DAG.getDataLayout()); | ||||
7893 | MVT PtrMemVT = getPointerMemTy(DAG.getDataLayout()); | ||||
7894 | const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); | ||||
7895 | |||||
7896 | SDValue TLVPAddr = | ||||
7897 | DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_TLS); | ||||
7898 | SDValue DescAddr = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, TLVPAddr); | ||||
7899 | |||||
7900 | // The first entry in the descriptor is a function pointer that we must call | ||||
7901 | // to obtain the address of the variable. | ||||
7902 | SDValue Chain = DAG.getEntryNode(); | ||||
7903 | SDValue FuncTLVGet = DAG.getLoad( | ||||
7904 | PtrMemVT, DL, Chain, DescAddr, | ||||
7905 | MachinePointerInfo::getGOT(DAG.getMachineFunction()), | ||||
7906 | Align(PtrMemVT.getSizeInBits() / 8), | ||||
7907 | MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable); | ||||
7908 | Chain = FuncTLVGet.getValue(1); | ||||
7909 | |||||
7910 | // Extend loaded pointer if necessary (i.e. if ILP32) to DAG pointer. | ||||
7911 | FuncTLVGet = DAG.getZExtOrTrunc(FuncTLVGet, DL, PtrVT); | ||||
7912 | |||||
7913 | MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); | ||||
7914 | MFI.setAdjustsStack(true); | ||||
7915 | |||||
7916 | // TLS calls preserve all registers except those that absolutely must be | ||||
7917 | // trashed: X0 (it takes an argument), LR (it's a call) and NZCV (let's not be | ||||
7918 | // silly). | ||||
7919 | const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo(); | ||||
7920 | const uint32_t *Mask = TRI->getTLSCallPreservedMask(); | ||||
7921 | if (Subtarget->hasCustomCallingConv()) | ||||
7922 | TRI->UpdateCustomCallPreservedMask(DAG.getMachineFunction(), &Mask); | ||||
7923 | |||||
7924 | // Finally, we can make the call. This is just a degenerate version of a | ||||
7925 | // normal AArch64 call node: x0 takes the address of the descriptor, and | ||||
7926 | // returns the address of the variable in this thread. | ||||
7927 | Chain = DAG.getCopyToReg(Chain, DL, AArch64::X0, DescAddr, SDValue()); | ||||
7928 | Chain = | ||||
7929 | DAG.getNode(AArch64ISD::CALL, DL, DAG.getVTList(MVT::Other, MVT::Glue), | ||||
7930 | Chain, FuncTLVGet, DAG.getRegister(AArch64::X0, MVT::i64), | ||||
7931 | DAG.getRegisterMask(Mask), Chain.getValue(1)); | ||||
7932 | return DAG.getCopyFromReg(Chain, DL, AArch64::X0, PtrVT, Chain.getValue(1)); | ||||
7933 | } | ||||
7934 | |||||
7935 | /// Convert a thread-local variable reference into a sequence of instructions to | ||||
7936 | /// compute the variable's address for the local exec TLS model of ELF targets. | ||||
7937 | /// The sequence depends on the maximum TLS area size. | ||||
7938 | SDValue AArch64TargetLowering::LowerELFTLSLocalExec(const GlobalValue *GV, | ||||
7939 | SDValue ThreadBase, | ||||
7940 | const SDLoc &DL, | ||||
7941 | SelectionDAG &DAG) const { | ||||
7942 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); | ||||
7943 | SDValue TPOff, Addr; | ||||
7944 | |||||
7945 | switch (DAG.getTarget().Options.TLSSize) { | ||||
7946 | default: | ||||
7947 | llvm_unreachable("Unexpected TLS size"); | ||||
7948 | |||||
7949 | case 12: { | ||||
7950 | // mrs x0, TPIDR_EL0 | ||||
7951 | // add x0, x0, :tprel_lo12:a | ||||
7952 | SDValue Var = DAG.getTargetGlobalAddress( | ||||
7953 | GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_PAGEOFF); | ||||
7954 | return SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, ThreadBase, | ||||
7955 | Var, | ||||
7956 | DAG.getTargetConstant(0, DL, MVT::i32)), | ||||
7957 | 0); | ||||
7958 | } | ||||
7959 | |||||
7960 | case 24: { | ||||
7961 | // mrs x0, TPIDR_EL0 | ||||
7962 | // add x0, x0, :tprel_hi12:a | ||||
7963 | // add x0, x0, :tprel_lo12_nc:a | ||||
7964 | SDValue HiVar = DAG.getTargetGlobalAddress( | ||||
7965 | GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_HI12); | ||||
7966 | SDValue LoVar = DAG.getTargetGlobalAddress( | ||||
7967 | GV, DL, PtrVT, 0, | ||||
7968 | AArch64II::MO_TLS | AArch64II::MO_PAGEOFF | AArch64II::MO_NC); | ||||
7969 | Addr = SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, ThreadBase, | ||||
7970 | HiVar, | ||||
7971 | DAG.getTargetConstant(0, DL, MVT::i32)), | ||||
7972 | 0); | ||||
7973 | return SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, Addr, | ||||
7974 | LoVar, | ||||
7975 | DAG.getTargetConstant(0, DL, MVT::i32)), | ||||
7976 | 0); | ||||
7977 | } | ||||
7978 | |||||
7979 | case 32: { | ||||
7980 | // mrs x1, TPIDR_EL0 | ||||
7981 | // movz x0, #:tprel_g1:a | ||||
7982 | // movk x0, #:tprel_g0_nc:a | ||||
7983 | // add x0, x1, x0 | ||||
7984 | SDValue HiVar = DAG.getTargetGlobalAddress( | ||||
7985 | GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_G1); | ||||
7986 | SDValue LoVar = DAG.getTargetGlobalAddress( | ||||
7987 | GV, DL, PtrVT, 0, | ||||
7988 | AArch64II::MO_TLS | AArch64II::MO_G0 | AArch64II::MO_NC); | ||||
7989 | TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZXi, DL, PtrVT, HiVar, | ||||
7990 | DAG.getTargetConstant(16, DL, MVT::i32)), | ||||
7991 | 0); | ||||
7992 | TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKXi, DL, PtrVT, TPOff, LoVar, | ||||
7993 | DAG.getTargetConstant(0, DL, MVT::i32)), | ||||
7994 | 0); | ||||
7995 | return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadBase, TPOff); | ||||
7996 | } | ||||
7997 | |||||
7998 | case 48: { | ||||
7999 | // mrs x1, TPIDR_EL0 | ||||
8000 | // movz x0, #:tprel_g2:a | ||||
8001 | // movk x0, #:tprel_g1_nc:a | ||||
8002 | // movk x0, #:tprel_g0_nc:a | ||||
8003 | // add x0, x1, x0 | ||||
8004 | SDValue HiVar = DAG.getTargetGlobalAddress( | ||||
8005 | GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_G2); | ||||
8006 | SDValue MiVar = DAG.getTargetGlobalAddress( | ||||
8007 | GV, DL, PtrVT, 0, | ||||
8008 | AArch64II::MO_TLS | AArch64II::MO_G1 | AArch64II::MO_NC); | ||||
8009 | SDValue LoVar = DAG.getTargetGlobalAddress( | ||||
8010 | GV, DL, PtrVT, 0, | ||||
8011 | AArch64II::MO_TLS | AArch64II::MO_G0 | AArch64II::MO_NC); | ||||
8012 | TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZXi, DL, PtrVT, HiVar, | ||||
8013 | DAG.getTargetConstant(32, DL, MVT::i32)), | ||||
8014 | 0); | ||||
8015 | TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKXi, DL, PtrVT, TPOff, MiVar, | ||||
8016 | DAG.getTargetConstant(16, DL, MVT::i32)), | ||||
8017 | 0); | ||||
8018 | TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKXi, DL, PtrVT, TPOff, LoVar, | ||||
8019 | DAG.getTargetConstant(0, DL, MVT::i32)), | ||||
8020 | 0); | ||||
8021 | return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadBase, TPOff); | ||||
8022 | } | ||||
8023 | } | ||||
8024 | } | ||||
8025 | |||||
8026 | /// When accessing thread-local variables under either the general-dynamic or | ||||
8027 | /// local-dynamic system, we make a "TLS-descriptor" call. The variable will | ||||
8028 | /// have a descriptor, accessible via a PC-relative ADRP, and whose first entry | ||||
8029 | /// is a function pointer to carry out the resolution. | ||||
8030 | /// | ||||
8031 | /// The sequence is: | ||||
8032 | /// adrp x0, :tlsdesc:var | ||||
8033 | /// ldr x1, [x0, #:tlsdesc_lo12:var] | ||||
8034 | /// add x0, x0, #:tlsdesc_lo12:var | ||||
8035 | /// .tlsdesccall var | ||||
8036 | /// blr x1 | ||||
8037 | /// (TPIDR_EL0 offset now in x0) | ||||
8038 | /// | ||||
8039 | /// The above sequence must be produced unscheduled, to enable the linker to | ||||
8040 | /// optimize/relax this sequence. | ||||
8041 | /// Therefore, a pseudo-instruction (TLSDESC_CALLSEQ) is used to represent the | ||||
8042 | /// above sequence, and expanded really late in the compilation flow, to ensure | ||||
8043 | /// the sequence is produced as per above. | ||||
8044 | SDValue AArch64TargetLowering::LowerELFTLSDescCallSeq(SDValue SymAddr, | ||||
8045 | const SDLoc &DL, | ||||
8046 | SelectionDAG &DAG) const { | ||||
8047 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); | ||||
8048 | |||||
8049 | SDValue Chain = DAG.getEntryNode(); | ||||
8050 | SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); | ||||
8051 | |||||
8052 | Chain = | ||||
8053 | DAG.getNode(AArch64ISD::TLSDESC_CALLSEQ, DL, NodeTys, {Chain, SymAddr}); | ||||
8054 | SDValue Glue = Chain.getValue(1); | ||||
8055 | |||||
8056 | return DAG.getCopyFromReg(Chain, DL, AArch64::X0, PtrVT, Glue); | ||||
8057 | } | ||||
8058 | |||||
8059 | SDValue | ||||
8060 | AArch64TargetLowering::LowerELFGlobalTLSAddress(SDValue Op, | ||||
8061 | SelectionDAG &DAG) const { | ||||
8062 | assert(Subtarget->isTargetELF() && "This function expects an ELF target"); | ||||
8063 | |||||
8064 | const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); | ||||
8065 | |||||
8066 | TLSModel::Model Model = getTargetMachine().getTLSModel(GA->getGlobal()); | ||||
8067 | |||||
8068 | if (!EnableAArch64ELFLocalDynamicTLSGeneration) { | ||||
8069 | if (Model == TLSModel::LocalDynamic) | ||||
8070 | Model = TLSModel::GeneralDynamic; | ||||
8071 | } | ||||
8072 | |||||
8073 | if (getTargetMachine().getCodeModel() == CodeModel::Large && | ||||
8074 | Model != TLSModel::LocalExec) | ||||
8075 | report_fatal_error("ELF TLS only supported in small memory model or " | ||||
8076 | "in local exec TLS model"); | ||||
8077 | // Different choices can be made for the maximum size of the TLS area for a | ||||
8078 | // module. For the small address model, the default TLS size is 16MiB and the | ||||
8079 | // maximum TLS size is 4GiB. | ||||
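| // (TLSSize is a target option, typically driven by clang's -mtls-size= | ||||
| // flag; the supported values correspond to the 12/24/32/48-bit | ||||
| // sequences in LowerELFTLSLocalExec above.) | ||||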
8080 | // FIXME: add tiny and large code model support for TLS access models other | ||||
8081 | // than local exec. We currently generate the same code as small for tiny, | ||||
8082 | // which may be larger than needed. | ||||
8083 | |||||
8084 | SDValue TPOff; | ||||
8085 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); | ||||
8086 | SDLoc DL(Op); | ||||
8087 | const GlobalValue *GV = GA->getGlobal(); | ||||
8088 | |||||
8089 | SDValue ThreadBase = DAG.getNode(AArch64ISD::THREAD_POINTER, DL, PtrVT); | ||||
8090 | |||||
8091 | if (Model == TLSModel::LocalExec) { | ||||
8092 | return LowerELFTLSLocalExec(GV, ThreadBase, DL, DAG); | ||||
8093 | } else if (Model == TLSModel::InitialExec) { | ||||
8094 | TPOff = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_TLS); | ||||
8095 | TPOff = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, TPOff); | ||||
8096 | } else if (Model == TLSModel::LocalDynamic) { | ||||
8097 | // Local-dynamic accesses proceed in two phases: a general-dynamic TLS | ||||
8098 | // descriptor call against the special symbol _TLS_MODULE_BASE_ calculates | ||||
8099 | // the beginning of the module's TLS region, and is followed by a DTPREL | ||||
8100 | // offset calculation. | ||||
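| // Sketch of the overall sequence (illustrative): | ||||
| //   <TLSDESC call against _TLS_MODULE_BASE_>  ; module base now in x0 | ||||
| //   add x0, x0, :dtprel_hi12:var | ||||
| //   add x0, x0, :dtprel_lo12_nc:var | ||||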
8101 | |||||
8102 | // These accesses will need deduplicating if there's more than one. | ||||
8103 | AArch64FunctionInfo *MFI = | ||||
8104 | DAG.getMachineFunction().getInfo<AArch64FunctionInfo>(); | ||||
8105 | MFI->incNumLocalDynamicTLSAccesses(); | ||||
8106 | |||||
8107 | // The call needs a relocation too for linker relaxation. It doesn't make | ||||
8108 | // sense to call it MO_PAGE or MO_PAGEOFF though so we need another copy of | ||||
8109 | // the address. | ||||
8110 | SDValue SymAddr = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT, | ||||
8111 | AArch64II::MO_TLS); | ||||
8112 | |||||
8113 | // Now we can calculate the offset from TPIDR_EL0 to this module's | ||||
8114 | // thread-local area. | ||||
8115 | TPOff = LowerELFTLSDescCallSeq(SymAddr, DL, DAG); | ||||
8116 | |||||
8117 | // Now use :dtprel_whatever: operations to calculate this variable's offset | ||||
8118 | // in its thread-storage area. | ||||
8119 | SDValue HiVar = DAG.getTargetGlobalAddress( | ||||
8120 | GV, DL, MVT::i64, 0, AArch64II::MO_TLS | AArch64II::MO_HI12); | ||||
8121 | SDValue LoVar = DAG.getTargetGlobalAddress( | ||||
8122 | GV, DL, MVT::i64, 0, | ||||
8123 | AArch64II::MO_TLS | AArch64II::MO_PAGEOFF | AArch64II::MO_NC); | ||||
8124 | |||||
8125 | TPOff = SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, TPOff, HiVar, | ||||
8126 | DAG.getTargetConstant(0, DL, MVT::i32)), | ||||
8127 | 0); | ||||
8128 | TPOff = SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, TPOff, LoVar, | ||||
8129 | DAG.getTargetConstant(0, DL, MVT::i32)), | ||||
8130 | 0); | ||||
8131 | } else if (Model == TLSModel::GeneralDynamic) { | ||||
8132 | // The call needs a relocation too for linker relaxation. It doesn't make | ||||
8133 | // sense to call it MO_PAGE or MO_PAGEOFF though so we need another copy of | ||||
8134 | // the address. | ||||
8135 | SDValue SymAddr = | ||||
8136 | DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_TLS); | ||||
8137 | |||||
8138 | // Finally we can make a call to calculate the offset from tpidr_el0. | ||||
8139 | TPOff = LowerELFTLSDescCallSeq(SymAddr, DL, DAG); | ||||
8140 | } else | ||||
8141 | llvm_unreachable("Unsupported ELF TLS access model"); | ||||
8142 | |||||
8143 | return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadBase, TPOff); | ||||
8144 | } | ||||
8145 | |||||
8146 | SDValue | ||||
8147 | AArch64TargetLowering::LowerWindowsGlobalTLSAddress(SDValue Op, | ||||
8148 | SelectionDAG &DAG) const { | ||||
8149 | assert(Subtarget->isTargetWindows() && "Windows specific TLS lowering"); | ||||
8150 | |||||
8151 | SDValue Chain = DAG.getEntryNode(); | ||||
8152 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); | ||||
8153 | SDLoc DL(Op); | ||||
8154 | |||||
8155 | SDValue TEB = DAG.getRegister(AArch64::X18, MVT::i64); | ||||
8156 | |||||
8157 | // Load the ThreadLocalStoragePointer from the TEB | ||||
8158 | // A pointer to the TLS array is located at offset 0x58 from the TEB. | ||||
8159 | SDValue TLSArray = | ||||
8160 | DAG.getNode(ISD::ADD, DL, PtrVT, TEB, DAG.getIntPtrConstant(0x58, DL)); | ||||
8161 | TLSArray = DAG.getLoad(PtrVT, DL, Chain, TLSArray, MachinePointerInfo()); | ||||
8162 | Chain = TLSArray.getValue(1); | ||||
8163 | |||||
8164 | // Load the TLS index from the C runtime. | ||||
8165 | // This does the same as getAddr(), but without having a GlobalAddressSDNode. | ||||
8166 | // This also does the same as LOADgot, but using a generic i32 load, | ||||
8167 | // while LOADgot only loads i64. | ||||
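| // Sketch of the expected selection (illustrative): | ||||
| //   adrp x8, _tls_index | ||||
| //   ldr  w9, [x8, :lo12:_tls_index] | ||||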
8168 | SDValue TLSIndexHi = | ||||
8169 | DAG.getTargetExternalSymbol("_tls_index", PtrVT, AArch64II::MO_PAGE); | ||||
8170 | SDValue TLSIndexLo = DAG.getTargetExternalSymbol( | ||||
8171 | "_tls_index", PtrVT, AArch64II::MO_PAGEOFF | AArch64II::MO_NC); | ||||
8172 | SDValue ADRP = DAG.getNode(AArch64ISD::ADRP, DL, PtrVT, TLSIndexHi); | ||||
8173 | SDValue TLSIndex = | ||||
8174 | DAG.getNode(AArch64ISD::ADDlow, DL, PtrVT, ADRP, TLSIndexLo); | ||||
8175 | TLSIndex = DAG.getLoad(MVT::i32, DL, Chain, TLSIndex, MachinePointerInfo()); | ||||
8176 | Chain = TLSIndex.getValue(1); | ||||
8177 | |||||
8178 | // The pointer to the thread's TLS data area is at the TLS Index scaled by 8 | ||||
8179 | // offset into the TLSArray. | ||||
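| // I.e. (sketch) the load below amounts to something like | ||||
| //   ldr x8, [TLSArray, TLSIndex, lsl #3] | ||||
| // once the shift and add are folded into the addressing mode. | ||||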
8180 | TLSIndex = DAG.getNode(ISD::ZERO_EXTEND, DL, PtrVT, TLSIndex); | ||||
8181 | SDValue Slot = DAG.getNode(ISD::SHL, DL, PtrVT, TLSIndex, | ||||
8182 | DAG.getConstant(3, DL, PtrVT)); | ||||
8183 | SDValue TLS = DAG.getLoad(PtrVT, DL, Chain, | ||||
8184 | DAG.getNode(ISD::ADD, DL, PtrVT, TLSArray, Slot), | ||||
8185 | MachinePointerInfo()); | ||||
8186 | Chain = TLS.getValue(1); | ||||
8187 | |||||
8188 | const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); | ||||
8189 | const GlobalValue *GV = GA->getGlobal(); | ||||
8190 | SDValue TGAHi = DAG.getTargetGlobalAddress( | ||||
8191 | GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_HI12); | ||||
8192 | SDValue TGALo = DAG.getTargetGlobalAddress( | ||||
8193 | GV, DL, PtrVT, 0, | ||||
8194 | AArch64II::MO_TLS | AArch64II::MO_PAGEOFF | AArch64II::MO_NC); | ||||
8195 | |||||
8196 | // Add the offset from the start of the .tls section (section base). | ||||
8197 | SDValue Addr = | ||||
8198 | SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, TLS, TGAHi, | ||||
8199 | DAG.getTargetConstant(0, DL, MVT::i32)), | ||||
8200 | 0); | ||||
8201 | Addr = DAG.getNode(AArch64ISD::ADDlow, DL, PtrVT, Addr, TGALo); | ||||
8202 | return Addr; | ||||
8203 | } | ||||

SDValue AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op,
                                                     SelectionDAG &DAG) const {
  const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  if (DAG.getTarget().useEmulatedTLS())
    return LowerToTLSEmulatedModel(GA, DAG);

  if (Subtarget->isTargetDarwin())
    return LowerDarwinGlobalTLSAddress(Op, DAG);
  if (Subtarget->isTargetELF())
    return LowerELFGlobalTLSAddress(Op, DAG);
  if (Subtarget->isTargetWindows())
    return LowerWindowsGlobalTLSAddress(Op, DAG);

  llvm_unreachable("Unexpected platform trying to use TLS");
}

// Looks through \param Val to determine the bit that can be used to
// check the sign of the value. It returns the unextended value and
// the sign bit position.
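// For example (an illustration of the cases handled below): for
// (sign_extend_inreg x, i8) this returns {x, 7}, while for a plain i32
// value x it returns {x, 31}.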
std::pair<SDValue, uint64_t> lookThroughSignExtension(SDValue Val) {
  if (Val.getOpcode() == ISD::SIGN_EXTEND_INREG)
    return {Val.getOperand(0),
            cast<VTSDNode>(Val.getOperand(1))->getVT().getFixedSizeInBits() -
                1};

  if (Val.getOpcode() == ISD::SIGN_EXTEND)
    return {Val.getOperand(0),
            Val.getOperand(0)->getValueType(0).getFixedSizeInBits() - 1};

  return {Val, Val.getValueSizeInBits() - 1};
}

SDValue AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc dl(Op);

  MachineFunction &MF = DAG.getMachineFunction();
  // Speculation tracking/SLH assumes that optimized TB(N)Z/CB(N)Z instructions
  // will not be produced, as they are conditional branch instructions that do
  // not set flags.
  bool ProduceNonFlagSettingCondBr =
      !MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening);

  // Handle f128 first, since lowering it will result in comparing the return
  // value of a libcall against zero, which is just what the rest of LowerBR_CC
  // is expecting to deal with.
  if (LHS.getValueType() == MVT::f128) {
    softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl, LHS, RHS);

    // If softenSetCCOperands returned a scalar, we need to compare the result
    // against zero to select between true and false values.
    if (!RHS.getNode()) {
      RHS = DAG.getConstant(0, dl, LHS.getValueType());
      CC = ISD::SETNE;
    }
  }

  // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch
  // instruction.
  if (ISD::isOverflowIntrOpRes(LHS) && isOneConstant(RHS) &&
      (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    // Only lower legal XALUO ops.
    if (!DAG.getTargetLoweringInfo().isTypeLegal(LHS->getValueType(0)))
      return SDValue();

    // The actual operation with overflow check.
    AArch64CC::CondCode OFCC;
    SDValue Value, Overflow;
    std::tie(Value, Overflow) = getAArch64XALUOOp(OFCC, LHS.getValue(0), DAG);

    if (CC == ISD::SETNE)
      OFCC = getInvertedCondCode(OFCC);
    SDValue CCVal = DAG.getConstant(OFCC, dl, MVT::i32);

    return DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
                       Overflow);
  }
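
  // (For the block above: e.g. a branch on the overflow bit of
  // @llvm.sadd.with.overflow becomes ADDS followed by a single B.VS, or B.VC
  // for the inverted condition; an illustrative, not exhaustive, sketch.)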

  if (LHS.getValueType().isInteger()) {
    assert((LHS.getValueType() == RHS.getValueType()) &&
           (LHS.getValueType() == MVT::i32 || LHS.getValueType() == MVT::i64));

    // If the RHS of the comparison is zero, we can potentially fold this
    // to a specialized branch.
    const ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS);
    if (RHSC && RHSC->getZExtValue() == 0 && ProduceNonFlagSettingCondBr) {
      if (CC == ISD::SETEQ) {
        // See if we can use a TBZ to fold in an AND as well.
        // TBZ has a smaller branch displacement than CBZ. If the offset is
        // out of bounds, a late MI-layer pass rewrites branches.
        // 403.gcc is an example that hits this case.
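        // E.g. (br (seteq (and x, 4), 0), dest) becomes "tbz x, #2, dest"
        // rather than "tst x, #4; b.eq dest" (illustrative assembly).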
        if (LHS.getOpcode() == ISD::AND &&
            isa<ConstantSDNode>(LHS.getOperand(1)) &&
            isPowerOf2_64(LHS.getConstantOperandVal(1))) {
          SDValue Test = LHS.getOperand(0);
          uint64_t Mask = LHS.getConstantOperandVal(1);
          return DAG.getNode(AArch64ISD::TBZ, dl, MVT::Other, Chain, Test,
                             DAG.getConstant(Log2_64(Mask), dl, MVT::i64),
                             Dest);
        }

        return DAG.getNode(AArch64ISD::CBZ, dl, MVT::Other, Chain, LHS, Dest);
      } else if (CC == ISD::SETNE) {
        // See if we can use a TBZ to fold in an AND as well.
        // TBZ has a smaller branch displacement than CBZ. If the offset is
        // out of bounds, a late MI-layer pass rewrites branches.
        // 403.gcc is an example that hits this case.
        if (LHS.getOpcode() == ISD::AND &&
            isa<ConstantSDNode>(LHS.getOperand(1)) &&
            isPowerOf2_64(LHS.getConstantOperandVal(1))) {
          SDValue Test = LHS.getOperand(0);
          uint64_t Mask = LHS.getConstantOperandVal(1);
          return DAG.getNode(AArch64ISD::TBNZ, dl, MVT::Other, Chain, Test,
                             DAG.getConstant(Log2_64(Mask), dl, MVT::i64),
                             Dest);
        }

        return DAG.getNode(AArch64ISD::CBNZ, dl, MVT::Other, Chain, LHS, Dest);
      } else if (CC == ISD::SETLT && LHS.getOpcode() != ISD::AND) {
        // Don't combine AND since emitComparison converts the AND to an ANDS
        // (a.k.a. TST) and the test in the test bit and branch instruction
        // becomes redundant. This would also increase register pressure.
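        // A signed-less-than-zero test reduces to a test of the sign bit,
        // e.g. (br (setlt x, 0), dest) becomes "tbnz x, #63, dest" for i64 x.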
        uint64_t SignBitPos;
        std::tie(LHS, SignBitPos) = lookThroughSignExtension(LHS);
        return DAG.getNode(AArch64ISD::TBNZ, dl, MVT::Other, Chain, LHS,
                           DAG.getConstant(SignBitPos, dl, MVT::i64), Dest);
      }
    }
    if (RHSC && RHSC->getSExtValue() == -1 && CC == ISD::SETGT &&
        LHS.getOpcode() != ISD::AND && ProduceNonFlagSettingCondBr) {
      // Don't combine AND since emitComparison converts the AND to an ANDS
      // (a.k.a. TST) and the test in the test bit and branch instruction
      // becomes redundant. This would also increase register pressure.
      uint64_t SignBitPos;
      std::tie(LHS, SignBitPos) = lookThroughSignExtension(LHS);
      return DAG.getNode(AArch64ISD::TBZ, dl, MVT::Other, Chain, LHS,
                         DAG.getConstant(SignBitPos, dl, MVT::i64), Dest);
    }

    SDValue CCVal;
    SDValue Cmp = getAArch64Cmp(LHS, RHS, CC, CCVal, DAG, dl);
    return DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
                       Cmp);
  }

  assert(LHS.getValueType() == MVT::f16 || LHS.getValueType() == MVT::bf16 ||
         LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64);

  // Unfortunately, the mapping of LLVM FP CC's onto AArch64 CC's isn't totally
  // clean. Some of them require two branches to implement.
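  // E.g. an ordered-and-unequal test (SETONE) has no single AArch64 condition
  // and is emitted as two branches, one on MI and one on GT; that pairing is
  // what changeFPCCToAArch64CC produces for it.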
  SDValue Cmp = emitComparison(LHS, RHS, CC, dl, DAG);
  AArch64CC::CondCode CC1, CC2;
  changeFPCCToAArch64CC(CC, CC1, CC2);
  SDValue CC1Val = DAG.getConstant(CC1, dl, MVT::i32);
  SDValue BR1 =
      DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, Chain, Dest, CC1Val, Cmp);
  if (CC2 != AArch64CC::AL) {
    SDValue CC2Val = DAG.getConstant(CC2, dl, MVT::i32);
    return DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, BR1, Dest, CC2Val,
                       Cmp);
  }

  return BR1;
}

SDValue AArch64TargetLowering::LowerFCOPYSIGN(SDValue Op,
                                              SelectionDAG &DAG) const {
  if (!Subtarget->hasNEON())
    return SDValue();

  EVT VT = Op.getValueType();
  EVT IntVT = VT.changeTypeToInteger();
  SDLoc DL(Op);

  SDValue In1 = Op.getOperand(0);
  SDValue In2 = Op.getOperand(1);
  EVT SrcVT = In2.getValueType();

  if (!SrcVT.bitsEq(VT))
    In2 = DAG.getFPExtendOrRound(In2, DL, VT);

  if (VT.isScalableVector())
    IntVT =
        getPackedSVEVectorVT(VT.getVectorElementType().changeTypeToInteger());

  if (VT.isFixedLengthVector() &&
      useSVEForFixedLengthVectorVT(VT,
                                   Subtarget->forceStreamingCompatibleSVE())) {
    EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);

    In1 = convertToScalableVector(DAG, ContainerVT, In1);
    In2 = convertToScalableVector(DAG, ContainerVT, In2);

    SDValue Res = DAG.getNode(ISD::FCOPYSIGN, DL, ContainerVT, In1, In2);
    return convertFromScalableVector(DAG, VT, Res);
  }

  auto BitCast = [this](EVT VT, SDValue Op, SelectionDAG &DAG) {
    if (VT.isScalableVector())
      return getSVESafeBitCast(VT, Op, DAG);

    return DAG.getBitcast(VT, Op);
  };

  SDValue VecVal1, VecVal2;
  EVT VecVT;
  auto SetVecVal = [&](int Idx = -1) {
    if (!VT.isVector()) {
      VecVal1 =
          DAG.getTargetInsertSubreg(Idx, DL, VecVT, DAG.getUNDEF(VecVT), In1);
      VecVal2 =
          DAG.getTargetInsertSubreg(Idx, DL, VecVT, DAG.getUNDEF(VecVT), In2);
    } else {
      VecVal1 = BitCast(VecVT, In1, DAG);
      VecVal2 = BitCast(VecVT, In2, DAG);
    }
  };
  if (VT.isVector()) {
    VecVT = IntVT;
    SetVecVal();
  } else if (VT == MVT::f64) {
    VecVT = MVT::v2i64;
    SetVecVal(AArch64::dsub);
  } else if (VT == MVT::f32) {
    VecVT = MVT::v4i32;
    SetVecVal(AArch64::ssub);
  } else if (VT == MVT::f16) {
    VecVT = MVT::v8i16;
    SetVecVal(AArch64::hsub);
  } else {
    llvm_unreachable("Invalid type for copysign!");
  }

  unsigned BitWidth = In1.getScalarValueSizeInBits();
  SDValue SignMaskV = DAG.getConstant(~APInt::getSignMask(BitWidth), DL, VecVT);

  // We want to materialize a mask with every bit but the high bit set, but the
  // AdvSIMD immediate moves cannot materialize that in a single instruction
  // for 64-bit elements. Instead, materialize all bits set and then negate
  // that.
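  // (FNEG only flips the sign bit, so negating an all-ones pattern leaves
  // 0x7FFFFFFFFFFFFFFF in each 64-bit lane, exactly the mask we need.)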
  if (VT == MVT::f64 || VT == MVT::v2f64) {
    SignMaskV = DAG.getConstant(APInt::getAllOnes(BitWidth), DL, VecVT);
    SignMaskV = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, SignMaskV);
    SignMaskV = DAG.getNode(ISD::FNEG, DL, MVT::v2f64, SignMaskV);
    SignMaskV = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, SignMaskV);
  }

  SDValue BSP =
      DAG.getNode(AArch64ISD::BSP, DL, VecVT, SignMaskV, VecVal1, VecVal2);
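  // BSP is a bitwise select, roughly (Mask & VecVal1) | (~Mask & VecVal2):
  // every magnitude bit comes from In1 and only the sign bit comes from In2.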
  if (VT == MVT::f16)
    return DAG.getTargetExtractSubreg(AArch64::hsub, DL, VT, BSP);
  if (VT == MVT::f32)
    return DAG.getTargetExtractSubreg(AArch64::ssub, DL, VT, BSP);
  if (VT == MVT::f64)
    return DAG.getTargetExtractSubreg(AArch64::dsub, DL, VT, BSP);

  return BitCast(VT, BSP, DAG);
}

SDValue AArch64TargetLowering::LowerCTPOP_PARITY(SDValue Op,
                                                 SelectionDAG &DAG) const {
  if (DAG.getMachineFunction().getFunction().hasFnAttribute(
          Attribute::NoImplicitFloat))
    return SDValue();

  if (!Subtarget->hasNEON())
    return SDValue();

  bool IsParity = Op.getOpcode() == ISD::PARITY;
  SDValue Val = Op.getOperand(0);
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  // For i32, the generic parity expansion using EORs is more efficient than
  // going through the floating-point unit.
  if (VT == MVT::i32 && IsParity)
    return SDValue();

  // If there is no CNT instruction available, GPR popcount can
  // be more efficiently lowered to the following sequence that uses
  // AdvSIMD registers/instructions as long as the copies to/from
  // the AdvSIMD registers are cheap.
  //  FMOV    D0, X0        // copy 64-bit int to vector, high bits zero'd
  //  CNT     V0.8B, V0.8B  // 8xbyte pop-counts
  //  ADDV    B0, V0.8B     // sum 8xbyte pop-counts
  //  UMOV    X0, V0.B[0]   // copy byte result back to integer reg
  if (VT == MVT::i32 || VT == MVT::i64) {
    if (VT == MVT::i32)
      Val = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Val);
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::v8i8, Val);

    SDValue CtPop = DAG.getNode(ISD::CTPOP, DL, MVT::v8i8, Val);
    SDValue UaddLV = DAG.getNode(
        ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32,
        DAG.getConstant(Intrinsic::aarch64_neon_uaddlv, DL, MVT::i32), CtPop);

    if (IsParity)
      UaddLV = DAG.getNode(ISD::AND, DL, MVT::i32, UaddLV,
                           DAG.getConstant(1, DL, MVT::i32));

    if (VT == MVT::i64)
      UaddLV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, UaddLV);
    return UaddLV;
  } else if (VT == MVT::i128) {
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Val);

    SDValue CtPop = DAG.getNode(ISD::CTPOP, DL, MVT::v16i8, Val);
    SDValue UaddLV = DAG.getNode(
        ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32,
        DAG.getConstant(Intrinsic::aarch64_neon_uaddlv, DL, MVT::i32), CtPop);

    if (IsParity)
      UaddLV = DAG.getNode(ISD::AND, DL, MVT::i32, UaddLV,
                           DAG.getConstant(1, DL, MVT::i32));

    return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i128, UaddLV);
  }

  assert(!IsParity && "ISD::PARITY of vector types not supported");

  if (VT.isScalableVector() ||
      useSVEForFixedLengthVectorVT(VT,
                                   Subtarget->forceStreamingCompatibleSVE()))
    return LowerToPredicatedOp(Op, DAG, AArch64ISD::CTPOP_MERGE_PASSTHRU);

  assert((VT == MVT::v1i64 || VT == MVT::v2i64 || VT == MVT::v2i32 ||
          VT == MVT::v4i32 || VT == MVT::v4i16 || VT == MVT::v8i16) &&
         "Unexpected type for custom ctpop lowering");

  EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8;
  Val = DAG.getBitcast(VT8Bit, Val);
  Val = DAG.getNode(ISD::CTPOP, DL, VT8Bit, Val);

  // Widen v8i8/v16i8 CTPOP result to VT by repeatedly widening pairwise adds.
  unsigned EltSize = 8;
  unsigned NumElts = VT.is64BitVector() ? 8 : 16;
  while (EltSize != VT.getScalarSizeInBits()) {
    EltSize *= 2;
    NumElts /= 2;
    MVT WidenVT = MVT::getVectorVT(MVT::getIntegerVT(EltSize), NumElts);
    Val = DAG.getNode(
        ISD::INTRINSIC_WO_CHAIN, DL, WidenVT,
        DAG.getConstant(Intrinsic::aarch64_neon_uaddlp, DL, MVT::i32), Val);
  }
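  // E.g. for VT == v4i32 the loop above runs twice, widening the per-byte
  // counts v16i8 -> v8i16 -> v4i32 with one UADDLP per step.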

  return Val;
}

SDValue AArch64TargetLowering::LowerCTTZ(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  assert(VT.isScalableVector() ||
         useSVEForFixedLengthVectorVT(
             VT, /*OverrideNEON=*/Subtarget->useSVEForFixedLengthVectors()));

  SDLoc DL(Op);
  SDValue RBIT = DAG.getNode(ISD::BITREVERSE, DL, VT, Op.getOperand(0));
  return DAG.getNode(ISD::CTLZ, DL, VT, RBIT);
}

SDValue AArch64TargetLowering::LowerMinMax(SDValue Op,
                                           SelectionDAG &DAG) const {

  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  unsigned Opcode = Op.getOpcode();
  ISD::CondCode CC;
  switch (Opcode) {
  default:
    llvm_unreachable("Wrong instruction");
  case ISD::SMAX:
    CC = ISD::SETGT;
    break;
  case ISD::SMIN:
    CC = ISD::SETLT;
    break;
  case ISD::UMAX:
    CC = ISD::SETUGT;
    break;
  case ISD::UMIN:
    CC = ISD::SETULT;
    break;
  }

  if (VT.isScalableVector() ||
      useSVEForFixedLengthVectorVT(
          VT, /*OverrideNEON=*/Subtarget->useSVEForFixedLengthVectors())) {
    switch (Opcode) {
    default:
      llvm_unreachable("Wrong instruction");
    case ISD::SMAX:
      return LowerToPredicatedOp(Op, DAG, AArch64ISD::SMAX_PRED);
    case ISD::SMIN:
      return LowerToPredicatedOp(Op, DAG, AArch64ISD::SMIN_PRED);
    case ISD::UMAX:
      return LowerToPredicatedOp(Op, DAG, AArch64ISD::UMAX_PRED);
    case ISD::UMIN:
      return LowerToPredicatedOp(Op, DAG, AArch64ISD::UMIN_PRED);
    }
  }

  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue Cond = DAG.getSetCC(DL, VT, Op0, Op1, CC);
  return DAG.getSelect(DL, VT, Cond, Op0, Op1);
}

SDValue AArch64TargetLowering::LowerBitreverse(SDValue Op,
                                               SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();

  if (VT.isScalableVector() ||
      useSVEForFixedLengthVectorVT(
          VT, /*OverrideNEON=*/Subtarget->useSVEForFixedLengthVectors()))
    return LowerToPredicatedOp(Op, DAG, AArch64ISD::BITREVERSE_MERGE_PASSTHRU);

  SDLoc DL(Op);
  SDValue REVB;
  MVT VST;

  switch (VT.getSimpleVT().SimpleTy) {
  default:
    llvm_unreachable("Invalid type for bitreverse!");

  case MVT::v2i32: {
    VST = MVT::v8i8;
    REVB = DAG.getNode(AArch64ISD::REV32, DL, VST, Op.getOperand(0));
    break;
  }

  case MVT::v4i32: {
    VST = MVT::v16i8;
    REVB = DAG.getNode(AArch64ISD::REV32, DL, VST, Op.getOperand(0));
    break;
  }

  case MVT::v1i64: {
    VST = MVT::v8i8;
    REVB = DAG.getNode(AArch64ISD::REV64, DL, VST, Op.getOperand(0));
    break;
  }

  case MVT::v2i64: {
    VST = MVT::v16i8;
    REVB = DAG.getNode(AArch64ISD::REV64, DL, VST, Op.getOperand(0));
    break;
  }
  }

  return DAG.getNode(AArch64ISD::NVCAST, DL, VT,
                     DAG.getNode(ISD::BITREVERSE, DL, VST, REVB));
}

// Check whether \p N forms a continuous comparison sequence: a chain of ORs
// whose leaf nodes are XORs.
static bool
isOrXorChain(SDValue N, unsigned &Num,
             SmallVector<std::pair<SDValue, SDValue>, 16> &WorkList) {
  if (Num == MaxXors)
    return false;

  // Skip the one-use zext.
  if (N->getOpcode() == ISD::ZERO_EXTEND && N->hasOneUse())
    N = N->getOperand(0);

  // The leaf node must be XOR.
  if (N->getOpcode() == ISD::XOR) {
    WorkList.push_back(std::make_pair(N->getOperand(0), N->getOperand(1)));
    Num++;
    return true;
  }

  // All the non-leaf nodes must be OR.
  if (N->getOpcode() != ISD::OR || !N->hasOneUse())
    return false;

  if (isOrXorChain(N->getOperand(0), Num, WorkList) &&
      isOrXorChain(N->getOperand(1), Num, WorkList))
    return true;
  return false;
}

// Transform chains of ORs and XORs, which are usually produced by the
// expansion of memcmp/bcmp.
static SDValue performOrXorChainCombine(SDNode *N, SelectionDAG &DAG) {
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  SDLoc DL(N);
  EVT VT = N->getValueType(0);
  SmallVector<std::pair<SDValue, SDValue>, 16> WorkList;

  // Only handle integer compares.
  if (N->getOpcode() != ISD::SETCC)
    return SDValue();

  ISD::CondCode Cond = cast<CondCodeSDNode>(N->getOperand(2))->get();
  // Try to express conjunction "cmp 0 (or (xor A0 A1) (xor B0 B1))" as:
  // sub A0, A1; ccmp B0, B1, 0, eq; cmp inv(Cond) flag
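  // E.g. for two pairs, (seteq (or (xor a0, a1), (xor b0, b1)), 0) is rebuilt
  // below as (and (seteq a0, a1), (seteq b0, b1)), which later matches the
  // SUBS + CCMP conjunction pattern.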
  unsigned NumXors = 0;
  if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && isNullConstant(RHS) &&
      LHS->getOpcode() == ISD::OR && LHS->hasOneUse() &&
      isOrXorChain(LHS, NumXors, WorkList)) {
    SDValue XOR0, XOR1;
    std::tie(XOR0, XOR1) = WorkList[0];
    unsigned LogicOp = (Cond == ISD::SETEQ) ? ISD::AND : ISD::OR;
    SDValue Cmp = DAG.getSetCC(DL, VT, XOR0, XOR1, Cond);
    for (unsigned I = 1; I < WorkList.size(); I++) {
      std::tie(XOR0, XOR1) = WorkList[I];
      SDValue CmpChain = DAG.getSetCC(DL, VT, XOR0, XOR1, Cond);
      Cmp = DAG.getNode(LogicOp, DL, VT, Cmp, CmpChain);
    }

    // Exit early with the combined compare, which helps reduce the
    // indentation of the code below.
    return Cmp;
  }

  return SDValue();
}

SDValue AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {

  if (Op.getValueType().isVector())
    return LowerVSETCC(Op, DAG);

  bool IsStrict = Op->isStrictFPOpcode();
  bool IsSignaling = Op.getOpcode() == ISD::STRICT_FSETCCS;
  unsigned OpNo = IsStrict ? 1 : 0;
  SDValue Chain;
  if (IsStrict)
    Chain = Op.getOperand(0);
  SDValue LHS = Op.getOperand(OpNo + 0);
  SDValue RHS = Op.getOperand(OpNo + 1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(OpNo + 2))->get();
  SDLoc dl(Op);

  // We chose ZeroOrOneBooleanContents, so use zero and one.
  EVT VT = Op.getValueType();
  SDValue TVal = DAG.getConstant(1, dl, VT);
  SDValue FVal = DAG.getConstant(0, dl, VT);

  // Handle f128 first, since one possible outcome is a normal integer
  // comparison which gets picked up by the next if statement.
  if (LHS.getValueType() == MVT::f128) {
    softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl, LHS, RHS, Chain,
                        IsSignaling);

    // If softenSetCCOperands returned a scalar, use it.
    if (!RHS.getNode()) {
      assert(LHS.getValueType() == Op.getValueType() &&
             "Unexpected setcc expansion!");
      return IsStrict ? DAG.getMergeValues({LHS, Chain}, dl) : LHS;
    }
  }

  if (LHS.getValueType().isInteger()) {
    SDValue CCVal;
    SDValue Cmp = getAArch64Cmp(
        LHS, RHS, ISD::getSetCCInverse(CC, LHS.getValueType()), CCVal, DAG, dl);

    // Note that we inverted the condition above, so we reverse the order of
    // the true and false operands here. This will allow the setcc to be
    // matched to a single CSINC instruction.
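    // E.g. (seteq x, y) is emitted as SUBS followed by CSINC w0, wzr, wzr, ne
    // (the CSET w0, eq alias): zero is incremented to one when "ne" is false.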
    SDValue Res = DAG.getNode(AArch64ISD::CSEL, dl, VT, FVal, TVal, CCVal, Cmp);
    return IsStrict ? DAG.getMergeValues({Res, Chain}, dl) : Res;
  }

  // Now we know we're dealing with FP values.
  assert(LHS.getValueType() == MVT::f16 || LHS.getValueType() == MVT::f32 ||
         LHS.getValueType() == MVT::f64);

  // If that fails, we'll need to perform an FCMP + CSEL sequence. Go ahead
  // and do the comparison.
  SDValue Cmp;
  if (IsStrict)
    Cmp = emitStrictFPComparison(LHS, RHS, dl, DAG, Chain, IsSignaling);
  else
    Cmp = emitComparison(LHS, RHS, CC, dl, DAG);

  AArch64CC::CondCode CC1, CC2;
  changeFPCCToAArch64CC(CC, CC1, CC2);
  SDValue Res;
  if (CC2 == AArch64CC::AL) {
    changeFPCCToAArch64CC(ISD::getSetCCInverse(CC, LHS.getValueType()), CC1,
                          CC2);
    SDValue CC1Val = DAG.getConstant(CC1, dl, MVT::i32);

    // Note that we inverted the condition above, so we reverse the order of
    // the true and false operands here. This will allow the setcc to be
    // matched to a single CSINC instruction.
    Res = DAG.getNode(AArch64ISD::CSEL, dl, VT, FVal, TVal, CC1Val, Cmp);
  } else {
    // Unfortunately, the mapping of LLVM FP CC's onto AArch64 CC's isn't
    // totally clean. Some of them require two CSELs to implement. As is in
    // this case, we emit the first CSEL and then emit a second using the
    // output of the first as the RHS. We're effectively OR'ing the two CC's
    // together.

    // FIXME: It would be nice if we could match the two CSELs to two CSINCs.
    SDValue CC1Val = DAG.getConstant(CC1, dl, MVT::i32);
    SDValue CS1 =
        DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, FVal, CC1Val, Cmp);

    SDValue CC2Val = DAG.getConstant(CC2, dl, MVT::i32);
    Res = DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, CS1, CC2Val, Cmp);
  }
  return IsStrict ? DAG.getMergeValues({Res, Cmp.getValue(1)}, dl) : Res;
}

SDValue AArch64TargetLowering::LowerSETCCCARRY(SDValue Op,
                                               SelectionDAG &DAG) const {

  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  EVT VT = LHS.getValueType();
  if (VT != MVT::i32 && VT != MVT::i64)
    return SDValue();

  SDLoc DL(Op);
  SDValue Carry = Op.getOperand(2);
  // SBCS uses a carry not a borrow so the carry flag should be inverted first.
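  // (On AArch64, SBCS computes LHS - RHS - (1 - C): the carry flag acts as
  // "not borrow", hence the inversion of the incoming carry below.)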
  SDValue InvCarry = valueToCarryFlag(Carry, DAG, true);
  SDValue Cmp = DAG.getNode(AArch64ISD::SBCS, DL, DAG.getVTList(VT, MVT::Glue),
                            LHS, RHS, InvCarry);

  EVT OpVT = Op.getValueType();
  SDValue TVal = DAG.getConstant(1, DL, OpVT);
  SDValue FVal = DAG.getConstant(0, DL, OpVT);

  ISD::CondCode Cond = cast<CondCodeSDNode>(Op.getOperand(3))->get();
  ISD::CondCode CondInv = ISD::getSetCCInverse(Cond, VT);
  SDValue CCVal =
      DAG.getConstant(changeIntCCToAArch64CC(CondInv), DL, MVT::i32);
  // Inputs are swapped because the condition is inverted. This will allow
  // matching with a single CSINC instruction.
  return DAG.getNode(AArch64ISD::CSEL, DL, OpVT, FVal, TVal, CCVal,
                     Cmp.getValue(1));
}

SDValue AArch64TargetLowering::LowerSELECT_CC(ISD::CondCode CC, SDValue LHS,
                                              SDValue RHS, SDValue TVal,
                                              SDValue FVal, const SDLoc &dl,
                                              SelectionDAG &DAG) const {
  // Handle f128 first, because it will result in a comparison of some RTLIB
  // call result against zero.
  if (LHS.getValueType() == MVT::f128) {
    softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl, LHS, RHS);

    // If softenSetCCOperands returned a scalar, we need to compare the result
    // against zero to select between true and false values.
    if (!RHS.getNode()) {
      RHS = DAG.getConstant(0, dl, LHS.getValueType());
      CC = ISD::SETNE;
    }
  }

  // Also handle f16, for which we need to do a f32 comparison.
  if (LHS.getValueType() == MVT::f16 && !Subtarget->hasFullFP16()) {
    LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, LHS);
    RHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, RHS);
  }

  // Next, handle integers.
  if (LHS.getValueType().isInteger()) {
    assert((LHS.getValueType() == RHS.getValueType()) &&
           (LHS.getValueType() == MVT::i32 || LHS.getValueType() == MVT::i64));

    ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(FVal);
    ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(TVal);
    ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS);
    // Check for sign pattern (SELECT_CC setgt, iN lhs, -1, 1, -1) and
    // transform it into (OR (ASR lhs, N-1), 1), which requires fewer
    // instructions for the supported types.
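    // (ASR by N-1 yields 0 for a non-negative lhs and all-ones for a negative
    // one; OR'ing in 1 then produces exactly 1 or -1.)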
    if (CC == ISD::SETGT && RHSC && RHSC->isAllOnes() && CTVal && CFVal &&
        CTVal->isOne() && CFVal->isAllOnes() &&
        LHS.getValueType() == TVal.getValueType()) {
      EVT VT = LHS.getValueType();
      SDValue Shift =
          DAG.getNode(ISD::SRA, dl, VT, LHS,
                      DAG.getConstant(VT.getSizeInBits() - 1, dl, VT));
      return DAG.getNode(ISD::OR, dl, VT, Shift, DAG.getConstant(1, dl, VT));
    }

    unsigned Opcode = AArch64ISD::CSEL;

    // If both the TVal and the FVal are constants, see if we can swap them in
    // order to form a CSINV or CSINC out of them.
    if (CTVal && CFVal && CTVal->isAllOnes() && CFVal->isZero()) {
      std::swap(TVal, FVal);
      std::swap(CTVal, CFVal);
      CC = ISD::getSetCCInverse(CC, LHS.getValueType());
    } else if (CTVal && CFVal && CTVal->isOne() && CFVal->isZero()) {
      std::swap(TVal, FVal);
      std::swap(CTVal, CFVal);
      CC = ISD::getSetCCInverse(CC, LHS.getValueType());
    } else if (TVal.getOpcode() == ISD::XOR) {
      // If TVal is a NOT we want to swap TVal and FVal so that we can match
      // with a CSINV rather than a CSEL.
      if (isAllOnesConstant(TVal.getOperand(1))) {
        std::swap(TVal, FVal);
        std::swap(CTVal, CFVal);
        CC = ISD::getSetCCInverse(CC, LHS.getValueType());
      }
    } else if (TVal.getOpcode() == ISD::SUB) {
      // If TVal is a negation (SUB from 0) we want to swap TVal and FVal so
      // that we can match with a CSNEG rather than a CSEL.
      if (isNullConstant(TVal.getOperand(0))) {
        std::swap(TVal, FVal);
        std::swap(CTVal, CFVal);
        CC = ISD::getSetCCInverse(CC, LHS.getValueType());
      }
    } else if (CTVal && CFVal) {
      const int64_t TrueVal = CTVal->getSExtValue();
      const int64_t FalseVal = CFVal->getSExtValue();
      bool Swap = false;

      // If both TVal and FVal are constants, see if FVal is the
      // inverse/negation/increment of TVal and generate a CSINV/CSNEG/CSINC
      // instead of a CSEL in that case.
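      // E.g. the pair {5, -6} maps to CSINV (since ~5 == -6), {5, -5} to
      // CSNEG, and {5, 6} to CSINC; a plain CSEL would need both constants
      // materialized in registers.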
      if (TrueVal == ~FalseVal) {
        Opcode = AArch64ISD::CSINV;
      } else if (FalseVal > std::numeric_limits<int64_t>::min() &&
                 TrueVal == -FalseVal) {
        Opcode = AArch64ISD::CSNEG;
      } else if (TVal.getValueType() == MVT::i32) {
        // If our operands are only 32-bit wide, make sure we use 32-bit
        // arithmetic for the check whether we can use CSINC. This ensures that
        // the addition in the check will wrap around properly in case there is
        // an overflow (which would not be the case if we do the check with
        // 64-bit arithmetic).
        const uint32_t TrueVal32 = CTVal->getZExtValue();
        const uint32_t FalseVal32 = CFVal->getZExtValue();

        if ((TrueVal32 == FalseVal32 + 1) || (TrueVal32 + 1 == FalseVal32)) {
          Opcode = AArch64ISD::CSINC;

          if (TrueVal32 > FalseVal32) {
            Swap = true;
          }
        }
      } else {
        // 64-bit check whether we can use CSINC.
        const uint64_t TrueVal64 = TrueVal;
        const uint64_t FalseVal64 = FalseVal;

        if ((TrueVal64 == FalseVal64 + 1) || (TrueVal64 + 1 == FalseVal64)) {
          Opcode = AArch64ISD::CSINC;

          if (TrueVal > FalseVal) {
            Swap = true;
          }
        }
      }

      // Swap TVal and FVal if necessary.
      if (Swap) {
        std::swap(TVal, FVal);
        std::swap(CTVal, CFVal);
        CC = ISD::getSetCCInverse(CC, LHS.getValueType());
      }

      if (Opcode != AArch64ISD::CSEL) {
        // Drop FVal since we can get its value by simply inverting/negating
        // TVal.
        FVal = TVal;
      }
    }

    // Avoid materializing a constant when possible by reusing a known value in
    // a register. However, don't perform this optimization if the known value
    // is one, zero or negative one in the case of a CSEL. We can always
    // materialize these values using CSINC, CSEL and CSINV with wzr/xzr as the
    // FVal, respectively.
    ConstantSDNode *RHSVal = dyn_cast<ConstantSDNode>(RHS);
    if (Opcode == AArch64ISD::CSEL && RHSVal && !RHSVal->isOne() &&
        !RHSVal->isZero() && !RHSVal->isAllOnes()) {
      AArch64CC::CondCode AArch64CC = changeIntCCToAArch64CC(CC);
      // Transform "a == C ? C : x" to "a == C ? a : x" and "a != C ? x : C" to
      // "a != C ? x : a" to avoid materializing C.
      if (CTVal && CTVal == RHSVal && AArch64CC == AArch64CC::EQ)
        TVal = LHS;
      else if (CFVal && CFVal == RHSVal && AArch64CC == AArch64CC::NE)
        FVal = LHS;
    } else if (Opcode == AArch64ISD::CSNEG && RHSVal && RHSVal->isOne()) {
      assert(CTVal && CFVal && "Expected constant operands for CSNEG.");
      // Use a CSINV to transform "a == C ? 1 : -1" to "a == C ? a : -1" to
      // avoid materializing C.
      AArch64CC::CondCode AArch64CC = changeIntCCToAArch64CC(CC);
      if (CTVal == RHSVal && AArch64CC == AArch64CC::EQ) {
        Opcode = AArch64ISD::CSINV;
        TVal = LHS;
        FVal = DAG.getConstant(0, dl, FVal.getValueType());
      }
    }

    SDValue CCVal;
    SDValue Cmp = getAArch64Cmp(LHS, RHS, CC, CCVal, DAG, dl);
    EVT VT = TVal.getValueType();
    return DAG.getNode(Opcode, dl, VT, TVal, FVal, CCVal, Cmp);
  }

  // Now we know we're dealing with FP values.
  assert(LHS.getValueType() == MVT::f16 || LHS.getValueType() == MVT::f32 ||
         LHS.getValueType() == MVT::f64);
  assert(LHS.getValueType() == RHS.getValueType());
  EVT VT = TVal.getValueType();
  SDValue Cmp = emitComparison(LHS, RHS, CC, dl, DAG);

  // Unfortunately, the mapping of LLVM FP CC's onto AArch64 CC's isn't totally
  // clean. Some of them require two CSELs to implement.
  AArch64CC::CondCode CC1, CC2;
  changeFPCCToAArch64CC(CC, CC1, CC2);

  if (DAG.getTarget().Options.UnsafeFPMath) {
    // Transform "a == 0.0 ? 0.0 : x" to "a == 0.0 ? a : x" and
    // "a != 0.0 ? x : 0.0" to "a != 0.0 ? x : a" to avoid materializing 0.0.
    ConstantFPSDNode *RHSVal = dyn_cast<ConstantFPSDNode>(RHS);
    if (RHSVal && RHSVal->isZero()) {
      ConstantFPSDNode *CFVal = dyn_cast<ConstantFPSDNode>(FVal);
      ConstantFPSDNode *CTVal = dyn_cast<ConstantFPSDNode>(TVal);

      if ((CC == ISD::SETEQ || CC == ISD::SETOEQ || CC == ISD::SETUEQ) &&
          CTVal && CTVal->isZero() && TVal.getValueType() == LHS.getValueType())
        TVal = LHS;
      else if ((CC == ISD::SETNE || CC == ISD::SETONE || CC == ISD::SETUNE) &&
               CFVal && CFVal->isZero() &&
               FVal.getValueType() == LHS.getValueType())
        FVal = LHS;
    }
  }

  // Emit first, and possibly only, CSEL.
  SDValue CC1Val = DAG.getConstant(CC1, dl, MVT::i32);
  SDValue CS1 = DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, FVal, CC1Val, Cmp);

  // If we need a second CSEL, emit it, using the output of the first as the
  // RHS. We're effectively OR'ing the two CC's together.
  if (CC2 != AArch64CC::AL) {
    SDValue CC2Val = DAG.getConstant(CC2, dl, MVT::i32);
    return DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, CS1, CC2Val, Cmp);
  }

  // Otherwise, return the output of the first CSEL.
  return CS1;
}

SDValue AArch64TargetLowering::LowerVECTOR_SPLICE(SDValue Op,
                                                  SelectionDAG &DAG) const {
  EVT Ty = Op.getValueType();
  auto Idx = Op.getConstantOperandAPInt(2);
  int64_t IdxVal = Idx.getSExtValue();
  assert(Ty.isScalableVector() &&
         "Only expect scalable vectors for custom lowering of VECTOR_SPLICE");

  // We can use the splice instruction for certain index values where we are
  // able to efficiently generate the correct predicate. The index will be
  // inverted and used directly as the input to the ptrue instruction, i.e.
  // -1 -> vl1, -2 -> vl2, etc. The predicate will then be reversed to get the
  // splice predicate. However, we can only do this if we can guarantee that
  // there are enough elements in the vector, hence we check the index <= min
  // number of elements.
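  // E.g. splice(a, b, -2) on nxv4i32 becomes, roughly:
  //   ptrue  p0.s, vl2             // first two lanes active
  //   rev    p0.s, p0.s            // ...moved to the last two lanes
  //   splice z0.s, p0, z0.s, z1.s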
  std::optional<unsigned> PredPattern;
  if (Ty.isScalableVector() && IdxVal < 0 &&
      (PredPattern = getSVEPredPatternFromNumElements(std::abs(IdxVal))) !=
          std::nullopt) {
    SDLoc DL(Op);

    // Create a predicate where all but the last -IdxVal elements are false.
    EVT PredVT = Ty.changeVectorElementType(MVT::i1);
    SDValue Pred = getPTrue(DAG, DL, PredVT, *PredPattern);
    Pred = DAG.getNode(ISD::VECTOR_REVERSE, DL, PredVT, Pred);

    // Now splice the two inputs together using the predicate.
    return DAG.getNode(AArch64ISD::SPLICE, DL, Ty, Pred, Op.getOperand(0),
                       Op.getOperand(1));
  }

  // This will select to an EXT instruction, which has a maximum immediate
  // value of 255, hence 2048-bits is the maximum value we can lower.
  if (IdxVal >= 0 &&
      IdxVal < int64_t(2048 / Ty.getVectorElementType().getSizeInBits()))
    return Op;

  return SDValue();
}

SDValue AArch64TargetLowering::LowerSELECT_CC(SDValue Op,
                                              SelectionDAG &DAG) const {
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue TVal = Op.getOperand(2);
  SDValue FVal = Op.getOperand(3);
  SDLoc DL(Op);
  return LowerSELECT_CC(CC, LHS, RHS, TVal, FVal, DL, DAG);
}

SDValue AArch64TargetLowering::LowerSELECT(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDValue CCVal = Op->getOperand(0);
  SDValue TVal = Op->getOperand(1);
  SDValue FVal = Op->getOperand(2);
  SDLoc DL(Op);

  EVT Ty = Op.getValueType();
  if (Ty.isScalableVector()) {
    SDValue TruncCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, CCVal);
    MVT PredVT = MVT::getVectorVT(MVT::i1, Ty.getVectorElementCount());
    SDValue SplatPred = DAG.getNode(ISD::SPLAT_VECTOR, DL, PredVT, TruncCC);
    return DAG.getNode(ISD::VSELECT, DL, Ty, SplatPred, TVal, FVal);
  }

  if (useSVEForFixedLengthVectorVT(Ty)) {
    // FIXME: Ideally this would be the same as above using i1 types, however
    // for the moment we can't deal with fixed i1 vector types properly, so
    // instead extend the predicate to a result type sized integer vector.
    MVT SplatValVT = MVT::getIntegerVT(Ty.getScalarSizeInBits());
    MVT PredVT = MVT::getVectorVT(SplatValVT, Ty.getVectorElementCount());
    SDValue SplatVal = DAG.getSExtOrTrunc(CCVal, DL, SplatValVT);
    SDValue SplatPred = DAG.getNode(ISD::SPLAT_VECTOR, DL, PredVT, SplatVal);
    return DAG.getNode(ISD::VSELECT, DL, Ty, SplatPred, TVal, FVal);
  }

  // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a select
  // instruction.
  if (ISD::isOverflowIntrOpRes(CCVal)) {
    // Only lower legal XALUO ops.
    if (!DAG.getTargetLoweringInfo().isTypeLegal(CCVal->getValueType(0)))
      return SDValue();

    AArch64CC::CondCode OFCC;
    SDValue Value, Overflow;
    std::tie(Value, Overflow) = getAArch64XALUOOp(OFCC, CCVal.getValue(0), DAG);
    SDValue CCVal = DAG.getConstant(OFCC, DL, MVT::i32);

    return DAG.getNode(AArch64ISD::CSEL, DL, Op.getValueType(), TVal, FVal,
                       CCVal, Overflow);
  }

  // Lower it the same way as we would lower a SELECT_CC node.
  ISD::CondCode CC;
  SDValue LHS, RHS;
  if (CCVal.getOpcode() == ISD::SETCC) {
    LHS = CCVal.getOperand(0);
    RHS = CCVal.getOperand(1);
    CC = cast<CondCodeSDNode>(CCVal.getOperand(2))->get();
  } else {
    LHS = CCVal;
    RHS = DAG.getConstant(0, DL, CCVal.getValueType());
    CC = ISD::SETNE;
  }

  // If we are lowering an f16 and do not have FullFP16, promote to an f32 so
  // that we can use FCSELSrrr.
  if ((Ty == MVT::f16 || Ty == MVT::bf16) && !Subtarget->hasFullFP16()) {
    TVal = SDValue(
        DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, DL, MVT::f32,
                           DAG.getUNDEF(MVT::f32), TVal,
                           DAG.getTargetConstant(AArch64::hsub, DL, MVT::i32)),
        0);
    FVal = SDValue(
        DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, DL, MVT::f32,
                           DAG.getUNDEF(MVT::f32), FVal,
                           DAG.getTargetConstant(AArch64::hsub, DL, MVT::i32)),
        0);
  }

  SDValue Res = LowerSELECT_CC(CC, LHS, RHS, TVal, FVal, DL, DAG);

  if ((Ty == MVT::f16 || Ty == MVT::bf16) && !Subtarget->hasFullFP16()) {
    Res = SDValue(
        DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, Ty, Res,
                           DAG.getTargetConstant(AArch64::hsub, DL, MVT::i32)),
        0);
  }

  return Res;
}

SDValue AArch64TargetLowering::LowerJumpTable(SDValue Op,
                                              SelectionDAG &DAG) const {
  // Jump table entries as PC relative offsets. No additional tweaking
  // is necessary here. Just get the address of the jump table.
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);

  if (getTargetMachine().getCodeModel() == CodeModel::Large &&
      !Subtarget->isTargetMachO()) {
    return getAddrLarge(JT, DAG);
  } else if (getTargetMachine().getCodeModel() == CodeModel::Tiny) {
    return getAddrTiny(JT, DAG);
  }
  return getAddr(JT, DAG);
}

SDValue AArch64TargetLowering::LowerBR_JT(SDValue Op,
                                          SelectionDAG &DAG) const {
  // Jump table entries as PC relative offsets. No additional tweaking
  // is necessary here. Just get the address of the jump table.
  SDLoc DL(Op);
  SDValue JT = Op.getOperand(1);
  SDValue Entry = Op.getOperand(2);
  int JTI = cast<JumpTableSDNode>(JT.getNode())->getIndex();

  auto *AFI = DAG.getMachineFunction().getInfo<AArch64FunctionInfo>();
  AFI->setJumpTableEntryInfo(JTI, 4, nullptr);

  SDNode *Dest =
      DAG.getMachineNode(AArch64::JumpTableDest32, DL, MVT::i64, MVT::i64, JT,
                         Entry, DAG.getTargetJumpTable(JTI, MVT::i32));
  return DAG.getNode(ISD::BRIND, DL, MVT::Other, Op.getOperand(0),
                     SDValue(Dest, 0));
}

SDValue AArch64TargetLowering::LowerConstantPool(SDValue Op,
                                                 SelectionDAG &DAG) const {
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);

  if (getTargetMachine().getCodeModel() == CodeModel::Large) {
    // Use the GOT for the large code model on iOS.
    if (Subtarget->isTargetMachO()) {
      return getGOT(CP, DAG);
    }
    return getAddrLarge(CP, DAG);
  } else if (getTargetMachine().getCodeModel() == CodeModel::Tiny) {
    return getAddrTiny(CP, DAG);
  } else {
    return getAddr(CP, DAG);
  }
}

SDValue AArch64TargetLowering::LowerBlockAddress(SDValue Op,
                                                 SelectionDAG &DAG) const {
  BlockAddressSDNode *BA = cast<BlockAddressSDNode>(Op);
  if (getTargetMachine().getCodeModel() == CodeModel::Large &&
      !Subtarget->isTargetMachO()) {
    return getAddrLarge(BA, DAG);
  } else if (getTargetMachine().getCodeModel() == CodeModel::Tiny) {
    return getAddrTiny(BA, DAG);
  }
  return getAddr(BA, DAG);
}

SDValue AArch64TargetLowering::LowerDarwin_VASTART(SDValue Op,
                                                   SelectionDAG &DAG) const {
  AArch64FunctionInfo *FuncInfo =
      DAG.getMachineFunction().getInfo<AArch64FunctionInfo>();

  SDLoc DL(Op);
  SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsStackIndex(),
                                 getPointerTy(DAG.getDataLayout()));
  FR = DAG.getZExtOrTrunc(FR, DL, getPointerMemTy(DAG.getDataLayout()));
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
                      MachinePointerInfo(SV));
}

SDValue AArch64TargetLowering::LowerWin64_VASTART(SDValue Op,
                                                  SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();

  SDLoc DL(Op);
  SDValue FR;
  if (Subtarget->isWindowsArm64EC()) {
    // With the Arm64EC ABI, we compute the address of the varargs save area
    // relative to x4. For a normal AArch64->AArch64 call, x4 == sp on entry,
    // but calls from an entry thunk can pass in a different address.
    Register VReg = MF.addLiveIn(AArch64::X4, &AArch64::GPR64RegClass);
    SDValue Val = DAG.getCopyFromReg(DAG.getEntryNode(), DL, VReg, MVT::i64);
    uint64_t StackOffset;
    if (FuncInfo->getVarArgsGPRSize() > 0)
      StackOffset = -(uint64_t)FuncInfo->getVarArgsGPRSize();
    else
      StackOffset = FuncInfo->getVarArgsStackOffset();
    FR = DAG.getNode(ISD::ADD, DL, MVT::i64, Val,
                     DAG.getConstant(StackOffset, DL, MVT::i64));
  } else {
    FR = DAG.getFrameIndex(FuncInfo->getVarArgsGPRSize() > 0
                               ? FuncInfo->getVarArgsGPRIndex()
                               : FuncInfo->getVarArgsStackIndex(),
                           getPointerTy(DAG.getDataLayout()));
  }
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
9289 | return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1), | ||||
9290 | MachinePointerInfo(SV)); | ||||
9291 | } | ||||
9292 | |||||
9293 | SDValue AArch64TargetLowering::LowerAAPCS_VASTART(SDValue Op, | ||||
9294 | SelectionDAG &DAG) const { | ||||
9295 | // The layout of the va_list struct is specified in the AArch64 Procedure Call | ||||
9296 | // Standard, section B.3. | ||||
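| // For reference, the layout that implies (matching the field-by-field | ||||
| // stores below) is roughly: | ||||
| //   struct va_list { | ||||
| //     void *__stack;   // next stacked argument | ||||
| //     void *__gr_top;  // end of the GP register save area | ||||
| //     void *__vr_top;  // end of the FP/SIMD register save area | ||||
| //     int __gr_offs;   // negative offset from __gr_top to the next GP arg | ||||
| //     int __vr_offs;   // negative offset from __vr_top to the next FP arg | ||||
| //   }; | ||||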
9297 | MachineFunction &MF = DAG.getMachineFunction(); | ||||
9298 | AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>(); | ||||
9299 | unsigned PtrSize = Subtarget->isTargetILP32() ? 4 : 8; | ||||
9300 | auto PtrMemVT = getPointerMemTy(DAG.getDataLayout()); | ||||
9301 | auto PtrVT = getPointerTy(DAG.getDataLayout()); | ||||
9302 | SDLoc DL(Op); | ||||
9303 | |||||
9304 | SDValue Chain = Op.getOperand(0); | ||||
9305 | SDValue VAList = Op.getOperand(1); | ||||
9306 | const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); | ||||
9307 | SmallVector<SDValue, 4> MemOps; | ||||
9308 | |||||
9309 | // void *__stack at offset 0 | ||||
9310 | unsigned Offset = 0; | ||||
9311 | SDValue Stack = DAG.getFrameIndex(FuncInfo->getVarArgsStackIndex(), PtrVT); | ||||
9312 | Stack = DAG.getZExtOrTrunc(Stack, DL, PtrMemVT); | ||||
9313 | MemOps.push_back(DAG.getStore(Chain, DL, Stack, VAList, | ||||
9314 | MachinePointerInfo(SV), Align(PtrSize))); | ||||
9315 | |||||
9316 | // void *__gr_top at offset 8 (4 on ILP32) | ||||
9317 | Offset += PtrSize; | ||||
9318 | int GPRSize = FuncInfo->getVarArgsGPRSize(); | ||||
9319 | if (GPRSize > 0) { | ||||
9320 | SDValue GRTop, GRTopAddr; | ||||
9321 | |||||
9322 | GRTopAddr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList, | ||||
9323 | DAG.getConstant(Offset, DL, PtrVT)); | ||||
9324 | |||||
9325 | GRTop = DAG.getFrameIndex(FuncInfo->getVarArgsGPRIndex(), PtrVT); | ||||
9326 | GRTop = DAG.getNode(ISD::ADD, DL, PtrVT, GRTop, | ||||
9327 | DAG.getConstant(GPRSize, DL, PtrVT)); | ||||
9328 | GRTop = DAG.getZExtOrTrunc(GRTop, DL, PtrMemVT); | ||||
9329 | |||||
9330 | MemOps.push_back(DAG.getStore(Chain, DL, GRTop, GRTopAddr, | ||||
9331 | MachinePointerInfo(SV, Offset), | ||||
9332 | Align(PtrSize))); | ||||
9333 | } | ||||
9334 | |||||
9335 | // void *__vr_top at offset 16 (8 on ILP32) | ||||
9336 | Offset += PtrSize; | ||||
9337 | int FPRSize = FuncInfo->getVarArgsFPRSize(); | ||||
9338 | if (FPRSize > 0) { | ||||
9339 | SDValue VRTop, VRTopAddr; | ||||
9340 | VRTopAddr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList, | ||||
9341 | DAG.getConstant(Offset, DL, PtrVT)); | ||||
9342 | |||||
9343 | VRTop = DAG.getFrameIndex(FuncInfo->getVarArgsFPRIndex(), PtrVT); | ||||
9344 | VRTop = DAG.getNode(ISD::ADD, DL, PtrVT, VRTop, | ||||
9345 | DAG.getConstant(FPRSize, DL, PtrVT)); | ||||
9346 | VRTop = DAG.getZExtOrTrunc(VRTop, DL, PtrMemVT); | ||||
9347 | |||||
9348 | MemOps.push_back(DAG.getStore(Chain, DL, VRTop, VRTopAddr, | ||||
9349 | MachinePointerInfo(SV, Offset), | ||||
9350 | Align(PtrSize))); | ||||
9351 | } | ||||
9352 | |||||
9353 | // int __gr_offs at offset 24 (12 on ILP32) | ||||
9354 | Offset += PtrSize; | ||||
9355 | SDValue GROffsAddr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList, | ||||
9356 | DAG.getConstant(Offset, DL, PtrVT)); | ||||
9357 | MemOps.push_back( | ||||
9358 | DAG.getStore(Chain, DL, DAG.getConstant(-GPRSize, DL, MVT::i32), | ||||
9359 | GROffsAddr, MachinePointerInfo(SV, Offset), Align(4))); | ||||
9360 | |||||
9361 | // int __vr_offs at offset 28 (16 on ILP32) | ||||
9362 | Offset += 4; | ||||
9363 | SDValue VROffsAddr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList, | ||||
9364 | DAG.getConstant(Offset, DL, PtrVT)); | ||||
9365 | MemOps.push_back( | ||||
9366 | DAG.getStore(Chain, DL, DAG.getConstant(-FPRSize, DL, MVT::i32), | ||||
9367 | VROffsAddr, MachinePointerInfo(SV, Offset), Align(4))); | ||||
9368 | |||||
9369 | return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps); | ||||
9370 | } | ||||
9371 | |||||
9372 | SDValue AArch64TargetLowering::LowerVASTART(SDValue Op, | ||||
9373 | SelectionDAG &DAG) const { | ||||
9374 | MachineFunction &MF = DAG.getMachineFunction(); | ||||
9375 | |||||
9376 | if (Subtarget->isCallingConvWin64(MF.getFunction().getCallingConv())) | ||||
9377 | return LowerWin64_VASTART(Op, DAG); | ||||
9378 | else if (Subtarget->isTargetDarwin()) | ||||
9379 | return LowerDarwin_VASTART(Op, DAG); | ||||
9380 | else | ||||
9381 | return LowerAAPCS_VASTART(Op, DAG); | ||||
9382 | } | ||||
9383 | |||||
9384 | SDValue AArch64TargetLowering::LowerVACOPY(SDValue Op, | ||||
9385 | SelectionDAG &DAG) const { | ||||
9386 | // The AAPCS va_list has three pointers and two ints (= 32 bytes); the | ||||
9387 | // Darwin va_list is a single pointer. | ||||
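| // Arithmetic check (illustrative): with 8-byte pointers that is 3 * 8 + | ||||
| // 2 * 4 = 32 bytes; on ILP32, 3 * 4 + 2 * 4 = 20 bytes, matching the | ||||
| // VaListSize selection below. | ||||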
9388 | SDLoc DL(Op); | ||||
9389 | unsigned PtrSize = Subtarget->isTargetILP32() ? 4 : 8; | ||||
9390 | unsigned VaListSize = | ||||
9391 | (Subtarget->isTargetDarwin() || Subtarget->isTargetWindows()) | ||||
9392 | ? PtrSize | ||||
9393 | : Subtarget->isTargetILP32() ? 20 : 32; | ||||
9394 | const Value *DestSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue(); | ||||
9395 | const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); | ||||
9396 | |||||
9397 | return DAG.getMemcpy(Op.getOperand(0), DL, Op.getOperand(1), Op.getOperand(2), | ||||
9398 | DAG.getConstant(VaListSize, DL, MVT::i32), | ||||
9399 | Align(PtrSize), false, false, false, | ||||
9400 | MachinePointerInfo(DestSV), MachinePointerInfo(SrcSV)); | ||||
9401 | } | ||||
9402 | |||||
9403 | SDValue AArch64TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const { | ||||
9404 | assert(Subtarget->isTargetDarwin() && | ||||
9405 | "automatic va_arg instruction only works on Darwin"); | ||||
9406 | |||||
9407 | const Value *V = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); | ||||
9408 | EVT VT = Op.getValueType(); | ||||
9409 | SDLoc DL(Op); | ||||
9410 | SDValue Chain = Op.getOperand(0); | ||||
9411 | SDValue Addr = Op.getOperand(1); | ||||
9412 | MaybeAlign Align(Op.getConstantOperandVal(3)); | ||||
9413 | unsigned MinSlotSize = Subtarget->isTargetILP32() ? 4 : 8; | ||||
9414 | auto PtrVT = getPointerTy(DAG.getDataLayout()); | ||||
9415 | auto PtrMemVT = getPointerMemTy(DAG.getDataLayout()); | ||||
9416 | SDValue VAList = | ||||
9417 | DAG.getLoad(PtrMemVT, DL, Chain, Addr, MachinePointerInfo(V)); | ||||
9418 | Chain = VAList.getValue(1); | ||||
9419 | VAList = DAG.getZExtOrTrunc(VAList, DL, PtrVT); | ||||
9420 | |||||
9421 | if (VT.isScalableVector()) | ||||
9422 | report_fatal_error("Passing SVE types to variadic functions is " | ||||
9423 | "currently not supported"); | ||||
9424 | |||||
9425 | if (Align && *Align > MinSlotSize) { | ||||
9426 | VAList = DAG.getNode(ISD::ADD, DL, PtrVT, VAList, | ||||
9427 | DAG.getConstant(Align->value() - 1, DL, PtrVT)); | ||||
9428 | VAList = DAG.getNode(ISD::AND, DL, PtrVT, VAList, | ||||
9429 | DAG.getConstant(-(int64_t)Align->value(), DL, PtrVT)); | ||||
9430 | } | ||||
9431 | |||||
9432 | Type *ArgTy = VT.getTypeForEVT(*DAG.getContext()); | ||||
9433 | unsigned ArgSize = DAG.getDataLayout().getTypeAllocSize(ArgTy); | ||||
9434 | |||||
9435 | // Scalar integer and FP values smaller than 64 bits are implicitly extended | ||||
9436 | // up to 64 bits. At the very least, we have to increase the striding of the | ||||
9437 | // vaargs list to match this, and for FP values we need to introduce | ||||
9438 | // FP_ROUND nodes as well. | ||||
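| // For example, an f32 vararg still occupies an 8-byte slot: it is loaded | ||||
| // below as an f64 and then narrowed back to f32 with FP_ROUND. | ||||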
9439 | if (VT.isInteger() && !VT.isVector()) | ||||
9440 | ArgSize = std::max(ArgSize, MinSlotSize); | ||||
9441 | bool NeedFPTrunc = false; | ||||
9442 | if (VT.isFloatingPoint() && !VT.isVector() && VT != MVT::f64) { | ||||
9443 | ArgSize = 8; | ||||
9444 | NeedFPTrunc = true; | ||||
9445 | } | ||||
9446 | |||||
9447 | // Increment the pointer, VAList, to the next vaarg | ||||
9448 | SDValue VANext = DAG.getNode(ISD::ADD, DL, PtrVT, VAList, | ||||
9449 | DAG.getConstant(ArgSize, DL, PtrVT)); | ||||
9450 | VANext = DAG.getZExtOrTrunc(VANext, DL, PtrMemVT); | ||||
9451 | |||||
9452 | // Store the incremented VAList to the legalized pointer | ||||
9453 | SDValue APStore = | ||||
9454 | DAG.getStore(Chain, DL, VANext, Addr, MachinePointerInfo(V)); | ||||
9455 | |||||
9456 | // Load the actual argument out of the pointer VAList | ||||
9457 | if (NeedFPTrunc) { | ||||
9458 | // Load the value as an f64. | ||||
9459 | SDValue WideFP = | ||||
9460 | DAG.getLoad(MVT::f64, DL, APStore, VAList, MachinePointerInfo()); | ||||
9461 | // Round the value down to an f32. | ||||
9462 | SDValue NarrowFP = | ||||
9463 | DAG.getNode(ISD::FP_ROUND, DL, VT, WideFP.getValue(0), | ||||
9464 | DAG.getIntPtrConstant(1, DL, /*isTarget=*/true)); | ||||
9465 | SDValue Ops[] = { NarrowFP, WideFP.getValue(1) }; | ||||
9466 | // Merge the rounded value with the chain output of the load. | ||||
9467 | return DAG.getMergeValues(Ops, DL); | ||||
9468 | } | ||||
9469 | |||||
9470 | return DAG.getLoad(VT, DL, APStore, VAList, MachinePointerInfo()); | ||||
9471 | } | ||||
9472 | |||||
9473 | SDValue AArch64TargetLowering::LowerFRAMEADDR(SDValue Op, | ||||
9474 | SelectionDAG &DAG) const { | ||||
9475 | MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); | ||||
9476 | MFI.setFrameAddressIsTaken(true); | ||||
9477 | |||||
9478 | EVT VT = Op.getValueType(); | ||||
9479 | SDLoc DL(Op); | ||||
9480 | unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); | ||||
9481 | SDValue FrameAddr = | ||||
9482 | DAG.getCopyFromReg(DAG.getEntryNode(), DL, AArch64::FP, MVT::i64); | ||||
9483 | while (Depth--) | ||||
9484 | FrameAddr = DAG.getLoad(VT, DL, DAG.getEntryNode(), FrameAddr, | ||||
9485 | MachinePointerInfo()); | ||||
9486 | |||||
9487 | if (Subtarget->isTargetILP32()) | ||||
9488 | FrameAddr = DAG.getNode(ISD::AssertZext, DL, MVT::i64, FrameAddr, | ||||
9489 | DAG.getValueType(VT)); | ||||
9490 | |||||
9491 | return FrameAddr; | ||||
9492 | } | ||||
9493 | |||||
9494 | SDValue AArch64TargetLowering::LowerSPONENTRY(SDValue Op, | ||||
9495 | SelectionDAG &DAG) const { | ||||
9496 | MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); | ||||
9497 | |||||
9498 | EVT VT = getPointerTy(DAG.getDataLayout()); | ||||
9499 | SDLoc DL(Op); | ||||
9500 | int FI = MFI.CreateFixedObject(4, 0, false); | ||||
9501 | return DAG.getFrameIndex(FI, VT); | ||||
9502 | } | ||||
9503 | |||||
9504 | #define GET_REGISTER_MATCHER | ||||
9505 | #include "AArch64GenAsmMatcher.inc" | ||||
9506 | |||||
9507 | // FIXME? Maybe this could be a TableGen attribute on some registers and | ||||
9508 | // this table could be generated automatically from RegInfo. | ||||
9509 | Register AArch64TargetLowering:: | ||||
9510 | getRegisterByName(const char* RegName, LLT VT, const MachineFunction &MF) const { | ||||
9511 | Register Reg = MatchRegisterName(RegName); | ||||
9512 | if (AArch64::X1 <= Reg && Reg <= AArch64::X28) { | ||||
9513 | const MCRegisterInfo *MRI = Subtarget->getRegisterInfo(); | ||||
9514 | unsigned DwarfRegNum = MRI->getDwarfRegNum(Reg, false); | ||||
9515 | if (!Subtarget->isXRegisterReserved(DwarfRegNum)) | ||||
9516 | Reg = 0; | ||||
9517 | } | ||||
9518 | if (Reg) | ||||
9519 | return Reg; | ||||
9520 | report_fatal_error(Twine("Invalid register name \"" | ||||
9521 | + StringRef(RegName) + "\".")); | ||||
9522 | } | ||||
9523 | |||||
9524 | SDValue AArch64TargetLowering::LowerADDROFRETURNADDR(SDValue Op, | ||||
9525 | SelectionDAG &DAG) const { | ||||
9526 | DAG.getMachineFunction().getFrameInfo().setFrameAddressIsTaken(true); | ||||
9527 | |||||
9528 | EVT VT = Op.getValueType(); | ||||
9529 | SDLoc DL(Op); | ||||
9530 | |||||
9531 | SDValue FrameAddr = | ||||
9532 | DAG.getCopyFromReg(DAG.getEntryNode(), DL, AArch64::FP, VT); | ||||
9533 | SDValue Offset = DAG.getConstant(8, DL, getPointerTy(DAG.getDataLayout())); | ||||
9534 | |||||
9535 | return DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset); | ||||
9536 | } | ||||
9537 | |||||
9538 | SDValue AArch64TargetLowering::LowerRETURNADDR(SDValue Op, | ||||
9539 | SelectionDAG &DAG) const { | ||||
9540 | MachineFunction &MF = DAG.getMachineFunction(); | ||||
9541 | MachineFrameInfo &MFI = MF.getFrameInfo(); | ||||
9542 | MFI.setReturnAddressIsTaken(true); | ||||
9543 | |||||
9544 | EVT VT = Op.getValueType(); | ||||
9545 | SDLoc DL(Op); | ||||
9546 | unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); | ||||
9547 | SDValue ReturnAddress; | ||||
9548 | if (Depth) { | ||||
9549 | SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); | ||||
9550 | SDValue Offset = DAG.getConstant(8, DL, getPointerTy(DAG.getDataLayout())); | ||||
9551 | ReturnAddress = DAG.getLoad( | ||||
9552 | VT, DL, DAG.getEntryNode(), | ||||
9553 | DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset), MachinePointerInfo()); | ||||
9554 | } else { | ||||
9555 | // Return LR, which contains the return address. Mark it an implicit | ||||
9556 | // live-in. | ||||
9557 | Register Reg = MF.addLiveIn(AArch64::LR, &AArch64::GPR64RegClass); | ||||
9558 | ReturnAddress = DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, VT); | ||||
9559 | } | ||||
9560 | |||||
9561 | // The XPACLRI instruction assembles to a hint-space instruction before | ||||
9562 | // Armv8.3-A, and can therefore be used safely on any pre-Armv8.3-A | ||||
9563 | // architecture. On Armv8.3-A and onwards XPACI is available, so use | ||||
9564 | // that instead. | ||||
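| // (XPACLRI sits in the HINT encoding space, so pre-Armv8.3-A cores should | ||||
| // execute it as a NOP and simply leave LR unchanged.) | ||||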
9565 | SDNode *St; | ||||
9566 | if (Subtarget->hasPAuth()) { | ||||
9567 | St = DAG.getMachineNode(AArch64::XPACI, DL, VT, ReturnAddress); | ||||
9568 | } else { | ||||
9569 | // XPACLRI operates on LR therefore we must move the operand accordingly. | ||||
9570 | SDValue Chain = | ||||
9571 | DAG.getCopyToReg(DAG.getEntryNode(), DL, AArch64::LR, ReturnAddress); | ||||
9572 | St = DAG.getMachineNode(AArch64::XPACLRI, DL, VT, Chain); | ||||
9573 | } | ||||
9574 | return SDValue(St, 0); | ||||
9575 | } | ||||
9576 | |||||
9577 | /// LowerShiftParts - Lower SHL_PARTS/SRA_PARTS/SRL_PARTS, which return two | ||||
9578 | /// i32 values and take a 2 x i32 value to shift, plus a shift amount. | ||||
9579 | SDValue AArch64TargetLowering::LowerShiftParts(SDValue Op, | ||||
9580 | SelectionDAG &DAG) const { | ||||
9581 | SDValue Lo, Hi; | ||||
9582 | expandShiftParts(Op.getNode(), Lo, Hi, DAG); | ||||
9583 | return DAG.getMergeValues({Lo, Hi}, SDLoc(Op)); | ||||
9584 | } | ||||
9585 | |||||
9586 | bool AArch64TargetLowering::isOffsetFoldingLegal( | ||||
9587 | const GlobalAddressSDNode *GA) const { | ||||
9588 | // Offsets are folded in the DAG combine rather than here so that we can | ||||
9589 | // intelligently choose an offset based on the uses. | ||||
9590 | return false; | ||||
9591 | } | ||||
9592 | |||||
9593 | bool AArch64TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT, | ||||
9594 | bool OptForSize) const { | ||||
9595 | bool IsLegal = false; | ||||
9596 | // We can materialize #0.0 as fmov $Rd, XZR for the 64-bit and 32-bit cases, | ||||
9597 | // and for the 16-bit case when the target has full fp16 support. | ||||
9598 | // FIXME: We should be able to handle f128 as well with a clever lowering. | ||||
9599 | const APInt ImmInt = Imm.bitcastToAPInt(); | ||||
9600 | if (VT == MVT::f64) | ||||
9601 | IsLegal = AArch64_AM::getFP64Imm(ImmInt) != -1 || Imm.isPosZero(); | ||||
9602 | else if (VT == MVT::f32) | ||||
9603 | IsLegal = AArch64_AM::getFP32Imm(ImmInt) != -1 || Imm.isPosZero(); | ||||
9604 | else if (VT == MVT::f16 && Subtarget->hasFullFP16()) | ||||
9605 | IsLegal = AArch64_AM::getFP16Imm(ImmInt) != -1 || Imm.isPosZero(); | ||||
9606 | // TODO: fmov h0, w0 is also legal, however we don't have an isel pattern to | ||||
9607 | // generate that fmov. | ||||
9608 | |||||
9609 | // If we cannot materialize the value in the immediate field for fmov, check | ||||
9610 | // if it can be encoded as the immediate operand of a logical instruction. | ||||
9611 | // The immediate value will be created with either MOVZ, MOVN, or ORR. | ||||
9612 | if (!IsLegal && (VT == MVT::f64 || VT == MVT::f32)) { | ||||
9613 | // The cost is actually exactly the same for mov+fmov vs. adrp+ldr; | ||||
9614 | // however the mov+fmov sequence is always better because of the reduced | ||||
9615 | // cache pressure. The timings are still the same if you consider | ||||
9616 | // movw+movk+fmov vs. adrp+ldr (it's one instruction longer, but the | ||||
9617 | // movw+movk pair is fused). So we limit the expansion to 2 instructions at most. | ||||
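| // A worked example (illustrative): 42.0 has the f64 bit pattern | ||||
| // 0x4045000000000000, which is not an fmov immediate but expands to a | ||||
| // single MOVZ (#0x4045, LSL #48), so mov+fmov fits the 2-instruction limit. | ||||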
9618 | SmallVector<AArch64_IMM::ImmInsnModel, 4> Insn; | ||||
9619 | AArch64_IMM::expandMOVImm(ImmInt.getZExtValue(), VT.getSizeInBits(), | ||||
9620 | Insn); | ||||
9621 | unsigned Limit = (OptForSize ? 1 : (Subtarget->hasFuseLiterals() ? 5 : 2)); | ||||
9622 | IsLegal = Insn.size() <= Limit; | ||||
9623 | } | ||||
9624 | |||||
9625 | LLVM_DEBUG(dbgs() << (IsLegal ? "Legal " : "Illegal ") << VT.getEVTString() | ||||
9626 | << " imm value: "; Imm.dump();); | ||||
9627 | return IsLegal; | ||||
9628 | } | ||||
9629 | |||||
9630 | //===----------------------------------------------------------------------===// | ||||
9631 | // AArch64 Optimization Hooks | ||||
9632 | //===----------------------------------------------------------------------===// | ||||
9633 | |||||
9634 | static SDValue getEstimate(const AArch64Subtarget *ST, unsigned Opcode, | ||||
9635 | SDValue Operand, SelectionDAG &DAG, | ||||
9636 | int &ExtraSteps) { | ||||
9637 | EVT VT = Operand.getValueType(); | ||||
9638 | if ((ST->hasNEON() && | ||||
9639 | (VT == MVT::f64 || VT == MVT::v1f64 || VT == MVT::v2f64 || | ||||
9640 | VT == MVT::f32 || VT == MVT::v1f32 || VT == MVT::v2f32 || | ||||
9641 | VT == MVT::v4f32)) || | ||||
9642 | (ST->hasSVE() && | ||||
9643 | (VT == MVT::nxv8f16 || VT == MVT::nxv4f32 || VT == MVT::nxv2f64))) { | ||||
9644 | if (ExtraSteps == TargetLoweringBase::ReciprocalEstimate::Unspecified) | ||||
9645 | // For the reciprocal estimates, convergence is quadratic, so the number | ||||
9646 | // of digits is doubled after each iteration. In ARMv8, the accuracy of | ||||
9647 | // the initial estimate is 2^-8. Thus the number of extra steps to refine | ||||
9648 | // the result for float (23 mantissa bits) is 2 and for double (52 | ||||
9649 | // mantissa bits) is 3. | ||||
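| // Concretely: 2^-8 doubles to 2^-16 after one step and 2^-32 after two | ||||
| // (covering float's 23 mantissa bits), then 2^-64 after three (covering | ||||
| // double's 52). | ||||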
9650 | ExtraSteps = VT.getScalarType() == MVT::f64 ? 3 : 2; | ||||
9651 | |||||
9652 | return DAG.getNode(Opcode, SDLoc(Operand), VT, Operand); | ||||
9653 | } | ||||
9654 | |||||
9655 | return SDValue(); | ||||
9656 | } | ||||
9657 | |||||
9658 | SDValue | ||||
9659 | AArch64TargetLowering::getSqrtInputTest(SDValue Op, SelectionDAG &DAG, | ||||
9660 | const DenormalMode &Mode) const { | ||||
9661 | SDLoc DL(Op); | ||||
9662 | EVT VT = Op.getValueType(); | ||||
9663 | EVT CCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); | ||||
9664 | SDValue FPZero = DAG.getConstantFP(0.0, DL, VT); | ||||
9665 | return DAG.getSetCC(DL, CCVT, Op, FPZero, ISD::SETEQ); | ||||
9666 | } | ||||
9667 | |||||
9668 | SDValue | ||||
9669 | AArch64TargetLowering::getSqrtResultForDenormInput(SDValue Op, | ||||
9670 | SelectionDAG &DAG) const { | ||||
9671 | return Op; | ||||
9672 | } | ||||
9673 | |||||
9674 | SDValue AArch64TargetLowering::getSqrtEstimate(SDValue Operand, | ||||
9675 | SelectionDAG &DAG, int Enabled, | ||||
9676 | int &ExtraSteps, | ||||
9677 | bool &UseOneConst, | ||||
9678 | bool Reciprocal) const { | ||||
9679 | if (Enabled == ReciprocalEstimate::Enabled || | ||||
9680 | (Enabled == ReciprocalEstimate::Unspecified && Subtarget->useRSqrt())) | ||||
9681 | if (SDValue Estimate = getEstimate(Subtarget, AArch64ISD::FRSQRTE, Operand, | ||||
9682 | DAG, ExtraSteps)) { | ||||
9683 | SDLoc DL(Operand); | ||||
9684 | EVT VT = Operand.getValueType(); | ||||
9685 | |||||
9686 | SDNodeFlags Flags; | ||||
9687 | Flags.setAllowReassociation(true); | ||||
9688 | |||||
9689 | // Newton reciprocal square root iteration: E * 0.5 * (3 - X * E^2) | ||||
9690 | // AArch64 reciprocal square root iteration instruction: 0.5 * (3 - M * N) | ||||
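| // In the loop below, Step is first E^2, then FRSQRTS(Operand, E^2) yields | ||||
| // 0.5 * (3 - X * E^2), and the final FMUL folds in the leading E term. | ||||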
9691 | for (int i = ExtraSteps; i > 0; --i) { | ||||
9692 | SDValue Step = DAG.getNode(ISD::FMUL, DL, VT, Estimate, Estimate, | ||||
9693 | Flags); | ||||
9694 | Step = DAG.getNode(AArch64ISD::FRSQRTS, DL, VT, Operand, Step, Flags); | ||||
9695 | Estimate = DAG.getNode(ISD::FMUL, DL, VT, Estimate, Step, Flags); | ||||
9696 | } | ||||
9697 | if (!Reciprocal) | ||||
9698 | Estimate = DAG.getNode(ISD::FMUL, DL, VT, Operand, Estimate, Flags); | ||||
9699 | |||||
9700 | ExtraSteps = 0; | ||||
9701 | return Estimate; | ||||
9702 | } | ||||
9703 | |||||
9704 | return SDValue(); | ||||
9705 | } | ||||
9706 | |||||
9707 | SDValue AArch64TargetLowering::getRecipEstimate(SDValue Operand, | ||||
9708 | SelectionDAG &DAG, int Enabled, | ||||
9709 | int &ExtraSteps) const { | ||||
9710 | if (Enabled == ReciprocalEstimate::Enabled) | ||||
9711 | if (SDValue Estimate = getEstimate(Subtarget, AArch64ISD::FRECPE, Operand, | ||||
9712 | DAG, ExtraSteps)) { | ||||
9713 | SDLoc DL(Operand); | ||||
9714 | EVT VT = Operand.getValueType(); | ||||
9715 | |||||
9716 | SDNodeFlags Flags; | ||||
9717 | Flags.setAllowReassociation(true); | ||||
9718 | |||||
9719 | // Newton reciprocal iteration: E * (2 - X * E) | ||||
9720 | // AArch64 reciprocal iteration instruction: (2 - M * N) | ||||
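| // In the loop below, FRECPS(Operand, Estimate) yields (2 - X * E), and the | ||||
| // FMUL folds in the leading E term of the Newton formula above. | ||||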
9721 | for (int i = ExtraSteps; i > 0; --i) { | ||||
9722 | SDValue Step = DAG.getNode(AArch64ISD::FRECPS, DL, VT, Operand, | ||||
9723 | Estimate, Flags); | ||||
9724 | Estimate = DAG.getNode(ISD::FMUL, DL, VT, Estimate, Step, Flags); | ||||
9725 | } | ||||
9726 | |||||
9727 | ExtraSteps = 0; | ||||
9728 | return Estimate; | ||||
9729 | } | ||||
9730 | |||||
9731 | return SDValue(); | ||||
9732 | } | ||||
9733 | |||||
9734 | //===----------------------------------------------------------------------===// | ||||
9735 | // AArch64 Inline Assembly Support | ||||
9736 | //===----------------------------------------------------------------------===// | ||||
9737 | |||||
9738 | // Table of Constraints | ||||
9739 | // TODO: This is the current set of constraints supported by ARM for the | ||||
9740 | // compiler; not all of them may make sense. | ||||
9741 | // | ||||
9742 | // r - A general register | ||||
9743 | // w - An FP/SIMD register of some size in the range v0-v31 | ||||
9744 | // x - An FP/SIMD register of some size in the range v0-v15 | ||||
9745 | // I - Constant that can be used with an ADD instruction | ||||
9746 | // J - Constant that can be used with a SUB instruction | ||||
9747 | // K - Constant that can be used with a 32-bit logical instruction | ||||
9748 | // L - Constant that can be used with a 64-bit logical instruction | ||||
9749 | // M - Constant that can be used as a 32-bit MOV immediate | ||||
9750 | // N - Constant that can be used as a 64-bit MOV immediate | ||||
9751 | // Q - A memory reference with base register and no offset | ||||
9752 | // S - A symbolic address | ||||
9753 | // Y - Floating point constant zero | ||||
9754 | // Z - Integer constant zero | ||||
9755 | // | ||||
9756 | // Note that general register operands will be output using their 64-bit x | ||||
9757 | // register name, whatever the size of the variable, unless the asm operand | ||||
9758 | // is prefixed by the %w modifier. Floating-point and SIMD register operands | ||||
9759 | // will be output with the v prefix unless prefixed by the %b, %h, %s, %d or | ||||
9760 | // %q modifier. | ||||
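| // For example (illustrative inline asm, not from this file): | ||||
| //   asm("add %w0, %w1, %w2" : "=r"(res) : "r"(a), "r"(b)); | ||||
| // prints the operands as w-registers; without the %w modifier they would | ||||
| // print as x-registers even if the C variables are 32-bit. | ||||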
9761 | const char *AArch64TargetLowering::LowerXConstraint(EVT ConstraintVT) const { | ||||
9762 | // At this point, we have to lower this constraint to something else, so we | ||||
9763 | // lower it to an "r" or "w". However, by doing this we will force the result | ||||
9764 | // to be in register, while the X constraint is much more permissive. | ||||
9765 | // | ||||
9766 | // Although we are correct (we are free to emit anything, without | ||||
9767 | // constraints), we might break use cases that would expect us to be more | ||||
9768 | // efficient and emit something else. | ||||
9769 | if (!Subtarget->hasFPARMv8()) | ||||
9770 | return "r"; | ||||
9771 | |||||
9772 | if (ConstraintVT.isFloatingPoint()) | ||||
9773 | return "w"; | ||||
9774 | |||||
9775 | if (ConstraintVT.isVector() && | ||||
9776 | (ConstraintVT.getSizeInBits() == 64 || | ||||
9777 | ConstraintVT.getSizeInBits() == 128)) | ||||
9778 | return "w"; | ||||
9779 | |||||
9780 | return "r"; | ||||
9781 | } | ||||
9782 | |||||
9783 | enum PredicateConstraint { | ||||
9784 | Upl, | ||||
9785 | Upa, | ||||
9786 | Invalid | ||||
9787 | }; | ||||
9788 | |||||
9789 | static PredicateConstraint parsePredicateConstraint(StringRef Constraint) { | ||||
9790 | PredicateConstraint P = PredicateConstraint::Invalid; | ||||
9791 | if (Constraint == "Upa") | ||||
9792 | P = PredicateConstraint::Upa; | ||||
9793 | if (Constraint == "Upl") | ||||
9794 | P = PredicateConstraint::Upl; | ||||
9795 | return P; | ||||
9796 | } | ||||
9797 | |||||
9798 | /// getConstraintType - Given a constraint letter, return the type of | ||||
9799 | /// constraint it is for this target. | ||||
9800 | AArch64TargetLowering::ConstraintType | ||||
9801 | AArch64TargetLowering::getConstraintType(StringRef Constraint) const { | ||||
9802 | if (Constraint.size() == 1) { | ||||
9803 | switch (Constraint[0]) { | ||||
9804 | default: | ||||
9805 | break; | ||||
9806 | case 'x': | ||||
9807 | case 'w': | ||||
9808 | case 'y': | ||||
9809 | return C_RegisterClass; | ||||
9810 | // An address with a single base register. Due to the way we | ||||
9811 | // currently handle addresses it is the same as 'r'. | ||||
9812 | case 'Q': | ||||
9813 | return C_Memory; | ||||
9814 | case 'I': | ||||
9815 | case 'J': | ||||
9816 | case 'K': | ||||
9817 | case 'L': | ||||
9818 | case 'M': | ||||
9819 | case 'N': | ||||
9820 | case 'Y': | ||||
9821 | case 'Z': | ||||
9822 | return C_Immediate; | ||||
9823 | case 'z': | ||||
9824 | case 'S': // A symbolic address | ||||
9825 | return C_Other; | ||||
9826 | } | ||||
9827 | } else if (parsePredicateConstraint(Constraint) != | ||||
9828 | PredicateConstraint::Invalid) | ||||
9829 | return C_RegisterClass; | ||||
9830 | return TargetLowering::getConstraintType(Constraint); | ||||
9831 | } | ||||
9832 | |||||
9833 | /// Examine constraint type and operand type and determine a weight value. | ||||
9834 | /// This object must already have been set up with the operand type | ||||
9835 | /// and the current alternative constraint selected. | ||||
9836 | TargetLowering::ConstraintWeight | ||||
9837 | AArch64TargetLowering::getSingleConstraintMatchWeight( | ||||
9838 | AsmOperandInfo &info, const char *constraint) const { | ||||
9839 | ConstraintWeight weight = CW_Invalid; | ||||
9840 | Value *CallOperandVal = info.CallOperandVal; | ||||
9841 | // If we don't have a value, we can't do a match, | ||||
9842 | // but allow it at the lowest weight. | ||||
9843 | if (!CallOperandVal) | ||||
9844 | return CW_Default; | ||||
9845 | Type *type = CallOperandVal->getType(); | ||||
9846 | // Look at the constraint type. | ||||
9847 | switch (*constraint) { | ||||
9848 | default: | ||||
9849 | weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); | ||||
9850 | break; | ||||
9851 | case 'x': | ||||
9852 | case 'w': | ||||
9853 | case 'y': | ||||
9854 | if (type->isFloatingPointTy() || type->isVectorTy()) | ||||
9855 | weight = CW_Register; | ||||
9856 | break; | ||||
9857 | case 'z': | ||||
9858 | weight = CW_Constant; | ||||
9859 | break; | ||||
9860 | case 'U': | ||||
9861 | if (parsePredicateConstraint(constraint) != PredicateConstraint::Invalid) | ||||
9862 | weight = CW_Register; | ||||
9863 | break; | ||||
9864 | } | ||||
9865 | return weight; | ||||
9866 | } | ||||
9867 | |||||
9868 | std::pair<unsigned, const TargetRegisterClass *> | ||||
9869 | AArch64TargetLowering::getRegForInlineAsmConstraint( | ||||
9870 | const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const { | ||||
9871 | if (Constraint.size() == 1) { | ||||
9872 | switch (Constraint[0]) { | ||||
9873 | case 'r': | ||||
9874 | if (VT.isScalableVector()) | ||||
9875 | return std::make_pair(0U, nullptr); | ||||
9876 | if (Subtarget->hasLS64() && VT.getSizeInBits() == 512) | ||||
9877 | return std::make_pair(0U, &AArch64::GPR64x8ClassRegClass); | ||||
9878 | if (VT.getFixedSizeInBits() == 64) | ||||
9879 | return std::make_pair(0U, &AArch64::GPR64commonRegClass); | ||||
9880 | return std::make_pair(0U, &AArch64::GPR32commonRegClass); | ||||
9881 | case 'w': { | ||||
9882 | if (!Subtarget->hasFPARMv8()) | ||||
9883 | break; | ||||
9884 | if (VT.isScalableVector()) { | ||||
9885 | if (VT.getVectorElementType() != MVT::i1) | ||||
9886 | return std::make_pair(0U, &AArch64::ZPRRegClass); | ||||
9887 | return std::make_pair(0U, nullptr); | ||||
9888 | } | ||||
9889 | uint64_t VTSize = VT.getFixedSizeInBits(); | ||||
9890 | if (VTSize == 16) | ||||
9891 | return std::make_pair(0U, &AArch64::FPR16RegClass); | ||||
9892 | if (VTSize == 32) | ||||
9893 | return std::make_pair(0U, &AArch64::FPR32RegClass); | ||||
9894 | if (VTSize == 64) | ||||
9895 | return std::make_pair(0U, &AArch64::FPR64RegClass); | ||||
9896 | if (VTSize == 128) | ||||
9897 | return std::make_pair(0U, &AArch64::FPR128RegClass); | ||||
9898 | break; | ||||
9899 | } | ||||
9900 | // The instructions that this constraint is designed for can | ||||
9901 | // only take 128-bit registers so just use that regclass. | ||||
9902 | case 'x': | ||||
9903 | if (!Subtarget->hasFPARMv8()) | ||||
9904 | break; | ||||
9905 | if (VT.isScalableVector()) | ||||
9906 | return std::make_pair(0U, &AArch64::ZPR_4bRegClass); | ||||
9907 | if (VT.getSizeInBits() == 128) | ||||
9908 | return std::make_pair(0U, &AArch64::FPR128_loRegClass); | ||||
9909 | break; | ||||
9910 | case 'y': | ||||
9911 | if (!Subtarget->hasFPARMv8()) | ||||
9912 | break; | ||||
9913 | if (VT.isScalableVector()) | ||||
9914 | return std::make_pair(0U, &AArch64::ZPR_3bRegClass); | ||||
9915 | break; | ||||
9916 | } | ||||
9917 | } else { | ||||
9918 | PredicateConstraint PC = parsePredicateConstraint(Constraint); | ||||
9919 | if (PC != PredicateConstraint::Invalid) { | ||||
9920 | if (!VT.isScalableVector() || VT.getVectorElementType() != MVT::i1) | ||||
9921 | return std::make_pair(0U, nullptr); | ||||
9922 | bool restricted = (PC == PredicateConstraint::Upl); | ||||
9923 | return restricted ? std::make_pair(0U, &AArch64::PPR_3bRegClass) | ||||
9924 | : std::make_pair(0U, &AArch64::PPRRegClass); | ||||
9925 | } | ||||
9926 | } | ||||
9927 | if (StringRef("{cc}").equals_insensitive(Constraint)) | ||||
9928 | return std::make_pair(unsigned(AArch64::NZCV), &AArch64::CCRRegClass); | ||||
9929 | |||||
9930 | // Use the default implementation in TargetLowering to convert the register | ||||
9931 | // constraint into a member of a register class. | ||||
9932 | std::pair<unsigned, const TargetRegisterClass *> Res; | ||||
9933 | Res = TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); | ||||
9934 | |||||
9935 | // Not found as a standard register? | ||||
9936 | if (!Res.second) { | ||||
9937 | unsigned Size = Constraint.size(); | ||||
9938 | if ((Size == 4 || Size == 5) && Constraint[0] == '{' && | ||||
9939 | tolower(Constraint[1]) == 'v' && Constraint[Size - 1] == '}') { | ||||
9940 | int RegNo; | ||||
9941 | bool Failed = Constraint.slice(2, Size - 1).getAsInteger(10, RegNo); | ||||
9942 | if (!Failed && RegNo >= 0 && RegNo <= 31) { | ||||
9943 | // v0 - v31 are aliases of q0 - q31 or d0 - d31 depending on size. | ||||
9944 | // By default we'll emit v0-v31 for this unless there's a modifier, in | ||||
9945 | // which case the correct register variant will be emitted instead. | ||||
9946 | if (VT != MVT::Other && VT.getSizeInBits() == 64) { | ||||
9947 | Res.first = AArch64::FPR64RegClass.getRegister(RegNo); | ||||
9948 | Res.second = &AArch64::FPR64RegClass; | ||||
9949 | } else { | ||||
9950 | Res.first = AArch64::FPR128RegClass.getRegister(RegNo); | ||||
9951 | Res.second = &AArch64::FPR128RegClass; | ||||
9952 | } | ||||
9953 | } | ||||
9954 | } | ||||
9955 | } | ||||
9956 | |||||
9957 | if (Res.second && !Subtarget->hasFPARMv8() && | ||||
9958 | !AArch64::GPR32allRegClass.hasSubClassEq(Res.second) && | ||||
9959 | !AArch64::GPR64allRegClass.hasSubClassEq(Res.second)) | ||||
9960 | return std::make_pair(0U, nullptr); | ||||
9961 | |||||
9962 | return Res; | ||||
9963 | } | ||||
9964 | |||||
9965 | EVT AArch64TargetLowering::getAsmOperandValueType(const DataLayout &DL, | ||||
9966 | llvm::Type *Ty, | ||||
9967 | bool AllowUnknown) const { | ||||
9968 | if (Subtarget->hasLS64() && Ty->isIntegerTy(512)) | ||||
9969 | return EVT(MVT::i64x8); | ||||
9970 | |||||
9971 | return TargetLowering::getAsmOperandValueType(DL, Ty, AllowUnknown); | ||||
9972 | } | ||||
9973 | |||||
9974 | /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops | ||||
9975 | /// vector. If it is invalid, don't add anything to Ops. | ||||
9976 | void AArch64TargetLowering::LowerAsmOperandForConstraint( | ||||
9977 | SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops, | ||||
9978 | SelectionDAG &DAG) const { | ||||
9979 | SDValue Result; | ||||
9980 | |||||
9981 | // Currently only support length 1 constraints. | ||||
9982 | if (Constraint.length() != 1) | ||||
9983 | return; | ||||
9984 | |||||
9985 | char ConstraintLetter = Constraint[0]; | ||||
9986 | switch (ConstraintLetter) { | ||||
9987 | default: | ||||
9988 | break; | ||||
9989 | |||||
9990 | // This set of constraints deal with valid constants for various instructions. | ||||
9991 | // Validate and return a target constant for them if we can. | ||||
9992 | case 'z': { | ||||
9993 | // 'z' maps to xzr or wzr so it needs an input of 0. | ||||
9994 | if (!isNullConstant(Op)) | ||||
9995 | return; | ||||
9996 | |||||
9997 | if (Op.getValueType() == MVT::i64) | ||||
9998 | Result = DAG.getRegister(AArch64::XZR, MVT::i64); | ||||
9999 | else | ||||
10000 | Result = DAG.getRegister(AArch64::WZR, MVT::i32); | ||||
10001 | break; | ||||
10002 | } | ||||
10003 | case 'S': { | ||||
10004 | // An absolute symbolic address or label reference. | ||||
10005 | if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) { | ||||
10006 | Result = DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op), | ||||
10007 | GA->getValueType(0)); | ||||
10008 | } else if (const BlockAddressSDNode *BA = | ||||
10009 | dyn_cast<BlockAddressSDNode>(Op)) { | ||||
10010 | Result = | ||||
10011 | DAG.getTargetBlockAddress(BA->getBlockAddress(), BA->getValueType(0)); | ||||
10012 | } else | ||||
10013 | return; | ||||
10014 | break; | ||||
10015 | } | ||||
10016 | |||||
10017 | case 'I': | ||||
10018 | case 'J': | ||||
10019 | case 'K': | ||||
10020 | case 'L': | ||||
10021 | case 'M': | ||||
10022 | case 'N': | ||||
10023 | ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); | ||||
10024 | if (!C) | ||||
10025 | return; | ||||
10026 | |||||
10027 | // Grab the value and do some validation. | ||||
10028 | uint64_t CVal = C->getZExtValue(); | ||||
10029 | switch (ConstraintLetter) { | ||||
10030 | // The I constraint applies only to simple ADD or SUB immediate operands: | ||||
10031 | // i.e. 0 to 4095 with optional shift by 12 | ||||
10032 | // The J constraint applies only to ADD or SUB immediates that would be | ||||
10033 | // valid when negated, i.e. if [an add pattern] were to be output as a SUB | ||||
10034 | // instruction [or vice versa], in other words -1 to -4095 with optional | ||||
10035 | // left shift by 12. | ||||
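| // For example, 4095 and (4095 << 12) satisfy I, while -1, -4095 and | ||||
| // -(4095 << 12) satisfy J. | ||||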
10036 | case 'I': | ||||
10037 | if (isUInt<12>(CVal) || isShiftedUInt<12, 12>(CVal)) | ||||
10038 | break; | ||||
10039 | return; | ||||
10040 | case 'J': { | ||||
10041 | uint64_t NVal = -C->getSExtValue(); | ||||
10042 | if (isUInt<12>(NVal) || isShiftedUInt<12, 12>(NVal)) { | ||||
10043 | CVal = C->getSExtValue(); | ||||
10044 | break; | ||||
10045 | } | ||||
10046 | return; | ||||
10047 | } | ||||
10048 | // The K and L constraints apply *only* to logical immediates, including | ||||
10049 | // what used to be the MOVI alias for ORR (though the MOVI alias has now | ||||
10050 | // been removed and MOV should be used). So these constraints have to | ||||
10051 | // distinguish between bit patterns that are valid 32-bit or 64-bit | ||||
10052 | // "bitmask immediates": for example 0xaaaaaaaa is a valid bimm32 (K), but | ||||
10053 | // not a valid bimm64 (L) where 0xaaaaaaaaaaaaaaaa would be valid, and vice | ||||
10054 | // versa. | ||||
10055 | case 'K': | ||||
10056 | if (AArch64_AM::isLogicalImmediate(CVal, 32)) | ||||
10057 | break; | ||||
10058 | return; | ||||
10059 | case 'L': | ||||
10060 | if (AArch64_AM::isLogicalImmediate(CVal, 64)) | ||||
10061 | break; | ||||
10062 | return; | ||||
10063 | // The M and N constraints are a superset of K and L respectively, for use | ||||
10064 | // with the MOV (immediate) alias. As well as the logical immediates they | ||||
10065 | // also match 32 or 64-bit immediates that can be loaded either using a | ||||
10066 | // *single* MOVZ or MOVN, such as 32-bit 0x12340000, 0x00001234, 0xffffedca | ||||
10067 | // (M) or 64-bit 0x1234000000000000 (N) etc. | ||||
10068 | // As a note, some of this code is liberally stolen from the asm parser. | ||||
10069 | case 'M': { | ||||
10070 | if (!isUInt<32>(CVal)) | ||||
10071 | return; | ||||
10072 | if (AArch64_AM::isLogicalImmediate(CVal, 32)) | ||||
10073 | break; | ||||
10074 | if ((CVal & 0xFFFF) == CVal) | ||||
10075 | break; | ||||
10076 | if ((CVal & 0xFFFF0000ULL) == CVal) | ||||
10077 | break; | ||||
10078 | uint64_t NCVal = ~(uint32_t)CVal; | ||||
10079 | if ((NCVal & 0xFFFFULL) == NCVal) | ||||
10080 | break; | ||||
10081 | if ((NCVal & 0xFFFF0000ULL) == NCVal) | ||||
10082 | break; | ||||
10083 | return; | ||||
10084 | } | ||||
10085 | case 'N': { | ||||
10086 | if (AArch64_AM::isLogicalImmediate(CVal, 64)) | ||||
10087 | break; | ||||
10088 | if ((CVal & 0xFFFFULL) == CVal) | ||||
10089 | break; | ||||
10090 | if ((CVal & 0xFFFF0000ULL) == CVal) | ||||
10091 | break; | ||||
10092 | if ((CVal & 0xFFFF00000000ULL) == CVal) | ||||
10093 | break; | ||||
10094 | if ((CVal & 0xFFFF000000000000ULL) == CVal) | ||||
10095 | break; | ||||
10096 | uint64_t NCVal = ~CVal; | ||||
10097 | if ((NCVal & 0xFFFFULL) == NCVal) | ||||
10098 | break; | ||||
10099 | if ((NCVal & 0xFFFF0000ULL) == NCVal) | ||||
10100 | break; | ||||
10101 | if ((NCVal & 0xFFFF00000000ULL) == NCVal) | ||||
10102 | break; | ||||
10103 | if ((NCVal & 0xFFFF000000000000ULL) == NCVal) | ||||
10104 | break; | ||||
10105 | return; | ||||
10106 | } | ||||
10107 | default: | ||||
10108 | return; | ||||
10109 | } | ||||
10110 | |||||
10111 | // All assembler immediates are 64-bit integers. | ||||
10112 | Result = DAG.getTargetConstant(CVal, SDLoc(Op), MVT::i64); | ||||
10113 | break; | ||||
10114 | } | ||||
10115 | |||||
10116 | if (Result.getNode()) { | ||||
10117 | Ops.push_back(Result); | ||||
10118 | return; | ||||
10119 | } | ||||
10120 | |||||
10121 | return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); | ||||
10122 | } | ||||
10123 | |||||
10124 | //===----------------------------------------------------------------------===// | ||||
10125 | // AArch64 Advanced SIMD Support | ||||
10126 | //===----------------------------------------------------------------------===// | ||||
10127 | |||||
10128 | /// WidenVector - Given a value in the V64 register class, produce the | ||||
10129 | /// equivalent value in the V128 register class. | ||||
10130 | static SDValue WidenVector(SDValue V64Reg, SelectionDAG &DAG) { | ||||
10131 | EVT VT = V64Reg.getValueType(); | ||||
10132 | unsigned NarrowSize = VT.getVectorNumElements(); | ||||
10133 | MVT EltTy = VT.getVectorElementType().getSimpleVT(); | ||||
10134 | MVT WideTy = MVT::getVectorVT(EltTy, 2 * NarrowSize); | ||||
10135 | SDLoc DL(V64Reg); | ||||
10136 | |||||
10137 | return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideTy, DAG.getUNDEF(WideTy), | ||||
10138 | V64Reg, DAG.getConstant(0, DL, MVT::i64)); | ||||
10139 | } | ||||
10140 | |||||
10141 | /// getExtFactor - Determine the adjustment factor for the position when | ||||
10142 | /// generating an "extract from vector registers" instruction. | ||||
10143 | static unsigned getExtFactor(SDValue &V) { | ||||
10144 | EVT EltType = V.getValueType().getVectorElementType(); | ||||
10145 | return EltType.getSizeInBits() / 8; | ||||
10146 | } | ||||
10147 | |||||
10148 | /// NarrowVector - Given a value in the V128 register class, produce the | ||||
10149 | /// equivalent value in the V64 register class. | ||||
10150 | static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) { | ||||
10151 | EVT VT = V128Reg.getValueType(); | ||||
10152 | unsigned WideSize = VT.getVectorNumElements(); | ||||
10153 | MVT EltTy = VT.getVectorElementType().getSimpleVT(); | ||||
10154 | MVT NarrowTy = MVT::getVectorVT(EltTy, WideSize / 2); | ||||
10155 | SDLoc DL(V128Reg); | ||||
10156 | |||||
10157 | return DAG.getTargetExtractSubreg(AArch64::dsub, DL, NarrowTy, V128Reg); | ||||
10158 | } | ||||
10159 | |||||
10160 | // Gather data to see if the operation can be modelled as a | ||||
10161 | // shuffle in combination with VEXTs. | ||||
10162 | SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op, | ||||
10163 | SelectionDAG &DAG) const { | ||||
10164 | assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!"); | ||||
10165 | LLVM_DEBUG(dbgs() << "AArch64TargetLowering::ReconstructShuffle\n"); | ||||
10166 | SDLoc dl(Op); | ||||
10167 | EVT VT = Op.getValueType(); | ||||
10168 | assert(!VT.isScalableVector() && | ||||
10169 | "Scalable vectors cannot be used with ISD::BUILD_VECTOR"); | ||||
10170 | unsigned NumElts = VT.getVectorNumElements(); | ||||
10171 | |||||
10172 | struct ShuffleSourceInfo { | ||||
10173 | SDValue Vec; | ||||
10174 | unsigned MinElt; | ||||
10175 | unsigned MaxElt; | ||||
10176 | |||||
10177 | // We may insert some combination of BITCASTs and VEXT nodes to force Vec to | ||||
10178 | // be compatible with the shuffle we intend to construct. As a result | ||||
10179 | // ShuffleVec will be some sliding window into the original Vec. | ||||
10180 | SDValue ShuffleVec; | ||||
10181 | |||||
10182 | // Code should guarantee that element i in Vec starts at element | ||||
10183 | // "WindowBase + i * WindowScale" in ShuffleVec. | ||||
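| // For example, with WindowBase 4 and WindowScale 2, element 3 of Vec is | ||||
| // found at element 4 + 3 * 2 == 10 of ShuffleVec. | ||||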
10184 | int WindowBase; | ||||
10185 | int WindowScale; | ||||
10186 | |||||
10187 | ShuffleSourceInfo(SDValue Vec) | ||||
10188 | : Vec(Vec), MinElt(std::numeric_limits<unsigned>::max()), MaxElt(0), | ||||
10189 | ShuffleVec(Vec), WindowBase(0), WindowScale(1) {} | ||||
10190 | |||||
10191 | bool operator ==(SDValue OtherVec) { return Vec == OtherVec; } | ||||
10192 | }; | ||||
10193 | |||||
10194 | // First gather all vectors used as an immediate source for this BUILD_VECTOR | ||||
10195 | // node. | ||||
10196 | SmallVector<ShuffleSourceInfo, 2> Sources; | ||||
10197 | for (unsigned i = 0; i < NumElts; ++i) { | ||||
10198 | SDValue V = Op.getOperand(i); | ||||
10199 | if (V.isUndef()) | ||||
10200 | continue; | ||||
10201 | else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT || | ||||
10202 | !isa<ConstantSDNode>(V.getOperand(1)) || | ||||
10203 | V.getOperand(0).getValueType().isScalableVector()) { | ||||
10204 | LLVM_DEBUG( | ||||
10205 | dbgs() << "Reshuffle failed: " | ||||
10206 | "a shuffle can only come from building a vector from " | ||||
10207 | "various elements of other fixed-width vectors, provided " | ||||
10208 | "their indices are constant\n"); | ||||
10209 | return SDValue(); | ||||
10210 | } | ||||
10211 | |||||
10212 | // Add this element source to the list if it's not already there. | ||||
10213 | SDValue SourceVec = V.getOperand(0); | ||||
10214 | auto Source = find(Sources, SourceVec); | ||||
10215 | if (Source == Sources.end()) | ||||
10216 | Source = Sources.insert(Sources.end(), ShuffleSourceInfo(SourceVec)); | ||||
10217 | |||||
10218 | // Update the minimum and maximum lane number seen. | ||||
10219 | unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue(); | ||||
10220 | Source->MinElt = std::min(Source->MinElt, EltNo); | ||||
10221 | Source->MaxElt = std::max(Source->MaxElt, EltNo); | ||||
10222 | } | ||||
10223 | |||||
10224 | // If we have 3 or 4 sources, try to generate a TBL, which will at least be | ||||
10225 | // better than moving to/from gpr registers for larger vectors. | ||||
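| // (For reference: TBL treats the concatenated source registers as a byte | ||||
| // table and each mask byte as an index into it; out-of-range indices | ||||
| // produce zero, so the -1 entries used for undef lanes below are harmless.) | ||||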
10226 | if ((Sources.size() == 3 || Sources.size() == 4) && NumElts > 4) { | ||||
10227 | // Construct a mask for the tbl. We may need to adjust the index for types | ||||
10228 | // larger than i8. | ||||
10229 | SmallVector<unsigned, 16> Mask; | ||||
10230 | unsigned OutputFactor = VT.getScalarSizeInBits() / 8; | ||||
10231 | for (unsigned I = 0; I < NumElts; ++I) { | ||||
10232 | SDValue V = Op.getOperand(I); | ||||
10233 | if (V.isUndef()) { | ||||
10234 | for (unsigned OF = 0; OF < OutputFactor; OF++) | ||||
10235 | Mask.push_back(-1); | ||||
10236 | continue; | ||||
10237 | } | ||||
10238 | // Set the Mask lanes adjusted for the size of the input and output | ||||
10239 | // lanes. The Mask is always i8, so it will set OutputFactor lanes per | ||||
10240 | // output element, adjusted in their positions per input and output types. | ||||
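| // For example, lane 3 of the second source (S == 1) with 16-bit input | ||||
| // elements starts at byte 16 * 1 + 3 * 16 / 8 == 22, and contributes | ||||
| // OutputFactor consecutive mask bytes from there. | ||||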
10241 | unsigned Lane = V.getConstantOperandVal(1); | ||||
10242 | for (unsigned S = 0; S < Sources.size(); S++) { | ||||
10243 | if (V.getOperand(0) == Sources[S].Vec) { | ||||
10244 | unsigned InputSize = Sources[S].Vec.getScalarValueSizeInBits(); | ||||
10245 | unsigned InputBase = 16 * S + Lane * InputSize / 8; | ||||
10246 | for (unsigned OF = 0; OF < OutputFactor; OF++) | ||||
10247 | Mask.push_back(InputBase + OF); | ||||
10248 | break; | ||||
10249 | } | ||||
10250 | } | ||||
10251 | } | ||||
10252 | |||||
10253 | // Construct the tbl3/tbl4 out of an intrinsic, the sources converted to | ||||
10254 | // v16i8, and the TBLMask | ||||
10255 | SmallVector<SDValue, 16> TBLOperands; | ||||
10256 | TBLOperands.push_back(DAG.getConstant(Sources.size() == 3 | ||||
10257 | ? Intrinsic::aarch64_neon_tbl3 | ||||
10258 | : Intrinsic::aarch64_neon_tbl4, | ||||
10259 | dl, MVT::i32)); | ||||
10260 | for (unsigned i = 0; i < Sources.size(); i++) { | ||||
10261 | SDValue Src = Sources[i].Vec; | ||||
10262 | EVT SrcVT = Src.getValueType(); | ||||
10263 | Src = DAG.getBitcast(SrcVT.is64BitVector() ? MVT::v8i8 : MVT::v16i8, Src); | ||||
10264 | assert((SrcVT.is64BitVector() || SrcVT.is128BitVector()) && | ||||
10265 | "Expected a legally typed vector"); | ||||
10266 | if (SrcVT.is64BitVector()) | ||||
10267 | Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i8, Src, | ||||
10268 | DAG.getUNDEF(MVT::v8i8)); | ||||
10269 | TBLOperands.push_back(Src); | ||||
10270 | } | ||||
10271 | |||||
10272 | SmallVector<SDValue, 16> TBLMask; | ||||
10273 | for (unsigned i = 0; i < Mask.size(); i++) | ||||
10274 | TBLMask.push_back(DAG.getConstant(Mask[i], dl, MVT::i32)); | ||||
10275 | assert((Mask.size() == 8 || Mask.size() == 16) && | ||||
10276 | "Expected a v8i8 or v16i8 Mask"); | ||||
10277 | TBLOperands.push_back( | ||||
10278 | DAG.getBuildVector(Mask.size() == 8 ? MVT::v8i8 : MVT::v16i8, dl, TBLMask)); | ||||
10279 | |||||
10280 | SDValue Shuffle = | ||||
10281 | DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, | ||||
10282 | Mask.size() == 8 ? MVT::v8i8 : MVT::v16i8, TBLOperands); | ||||
10283 | return DAG.getBitcast(VT, Shuffle); | ||||
10284 | } | ||||
10285 | |||||
10286 | if (Sources.size() > 2) { | ||||
10287 | LLVM_DEBUG(dbgs() << "Reshuffle failed: currently only do something " | ||||
10288 | << "sensible when at most two source vectors are " | ||||
10289 | << "involved\n"); | ||||
10290 | return SDValue(); | ||||
10291 | } | ||||
10292 | |||||
10293 | // Find out the smallest element size among result and two sources, and use | ||||
10294 | // it as element size to build the shuffle_vector. | ||||
10295 | EVT SmallestEltTy = VT.getVectorElementType(); | ||||
10296 | for (auto &Source : Sources) { | ||||
10297 | EVT SrcEltTy = Source.Vec.getValueType().getVectorElementType(); | ||||
10298 | if (SrcEltTy.bitsLT(SmallestEltTy)) { | ||||
10299 | SmallestEltTy = SrcEltTy; | ||||
10300 | } | ||||
10301 | } | ||||
10302 | unsigned ResMultiplier = | ||||
10303 | VT.getScalarSizeInBits() / SmallestEltTy.getFixedSizeInBits(); | ||||
10304 | uint64_t VTSize = VT.getFixedSizeInBits(); | ||||
10305 | NumElts = VTSize / SmallestEltTy.getFixedSizeInBits(); | ||||
10306 | EVT ShuffleVT = EVT::getVectorVT(*DAG.getContext(), SmallestEltTy, NumElts); | ||||
10307 | |||||
10308 | // If the source vector is too wide or too narrow, we may nevertheless be able | ||||
10309 | // to construct a compatible shuffle either by concatenating it with UNDEF or | ||||
10310 | // extracting a suitable range of elements. | ||||
10311 | for (auto &Src : Sources) { | ||||
10312 | EVT SrcVT = Src.ShuffleVec.getValueType(); | ||||
10313 | |||||
10314 | TypeSize SrcVTSize = SrcVT.getSizeInBits(); | ||||
10315 | if (SrcVTSize == TypeSize::Fixed(VTSize)) | ||||
10316 | continue; | ||||
10317 | |||||
10318 | // This stage of the search produces a source with the same element type as | ||||
10319 | // the original, but with a total width matching the BUILD_VECTOR output. | ||||
10320 | EVT EltVT = SrcVT.getVectorElementType(); | ||||
10321 | unsigned NumSrcElts = VTSize / EltVT.getFixedSizeInBits(); | ||||
10322 | EVT DestVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumSrcElts); | ||||
10323 | |||||
10324 | if (SrcVTSize.getFixedValue() < VTSize) { | ||||
10325 | assert(2 * SrcVTSize == VTSize); | ||||
10326 | // We can pad out the smaller vector for free, so if it's part of a | ||||
10327 | // shuffle... | ||||
10328 | Src.ShuffleVec = | ||||
10329 | DAG.getNode(ISD::CONCAT_VECTORS, dl, DestVT, Src.ShuffleVec, | ||||
10330 | DAG.getUNDEF(Src.ShuffleVec.getValueType())); | ||||
10331 | continue; | ||||
10332 | } | ||||
10333 | |||||
10334 | if (SrcVTSize.getFixedValue() != 2 * VTSize) { | ||||
10335 | LLVM_DEBUG( | ||||
10336 | dbgs() << "Reshuffle failed: result vector too small to extract\n"); | ||||
10337 | return SDValue(); | ||||
10338 | } | ||||
10339 | |||||
10340 | if (Src.MaxElt - Src.MinElt >= NumSrcElts) { | ||||
10341 | LLVM_DEBUG( | ||||
10342 | dbgs() << "Reshuffle failed: span too large for a VEXT to cope\n"); | ||||
10343 | return SDValue(); | ||||
10344 | } | ||||
10345 | |||||
10346 | if (Src.MinElt >= NumSrcElts) { | ||||
10347 | // The extraction can just take the second half | ||||
10348 | Src.ShuffleVec = | ||||
10349 | DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec, | ||||
10350 | DAG.getConstant(NumSrcElts, dl, MVT::i64)); | ||||
10351 | Src.WindowBase = -NumSrcElts; | ||||
10352 | } else if (Src.MaxElt < NumSrcElts) { | ||||
10353 | // The extraction can just take the first half | ||||
10354 | Src.ShuffleVec = | ||||
10355 | DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec, | ||||
10356 | DAG.getConstant(0, dl, MVT::i64)); | ||||
10357 | } else { | ||||
10358 | // An actual VEXT is needed | ||||
10359 | SDValue VEXTSrc1 = | ||||
10360 | DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec, | ||||
10361 | DAG.getConstant(0, dl, MVT::i64)); | ||||
10362 | SDValue VEXTSrc2 = | ||||
10363 | DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec, | ||||
10364 | DAG.getConstant(NumSrcElts, dl, MVT::i64)); | ||||
10365 | unsigned Imm = Src.MinElt * getExtFactor(VEXTSrc1); | ||||
10366 | |||||
10367 | if (!SrcVT.is64BitVector()) { | ||||
10368 | LLVM_DEBUG( | ||||
10369 | dbgs() << "Reshuffle failed: don't know how to lower AArch64ISD::EXT " | ||||
10370 | "for SVE vectors."); | ||||
10371 | return SDValue(); | ||||
10372 | } | ||||
10373 | |||||
10374 | Src.ShuffleVec = DAG.getNode(AArch64ISD::EXT, dl, DestVT, VEXTSrc1, | ||||
10375 | VEXTSrc2, | ||||
10376 | DAG.getConstant(Imm, dl, MVT::i32)); | ||||
10377 | Src.WindowBase = -Src.MinElt; | ||||
10378 | } | ||||
10379 | } | ||||
10380 | |||||
10381 | // Another possible incompatibility occurs from the vector element types. We | ||||
10382 | // can fix this by bitcasting the source vectors to the same type we intend | ||||
10383 | // for the shuffle. | ||||
10384 | for (auto &Src : Sources) { | ||||
10385 | EVT SrcEltTy = Src.ShuffleVec.getValueType().getVectorElementType(); | ||||
10386 | if (SrcEltTy == SmallestEltTy) | ||||
10387 | continue; | ||||
10388 | assert(ShuffleVT.getVectorElementType() == SmallestEltTy); | ||||
10389 | Src.ShuffleVec = DAG.getNode(ISD::BITCAST, dl, ShuffleVT, Src.ShuffleVec); | ||||
10390 | Src.WindowScale = | ||||
10391 | SrcEltTy.getFixedSizeInBits() / SmallestEltTy.getFixedSizeInBits(); | ||||
10392 | Src.WindowBase *= Src.WindowScale; | ||||
10393 | } | ||||
10394 | |||||
10395 | // Final check before we try to actually produce a shuffle. | ||||
10396 | LLVM_DEBUG(for (auto Src | ||||
10397 | : Sources) | ||||
10398 | assert(Src.ShuffleVec.getValueType() == ShuffleVT);); | ||||
10399 | |||||
10400 | // The stars all align, our next step is to produce the mask for the shuffle. | ||||
10401 | SmallVector<int, 8> Mask(ShuffleVT.getVectorNumElements(), -1); | ||||
10402 | int BitsPerShuffleLane = ShuffleVT.getScalarSizeInBits(); | ||||
10403 | for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) { | ||||
10404 | SDValue Entry = Op.getOperand(i); | ||||
10405 | if (Entry.isUndef()) | ||||
10406 | continue; | ||||
10407 | |||||
10408 | auto Src = find(Sources, Entry.getOperand(0)); | ||||
10409 | int EltNo = cast<ConstantSDNode>(Entry.getOperand(1))->getSExtValue(); | ||||
10410 | |||||
10411 | // EXTRACT_VECTOR_ELT performs an implicit any_ext; BUILD_VECTOR an implicit | ||||
10412 | // trunc. So only std::min(SrcBits, DestBits) actually get defined in this | ||||
10413 | // segment. | ||||
10414 | EVT OrigEltTy = Entry.getOperand(0).getValueType().getVectorElementType(); | ||||
10415 | int BitsDefined = std::min(OrigEltTy.getScalarSizeInBits(), | ||||
10416 | VT.getScalarSizeInBits()); | ||||
10417 | int LanesDefined = BitsDefined / BitsPerShuffleLane; | ||||
10418 | |||||
10419 | // This source is expected to fill ResMultiplier lanes of the final shuffle, | ||||
10420 | // starting at the appropriate offset. | ||||
10421 | int *LaneMask = &Mask[i * ResMultiplier]; | ||||
10422 | |||||
10423 | int ExtractBase = EltNo * Src->WindowScale + Src->WindowBase; | ||||
10424 | ExtractBase += NumElts * (Src - Sources.begin()); | ||||
10425 | for (int j = 0; j < LanesDefined; ++j) | ||||
10426 | LaneMask[j] = ExtractBase + j; | ||||
10427 | } | ||||
10428 | |||||
10429 | // Final check before we try to produce nonsense... | ||||
10430 | if (!isShuffleMaskLegal(Mask, ShuffleVT)) { | ||||
10431 | LLVM_DEBUG(dbgs() << "Reshuffle failed: illegal shuffle mask\n"); | ||||
10432 | return SDValue(); | ||||
10433 | } | ||||
10434 | |||||
10435 | SDValue ShuffleOps[] = { DAG.getUNDEF(ShuffleVT), DAG.getUNDEF(ShuffleVT) }; | ||||
10436 | for (unsigned i = 0; i < Sources.size(); ++i) | ||||
10437 | ShuffleOps[i] = Sources[i].ShuffleVec; | ||||
10438 | |||||
10439 | SDValue Shuffle = DAG.getVectorShuffle(ShuffleVT, dl, ShuffleOps[0], | ||||
10440 | ShuffleOps[1], Mask); | ||||
10441 | SDValue V = DAG.getNode(ISD::BITCAST, dl, VT, Shuffle); | ||||
10442 | |||||
10443 | LLVM_DEBUG(dbgs() << "Reshuffle, creating node: "; Shuffle.dump(); | ||||
10444 | dbgs() << "Reshuffle, creating node: "; V.dump();); | ||||
10445 | |||||
10446 | return V; | ||||
10447 | } | ||||
10448 | |||||
10449 | // Check if an EXT instruction can handle the shuffle mask when the | ||||
10450 | // vector sources of the shuffle are the same. | ||||
10451 | static bool isSingletonEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) { | ||||
10452 | unsigned NumElts = VT.getVectorNumElements(); | ||||
10453 | |||||
10454 | // Assume that the first shuffle index is not UNDEF. Fail if it is. | ||||
10455 | if (M[0] < 0) | ||||
10456 | return false; | ||||
10457 | |||||
10458 | Imm = M[0]; | ||||
10459 | |||||
10460 | // If this is a VEXT shuffle, the immediate value is the index of the first | ||||
10461 | // element. The other shuffle indices must be the successive elements after | ||||
10462 | // the first one. | ||||
10463 | unsigned ExpectedElt = Imm; | ||||
10464 | for (unsigned i = 1; i < NumElts; ++i) { | ||||
10465 | // Increment the expected index. If it wraps around, just follow it | ||||
10466 | // back to index zero and keep going. | ||||
10467 | ++ExpectedElt; | ||||
10468 | if (ExpectedElt == NumElts) | ||||
10469 | ExpectedElt = 0; | ||||
10470 | |||||
10471 | if (M[i] < 0) | ||||
10472 | continue; // ignore UNDEF indices | ||||
10473 | if (ExpectedElt != static_cast<unsigned>(M[i])) | ||||
10474 | return false; | ||||
10475 | } | ||||
10476 | |||||
10477 | return true; | ||||
10478 | } | ||||
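
// Illustrative example (not from the original source): for a v8i8 shuffle of a
// single source with mask <3, 4, 5, 6, 7, 0, 1, 2>, each index follows its
// predecessor, wrapping around at NumElts, so isSingletonEXTMask succeeds with
// Imm = 3 (i.e. "ext v0.8b, v1.8b, v1.8b, #3").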
10479 | |||||
10480 | // Detect patterns of a0,a1,a2,a3,b0,b1,b2,b3,c0,c1,c2,c3,d0,d1,d2,d3 from | ||||
10481 | // v4i32s. This is really a truncate, which we can construct out of (legal) | ||||
10482 | // concats and truncate nodes. | ||||
10483 | static SDValue ReconstructTruncateFromBuildVector(SDValue V, SelectionDAG &DAG) { | ||||
10484 | if (V.getValueType() != MVT::v16i8) | ||||
10485 | return SDValue(); | ||||
10486 | assert(V.getNumOperands() == 16 && "Expected 16 operands on the BUILDVECTOR"); | ||||
10487 | |||||
10488 | for (unsigned X = 0; X < 4; X++) { | ||||
10489 | // Check the first item in each group is an extract from lane 0 of a v4i32 | ||||
10490 | // or v4i16. | ||||
10491 | SDValue BaseExt = V.getOperand(X * 4); | ||||
10492 | if (BaseExt.getOpcode() != ISD::EXTRACT_VECTOR_ELT || | ||||
10493 | (BaseExt.getOperand(0).getValueType() != MVT::v4i16 && | ||||
10494 | BaseExt.getOperand(0).getValueType() != MVT::v4i32) || | ||||
10495 | !isa<ConstantSDNode>(BaseExt.getOperand(1)) || | ||||
10496 | BaseExt.getConstantOperandVal(1) != 0) | ||||
10497 | return SDValue(); | ||||
10498 | SDValue Base = BaseExt.getOperand(0); | ||||
10499 | // And check the other items are extracts from the same vector. | ||||
10500 | for (unsigned Y = 1; Y < 4; Y++) { | ||||
10501 | SDValue Ext = V.getOperand(X * 4 + Y); | ||||
10502 | if (Ext.getOpcode() != ISD::EXTRACT_VECTOR_ELT || | ||||
10503 | Ext.getOperand(0) != Base || | ||||
10504 | !isa<ConstantSDNode>(Ext.getOperand(1)) || | ||||
10505 | Ext.getConstantOperandVal(1) != Y) | ||||
10506 | return SDValue(); | ||||
10507 | } | ||||
10508 | } | ||||
10509 | |||||
10510 | // Turn the buildvector into a series of truncates and concats, which will | ||||
10511 | // become uzp1s. Any v4i32s we found get truncated to v4i16, and the results | ||||
10512 | // are concatenated together to produce two v8i16 vectors. These are both | ||||
10513 | // truncated and concatenated together. | ||||
10514 | SDLoc DL(V); | ||||
10515 | SDValue Trunc[4] = { | ||||
10516 | V.getOperand(0).getOperand(0), V.getOperand(4).getOperand(0), | ||||
10517 | V.getOperand(8).getOperand(0), V.getOperand(12).getOperand(0)}; | ||||
10518 | for (SDValue &V : Trunc) | ||||
10519 | if (V.getValueType() == MVT::v4i32) | ||||
10520 | V = DAG.getNode(ISD::TRUNCATE, DL, MVT::v4i16, V); | ||||
10521 | SDValue Concat0 = | ||||
10522 | DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i16, Trunc[0], Trunc[1]); | ||||
10523 | SDValue Concat1 = | ||||
10524 | DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i16, Trunc[2], Trunc[3]); | ||||
10525 | SDValue Trunc0 = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i8, Concat0); | ||||
10526 | SDValue Trunc1 = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i8, Concat1); | ||||
10527 | return DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, Trunc0, Trunc1); | ||||
10528 | } | ||||
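
// Illustrative sketch of the dataflow above (assumed, not from the original
// source), for four v4i32 sources a, b, c, d:
//   ta..td = trunc a..d to v4i16
//   c0 = concat(ta, tb) : v8i16;  c1 = concat(tc, td) : v8i16
//   result = concat(trunc c0 to v8i8, trunc c1 to v8i8) : v16i8
// where each truncate of a concat later selects as a single UZP1.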
10529 | |||||
10530 | /// Check if a vector shuffle corresponds to a DUP instruction with a larger | ||||
10531 | /// element width than the vector lane type. If that is the case the function | ||||
10532 | /// returns true and writes the value of the DUP instruction lane operand into | ||||
10533 | /// DupLaneOp. | ||||
10534 | static bool isWideDUPMask(ArrayRef<int> M, EVT VT, unsigned BlockSize, | ||||
10535 | unsigned &DupLaneOp) { | ||||
10536 | assert((BlockSize == 16 || BlockSize == 32 || BlockSize == 64) && | ||||
10537 | "Only possible block sizes for wide DUP are: 16, 32, 64"); | ||||
10538 | |||||
10539 | if (BlockSize <= VT.getScalarSizeInBits()) | ||||
10540 | return false; | ||||
10541 | if (BlockSize % VT.getScalarSizeInBits() != 0) | ||||
10542 | return false; | ||||
10543 | if (VT.getSizeInBits() % BlockSize != 0) | ||||
10544 | return false; | ||||
10545 | |||||
10546 | size_t SingleVecNumElements = VT.getVectorNumElements(); | ||||
10547 | size_t NumEltsPerBlock = BlockSize / VT.getScalarSizeInBits(); | ||||
10548 | size_t NumBlocks = VT.getSizeInBits() / BlockSize; | ||||
10549 | |||||
10550 | // We are looking for masks like | ||||
10551 | // [0, 1, 0, 1] or [2, 3, 2, 3] or [4, 5, 6, 7, 4, 5, 6, 7] where any element | ||||
10552 | // might be replaced by 'undefined'. BlockIndices will eventually contain | ||||
10553 | // lane indices of the duplicated block (i.e. [0, 1], [2, 3] and [4, 5, 6, 7] | ||||
10554 | // for the above examples) | ||||
10555 | SmallVector<int, 8> BlockElts(NumEltsPerBlock, -1); | ||||
10556 | for (size_t BlockIndex = 0; BlockIndex < NumBlocks; BlockIndex++) | ||||
10557 | for (size_t I = 0; I < NumEltsPerBlock; I++) { | ||||
10558 | int Elt = M[BlockIndex * NumEltsPerBlock + I]; | ||||
10559 | if (Elt < 0) | ||||
10560 | continue; | ||||
10561 | // For now we don't support shuffles that use the second operand | ||||
10562 | if ((unsigned)Elt >= SingleVecNumElements) | ||||
10563 | return false; | ||||
10564 | if (BlockElts[I] < 0) | ||||
10565 | BlockElts[I] = Elt; | ||||
10566 | else if (BlockElts[I] != Elt) | ||||
10567 | return false; | ||||
10568 | } | ||||
10569 | |||||
10570 | // We found a candidate block (possibly with some undefs). It must be a | ||||
10571 | // sequence of consecutive integers starting with a value divisible by | ||||
10572 | // NumEltsPerBlock with some values possibly replaced by undef-s. | ||||
10573 | |||||
10574 | // Find first non-undef element | ||||
10575 | auto FirstRealEltIter = find_if(BlockElts, [](int Elt) { return Elt >= 0; }); | ||||
10576 | assert(FirstRealEltIter != BlockElts.end() && | ||||
10577 | "Shuffle with all-undefs must have been caught by previous cases, " | ||||
10578 | "e.g. isSplat()"); | ||||
10579 | if (FirstRealEltIter == BlockElts.end()) { | ||||
10580 | DupLaneOp = 0; | ||||
10581 | return true; | ||||
10582 | } | ||||
10583 | |||||
10584 | // Index of FirstRealElt in BlockElts | ||||
10585 | size_t FirstRealIndex = FirstRealEltIter - BlockElts.begin(); | ||||
10586 | |||||
10587 | if ((unsigned)*FirstRealEltIter < FirstRealIndex) | ||||
10588 | return false; | ||||
10589 | // BlockElts[0] must have the following value if it isn't undef: | ||||
10590 | size_t Elt0 = *FirstRealEltIter - FirstRealIndex; | ||||
10591 | |||||
10592 | // Check the first element | ||||
10593 | if (Elt0 % NumEltsPerBlock != 0) | ||||
10594 | return false; | ||||
10595 | // Check that the sequence indeed consists of consecutive integers (modulo | ||||
10596 | // undefs) | ||||
10597 | for (size_t I = 0; I < NumEltsPerBlock; I++) | ||||
10598 | if (BlockElts[I] >= 0 && (unsigned)BlockElts[I] != Elt0 + I) | ||||
10599 | return false; | ||||
10600 | |||||
10601 | DupLaneOp = Elt0 / NumEltsPerBlock; | ||||
10602 | return true; | ||||
10603 | } | ||||
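
// Illustrative example (not from the original source): for a v8i16 shuffle
// with mask <2, 3, 2, 3, 2, 3, 2, 3> and BlockSize = 32, every 32-bit block
// repeats lanes {2, 3}, so isWideDUPMask returns true with DupLaneOp = 1,
// i.e. the shuffle is a DUP of the second 32-bit lane (dup v0.4s, v1.s[1]).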
10604 | |||||
10605 | // Check if an EXT instruction can handle the shuffle mask when the | ||||
10606 | // vector sources of the shuffle are different. | ||||
10607 | static bool isEXTMask(ArrayRef<int> M, EVT VT, bool &ReverseEXT, | ||||
10608 | unsigned &Imm) { | ||||
10609 | // Look for the first non-undef element. | ||||
10610 | const int *FirstRealElt = find_if(M, [](int Elt) { return Elt >= 0; }); | ||||
10611 | |||||
10612 | // Benefit from APInt to handle overflow when calculating the expected element. | ||||
10613 | unsigned NumElts = VT.getVectorNumElements(); | ||||
10614 | unsigned MaskBits = APInt(32, NumElts * 2).logBase2(); | ||||
10615 | APInt ExpectedElt = APInt(MaskBits, *FirstRealElt + 1); | ||||
10616 | // The following shuffle indices must be the successive elements after the | ||||
10617 | // first real element. | ||||
10618 | bool FoundWrongElt = std::any_of(FirstRealElt + 1, M.end(), [&](int Elt) { | ||||
10619 | return Elt != ExpectedElt++ && Elt != -1; | ||||
10620 | }); | ||||
10621 | if (FoundWrongElt) | ||||
10622 | return false; | ||||
10623 | |||||
10624 | // The index of an EXT is the first element if it is not UNDEF. | ||||
10625 | // Watch out for the beginning UNDEFs. The EXT index should be the expected | ||||
10626 | // value of the first element. E.g. | ||||
10627 | // <-1, -1, 3, ...> is treated as <1, 2, 3, ...>. | ||||
10628 | // <-1, -1, 0, 1, ...> is treated as <2*NumElts-2, 2*NumElts-1, 0, 1, ...>. | ||||
10629 | // ExpectedElt is the last mask index plus 1. | ||||
10630 | Imm = ExpectedElt.getZExtValue(); | ||||
10631 | |||||
10632 | // There are two different cases that require reversing the input vectors. | ||||
10633 | // For example, for vector <4 x i32> we have the following cases, | ||||
10634 | // Case 1: shufflevector(<4 x i32>,<4 x i32>,<-1, -1, -1, 0>) | ||||
10635 | // Case 2: shufflevector(<4 x i32>,<4 x i32>,<-1, -1, 7, 0>) | ||||
10636 | // For both cases, we finally use mask <5, 6, 7, 0>, which requires | ||||
10637 | // to reverse two input vectors. | ||||
10638 | if (Imm < NumElts) | ||||
10639 | ReverseEXT = true; | ||||
10640 | else | ||||
10641 | Imm -= NumElts; | ||||
10642 | |||||
10643 | return true; | ||||
10644 | } | ||||
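
// Illustrative examples (not from the original source), for v8i8 (NumElts = 8):
//   mask <1, 2, 3, 4, 5, 6, 7, 8>     -> Imm = 1, ReverseEXT = false
//   mask <12, 13, 14, 15, 0, 1, 2, 3> -> the run starts in the second input,
//     so the operands must be swapped: Imm = 4, ReverseEXT = true.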
10645 | |||||
10646 | /// isREVMask - Check if a vector shuffle corresponds to a REV | ||||
10647 | /// instruction with the specified blocksize. (The order of the elements | ||||
10648 | /// within each block of the vector is reversed.) | ||||
10649 | static bool isREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) { | ||||
10650 | assert((BlockSize == 16 || BlockSize == 32 || BlockSize == 64 || | ||||
10651 | BlockSize == 128) && | ||||
10652 | "Only possible block sizes for REV are: 16, 32, 64, 128"); | ||||
10653 | |||||
10654 | unsigned EltSz = VT.getScalarSizeInBits(); | ||||
10655 | unsigned NumElts = VT.getVectorNumElements(); | ||||
10656 | unsigned BlockElts = M[0] + 1; | ||||
10657 | // If the first shuffle index is UNDEF, be optimistic. | ||||
10658 | if (M[0] < 0) | ||||
10659 | BlockElts = BlockSize / EltSz; | ||||
10660 | |||||
10661 | if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz) | ||||
10662 | return false; | ||||
10663 | |||||
10664 | for (unsigned i = 0; i < NumElts; ++i) { | ||||
10665 | if (M[i] < 0) | ||||
10666 | continue; // ignore UNDEF indices | ||||
10667 | if ((unsigned)M[i] != (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts)) | ||||
10668 | return false; | ||||
10669 | } | ||||
10670 | |||||
10671 | return true; | ||||
10672 | } | ||||
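
// Illustrative example (not from the original source): for v8i8 with
// BlockSize = 32, the REV32 mask <3, 2, 1, 0, 7, 6, 5, 4> reverses the bytes
// within each 32-bit block: BlockElts = M[0] + 1 = 4 and every index equals
// (i - i % 4) + (3 - i % 4), so isREVMask returns true.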
10673 | |||||
10674 | static bool isZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { | ||||
10675 | unsigned NumElts = VT.getVectorNumElements(); | ||||
10676 | if (NumElts % 2 != 0) | ||||
10677 | return false; | ||||
10678 | WhichResult = (M[0] == 0 ? 0 : 1); | ||||
10679 | unsigned Idx = WhichResult * NumElts / 2; | ||||
10680 | for (unsigned i = 0; i != NumElts; i += 2) { | ||||
10681 | if ((M[i] >= 0 && (unsigned)M[i] != Idx) || | ||||
10682 | (M[i + 1] >= 0 && (unsigned)M[i + 1] != Idx + NumElts)) | ||||
10683 | return false; | ||||
10684 | Idx += 1; | ||||
10685 | } | ||||
10686 | |||||
10687 | return true; | ||||
10688 | } | ||||
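
// Illustrative examples (not from the original source), for v4i32 (NumElts = 4):
//   ZIP1 mask <0, 4, 1, 5> -> WhichResult = 0 (interleaves the low halves)
//   ZIP2 mask <2, 6, 3, 7> -> WhichResult = 1 (interleaves the high halves)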
10689 | |||||
10690 | static bool isUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { | ||||
10691 | unsigned NumElts = VT.getVectorNumElements(); | ||||
10692 | WhichResult = (M[0] == 0 ? 0 : 1); | ||||
10693 | for (unsigned i = 0; i != NumElts; ++i) { | ||||
10694 | if (M[i] < 0) | ||||
10695 | continue; // ignore UNDEF indices | ||||
10696 | if ((unsigned)M[i] != 2 * i + WhichResult) | ||||
10697 | return false; | ||||
10698 | } | ||||
10699 | |||||
10700 | return true; | ||||
10701 | } | ||||
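
// Illustrative examples (not from the original source), for v4i32:
//   UZP1 mask <0, 2, 4, 6> -> WhichResult = 0 (even-indexed elements)
//   UZP2 mask <1, 3, 5, 7> -> WhichResult = 1 (odd-indexed elements)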
10702 | |||||
10703 | static bool isTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { | ||||
10704 | unsigned NumElts = VT.getVectorNumElements(); | ||||
10705 | if (NumElts % 2 != 0) | ||||
10706 | return false; | ||||
10707 | WhichResult = (M[0] == 0 ? 0 : 1); | ||||
10708 | for (unsigned i = 0; i < NumElts; i += 2) { | ||||
10709 | if ((M[i] >= 0 && (unsigned)M[i] != i + WhichResult) || | ||||
10710 | (M[i + 1] >= 0 && (unsigned)M[i + 1] != i + NumElts + WhichResult)) | ||||
10711 | return false; | ||||
10712 | } | ||||
10713 | return true; | ||||
10714 | } | ||||
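
// Illustrative examples (not from the original source), for v4i32:
//   TRN1 mask <0, 4, 2, 6> -> WhichResult = 0 (even lanes of both inputs)
//   TRN2 mask <1, 5, 3, 7> -> WhichResult = 1 (odd lanes of both inputs)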
10715 | |||||
10716 | /// isZIP_v_undef_Mask - Special case of isZIPMask for canonical form of | ||||
10717 | /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". | ||||
10718 | /// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>. | ||||
10719 | static bool isZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { | ||||
10720 | unsigned NumElts = VT.getVectorNumElements(); | ||||
10721 | if (NumElts % 2 != 0) | ||||
10722 | return false; | ||||
10723 | WhichResult = (M[0] == 0 ? 0 : 1); | ||||
10724 | unsigned Idx = WhichResult * NumElts / 2; | ||||
10725 | for (unsigned i = 0; i != NumElts; i += 2) { | ||||
10726 | if ((M[i] >= 0 && (unsigned)M[i] != Idx) || | ||||
10727 | (M[i + 1] >= 0 && (unsigned)M[i + 1] != Idx)) | ||||
10728 | return false; | ||||
10729 | Idx += 1; | ||||
10730 | } | ||||
10731 | |||||
10732 | return true; | ||||
10733 | } | ||||
10734 | |||||
10735 | /// isUZP_v_undef_Mask - Special case of isUZPMask for canonical form of | ||||
10736 | /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". | ||||
10737 | /// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>, | ||||
10738 | static bool isUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { | ||||
10739 | unsigned Half = VT.getVectorNumElements() / 2; | ||||
10740 | WhichResult = (M[0] == 0 ? 0 : 1); | ||||
10741 | for (unsigned j = 0; j != 2; ++j) { | ||||
10742 | unsigned Idx = WhichResult; | ||||
10743 | for (unsigned i = 0; i != Half; ++i) { | ||||
10744 | int MIdx = M[i + j * Half]; | ||||
10745 | if (MIdx >= 0 && (unsigned)MIdx != Idx) | ||||
10746 | return false; | ||||
10747 | Idx += 2; | ||||
10748 | } | ||||
10749 | } | ||||
10750 | |||||
10751 | return true; | ||||
10752 | } | ||||
10753 | |||||
10754 | /// isTRN_v_undef_Mask - Special case of isTRNMask for canonical form of | ||||
10755 | /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". | ||||
10756 | /// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>. | ||||
10757 | static bool isTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { | ||||
10758 | unsigned NumElts = VT.getVectorNumElements(); | ||||
10759 | if (NumElts % 2 != 0) | ||||
10760 | return false; | ||||
10761 | WhichResult = (M[0] == 0 ? 0 : 1); | ||||
10762 | for (unsigned i = 0; i < NumElts; i += 2) { | ||||
10763 | if ((M[i] >= 0 && (unsigned)M[i] != i + WhichResult) || | ||||
10764 | (M[i + 1] >= 0 && (unsigned)M[i + 1] != i + WhichResult)) | ||||
10765 | return false; | ||||
10766 | } | ||||
10767 | return true; | ||||
10768 | } | ||||
10769 | |||||
10770 | static bool isINSMask(ArrayRef<int> M, int NumInputElements, | ||||
10771 | bool &DstIsLeft, int &Anomaly) { | ||||
10772 | if (M.size() != static_cast<size_t>(NumInputElements)) | ||||
10773 | return false; | ||||
10774 | |||||
10775 | int NumLHSMatch = 0, NumRHSMatch = 0; | ||||
10776 | int LastLHSMismatch = -1, LastRHSMismatch = -1; | ||||
10777 | |||||
10778 | for (int i = 0; i < NumInputElements; ++i) { | ||||
10779 | if (M[i] == -1) { | ||||
10780 | ++NumLHSMatch; | ||||
10781 | ++NumRHSMatch; | ||||
10782 | continue; | ||||
10783 | } | ||||
10784 | |||||
10785 | if (M[i] == i) | ||||
10786 | ++NumLHSMatch; | ||||
10787 | else | ||||
10788 | LastLHSMismatch = i; | ||||
10789 | |||||
10790 | if (M[i] == i + NumInputElements) | ||||
10791 | ++NumRHSMatch; | ||||
10792 | else | ||||
10793 | LastRHSMismatch = i; | ||||
10794 | } | ||||
10795 | |||||
10796 | if (NumLHSMatch == NumInputElements - 1) { | ||||
10797 | DstIsLeft = true; | ||||
10798 | Anomaly = LastLHSMismatch; | ||||
10799 | return true; | ||||
10800 | } else if (NumRHSMatch == NumInputElements - 1) { | ||||
10801 | DstIsLeft = false; | ||||
10802 | Anomaly = LastRHSMismatch; | ||||
10803 | return true; | ||||
10804 | } | ||||
10805 | |||||
10806 | return false; | ||||
10807 | } | ||||
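
// Illustrative example (not from the original source): for NumInputElements = 4
// and mask <0, 1, 6, 3>, three lanes match the LHS identity and the single
// mismatch is lane 2, which reads element 2 of the RHS, so isINSMask returns
// true with DstIsLeft = true and Anomaly = 2; this lowers to a single INS of
// V2 lane 2 into V1 lane 2.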
10808 | |||||
10809 | static bool isConcatMask(ArrayRef<int> Mask, EVT VT, bool SplitLHS) { | ||||
10810 | if (VT.getSizeInBits() != 128) | ||||
10811 | return false; | ||||
10812 | |||||
10813 | unsigned NumElts = VT.getVectorNumElements(); | ||||
10814 | |||||
10815 | for (int I = 0, E = NumElts / 2; I != E; I++) { | ||||
10816 | if (Mask[I] != I) | ||||
10817 | return false; | ||||
10818 | } | ||||
10819 | |||||
10820 | int Offset = NumElts / 2; | ||||
10821 | for (int I = NumElts / 2, E = NumElts; I != E; I++) { | ||||
10822 | if (Mask[I] != I + SplitLHS * Offset) | ||||
10823 | return false; | ||||
10824 | } | ||||
10825 | |||||
10826 | return true; | ||||
10827 | } | ||||
10828 | |||||
10829 | static SDValue tryFormConcatFromShuffle(SDValue Op, SelectionDAG &DAG) { | ||||
10830 | SDLoc DL(Op); | ||||
10831 | EVT VT = Op.getValueType(); | ||||
10832 | SDValue V0 = Op.getOperand(0); | ||||
10833 | SDValue V1 = Op.getOperand(1); | ||||
10834 | ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op)->getMask(); | ||||
10835 | |||||
10836 | if (VT.getVectorElementType() != V0.getValueType().getVectorElementType() || | ||||
10837 | VT.getVectorElementType() != V1.getValueType().getVectorElementType()) | ||||
10838 | return SDValue(); | ||||
10839 | |||||
10840 | bool SplitV0 = V0.getValueSizeInBits() == 128; | ||||
10841 | |||||
10842 | if (!isConcatMask(Mask, VT, SplitV0)) | ||||
10843 | return SDValue(); | ||||
10844 | |||||
10845 | EVT CastVT = VT.getHalfNumVectorElementsVT(*DAG.getContext()); | ||||
10846 | if (SplitV0) { | ||||
10847 | V0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, CastVT, V0, | ||||
10848 | DAG.getConstant(0, DL, MVT::i64)); | ||||
10849 | } | ||||
10850 | if (V1.getValueSizeInBits() == 128) { | ||||
10851 | V1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, CastVT, V1, | ||||
10852 | DAG.getConstant(0, DL, MVT::i64)); | ||||
10853 | } | ||||
10854 | return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, V0, V1); | ||||
10855 | } | ||||
10856 | |||||
10857 | /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit | ||||
10858 | /// the specified operations to build the shuffle. ID is the perfect-shuffle | ||||
10859 | /// ID, V1 and V2 are the original shuffle inputs. PFEntry is the perfect- | ||||
10860 | /// shuffle table entry and LHS/RHS are the immediate inputs for this stage | ||||
10861 | /// of the shuffle. | ||||
10862 | static SDValue GeneratePerfectShuffle(unsigned ID, SDValue V1, | ||||
10863 | SDValue V2, unsigned PFEntry, SDValue LHS, | ||||
10864 | SDValue RHS, SelectionDAG &DAG, | ||||
10865 | const SDLoc &dl) { | ||||
10866 | unsigned OpNum = (PFEntry >> 26) & 0x0F; | ||||
10867 | unsigned LHSID = (PFEntry >> 13) & ((1 << 13) - 1); | ||||
10868 | unsigned RHSID = (PFEntry >> 0) & ((1 << 13) - 1); | ||||
10869 | |||||
10870 | enum { | ||||
10871 | OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> | ||||
10872 | OP_VREV, | ||||
10873 | OP_VDUP0, | ||||
10874 | OP_VDUP1, | ||||
10875 | OP_VDUP2, | ||||
10876 | OP_VDUP3, | ||||
10877 | OP_VEXT1, | ||||
10878 | OP_VEXT2, | ||||
10879 | OP_VEXT3, | ||||
10880 | OP_VUZPL, // VUZP, left result | ||||
10881 | OP_VUZPR, // VUZP, right result | ||||
10882 | OP_VZIPL, // VZIP, left result | ||||
10883 | OP_VZIPR, // VZIP, right result | ||||
10884 | OP_VTRNL, // VTRN, left result | ||||
10885 | OP_VTRNR, // VTRN, right result | ||||
10886 | OP_MOVLANE // Move lane. RHSID is the lane to move into | ||||
10887 | }; | ||||
10888 | |||||
10889 | if (OpNum == OP_COPY) { | ||||
10890 | if (LHSID == (1 * 9 + 2) * 9 + 3) | ||||
10891 | return LHS; | ||||
10892 | assert(LHSID == ((4 * 9 + 5) * 9 + 6) * 9 + 7 && "Illegal OP_COPY!"); | ||||
10893 | return RHS; | ||||
10894 | } | ||||
10895 | |||||
10896 | if (OpNum == OP_MOVLANE) { | ||||
10897 | // Decompose a PerfectShuffle ID to get the Mask for lane Elt | ||||
10898 | auto getPFIDLane = [](unsigned ID, int Elt) -> int { | ||||
10899 | assert(Elt < 4 && "Expected Perfect Lanes to be less than 4"); | ||||
10900 | Elt = 3 - Elt; | ||||
10901 | while (Elt > 0) { | ||||
10902 | ID /= 9; | ||||
10903 | Elt--; | ||||
10904 | } | ||||
10905 | return (ID % 9 == 8) ? -1 : ID % 9; | ||||
10906 | }; | ||||
10907 | |||||
10908 | // For OP_MOVLANE shuffles, the RHSID represents the lane to move into. We | ||||
10909 | // get the lane to move from the PFID, which is always from the | ||||
10910 | // original vectors (V1 or V2). | ||||
10911 | SDValue OpLHS = GeneratePerfectShuffle( | ||||
10912 | LHSID, V1, V2, PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); | ||||
10913 | EVT VT = OpLHS.getValueType(); | ||||
10914 | assert(RHSID < 8 && "Expected a lane index for RHSID!"); | ||||
10915 | unsigned ExtLane = 0; | ||||
10916 | SDValue Input; | ||||
10917 | |||||
10918 | // OP_MOVLANE are either D movs (if bit 0x4 is set) or S movs. D movs | ||||
10919 | // convert into a higher type. | ||||
10920 | if (RHSID & 0x4) { | ||||
10921 | int MaskElt = getPFIDLane(ID, (RHSID & 0x01) << 1) >> 1; | ||||
10922 | if (MaskElt == -1) | ||||
10923 | MaskElt = (getPFIDLane(ID, ((RHSID & 0x01) << 1) + 1) - 1) >> 1; | ||||
10924 | assert(MaskElt >= 0 && "Didn't expect an undef movlane index!"); | ||||
10925 | ExtLane = MaskElt < 2 ? MaskElt : (MaskElt - 2); | ||||
10926 | Input = MaskElt < 2 ? V1 : V2; | ||||
10927 | if (VT.getScalarSizeInBits() == 16) { | ||||
10928 | Input = DAG.getBitcast(MVT::v2f32, Input); | ||||
10929 | OpLHS = DAG.getBitcast(MVT::v2f32, OpLHS); | ||||
10930 | } else { | ||||
10931 | assert(VT.getScalarSizeInBits() == 32 && | ||||
10932 | "Expected 16 or 32 bit shuffle elements"); | ||||
10933 | Input = DAG.getBitcast(MVT::v2f64, Input); | ||||
10934 | OpLHS = DAG.getBitcast(MVT::v2f64, OpLHS); | ||||
10935 | } | ||||
10936 | } else { | ||||
10937 | int MaskElt = getPFIDLane(ID, RHSID); | ||||
10938 | assert(MaskElt >= 0 && "Didn't expect an undef movlane index!"); | ||||
10939 | ExtLane = MaskElt < 4 ? MaskElt : (MaskElt - 4); | ||||
10940 | Input = MaskElt < 4 ? V1 : V2; | ||||
10941 | // Be careful about creating illegal types. Use f16 instead of i16. | ||||
10942 | if (VT == MVT::v4i16) { | ||||
10943 | Input = DAG.getBitcast(MVT::v4f16, Input); | ||||
10944 | OpLHS = DAG.getBitcast(MVT::v4f16, OpLHS); | ||||
10945 | } | ||||
10946 | } | ||||
10947 | SDValue Ext = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, | ||||
10948 | Input.getValueType().getVectorElementType(), | ||||
10949 | Input, DAG.getVectorIdxConstant(ExtLane, dl)); | ||||
10950 | SDValue Ins = | ||||
10951 | DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, Input.getValueType(), OpLHS, | ||||
10952 | Ext, DAG.getVectorIdxConstant(RHSID & 0x3, dl)); | ||||
10953 | return DAG.getBitcast(VT, Ins); | ||||
10954 | } | ||||
10955 | |||||
10956 | SDValue OpLHS, OpRHS; | ||||
10957 | OpLHS = GeneratePerfectShuffle(LHSID, V1, V2, PerfectShuffleTable[LHSID], LHS, | ||||
10958 | RHS, DAG, dl); | ||||
10959 | OpRHS = GeneratePerfectShuffle(RHSID, V1, V2, PerfectShuffleTable[RHSID], LHS, | ||||
10960 | RHS, DAG, dl); | ||||
10961 | EVT VT = OpLHS.getValueType(); | ||||
10962 | |||||
10963 | switch (OpNum) { | ||||
10964 | default: | ||||
10965 | llvm_unreachable("Unknown shuffle opcode!"); | ||||
10966 | case OP_VREV: | ||||
10967 | // VREV divides the vector in half and swaps within the half. | ||||
10968 | if (VT.getVectorElementType() == MVT::i32 || | ||||
10969 | VT.getVectorElementType() == MVT::f32) | ||||
10970 | return DAG.getNode(AArch64ISD::REV64, dl, VT, OpLHS); | ||||
10971 | // vrev <4 x i16> -> REV32 | ||||
10972 | if (VT.getVectorElementType() == MVT::i16 || | ||||
10973 | VT.getVectorElementType() == MVT::f16 || | ||||
10974 | VT.getVectorElementType() == MVT::bf16) | ||||
10975 | return DAG.getNode(AArch64ISD::REV32, dl, VT, OpLHS); | ||||
10976 | // vrev <4 x i8> -> REV16 | ||||
10977 | assert(VT.getVectorElementType() == MVT::i8); | ||||
10978 | return DAG.getNode(AArch64ISD::REV16, dl, VT, OpLHS); | ||||
10979 | case OP_VDUP0: | ||||
10980 | case OP_VDUP1: | ||||
10981 | case OP_VDUP2: | ||||
10982 | case OP_VDUP3: { | ||||
10983 | EVT EltTy = VT.getVectorElementType(); | ||||
10984 | unsigned Opcode; | ||||
10985 | if (EltTy == MVT::i8) | ||||
10986 | Opcode = AArch64ISD::DUPLANE8; | ||||
10987 | else if (EltTy == MVT::i16 || EltTy == MVT::f16 || EltTy == MVT::bf16) | ||||
10988 | Opcode = AArch64ISD::DUPLANE16; | ||||
10989 | else if (EltTy == MVT::i32 || EltTy == MVT::f32) | ||||
10990 | Opcode = AArch64ISD::DUPLANE32; | ||||
10991 | else if (EltTy == MVT::i64 || EltTy == MVT::f64) | ||||
10992 | Opcode = AArch64ISD::DUPLANE64; | ||||
10993 | else | ||||
10994 | llvm_unreachable("Invalid vector element type?"); | ||||
10995 | |||||
10996 | if (VT.getSizeInBits() == 64) | ||||
10997 | OpLHS = WidenVector(OpLHS, DAG); | ||||
10998 | SDValue Lane = DAG.getConstant(OpNum - OP_VDUP0, dl, MVT::i64); | ||||
10999 | return DAG.getNode(Opcode, dl, VT, OpLHS, Lane); | ||||
11000 | } | ||||
11001 | case OP_VEXT1: | ||||
11002 | case OP_VEXT2: | ||||
11003 | case OP_VEXT3: { | ||||
11004 | unsigned Imm = (OpNum - OP_VEXT1 + 1) * getExtFactor(OpLHS); | ||||
11005 | return DAG.getNode(AArch64ISD::EXT, dl, VT, OpLHS, OpRHS, | ||||
11006 | DAG.getConstant(Imm, dl, MVT::i32)); | ||||
11007 | } | ||||
11008 | case OP_VUZPL: | ||||
11009 | return DAG.getNode(AArch64ISD::UZP1, dl, DAG.getVTList(VT, VT), OpLHS, | ||||
11010 | OpRHS); | ||||
11011 | case OP_VUZPR: | ||||
11012 | return DAG.getNode(AArch64ISD::UZP2, dl, DAG.getVTList(VT, VT), OpLHS, | ||||
11013 | OpRHS); | ||||
11014 | case OP_VZIPL: | ||||
11015 | return DAG.getNode(AArch64ISD::ZIP1, dl, DAG.getVTList(VT, VT), OpLHS, | ||||
11016 | OpRHS); | ||||
11017 | case OP_VZIPR: | ||||
11018 | return DAG.getNode(AArch64ISD::ZIP2, dl, DAG.getVTList(VT, VT), OpLHS, | ||||
11019 | OpRHS); | ||||
11020 | case OP_VTRNL: | ||||
11021 | return DAG.getNode(AArch64ISD::TRN1, dl, DAG.getVTList(VT, VT), OpLHS, | ||||
11022 | OpRHS); | ||||
11023 | case OP_VTRNR: | ||||
11024 | return DAG.getNode(AArch64ISD::TRN2, dl, DAG.getVTList(VT, VT), OpLHS, | ||||
11025 | OpRHS); | ||||
11026 | } | ||||
11027 | } | ||||
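
// Illustrative note on the encoding assumed by the decode above (not from the
// original source): a PFEntry packs OpNum in bits [29:26], LHSID in bits
// [25:13] and RHSID in bits [12:0]; a 4-lane ID is its mask written base-9,
// with digit 8 meaning undef, so OP_COPY with LHSID == ((4*9+5)*9+6)*9+7
// encodes the identity copy of lanes <4, 5, 6, 7> (i.e. plain RHS).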
11028 | |||||
11029 | static SDValue GenerateTBL(SDValue Op, ArrayRef<int> ShuffleMask, | ||||
11030 | SelectionDAG &DAG) { | ||||
11031 | // Check to see if we can use the TBL instruction. | ||||
11032 | SDValue V1 = Op.getOperand(0); | ||||
11033 | SDValue V2 = Op.getOperand(1); | ||||
11034 | SDLoc DL(Op); | ||||
11035 | |||||
11036 | EVT EltVT = Op.getValueType().getVectorElementType(); | ||||
11037 | unsigned BytesPerElt = EltVT.getSizeInBits() / 8; | ||||
11038 | |||||
11039 | bool Swap = false; | ||||
11040 | if (V1.isUndef() || isZerosVector(V1.getNode())) { | ||||
11041 | std::swap(V1, V2); | ||||
11042 | Swap = true; | ||||
11043 | } | ||||
11044 | |||||
11045 | // If the V2 source is undef or zero then we can use a tbl1, as tbl1 will fill | ||||
11046 | // out-of-range values with 0s. We do need to make sure that any out-of-range | ||||
11047 | // values are really out-of-range for a v16i8 vector. | ||||
11048 | bool IsUndefOrZero = V2.isUndef() || isZerosVector(V2.getNode()); | ||||
11049 | MVT IndexVT = MVT::v8i8; | ||||
11050 | unsigned IndexLen = 8; | ||||
11051 | if (Op.getValueSizeInBits() == 128) { | ||||
11052 | IndexVT = MVT::v16i8; | ||||
11053 | IndexLen = 16; | ||||
11054 | } | ||||
11055 | |||||
11056 | SmallVector<SDValue, 8> TBLMask; | ||||
11057 | for (int Val : ShuffleMask) { | ||||
11058 | for (unsigned Byte = 0; Byte < BytesPerElt; ++Byte) { | ||||
11059 | unsigned Offset = Byte + Val * BytesPerElt; | ||||
11060 | if (Swap) | ||||
11061 | Offset = Offset < IndexLen ? Offset + IndexLen : Offset - IndexLen; | ||||
11062 | if (IsUndefOrZero && Offset >= IndexLen) | ||||
11063 | Offset = 255; | ||||
11064 | TBLMask.push_back(DAG.getConstant(Offset, DL, MVT::i32)); | ||||
11065 | } | ||||
11066 | } | ||||
11067 | |||||
11068 | SDValue V1Cst = DAG.getNode(ISD::BITCAST, DL, IndexVT, V1); | ||||
11069 | SDValue V2Cst = DAG.getNode(ISD::BITCAST, DL, IndexVT, V2); | ||||
11070 | |||||
11071 | SDValue Shuffle; | ||||
11072 | if (IsUndefOrZero) { | ||||
11073 | if (IndexLen == 8) | ||||
11074 | V1Cst = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, V1Cst, V1Cst); | ||||
11075 | Shuffle = DAG.getNode( | ||||
11076 | ISD::INTRINSIC_WO_CHAIN, DL, IndexVT, | ||||
11077 | DAG.getConstant(Intrinsic::aarch64_neon_tbl1, DL, MVT::i32), V1Cst, | ||||
11078 | DAG.getBuildVector(IndexVT, DL, | ||||
11079 | makeArrayRef(TBLMask.data(), IndexLen))); | ||||
11080 | } else { | ||||
11081 | if (IndexLen == 8) { | ||||
11082 | V1Cst = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, V1Cst, V2Cst); | ||||
11083 | Shuffle = DAG.getNode( | ||||
11084 | ISD::INTRINSIC_WO_CHAIN, DL, IndexVT, | ||||
11085 | DAG.getConstant(Intrinsic::aarch64_neon_tbl1, DL, MVT::i32), V1Cst, | ||||
11086 | DAG.getBuildVector(IndexVT, DL, | ||||
11087 | makeArrayRef(TBLMask.data(), IndexLen))); | ||||
11088 | } else { | ||||
11089 | // FIXME: We cannot, for the moment, emit a TBL2 instruction because we | ||||
11090 | // cannot currently represent the register constraints on the input | ||||
11091 | // table registers. | ||||
11092 | // Shuffle = DAG.getNode(AArch64ISD::TBL2, DL, IndexVT, V1Cst, V2Cst, | ||||
11093 | // DAG.getBuildVector(IndexVT, DL, &TBLMask[0], | ||||
11094 | // IndexLen)); | ||||
11095 | Shuffle = DAG.getNode( | ||||
11096 | ISD::INTRINSIC_WO_CHAIN, DL, IndexVT, | ||||
11097 | DAG.getConstant(Intrinsic::aarch64_neon_tbl2, DL, MVT::i32), V1Cst, | ||||
11098 | V2Cst, DAG.getBuildVector(IndexVT, DL, | ||||
11099 | makeArrayRef(TBLMask.data(), IndexLen))); | ||||
11100 | } | ||||
11101 | } | ||||
11102 | return DAG.getNode(ISD::BITCAST, DL, Op.getValueType(), Shuffle); | ||||
11103 | } | ||||
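
// Illustrative example (not from the original source): a v8i8 shuffle with
// mask <0, 8, 1, 9, 2, 10, 3, 11> and both inputs defined uses IndexLen == 8,
// so V1 and V2 are concatenated into one v16i8 table and the shuffle becomes
// a single tbl1 with the byte mask <0, 8, 1, 9, 2, 10, 3, 11>.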
11104 | |||||
11105 | static unsigned getDUPLANEOp(EVT EltType) { | ||||
11106 | if (EltType == MVT::i8) | ||||
11107 | return AArch64ISD::DUPLANE8; | ||||
11108 | if (EltType == MVT::i16 || EltType == MVT::f16 || EltType == MVT::bf16) | ||||
11109 | return AArch64ISD::DUPLANE16; | ||||
11110 | if (EltType == MVT::i32 || EltType == MVT::f32) | ||||
11111 | return AArch64ISD::DUPLANE32; | ||||
11112 | if (EltType == MVT::i64 || EltType == MVT::f64) | ||||
11113 | return AArch64ISD::DUPLANE64; | ||||
11114 | |||||
11115 | llvm_unreachable("Invalid vector element type?"); | ||||
11116 | } | ||||
11117 | |||||
11118 | static SDValue constructDup(SDValue V, int Lane, SDLoc dl, EVT VT, | ||||
11119 | unsigned Opcode, SelectionDAG &DAG) { | ||||
11120 | // Try to eliminate a bitcasted extract subvector before a DUPLANE. | ||||
11121 | auto getScaledOffsetDup = [](SDValue BitCast, int &LaneC, MVT &CastVT) { | ||||
11122 | // Match: dup (bitcast (extract_subv X, C)), LaneC | ||||
11123 | if (BitCast.getOpcode() != ISD::BITCAST || | ||||
11124 | BitCast.getOperand(0).getOpcode() != ISD::EXTRACT_SUBVECTOR) | ||||
11125 | return false; | ||||
11126 | |||||
11127 | // The extract index must align in the destination type. That may not | ||||
11128 | // happen if the bitcast is from narrow to wide type. | ||||
11129 | SDValue Extract = BitCast.getOperand(0); | ||||
11130 | unsigned ExtIdx = Extract.getConstantOperandVal(1); | ||||
11131 | unsigned SrcEltBitWidth = Extract.getScalarValueSizeInBits(); | ||||
11132 | unsigned ExtIdxInBits = ExtIdx * SrcEltBitWidth; | ||||
11133 | unsigned CastedEltBitWidth = BitCast.getScalarValueSizeInBits(); | ||||
11134 | if (ExtIdxInBits % CastedEltBitWidth != 0) | ||||
11135 | return false; | ||||
11136 | |||||
11137 | // Can't handle cases where vector size is not 128-bit | ||||
11138 | if (!Extract.getOperand(0).getValueType().is128BitVector()) | ||||
11139 | return false; | ||||
11140 | |||||
11141 | // Update the lane value by offsetting with the scaled extract index. | ||||
11142 | LaneC += ExtIdxInBits / CastedEltBitWidth; | ||||
11143 | |||||
11144 | // Determine the casted vector type of the wide vector input. | ||||
11145 | // dup (bitcast (extract_subv X, C)), LaneC --> dup (bitcast X), LaneC' | ||||
11146 | // Examples: | ||||
11147 | // dup (bitcast (extract_subv v2f64 X, 1) to v2f32), 1 --> dup v4f32 X, 3 | ||||
11148 | // dup (bitcast (extract_subv v16i8 X, 8) to v4i16), 1 --> dup v8i16 X, 5 | ||||
11149 | unsigned SrcVecNumElts = | ||||
11150 | Extract.getOperand(0).getValueSizeInBits() / CastedEltBitWidth; | ||||
11151 | CastVT = MVT::getVectorVT(BitCast.getSimpleValueType().getScalarType(), | ||||
11152 | SrcVecNumElts); | ||||
11153 | return true; | ||||
11154 | }; | ||||
11155 | MVT CastVT; | ||||
11156 | if (getScaledOffsetDup(V, Lane, CastVT)) { | ||||
11157 | V = DAG.getBitcast(CastVT, V.getOperand(0).getOperand(0)); | ||||
11158 | } else if (V.getOpcode() == ISD::EXTRACT_SUBVECTOR && | ||||
11159 | V.getOperand(0).getValueType().is128BitVector()) { | ||||
11160 | // The lane is incremented by the index of the extract. | ||||
11161 | // Example: dup v2f32 (extract v4f32 X, 2), 1 --> dup v4f32 X, 3 | ||||
11162 | Lane += V.getConstantOperandVal(1); | ||||
11163 | V = V.getOperand(0); | ||||
11164 | } else if (V.getOpcode() == ISD::CONCAT_VECTORS) { | ||||
11165 | // The lane is decremented if we are splatting from the 2nd operand. | ||||
11166 | // Example: dup v4i32 (concat v2i32 X, v2i32 Y), 3 --> dup v4i32 Y, 1 | ||||
11167 | unsigned Idx = Lane >= (int)VT.getVectorNumElements() / 2; | ||||
11168 | Lane -= Idx * VT.getVectorNumElements() / 2; | ||||
11169 | V = WidenVector(V.getOperand(Idx), DAG); | ||||
11170 | } else if (VT.getSizeInBits() == 64) { | ||||
11171 | // Widen the operand to 128-bit register with undef. | ||||
11172 | V = WidenVector(V, DAG); | ||||
11173 | } | ||||
11174 | return DAG.getNode(Opcode, dl, VT, V, DAG.getConstant(Lane, dl, MVT::i64)); | ||||
11175 | } | ||||
11176 | |||||
11177 | // Return true if we can get a new shuffle mask by checking the parameter mask | ||||
11178 | // array to test whether every pair of adjacent mask values is consecutive and | ||||
11179 | // starts at an even number. | ||||
11180 | static bool isWideTypeMask(ArrayRef<int> M, EVT VT, | ||||
11181 | SmallVectorImpl<int> &NewMask) { | ||||
11182 | unsigned NumElts = VT.getVectorNumElements(); | ||||
11183 | if (NumElts % 2 != 0) | ||||
11184 | return false; | ||||
11185 | |||||
11186 | NewMask.clear(); | ||||
11187 | for (unsigned i = 0; i < NumElts; i += 2) { | ||||
11188 | int M0 = M[i]; | ||||
11189 | int M1 = M[i + 1]; | ||||
11190 | |||||
11191 | // If both elements are undef, new mask is undef too. | ||||
11192 | if (M0 == -1 && M1 == -1) { | ||||
11193 | NewMask.push_back(-1); | ||||
11194 | continue; | ||||
11195 | } | ||||
11196 | |||||
11197 | if (M0 == -1 && M1 != -1 && (M1 % 2) == 1) { | ||||
11198 | NewMask.push_back(M1 / 2); | ||||
11199 | continue; | ||||
11200 | } | ||||
11201 | |||||
11202 | if (M0 != -1 && (M0 % 2) == 0 && ((M0 + 1) == M1 || M1 == -1)) { | ||||
11203 | NewMask.push_back(M0 / 2); | ||||
11204 | continue; | ||||
11205 | } | ||||
11206 | |||||
11207 | NewMask.clear(); | ||||
11208 | return false; | ||||
11209 | } | ||||
11210 | |||||
11211 | assert(NewMask.size() == NumElts / 2 && "Incorrect size for mask!"); | ||||
11212 | return true; | ||||
11213 | } | ||||
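
// Illustrative example (not from the original source): for v4i32 with mask
// <-1, 5, 0, -1>, the first pair resolves through its odd element to wide
// lane 5 / 2 = 2 and the second through its even element to wide lane 0, so
// NewMask becomes <2, 0> over v2i64.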
11214 | |||||
11215 | // Try to widen element type to get a new mask value for a better permutation | ||||
11216 | // sequence, so that we can use NEON shuffle instructions, such as zip1/2, | ||||
11217 | // UZP1/2, TRN1/2, REV, INS, etc. | ||||
11218 | // For example: | ||||
11219 | // shufflevector <4 x i32> %a, <4 x i32> %b, | ||||
11220 | // <4 x i32> <i32 6, i32 7, i32 2, i32 3> | ||||
11221 | // is equivalent to: | ||||
11222 | // shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 3, i32 1> | ||||
11223 | // Finally, we can get: | ||||
11224 | // mov v0.d[0], v1.d[1] | ||||
11225 | static SDValue tryWidenMaskForShuffle(SDValue Op, SelectionDAG &DAG) { | ||||
11226 | SDLoc DL(Op); | ||||
11227 | EVT VT = Op.getValueType(); | ||||
11228 | EVT ScalarVT = VT.getVectorElementType(); | ||||
11229 | unsigned ElementSize = ScalarVT.getFixedSizeInBits(); | ||||
11230 | SDValue V0 = Op.getOperand(0); | ||||
11231 | SDValue V1 = Op.getOperand(1); | ||||
11232 | ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op)->getMask(); | ||||
11233 | |||||
11234 | // If combining adjacent elements, like two i16's -> i32, two i32's -> i64 ... | ||||
11235 | // We need to make sure the wider element type is legal. Thus, ElementSize | ||||
11236 | // should not be larger than 32 bits, and the i1 type should also be excluded. | ||||
11237 | if (ElementSize > 32 || ElementSize == 1) | ||||
11238 | return SDValue(); | ||||
11239 | |||||
11240 | SmallVector<int, 8> NewMask; | ||||
11241 | if (isWideTypeMask(Mask, VT, NewMask)) { | ||||
11242 | MVT NewEltVT = VT.isFloatingPoint() | ||||
11243 | ? MVT::getFloatingPointVT(ElementSize * 2) | ||||
11244 | : MVT::getIntegerVT(ElementSize * 2); | ||||
11245 | MVT NewVT = MVT::getVectorVT(NewEltVT, VT.getVectorNumElements() / 2); | ||||
11246 | if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) { | ||||
11247 | V0 = DAG.getBitcast(NewVT, V0); | ||||
11248 | V1 = DAG.getBitcast(NewVT, V1); | ||||
11249 | return DAG.getBitcast(VT, | ||||
11250 | DAG.getVectorShuffle(NewVT, DL, V0, V1, NewMask)); | ||||
11251 | } | ||||
11252 | } | ||||
11253 | |||||
11254 | return SDValue(); | ||||
11255 | } | ||||
11256 | |||||
11257 | // Try to fold shuffle (tbl2, tbl2) into a single tbl4. | ||||
11258 | static SDValue tryToConvertShuffleOfTbl2ToTbl4(SDValue Op, | ||||
11259 | ArrayRef<int> ShuffleMask, | ||||
11260 | SelectionDAG &DAG) { | ||||
11261 | SDValue Tbl1 = Op->getOperand(0); | ||||
11262 | SDValue Tbl2 = Op->getOperand(1); | ||||
11263 | SDLoc dl(Op); | ||||
11264 | SDValue Tbl2ID = | ||||
11265 | DAG.getTargetConstant(Intrinsic::aarch64_neon_tbl2, dl, MVT::i64); | ||||
11266 | |||||
11267 | EVT VT = Op.getValueType(); | ||||
11268 | if (Tbl1->getOpcode() != ISD::INTRINSIC_WO_CHAIN || | ||||
11269 | Tbl1->getOperand(0) != Tbl2ID || | ||||
11270 | Tbl2->getOpcode() != ISD::INTRINSIC_WO_CHAIN || | ||||
11271 | Tbl2->getOperand(0) != Tbl2ID) | ||||
11272 | return SDValue(); | ||||
11273 | |||||
11274 | if (Tbl1->getValueType(0) != MVT::v16i8 || | ||||
11275 | Tbl2->getValueType(0) != MVT::v16i8) | ||||
11276 | return SDValue(); | ||||
11277 | |||||
11278 | SDValue Mask1 = Tbl1->getOperand(3); | ||||
11279 | SDValue Mask2 = Tbl2->getOperand(3); | ||||
11280 | SmallVector<SDValue, 16> TBLMaskParts(16, SDValue()); | ||||
11281 | for (unsigned I = 0; I < 16; I++) { | ||||
11282 | if (ShuffleMask[I] < 16) | ||||
11283 | TBLMaskParts[I] = Mask1->getOperand(ShuffleMask[I]); | ||||
11284 | else { | ||||
11285 | auto *C = | ||||
11286 | dyn_cast<ConstantSDNode>(Mask2->getOperand(ShuffleMask[I] - 16)); | ||||
11287 | if (!C) | ||||
11288 | return SDValue(); | ||||
11289 | TBLMaskParts[I] = DAG.getConstant(C->getSExtValue() + 32, dl, MVT::i32); | ||||
11290 | } | ||||
11291 | } | ||||
11292 | |||||
11293 | SDValue TBLMask = DAG.getBuildVector(VT, dl, TBLMaskParts); | ||||
11294 | SDValue ID = | ||||
11295 | DAG.getTargetConstant(Intrinsic::aarch64_neon_tbl4, dl, MVT::i64); | ||||
11296 | |||||
11297 | return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v16i8, | ||||
11298 | {ID, Tbl1->getOperand(1), Tbl1->getOperand(2), | ||||
11299 | Tbl2->getOperand(1), Tbl2->getOperand(2), TBLMask}); | ||||
11300 | } | ||||
11301 | |||||
11302 | SDValue AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, | ||||
11303 | SelectionDAG &DAG) const { | ||||
11304 | SDLoc dl(Op); | ||||
11305 | EVT VT = Op.getValueType(); | ||||
11306 | |||||
11307 | ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode()); | ||||
11308 | |||||
11309 | if (useSVEForFixedLengthVectorVT(VT, | ||||
11310 | Subtarget->forceStreamingCompatibleSVE())) | ||||
11311 | return LowerFixedLengthVECTOR_SHUFFLEToSVE(Op, DAG); | ||||
11312 | |||||
11313 | // Convert shuffles that are directly supported on NEON to target-specific | ||||
11314 | // DAG nodes, instead of keeping them as shuffles and matching them again | ||||
11315 | // during code selection. This is more efficient and avoids the possibility | ||||
11316 | // of inconsistencies between legalization and selection. | ||||
11317 | ArrayRef<int> ShuffleMask = SVN->getMask(); | ||||
11318 | |||||
11319 | SDValue V1 = Op.getOperand(0); | ||||
11320 | SDValue V2 = Op.getOperand(1); | ||||
11321 | |||||
11322 | assert(V1.getValueType() == VT && "Unexpected VECTOR_SHUFFLE type!"); | ||||
11323 | assert(ShuffleMask.size() == VT.getVectorNumElements() && | ||||
11324 |        "Unexpected VECTOR_SHUFFLE mask size!"); | ||||
11325 | |||||
11326 | if (SDValue Res = tryToConvertShuffleOfTbl2ToTbl4(Op, ShuffleMask, DAG)) | ||||
11327 | return Res; | ||||
11328 | |||||
11329 | if (SVN->isSplat()) { | ||||
11330 | int Lane = SVN->getSplatIndex(); | ||||
11331 | // If this is an undef splat, generate it via a plain DUP, if possible. | ||||
11332 | if (Lane == -1) | ||||
11333 | Lane = 0; | ||||
11334 | |||||
11335 | if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) | ||||
11336 | return DAG.getNode(AArch64ISD::DUP, dl, V1.getValueType(), | ||||
11337 | V1.getOperand(0)); | ||||
11338 | // Test if V1 is a BUILD_VECTOR and the lane being referenced is a non- | ||||
11339 | // constant. If so, we can just reference the lane's definition directly. | ||||
11340 | if (V1.getOpcode() == ISD::BUILD_VECTOR && | ||||
11341 | !isa<ConstantSDNode>(V1.getOperand(Lane))) | ||||
11342 | return DAG.getNode(AArch64ISD::DUP, dl, VT, V1.getOperand(Lane)); | ||||
11343 | |||||
11344 | // Otherwise, duplicate from the lane of the input vector. | ||||
11345 | unsigned Opcode = getDUPLANEOp(V1.getValueType().getVectorElementType()); | ||||
11346 | return constructDup(V1, Lane, dl, VT, Opcode, DAG); | ||||
11347 | } | ||||
11348 | |||||
11349 | // Check if the mask matches a DUP for a wider element | ||||
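// E.g. a v8i8 mask <0,1,2,3,0,1,2,3> is a DUPLANE32 of lane 0 once the
// input is reinterpreted as v2i32.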
11350 | for (unsigned LaneSize : {64U, 32U, 16U}) { | ||||
11351 | unsigned Lane = 0; | ||||
11352 | if (isWideDUPMask(ShuffleMask, VT, LaneSize, Lane)) { | ||||
11353 | unsigned Opcode = LaneSize == 64 ? AArch64ISD::DUPLANE64 | ||||
11354 | : LaneSize == 32 ? AArch64ISD::DUPLANE32 | ||||
11355 | : AArch64ISD::DUPLANE16; | ||||
11356 | // Cast V1 to an integer vector with required lane size | ||||
11357 | MVT NewEltTy = MVT::getIntegerVT(LaneSize); | ||||
11358 | unsigned NewEltCount = VT.getSizeInBits() / LaneSize; | ||||
11359 | MVT NewVecTy = MVT::getVectorVT(NewEltTy, NewEltCount); | ||||
11360 | V1 = DAG.getBitcast(NewVecTy, V1); | ||||
11361 | // Construct the DUP instruction | ||||
11362 | V1 = constructDup(V1, Lane, dl, NewVecTy, Opcode, DAG); | ||||
11363 | // Cast back to the original type | ||||
11364 | return DAG.getBitcast(VT, V1); | ||||
11365 | } | ||||
11366 | } | ||||
11367 | |||||
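// REVnn reverses the element order within each nn-bit chunk; e.g. for
// v16i8, isREVMask(..., 64) matches <7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8>.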
11368 | if (isREVMask(ShuffleMask, VT, 64)) | ||||
11369 | return DAG.getNode(AArch64ISD::REV64, dl, V1.getValueType(), V1, V2); | ||||
11370 | if (isREVMask(ShuffleMask, VT, 32)) | ||||
11371 | return DAG.getNode(AArch64ISD::REV32, dl, V1.getValueType(), V1, V2); | ||||
11372 | if (isREVMask(ShuffleMask, VT, 16)) | ||||
11373 | return DAG.getNode(AArch64ISD::REV16, dl, V1.getValueType(), V1, V2); | ||||
11374 | |||||
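// A full reverse of a 128-bit vector has no single instruction: REV64
// reverses within each 64-bit half, then EXT #8 swaps the two halves.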
11375 | if (((VT.getVectorNumElements() == 8 && VT.getScalarSizeInBits() == 16) || | ||||
11376 | (VT.getVectorNumElements() == 16 && VT.getScalarSizeInBits() == 8)) && | ||||
11377 | ShuffleVectorInst::isReverseMask(ShuffleMask)) { | ||||
11378 | SDValue Rev = DAG.getNode(AArch64ISD::REV64, dl, VT, V1); | ||||
11379 | return DAG.getNode(AArch64ISD::EXT, dl, VT, Rev, Rev, | ||||
11380 | DAG.getConstant(8, dl, MVT::i32)); | ||||
11381 | } | ||||
11382 | |||||
11383 | bool ReverseEXT = false; | ||||
11384 | unsigned Imm; | ||||
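// EXT's immediate is a byte offset, so the element index from the mask is
// scaled below by the element size in bytes (getExtFactor).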
11385 | if (isEXTMask(ShuffleMask, VT, ReverseEXT, Imm)) { | ||||
11386 | if (ReverseEXT) | ||||
11387 | std::swap(V1, V2); | ||||
11388 | Imm *= getExtFactor(V1); | ||||
11389 | return DAG.getNode(AArch64ISD::EXT, dl, V1.getValueType(), V1, V2, | ||||
11390 | DAG.getConstant(Imm, dl, MVT::i32)); | ||||
11391 | } else if (V2->isUndef() && isSingletonEXTMask(ShuffleMask, VT, Imm)) { | ||||
11392 | Imm *= getExtFactor(V1); | ||||
11393 | return DAG.getNode(AArch64ISD::EXT, dl, V1.getValueType(), V1, V1, | ||||
11394 | DAG.getConstant(Imm, dl, MVT::i32)); | ||||
11395 | } | ||||
11396 | |||||
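// For v4i32: ZIP1 is <0,4,1,5> and ZIP2 <2,6,3,7>; UZP1 <0,2,4,6> and UZP2
// <1,3,5,7>; TRN1 <0,4,2,6> and TRN2 <1,5,3,7>.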
11397 | unsigned WhichResult; | ||||
11398 | if (isZIPMask(ShuffleMask, VT, WhichResult)) { | ||||
11399 | unsigned Opc = (WhichResult == 0) ? AArch64ISD::ZIP1 : AArch64ISD::ZIP2; | ||||
11400 | return DAG.getNode(Opc, dl, V1.getValueType(), V1, V2); | ||||
11401 | } | ||||
11402 | if (isUZPMask(ShuffleMask, VT, WhichResult)) { | ||||
11403 | unsigned Opc = (WhichResult == 0) ? AArch64ISD::UZP1 : AArch64ISD::UZP2; | ||||
11404 | return DAG.getNode(Opc, dl, V1.getValueType(), V1, V2); | ||||
11405 | } | ||||
11406 | if (isTRNMask(ShuffleMask, VT, WhichResult)) { | ||||
11407 | unsigned Opc = (WhichResult == 0) ? AArch64ISD::TRN1 : AArch64ISD::TRN2; | ||||
11408 | return DAG.getNode(Opc, dl, V1.getValueType(), V1, V2); | ||||
11409 | } | ||||
11410 | |||||
11411 | if (isZIP_v_undef_Mask(ShuffleMask, VT, WhichResult)) { | ||||
11412 | unsigned Opc = (WhichResult == 0) ? AArch64ISD::ZIP1 : AArch64ISD::ZIP2; | ||||
11413 | return DAG.getNode(Opc, dl, V1.getValueType(), V1, V1); | ||||
11414 | } | ||||
11415 | if (isUZP_v_undef_Mask(ShuffleMask, VT, WhichResult)) { | ||||
11416 | unsigned Opc = (WhichResult == 0) ? AArch64ISD::UZP1 : AArch64ISD::UZP2; | ||||
11417 | return DAG.getNode(Opc, dl, V1.getValueType(), V1, V1); | ||||
11418 | } | ||||
11419 | if (isTRN_v_undef_Mask(ShuffleMask, VT, WhichResult)) { | ||||
11420 | unsigned Opc = (WhichResult == 0) ? AArch64ISD::TRN1 : AArch64ISD::TRN2; | ||||
11421 | return DAG.getNode(Opc, dl, V1.getValueType(), V1, V1); | ||||
11422 | } | ||||
11423 | |||||
11424 | if (SDValue Concat = tryFormConcatFromShuffle(Op, DAG)) | ||||
11425 | return Concat; | ||||
11426 | |||||
11427 | bool DstIsLeft; | ||||
11428 | int Anomaly; | ||||
11429 | int NumInputElements = V1.getValueType().getVectorNumElements(); | ||||
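// isINSMask matches a mask that is the identity on one input apart from a
// single element (the "Anomaly"), e.g. <0,1,6,3> for v4i32; that element is
// lowered to an INS (insert lane) below.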
11430 | if (isINSMask(ShuffleMask, NumInputElements, DstIsLeft, Anomaly)) { | ||||
11431 | SDValue DstVec = DstIsLeft ? V1 : V2; | ||||
11432 | SDValue DstLaneV = DAG.getConstant(Anomaly, dl, MVT::i64); | ||||
11433 | |||||
11434 | SDValue SrcVec = V1; | ||||
11435 | int SrcLane = ShuffleMask[Anomaly]; | ||||
11436 | if (SrcLane >= NumInputElements) { | ||||
11437 | SrcVec = V2; | ||||
11438 | SrcLane -= VT.getVectorNumElements(); | ||||
11439 | } | ||||
11440 | SDValue SrcLaneV = DAG.getConstant(SrcLane, dl, MVT::i64); | ||||
11441 | |||||
11442 | EVT ScalarVT = VT.getVectorElementType(); | ||||
11443 | |||||
11444 | if (ScalarVT.getFixedSizeInBits() < 32 && ScalarVT.isInteger()) | ||||
11445 | ScalarVT = MVT::i32; | ||||
11446 | |||||
11447 | return DAG.getNode( | ||||
11448 | ISD::INSERT_VECTOR_ELT, dl, VT, DstVec, | ||||
11449 | DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, SrcVec, SrcLaneV), | ||||
11450 | DstLaneV); | ||||
11451 | } | ||||
11452 | |||||
11453 | if (SDValue NewSD = tryWidenMaskForShuffle(Op, DAG)) | ||||
11454 | return NewSD; | ||||
11455 | |||||
11456 | // If the shuffle is not directly supported and it has 4 elements, use | ||||
11457 | // the PerfectShuffle-generated table to synthesize it from other shuffles. | ||||
11458 | unsigned NumElts = VT.getVectorNumElements(); | ||||
11459 | if (NumElts == 4) { | ||||
11460 | unsigned PFIndexes[4]; | ||||
11461 | for (unsigned i = 0; i != 4; ++i) { | ||||
11462 | if (ShuffleMask[i] < 0) | ||||
11463 | PFIndexes[i] = 8; | ||||
11464 | else | ||||
11465 | PFIndexes[i] = ShuffleMask[i]; | ||||
11466 | } | ||||
11467 | |||||
11468 | // Compute the index in the perfect shuffle table. | ||||
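// Each mask element is a base-9 digit: lane 0..7, or 8 for undef.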
11469 | unsigned PFTableIndex = PFIndexes[0] * 9 * 9 * 9 + PFIndexes[1] * 9 * 9 + | ||||
11470 | PFIndexes[2] * 9 + PFIndexes[3]; | ||||
11471 | unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; | ||||
11472 | return GeneratePerfectShuffle(PFTableIndex, V1, V2, PFEntry, V1, V2, DAG, | ||||
11473 | dl); | ||||
11474 | } | ||||
11475 | |||||
11476 | return GenerateTBL(Op, ShuffleMask, DAG); | ||||
11477 | } | ||||
11478 | |||||
11479 | SDValue AArch64TargetLowering::LowerSPLAT_VECTOR(SDValue Op, | ||||
11480 | SelectionDAG &DAG) const { | ||||
11481 | EVT VT = Op.getValueType(); | ||||
11482 | |||||
11483 | if (useSVEForFixedLengthVectorVT(VT, | ||||
11484 | Subtarget->forceStreamingCompatibleSVE())) | ||||
11485 | return LowerToScalableOp(Op, DAG); | ||||
11486 | |||||
11487 | assert(VT.isScalableVector() && VT.getVectorElementType() == MVT::i1 && | ||||
11488 |        "Unexpected vector type!"); | ||||
11489 | |||||
11490 | // We can handle the constant cases during isel. | ||||
11491 | if (isa<ConstantSDNode>(Op.getOperand(0))) | ||||
11492 | return Op; | ||||
11493 | |||||
11494 | // There isn't a natural way to handle the general i1 case, so we use some | ||||
11495 | // trickery with whilelo. | ||||
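// Sign-extending the i1 splat value gives 0 or all-ones in an i64, and
// whilelo(0, 0) produces an all-false predicate while whilelo(0, ~0ULL)
// produces an all-true one, which is exactly the splat we need.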
11496 | SDLoc DL(Op); | ||||
11497 | SDValue SplatVal = DAG.getAnyExtOrTrunc(Op.getOperand(0), DL, MVT::i64); | ||||
11498 | SplatVal = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, SplatVal, | ||||
11499 | DAG.getValueType(MVT::i1)); | ||||
11500 | SDValue ID = | ||||
11501 | DAG.getTargetConstant(Intrinsic::aarch64_sve_whilelo, DL, MVT::i64); | ||||
11502 | SDValue Zero = DAG.getConstant(0, DL, MVT::i64); | ||||
11503 | if (VT == MVT::nxv1i1) | ||||
11504 | return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::nxv1i1, | ||||
11505 | DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::nxv2i1, ID, | ||||
11506 | Zero, SplatVal), | ||||
11507 | Zero); | ||||
11508 | return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, ID, Zero, SplatVal); | ||||
11509 | } | ||||
11510 | |||||
11511 | SDValue AArch64TargetLowering::LowerDUPQLane(SDValue Op, | ||||
11512 | SelectionDAG &DAG) const { | ||||
11513 | SDLoc DL(Op); | ||||
11514 | |||||
11515 | EVT VT = Op.getValueType(); | ||||
11516 | if (!isTypeLegal(VT) || !VT.isScalableVector()) | ||||
11517 | return SDValue(); | ||||
11518 | |||||
11519 | // Current lowering only supports the SVE-ACLE types. | ||||
11520 | if (VT.getSizeInBits().getKnownMinSize() != AArch64::SVEBitsPerBlock) | ||||
11521 | return SDValue(); | ||||
11522 | |||||
11523 | // The DUPQ operation is independent of element type so normalise to i64s. | ||||
11524 | SDValue Idx128 = Op.getOperand(2); | ||||
11525 | |||||
11526 | // DUPQ can be used when idx is in range. | ||||
11527 | auto *CIdx = dyn_cast<ConstantSDNode>(Idx128); | ||||
11528 | if (CIdx && (CIdx->getZExtValue() <= 3)) { | ||||
11529 | SDValue CI = DAG.getTargetConstant(CIdx->getZExtValue(), DL, MVT::i64); | ||||
11530 | return DAG.getNode(AArch64ISD::DUPLANE128, DL, VT, Op.getOperand(1), CI); | ||||
11531 | } | ||||
11532 | |||||
11533 | SDValue V = DAG.getNode(ISD::BITCAST, DL, MVT::nxv2i64, Op.getOperand(1)); | ||||
11534 | |||||
11535 | // The ACLE says this must produce the same result as: | ||||
11536 | // svtbl(data, svadd_x(svptrue_b64(), | ||||
11537 | // svand_x(svptrue_b64(), svindex_u64(0, 1), 1), | ||||
11538 | // index * 2)) | ||||
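// E.g. for index 1 the mask built below is <2,3,2,3,...>, so the TBL
// replicates the i64 pair that forms the second 128-bit quadword.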
11539 | SDValue One = DAG.getConstant(1, DL, MVT::i64); | ||||
11540 | SDValue SplatOne = DAG.getNode(ISD::SPLAT_VECTOR, DL, MVT::nxv2i64, One); | ||||
11541 | |||||
11542 | // create the vector 0,1,0,1,... | ||||
11543 | SDValue SV = DAG.getStepVector(DL, MVT::nxv2i64); | ||||
11544 | SV = DAG.getNode(ISD::AND, DL, MVT::nxv2i64, SV, SplatOne); | ||||
11545 | |||||
11546 | // create the vector idx64,idx64+1,idx64,idx64+1,... | ||||
11547 | SDValue Idx64 = DAG.getNode(ISD::ADD, DL, MVT::i64, Idx128, Idx128); | ||||
11548 | SDValue SplatIdx64 = DAG.getNode(ISD::SPLAT_VECTOR, DL, MVT::nxv2i64, Idx64); | ||||
11549 | SDValue ShuffleMask = DAG.getNode(ISD::ADD, DL, MVT::nxv2i64, SV, SplatIdx64); | ||||
11550 | |||||
11551 | // create the vector Val[idx64],Val[idx64+1],Val[idx64],Val[idx64+1],... | ||||
11552 | SDValue TBL = DAG.getNode(AArch64ISD::TBL, DL, MVT::nxv2i64, V, ShuffleMask); | ||||
11553 | return DAG.getNode(ISD::BITCAST, DL, VT, TBL); | ||||
11554 | } | ||||
11555 | |||||
11556 | |||||
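// Expand a BUILD_VECTOR splat into full-width constant/undef bit masks by
// replicating the splat value across the whole vector; helper for the
// AdvSIMD modified-immediate lowerings below.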
11557 | static bool resolveBuildVector(BuildVectorSDNode *BVN, APInt &CnstBits, | ||||
11558 | APInt &UndefBits) { | ||||
11559 | EVT VT = BVN->getValueType(0); | ||||
11560 | APInt SplatBits, SplatUndef; | ||||
11561 | unsigned SplatBitSize; | ||||
11562 | bool HasAnyUndefs; | ||||
11563 | if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { | ||||
11564 | unsigned NumSplats = VT.getSizeInBits() / SplatBitSize; | ||||
11565 | |||||
11566 | for (unsigned i = 0; i < NumSplats; ++i) { | ||||
11567 | CnstBits <<= SplatBitSize; | ||||
11568 | UndefBits <<= SplatBitSize; | ||||
11569 | CnstBits |= SplatBits.zextOrTrunc(VT.getSizeInBits()); | ||||
11570 | UndefBits |= (SplatBits ^ SplatUndef).zextOrTrunc(VT.getSizeInBits()); | ||||
11571 | } | ||||
11572 | |||||
11573 | return true; | ||||
11574 | } | ||||
11575 | |||||
11576 | return false; | ||||
11577 | } | ||||
11578 | |||||
11579 | // Try 64-bit splatted SIMD immediate. | ||||
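// ModImmType10 is the MOVI byte-mask form: each bit of the 8-bit immediate
// expands to an all-ones or all-zeros byte of the 64-bit pattern.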
11580 | static SDValue tryAdvSIMDModImm64(unsigned NewOp, SDValue Op, SelectionDAG &DAG, | ||||
11581 | const APInt &Bits) { | ||||
11582 | if (Bits.getHiBits(64) == Bits.getLoBits(64)) { | ||||
11583 | uint64_t Value = Bits.zextOrTrunc(64).getZExtValue(); | ||||
11584 | EVT VT = Op.getValueType(); | ||||
11585 | MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v2i64 : MVT::f64; | ||||
11586 | |||||
11587 | if (AArch64_AM::isAdvSIMDModImmType10(Value)) { | ||||
11588 | Value = AArch64_AM::encodeAdvSIMDModImmType10(Value); | ||||
11589 | |||||
11590 | SDLoc dl(Op); | ||||
11591 | SDValue Mov = DAG.getNode(NewOp, dl, MovTy, | ||||
11592 | DAG.getConstant(Value, dl, MVT::i32)); | ||||
11593 | return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov); | ||||
11594 | } | ||||
11595 | } | ||||
11596 | |||||
11597 | return SDValue(); | ||||
11598 | } | ||||
11599 | |||||
11600 | // Try 32-bit splatted SIMD immediate. | ||||
11601 | static SDValue tryAdvSIMDModImm32(unsigned NewOp, SDValue Op, SelectionDAG &DAG, | ||||
11602 | const APInt &Bits, | ||||
11603 | const SDValue *LHS = nullptr) { | ||||
11604 | EVT VT = Op.getValueType(); | ||||
11605 | if (VT.isFixedLengthVector() && | ||||
11606 | DAG.getSubtarget<AArch64Subtarget>().forceStreamingCompatibleSVE()) | ||||
11607 | return SDValue(); | ||||
11608 | |||||
11609 | if (Bits.getHiBits(64) == Bits.getLoBits(64)) { | ||||
11610 | uint64_t Value = Bits.zextOrTrunc(64).getZExtValue(); | ||||
11611 | MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32; | ||||
11612 | bool isAdvSIMDModImm = false; | ||||
11613 | uint64_t Shift; | ||||
11614 | |||||
11615 | if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType1(Value))) { | ||||
11616 | Value = AArch64_AM::encodeAdvSIMDModImmType1(Value); | ||||
11617 | Shift = 0; | ||||
11618 | } | ||||
11619 | else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType2(Value))) { | ||||
11620 | Value = AArch64_AM::encodeAdvSIMDModImmType2(Value); | ||||
11621 | Shift = 8; | ||||
11622 | } | ||||
11623 | else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType3(Value))) { | ||||
11624 | Value = AArch64_AM::encodeAdvSIMDModImmType3(Value); | ||||
11625 | Shift = 16; | ||||
11626 | } | ||||
11627 | else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType4(Value))) { | ||||
11628 | Value = AArch64_AM::encodeAdvSIMDModImmType4(Value); | ||||
11629 | Shift = 24; | ||||
11630 | } | ||||
11631 | |||||
11632 | if (isAdvSIMDModImm) { | ||||
11633 | SDLoc dl(Op); | ||||
11634 | SDValue Mov; | ||||
11635 | |||||
11636 | if (LHS) | ||||
11637 | Mov = DAG.getNode(NewOp, dl, MovTy, *LHS, | ||||
11638 | DAG.getConstant(Value, dl, MVT::i32), | ||||
11639 | DAG.getConstant(Shift, dl, MVT::i32)); | ||||
11640 | else | ||||
11641 | Mov = DAG.getNode(NewOp, dl, MovTy, | ||||
11642 | DAG.getConstant(Value, dl, MVT::i32), | ||||
11643 | DAG.getConstant(Shift, dl, MVT::i32)); | ||||
11644 | |||||
11645 | return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov); | ||||
11646 | } | ||||
11647 | } | ||||
11648 | |||||
11649 | return SDValue(); | ||||
11650 | } | ||||
11651 | |||||
11652 | // Try 16-bit splatted SIMD immediate. | ||||
11653 | static SDValue tryAdvSIMDModImm16(unsigned NewOp, SDValue Op, SelectionDAG &DAG, | ||||
11654 | const APInt &Bits, | ||||
11655 | const SDValue *LHS = nullptr) { | ||||
11656 | EVT VT = Op.getValueType(); | ||||
11657 | if (VT.isFixedLengthVector() && | ||||
11658 | DAG.getSubtarget<AArch64Subtarget>().forceStreamingCompatibleSVE()) | ||||
11659 | return SDValue(); | ||||
11660 | |||||
11661 | if (Bits.getHiBits(64) == Bits.getLoBits(64)) { | ||||
11662 | uint64_t Value = Bits.zextOrTrunc(64).getZExtValue(); | ||||
11663 | MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v8i16 : MVT::v4i16; | ||||
11664 | bool isAdvSIMDModImm = false; | ||||
11665 | uint64_t Shift; | ||||
11666 | |||||
11667 | if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType5(Value))) { | ||||
11668 | Value = AArch64_AM::encodeAdvSIMDModImmType5(Value); | ||||
11669 | Shift = 0; | ||||
11670 | } | ||||
11671 | else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType6(Value))) { | ||||
11672 | Value = AArch64_AM::encodeAdvSIMDModImmType6(Value); | ||||
11673 | Shift = 8; | ||||
11674 | } | ||||
11675 | |||||
11676 | if (isAdvSIMDModImm) { | ||||
11677 | SDLoc dl(Op); | ||||
11678 | SDValue Mov; | ||||
11679 | |||||
11680 | if (LHS) | ||||
11681 | Mov = DAG.getNode(NewOp, dl, MovTy, *LHS, | ||||
11682 | DAG.getConstant(Value, dl, MVT::i32), | ||||
11683 | DAG.getConstant(Shift, dl, MVT::i32)); | ||||
11684 | else | ||||
11685 | Mov = DAG.getNode(NewOp, dl, MovTy, | ||||
11686 | DAG.getConstant(Value, dl, MVT::i32), | ||||
11687 | DAG.getConstant(Shift, dl, MVT::i32)); | ||||
11688 | |||||
11689 | return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov); | ||||
11690 | } | ||||
11691 | } | ||||
11692 | |||||
11693 | return SDValue(); | ||||
11694 | } | ||||
11695 | |||||
11696 | // Try 32-bit splatted SIMD immediate with shifted ones. | ||||
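// Type7 matches the 0x__FF pattern and Type8 the 0x_FFFF pattern; the
// unusual shift operands 264 and 272 are what the MOVImsl patterns expect
// for the MSL #8 and MSL #16 "ones shifted in" forms.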
11697 | static SDValue tryAdvSIMDModImm321s(unsigned NewOp, SDValue Op, | ||||
11698 | SelectionDAG &DAG, const APInt &Bits) { | ||||
11699 | if (Bits.getHiBits(64) == Bits.getLoBits(64)) { | ||||
11700 | uint64_t Value = Bits.zextOrTrunc(64).getZExtValue(); | ||||
11701 | EVT VT = Op.getValueType(); | ||||
11702 | MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32; | ||||
11703 | bool isAdvSIMDModImm = false; | ||||
11704 | uint64_t Shift; | ||||
11705 | |||||
11706 | if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType7(Value))) { | ||||
11707 | Value = AArch64_AM::encodeAdvSIMDModImmType7(Value); | ||||
11708 | Shift = 264; | ||||
11709 | } | ||||
11710 | else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType8(Value))) { | ||||
11711 | Value = AArch64_AM::encodeAdvSIMDModImmType8(Value); | ||||
11712 | Shift = 272; | ||||
11713 | } | ||||
11714 | |||||
11715 | if (isAdvSIMDModImm) { | ||||
11716 | SDLoc dl(Op); | ||||
11717 | SDValue Mov = DAG.getNode(NewOp, dl, MovTy, | ||||
11718 | DAG.getConstant(Value, dl, MVT::i32), | ||||
11719 | DAG.getConstant(Shift, dl, MVT::i32)); | ||||
11720 | return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov); | ||||
11721 | } | ||||
11722 | } | ||||
11723 | |||||
11724 | return SDValue(); | ||||
11725 | } | ||||
11726 | |||||
11727 | // Try 8-bit splatted SIMD immediate. | ||||
11728 | static SDValue tryAdvSIMDModImm8(unsigned NewOp, SDValue Op, SelectionDAG &DAG, | ||||
11729 | const APInt &Bits) { | ||||
11730 | if (Bits.getHiBits(64) == Bits.getLoBits(64)) { | ||||
11731 | uint64_t Value = Bits.zextOrTrunc(64).getZExtValue(); | ||||
11732 | EVT VT = Op.getValueType(); | ||||
11733 | MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v16i8 : MVT::v8i8; | ||||
11734 | |||||
11735 | if (AArch64_AM::isAdvSIMDModImmType9(Value)) { | ||||
11736 | Value = AArch64_AM::encodeAdvSIMDModImmType9(Value); | ||||
11737 | |||||
11738 | SDLoc dl(Op); | ||||
11739 | SDValue Mov = DAG.getNode(NewOp, dl, MovTy, | ||||
11740 | DAG.getConstant(Value, dl, MVT::i32)); | ||||
11741 | return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov); | ||||
11742 | } | ||||
11743 | } | ||||
11744 | |||||
11745 | return SDValue(); | ||||
11746 | } | ||||
11747 | |||||
11748 | // Try FP splatted SIMD immediate. | ||||
11749 | static SDValue tryAdvSIMDModImmFP(unsigned NewOp, SDValue Op, SelectionDAG &DAG, | ||||
11750 | const APInt &Bits) { | ||||
11751 | if (Bits.getHiBits(64) == Bits.getLoBits(64)) { | ||||
11752 | uint64_t Value = Bits.zextOrTrunc(64).getZExtValue(); | ||||
11753 | EVT VT = Op.getValueType(); | ||||
11754 | bool isWide = (VT.getSizeInBits() == 128); | ||||
11755 | MVT MovTy; | ||||
11756 | bool isAdvSIMDModImm = false; | ||||
11757 | |||||
11758 | if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType11(Value))) { | ||||
11759 | Value = AArch64_AM::encodeAdvSIMDModImmType11(Value); | ||||
11760 | MovTy = isWide ? MVT::v4f32 : MVT::v2f32; | ||||
11761 | } | ||||
11762 | else if (isWide && | ||||
11763 | (isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType12(Value))) { | ||||
11764 | Value = AArch64_AM::encodeAdvSIMDModImmType12(Value); | ||||
11765 | MovTy = MVT::v2f64; | ||||
11766 | } | ||||
11767 | |||||
11768 | if (isAdvSIMDModImm) { | ||||
11769 | SDLoc dl(Op); | ||||
11770 | SDValue Mov = DAG.getNode(NewOp, dl, MovTy, | ||||
11771 | DAG.getConstant(Value, dl, MVT::i32)); | ||||
11772 | return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov); | ||||
11773 | } | ||||
11774 | } | ||||
11775 | |||||
11776 | return SDValue(); | ||||
11777 | } | ||||
11778 | |||||
11779 | // Specialized code to quickly find if PotentialBVec is a BuildVector that | ||||
11780 | // consists of only the same constant int value, which is returned in the | ||||
11781 | // reference argument ConstVal. | ||||
11782 | static bool isAllConstantBuildVector(const SDValue &PotentialBVec, | ||||
11783 | uint64_t &ConstVal) { | ||||
11784 | BuildVectorSDNode *Bvec = dyn_cast<BuildVectorSDNode>(PotentialBVec); | ||||
11785 | if (!Bvec) | ||||
11786 | return false; | ||||
11787 | ConstantSDNode *FirstElt = dyn_cast<ConstantSDNode>(Bvec->getOperand(0)); | ||||
11788 | if (!FirstElt) | ||||
11789 | return false; | ||||
11790 | EVT VT = Bvec->getValueType(0); | ||||
11791 | unsigned NumElts = VT.getVectorNumElements(); | ||||
11792 | for (unsigned i = 1; i < NumElts; ++i) | ||||
11793 | if (dyn_cast<ConstantSDNode>(Bvec->getOperand(i)) != FirstElt) | ||||
11794 | return false; | ||||
11795 | ConstVal = FirstElt->getZExtValue(); | ||||
11796 | return true; | ||||
11797 | } | ||||
11798 | |||||
11799 | // Attempt to lower (or (and X, BvecC1), (lsl Y, C2)) to a vector S[LR]I, | ||||
11800 | // i.e. (SLI X, Y, C2), where X and Y have matching vector types, BvecC1 is | ||||
11801 | // a BUILD_VECTOR with constant element C1, C2 is a constant, and: | ||||
11802 | // - for the SLI case: C1 == ~(Ones(ElemSizeInBits) << C2) | ||||
11803 | // - for the SRI case: C1 == ~(Ones(ElemSizeInBits) >> C2) | ||||
11804 | // The (or (lsl Y, C2), (and X, BvecC1)) case is also handled. | ||||
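// E.g. with 8-bit elements and C2 == 4, C1 must be 0x0f, and
//   (or (and X, 0x0f), (shl Y, 4))  ==>  (SLI X, Y, 4)
// which keeps X's low nibble and shifts Y's low nibble into the high one.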
11805 | static SDValue tryLowerToSLI(SDNode *N, SelectionDAG &DAG) { | ||||
11806 | EVT VT = N->getValueType(0); | ||||
11807 | |||||
11808 | if (!VT.isVector()) | ||||
11809 | return SDValue(); | ||||
11810 | |||||
11811 | SDLoc DL(N); | ||||
11812 | |||||
11813 | SDValue And; | ||||
11814 | SDValue Shift; | ||||
11815 | |||||
11816 | SDValue FirstOp = N->getOperand(0); | ||||
11817 | unsigned FirstOpc = FirstOp.getOpcode(); | ||||
11818 | SDValue SecondOp = N->getOperand(1); | ||||
11819 | unsigned SecondOpc = SecondOp.getOpcode(); | ||||
11820 | |||||
11821 | // Is one of the operands an AND or a BICi? The AND may have been optimised to | ||||
11822 | // a BICi in order to use an immediate instead of a register. | ||||
11823 | // Is the other operand a shl or lshr? This will have been turned into: | ||||
11824 | // AArch64ISD::VSHL vector, #shift or AArch64ISD::VLSHR vector, #shift. | ||||
11825 | if ((FirstOpc == ISD::AND || FirstOpc == AArch64ISD::BICi) && | ||||
11826 | (SecondOpc == AArch64ISD::VSHL || SecondOpc == AArch64ISD::VLSHR)) { | ||||
11827 | And = FirstOp; | ||||
11828 | Shift = SecondOp; | ||||
11829 | |||||
11830 | } else if ((SecondOpc == ISD::AND || SecondOpc == AArch64ISD::BICi) && | ||||
11831 | (FirstOpc == AArch64ISD::VSHL || FirstOpc == AArch64ISD::VLSHR)) { | ||||
11832 | And = SecondOp; | ||||
11833 | Shift = FirstOp; | ||||
11834 | } else | ||||
11835 | return SDValue(); | ||||
11836 | |||||
11837 | bool IsAnd = And.getOpcode() == ISD::AND; | ||||
11838 | bool IsShiftRight = Shift.getOpcode() == AArch64ISD::VLSHR; | ||||
11839 | |||||
11840 | // Is the shift amount constant? | ||||
11841 | ConstantSDNode *C2node = dyn_cast<ConstantSDNode>(Shift.getOperand(1)); | ||||
11842 | if (!C2node) | ||||
11843 | return SDValue(); | ||||
11844 | |||||
11845 | uint64_t C1; | ||||
11846 | if (IsAnd) { | ||||
11847 | // Is the and mask vector all constant? | ||||
11848 | if (!isAllConstantBuildVector(And.getOperand(1), C1)) | ||||
11849 | return SDValue(); | ||||
11850 | } else { | ||||
11851 | // Reconstruct the corresponding AND immediate from the two BICi immediates. | ||||
11852 | ConstantSDNode *C1nodeImm = dyn_cast<ConstantSDNode>(And.getOperand(1)); | ||||
11853 | ConstantSDNode *C1nodeShift = dyn_cast<ConstantSDNode>(And.getOperand(2)); | ||||
11854 | assert(C1nodeImm && C1nodeShift); | ||||
11855 | C1 = ~(C1nodeImm->getZExtValue() << C1nodeShift->getZExtValue()); | ||||
11856 | } | ||||
11857 | |||||
11858 | // Is C1 == ~(Ones(ElemSizeInBits) << C2) or | ||||
11859 | // C1 == ~(Ones(ElemSizeInBits) >> C2), taking into account | ||||
11860 | // how much one can shift elements of a particular size? | ||||
11861 | uint64_t C2 = C2node->getZExtValue(); | ||||
11862 | unsigned ElemSizeInBits = VT.getScalarSizeInBits(); | ||||
11863 | if (C2 > ElemSizeInBits) | ||||
11864 | return SDValue(); | ||||
11865 | |||||
11866 | APInt C1AsAPInt(ElemSizeInBits, C1); | ||||
11867 | APInt RequiredC1 = IsShiftRight ? APInt::getHighBitsSet(ElemSizeInBits, C2) | ||||
11868 | : APInt::getLowBitsSet(ElemSizeInBits, C2); | ||||
11869 | if (C1AsAPInt != RequiredC1) | ||||
11870 | return SDValue(); | ||||
11871 | |||||
11872 | SDValue X = And.getOperand(0); | ||||
11873 | SDValue Y = Shift.getOperand(0); | ||||
11874 | |||||
11875 | unsigned Inst = IsShiftRight ? AArch64ISD::VSRI : AArch64ISD::VSLI; | ||||
11876 | SDValue ResultSLI = DAG.getNode(Inst, DL, VT, X, Y, Shift.getOperand(1)); | ||||
11877 | |||||
11878 | LLVM_DEBUG(dbgs() << "aarch64-lower: transformed: \n"); | ||||
11879 | LLVM_DEBUG(N->dump(&DAG)); | ||||
11880 | LLVM_DEBUG(dbgs() << "into: \n"); | ||||
11881 | LLVM_DEBUG(ResultSLI->dump(&DAG)); | ||||
11882 | |||||
11883 | ++NumShiftInserts; | ||||
11884 | return ResultSLI; | ||||
11885 | } | ||||
11886 | |||||
11887 | SDValue AArch64TargetLowering::LowerVectorOR(SDValue Op, | ||||
11888 | SelectionDAG &DAG) const { | ||||
11889 | if (useSVEForFixedLengthVectorVT(Op.getValueType(), | ||||
11890 | Subtarget->forceStreamingCompatibleSVE())) | ||||
11891 | return LowerToScalableOp(Op, DAG); | ||||
11892 | |||||
11893 | // Attempt to form a vector S[LR]I from (or (and X, C1), (lsl Y, C2)) | ||||
11894 | if (SDValue Res = tryLowerToSLI(Op.getNode(), DAG)) | ||||
11895 | return Res; | ||||
11896 | |||||
11897 | EVT VT = Op.getValueType(); | ||||
11898 | |||||
11899 | SDValue LHS = Op.getOperand(0); | ||||
11900 | BuildVectorSDNode *BVN = | ||||
11901 | dyn_cast<BuildVectorSDNode>(Op.getOperand(1).getNode()); | ||||
11902 | if (!BVN) { | ||||
11903 | // OR commutes, so try swapping the operands. | ||||
11904 | LHS = Op.getOperand(1); | ||||
11905 | BVN = dyn_cast<BuildVectorSDNode>(Op.getOperand(0).getNode()); | ||||
11906 | } | ||||
11907 | if (!BVN) | ||||
11908 | return Op; | ||||
11909 | |||||
11910 | APInt DefBits(VT.getSizeInBits(), 0); | ||||
11911 | APInt UndefBits(VT.getSizeInBits(), 0); | ||||
11912 | if (resolveBuildVector(BVN, DefBits, UndefBits)) { | ||||
11913 | SDValue NewOp; | ||||
11914 | |||||
11915 | if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::ORRi, Op, DAG, | ||||
11916 | DefBits, &LHS)) || | ||||
11917 | (NewOp = tryAdvSIMDModImm16(AArch64ISD::ORRi, Op, DAG, | ||||
11918 | DefBits, &LHS))) | ||||
11919 | return NewOp; | ||||
11920 | |||||
11921 | if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::ORRi, Op, DAG, | ||||
11922 | UndefBits, &LHS)) || | ||||
11923 | (NewOp = tryAdvSIMDModImm16(AArch64ISD::ORRi, Op, DAG, | ||||
11924 | UndefBits, &LHS))) | ||||
11925 | return NewOp; | ||||
11926 | } | ||||
11927 | |||||
11928 | // We can always fall back to a non-immediate OR. | ||||
11929 | return Op; | ||||
11930 | } | ||||
11931 | |||||
11932 | // Normalize the operands of BUILD_VECTOR. The value of constant operands will | ||||
11933 | // be truncated to fit element width. | ||||
11934 | static SDValue NormalizeBuildVector(SDValue Op, | ||||
11935 | SelectionDAG &DAG) { | ||||
11936 | assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!"); | ||||
11937 | SDLoc dl(Op); | ||||
11938 | EVT VT = Op.getValueType(); | ||||
11939 | EVT EltTy = VT.getVectorElementType(); | ||||
11940 | |||||
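// Only i8/i16 element types need normalizing here: type legalization
// promotes their lanes to i32, which may carry set bits above the element
// width.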
11941 | if (EltTy.isFloatingPoint() || EltTy.getSizeInBits() > 16) | ||||
11942 | return Op; | ||||
11943 | |||||
11944 | SmallVector<SDValue, 16> Ops; | ||||
11945 | for (SDValue Lane : Op->ops()) { | ||||
11946 | // For integer vectors, type legalization would have promoted the | ||||
11947 | // operands already. Otherwise, if Op is a floating-point splat | ||||
11948 | // (with operands cast to integers), then the only possibilities | ||||
11949 | // are constants and UNDEFs. | ||||
11950 | if (auto *CstLane = dyn_cast<ConstantSDNode>(Lane)) { | ||||
11951 | APInt LowBits(EltTy.getSizeInBits(), | ||||
11952 | CstLane->getZExtValue()); | ||||
11953 | Lane = DAG.getConstant(LowBits.getZExtValue(), dl, MVT::i32); | ||||
11954 | } else if (Lane.getNode()->isUndef()) { | ||||
11955 | Lane = DAG.getUNDEF(MVT::i32); | ||||
11956 | } else { | ||||
11957 | assert(Lane.getValueType() == MVT::i32 && | ||||
11958 |        "Unexpected BUILD_VECTOR operand type"); | ||||
11959 | } | ||||
11960 | Ops.push_back(Lane); | ||||
11961 | } | ||||
11962 | return DAG.getBuildVector(VT, dl, Ops); | ||||
11963 | } | ||||
11964 | |||||
11965 | static SDValue ConstantBuildVector(SDValue Op, SelectionDAG &DAG) { | ||||
11966 | EVT VT = Op.getValueType(); | ||||
11967 | |||||
11968 | APInt DefBits(VT.getSizeInBits(), 0); | ||||
11969 | APInt UndefBits(VT.getSizeInBits(), 0); | ||||
11970 | BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode()); | ||||
11971 | if (resolveBuildVector(BVN, DefBits, UndefBits)) { | ||||
11972 | SDValue NewOp; | ||||
11973 | if ((NewOp = tryAdvSIMDModImm64(AArch64ISD::MOVIedit, Op, DAG, DefBits)) || | ||||
11974 | (NewOp = tryAdvSIMDModImm32(AArch64ISD::MOVIshift, Op, DAG, DefBits)) || | ||||
11975 | (NewOp = tryAdvSIMDModImm321s(AArch64ISD::MOVImsl, Op, DAG, DefBits)) || | ||||
11976 | (NewOp = tryAdvSIMDModImm16(AArch64ISD::MOVIshift, Op, DAG, DefBits)) || | ||||
11977 | (NewOp = tryAdvSIMDModImm8(AArch64ISD::MOVI, Op, DAG, DefBits)) || | ||||
11978 | (NewOp = tryAdvSIMDModImmFP(AArch64ISD::FMOV, Op, DAG, DefBits))) | ||||
11979 | return NewOp; | ||||
11980 | |||||
11981 | DefBits = ~DefBits; | ||||
11982 | if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::MVNIshift, Op, DAG, DefBits)) || | ||||
11983 | (NewOp = tryAdvSIMDModImm321s(AArch64ISD::MVNImsl, Op, DAG, DefBits)) || | ||||
11984 | (NewOp = tryAdvSIMDModImm16(AArch64ISD::MVNIshift, Op, DAG, DefBits))) | ||||
11985 | return NewOp; | ||||
11986 | |||||
11987 | DefBits = UndefBits; | ||||
11988 | if ((NewOp = tryAdvSIMDModImm64(AArch64ISD::MOVIedit, Op, DAG, DefBits)) || | ||||
11989 | (NewOp = tryAdvSIMDModImm32(AArch64ISD::MOVIshift, Op, DAG, DefBits)) || | ||||
11990 | (NewOp = tryAdvSIMDModImm321s(AArch64ISD::MOVImsl, Op, DAG, DefBits)) || | ||||
11991 | (NewOp = tryAdvSIMDModImm16(AArch64ISD::MOVIshift, Op, DAG, DefBits)) || | ||||
11992 | (NewOp = tryAdvSIMDModImm8(AArch64ISD::MOVI, Op, DAG, DefBits)) || | ||||
11993 | (NewOp = tryAdvSIMDModImmFP(AArch64ISD::FMOV, Op, DAG, DefBits))) | ||||
11994 | return NewOp; | ||||
11995 | |||||
11996 | DefBits = ~UndefBits; | ||||
11997 | if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::MVNIshift, Op, DAG, DefBits)) || | ||||
11998 | (NewOp = tryAdvSIMDModImm321s(AArch64ISD::MVNImsl, Op, DAG, DefBits)) || | ||||
11999 | (NewOp = tryAdvSIMDModImm16(AArch64ISD::MVNIshift, Op, DAG, DefBits))) | ||||
12000 | return NewOp; | ||||
12001 | } | ||||
12002 | |||||
12003 | return SDValue(); | ||||
12004 | } | ||||
12005 | |||||
12006 | SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op, | ||||
12007 | SelectionDAG &DAG) const { | ||||
12008 | EVT VT = Op.getValueType(); | ||||
12009 | |||||
12010 | if (useSVEForFixedLengthVectorVT(VT, | ||||
12011 | Subtarget->forceStreamingCompatibleSVE())) { | ||||
12012 | if (auto SeqInfo = cast<BuildVectorSDNode>(Op)->isConstantSequence()) { | ||||
12013 | SDLoc DL(Op); | ||||
12014 | EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT); | ||||
12015 | SDValue Start = DAG.getConstant(SeqInfo->first, DL, ContainerVT); | ||||
12016 | SDValue Steps = DAG.getStepVector(DL, ContainerVT, SeqInfo->second); | ||||
12017 | SDValue Seq = DAG.getNode(ISD::ADD, DL, ContainerVT, Start, Steps); | ||||
12018 | return convertFromScalableVector(DAG, Op.getValueType(), Seq); | ||||
12019 | } | ||||
12020 | |||||
12021 | // Revert to common legalisation for all other variants. | ||||
12022 | return SDValue(); | ||||
12023 | } | ||||
12024 | |||||
12025 | // Try to build a simple constant vector. | ||||
12026 | Op = NormalizeBuildVector(Op, DAG); | ||||
12027 | // Though this might return a non-BUILD_VECTOR (e.g. CONCAT_VECTORS); if so, | ||||
12028 | // abort. | ||||
12029 | if (Op.getOpcode() != ISD::BUILD_VECTOR) | ||||
12030 | return SDValue(); | ||||
12031 | |||||
12032 | if (VT.isInteger()) { | ||||
12033 | // Certain vector constants, used to express things like logical NOT and | ||||
12034 | // arithmetic NEG, are passed through unmodified. This allows special | ||||
12035 | // patterns for these operations to match, which will lower these constants | ||||
12036 | // to whatever is proven necessary. | ||||
12037 | BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode()); | ||||
12038 | if (BVN->isConstant()) | ||||
12039 | if (ConstantSDNode *Const = BVN->getConstantSplatNode()) { | ||||
12040 | unsigned BitSize = VT.getVectorElementType().getSizeInBits(); | ||||
12041 | APInt Val(BitSize, | ||||
12042 | Const->getAPIntValue().zextOrTrunc(BitSize).getZExtValue()); | ||||
12043 | if (Val.isZero() || Val.isAllOnes()) | ||||
12044 | return Op; | ||||
12045 | } | ||||
12046 | } | ||||
12047 | |||||
12048 | if (SDValue V = ConstantBuildVector(Op, DAG)) | ||||
12049 | return V; | ||||
12050 | |||||
12051 | // Scan through the operands to find some interesting properties we can | ||||
12052 | // exploit: | ||||
12053 | // 1) If only one value is used, we can use a DUP, or | ||||
12054 | // 2) if only the low element is not undef, we can just insert that, or | ||||
12055 | // 3) if only one constant value is used (w/ some non-constant lanes), | ||||
12056 | // we can splat the constant value into the whole vector then fill | ||||
12057 | // in the non-constant lanes. | ||||
12058 | // 4) FIXME: If different constant values are used, but we can intelligently | ||||
12059 | // select the values we'll be overwriting for the non-constant | ||||
12060 | // lanes such that we can directly materialize the vector | ||||
12061 | // some other way (MOVI, e.g.), we can be sneaky. | ||||
12062 | // 5) if all operands are EXTRACT_VECTOR_ELT, check for VUZP. | ||||
12063 | SDLoc dl(Op); | ||||
12064 | unsigned NumElts = VT.getVectorNumElements(); | ||||
12065 | bool isOnlyLowElement = true; | ||||
12066 | bool usesOnlyOneValue = true; | ||||
12067 | bool usesOnlyOneConstantValue = true; | ||||
12068 | bool isConstant = true; | ||||
12069 | bool AllLanesExtractElt = true; | ||||
12070 | unsigned NumConstantLanes = 0; | ||||
12071 | unsigned NumDifferentLanes = 0; | ||||
12072 | unsigned NumUndefLanes = 0; | ||||
12073 | SDValue Value; | ||||
12074 | SDValue ConstantValue; | ||||
12075 | for (unsigned i = 0; i < NumElts; ++i) { | ||||
12076 | SDValue V = Op.getOperand(i); | ||||
12077 | if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) | ||||
12078 | AllLanesExtractElt = false; | ||||
12079 | if (V.isUndef()) { | ||||
12080 | ++NumUndefLanes; | ||||
12081 | continue; | ||||
12082 | } | ||||
12083 | if (i > 0) | ||||
12084 | isOnlyLowElement = false; | ||||
12085 | if (!isIntOrFPConstant(V)) | ||||
12086 | isConstant = false; | ||||
12087 | |||||
12088 | if (isIntOrFPConstant(V)) { | ||||
12089 | ++NumConstantLanes; | ||||
12090 | if (!ConstantValue.getNode()) | ||||
12091 | ConstantValue = V; | ||||
12092 | else if (ConstantValue != V) | ||||
12093 | usesOnlyOneConstantValue = false; | ||||
12094 | } | ||||
12095 | |||||
12096 | if (!Value.getNode()) | ||||
12097 | Value = V; | ||||
12098 | else if (V != Value) { | ||||
12099 | usesOnlyOneValue = false; | ||||
12100 | ++NumDifferentLanes; | ||||
12101 | } | ||||
12102 | } | ||||
12103 | |||||
12104 | if (!Value.getNode()) { | ||||
12105 | LLVM_DEBUG( | ||||
12106 |     dbgs() << "LowerBUILD_VECTOR: value undefined, creating undef node\n"); | ||||
12107 | return DAG.getUNDEF(VT); | ||||
12108 | } | ||||
12109 | |||||
12110 | // Convert BUILD_VECTOR where all elements but the lowest are undef into | ||||
12111 | // SCALAR_TO_VECTOR, except when we have a single-element constant vector, | ||||
12112 | // as SimplifyDemandedBits would just turn that back into BUILD_VECTOR. | ||||
12113 | if (isOnlyLowElement && !(NumElts == 1 && isIntOrFPConstant(Value))) { | ||||
12114 | LLVM_DEBUG(dbgs() << "LowerBUILD_VECTOR: only low element used, creating 1 " | ||||
12115 |                      "SCALAR_TO_VECTOR node\n"); | ||||
12116 | return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value); | ||||
12117 | } | ||||
12118 | |||||
12119 | if (AllLanesExtractElt) { | ||||
12120 | SDNode *Vector = nullptr; | ||||
12121 | bool Even = false; | ||||
12122 | bool Odd = false; | ||||
12123 | // Check whether the extract elements match the Even pattern <0,2,4,...> or | ||||
12124 | // the Odd pattern <1,3,5,...>. | ||||
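// E.g. a v4i16 BUILD_VECTOR of extracts at indices <0,2,4,6> from one
// v8i16 source is UZP1 of the source's two halves; <1,3,5,7> is UZP2.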
12125 | for (unsigned i = 0; i < NumElts; ++i) { | ||||
12126 | SDValue V = Op.getOperand(i); | ||||
12127 | const SDNode *N = V.getNode(); | ||||
12128 | if (!isa<ConstantSDNode>(N->getOperand(1))) | ||||
12129 | break; | ||||
12130 | SDValue N0 = N->getOperand(0); | ||||
12131 | |||||
12132 | // All elements are extracted from the same vector. | ||||
12133 | if (!Vector) { | ||||
12134 | Vector = N0.getNode(); | ||||
12135 | // Check that the type of EXTRACT_VECTOR_ELT matches the type of | ||||
12136 | // BUILD_VECTOR. | ||||
12137 | if (VT.getVectorElementType() != | ||||
12138 | N0.getValueType().getVectorElementType()) | ||||
12139 | break; | ||||
12140 | } else if (Vector != N0.getNode()) { | ||||
12141 | Odd = false; | ||||
12142 | Even = false; | ||||
12143 | break; | ||||
12144 | } | ||||
12145 | |||||
12146 | // Extracted values are either at Even indices <0,2,4,...> or at Odd | ||||
12147 | // indices <1,3,5,...>. | ||||
12148 | uint64_t Val = N->getConstantOperandVal(1); | ||||
12149 | if (Val == 2 * i) { | ||||
12150 | Even = true; | ||||
12151 | continue; | ||||
12152 | } | ||||
12153 | if (Val - 1 == 2 * i) { | ||||
12154 | Odd = true; | ||||
12155 | continue; | ||||
12156 | } | ||||
12157 | |||||
12158 | // Something does not match: abort. | ||||
12159 | Odd = false; | ||||
12160 | Even = false; | ||||
12161 | break; | ||||
12162 | } | ||||
12163 | if (Even || Odd) { | ||||
12164 | SDValue LHS = | ||||
12165 | DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, SDValue(Vector, 0), | ||||
12166 | DAG.getConstant(0, dl, MVT::i64)); | ||||
12167 | SDValue RHS = | ||||
12168 | DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, SDValue(Vector, 0), | ||||
12169 | DAG.getConstant(NumElts, dl, MVT::i64)); | ||||
12170 | |||||
12171 | if (Even && !Odd) | ||||
12172 | return DAG.getNode(AArch64ISD::UZP1, dl, DAG.getVTList(VT, VT), LHS, | ||||
12173 | RHS); | ||||
12174 | if (Odd && !Even) | ||||
12175 | return DAG.getNode(AArch64ISD::UZP2, dl, DAG.getVTList(VT, VT), LHS, | ||||
12176 | RHS); | ||||
12177 | } | ||||
12178 | } | ||||
12179 | |||||
12180 | // Use DUP for non-constant splats. For f32 constant splats, reduce to | ||||
12181 | // i32 and try again. | ||||
12182 | if (usesOnlyOneValue) { | ||||
12183 | if (!isConstant) { | ||||
12184 | if (Value.getOpcode() != ISD::EXTRACT_VECTOR_ELT || | ||||
12185 | Value.getValueType() != VT) { | ||||
12186 | LLVM_DEBUG( | ||||
12187 |     dbgs() << "LowerBUILD_VECTOR: use DUP for non-constant splats\n"); | ||||
12188 | return DAG.getNode(AArch64ISD::DUP, dl, VT, Value); | ||||
12189 | } | ||||
12190 | |||||
12191 | // This is actually a DUPLANExx operation, which keeps everything in vector registers. | ||||
12192 | |||||
12193 | SDValue Lane = Value.getOperand(1); | ||||
12194 | Value = Value.getOperand(0); | ||||
12195 | if (Value.getValueSizeInBits() == 64) { | ||||
12196 | LLVM_DEBUG( | ||||
12197 |     dbgs() << "LowerBUILD_VECTOR: DUPLANE works on 128-bit vectors, " | ||||
12198 |               "widening it\n"); | ||||
12199 | Value = WidenVector(Value, DAG); | ||||
12200 | } | ||||
12201 | |||||
12202 | unsigned Opcode = getDUPLANEOp(VT.getVectorElementType()); | ||||
12203 | return DAG.getNode(Opcode, dl, VT, Value, Lane); | ||||
12204 | } | ||||
12205 | |||||
12206 | if (VT.getVectorElementType().isFloatingPoint()) { | ||||
12207 | SmallVector<SDValue, 8> Ops; | ||||
12208 | EVT EltTy = VT.getVectorElementType(); | ||||
12209 | assert((EltTy == MVT::f16 || EltTy == MVT::bf16 || EltTy == MVT::f32 || | ||||
12210 |         EltTy == MVT::f64) && "Unsupported floating-point vector type"); | ||||
12211 | LLVM_DEBUG( | ||||
12212 |     dbgs() << "LowerBUILD_VECTOR: float constant splats, creating int " | ||||
12213 |               "BITCASTS, and try again\n"); | ||||
12214 | MVT NewType = MVT::getIntegerVT(EltTy.getSizeInBits()); | ||||
12215 | for (unsigned i = 0; i < NumElts; ++i) | ||||
12216 | Ops.push_back(DAG.getNode(ISD::BITCAST, dl, NewType, Op.getOperand(i))); | ||||
12217 | EVT VecVT = EVT::getVectorVT(*DAG.getContext(), NewType, NumElts); | ||||
12218 | SDValue Val = DAG.getBuildVector(VecVT, dl, Ops); | ||||
12219 | LLVM_DEBUG(dbgs() << "LowerBUILD_VECTOR: trying to lower new vector: "; | ||||
12220 |            Val.dump();); | ||||
12221 | Val = LowerBUILD_VECTOR(Val, DAG); | ||||
12222 | if (Val.getNode()) | ||||
12223 | return DAG.getNode(ISD::BITCAST, dl, VT, Val); | ||||
12224 | } | ||||
12225 | } | ||||
12226 | |||||
12227 | // If we need to insert a small number of different non-constant elements and | ||||
12228 | // the vector width is sufficiently large, prefer using DUP with the common | ||||
12229 | // value and INSERT_VECTOR_ELT for the different lanes. If DUP is preferred, | ||||
12230 | // skip the constant lane handling below. | ||||
12231 | bool PreferDUPAndInsert = | ||||
12232 | !isConstant && NumDifferentLanes >= 1 && | ||||
12233 | NumDifferentLanes < ((NumElts - NumUndefLanes) / 2) && | ||||
12234 | NumDifferentLanes >= NumConstantLanes; | ||||
12235 | |||||
12236 | // If only a single constant value was used, across more than one lane, | ||||
12237 | // start by splatting that value, then replace the non-constant lanes. This | ||||
12238 | // is better than the default, which will perform a separate initialization | ||||
12239 | // for each lane. | ||||
12240 | if (!PreferDUPAndInsert && NumConstantLanes > 0 && usesOnlyOneConstantValue) { | ||||
12241 | // Firstly, try to materialize the splat constant. | ||||
12242 | SDValue Vec = DAG.getSplatBuildVector(VT, dl, ConstantValue), | ||||
12243 | Val = ConstantBuildVector(Vec, DAG); | ||||
12244 | if (!Val) { | ||||
12245 | // Otherwise, materialize the constant and splat it. | ||||
12246 | Val = DAG.getNode(AArch64ISD::DUP, dl, VT, ConstantValue); | ||||
12247 | DAG.ReplaceAllUsesWith(Vec.getNode(), &Val); | ||||
12248 | } | ||||
12249 | |||||
12250 | // Now insert the non-constant lanes. | ||||
12251 | for (unsigned i = 0; i < NumElts; ++i) { | ||||
12252 | SDValue V = Op.getOperand(i); | ||||
12253 | SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i64); | ||||
12254 | if (!isIntOrFPConstant(V)) | ||||
12255 | // Note that type legalization likely mucked about with the VT of the | ||||
12256 | // source operand, so we may have to convert it here before inserting. | ||||
12257 | Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Val, V, LaneIdx); | ||||
12258 | } | ||||
12259 | return Val; | ||||
12260 | } | ||||
12261 | |||||
12262 | // This will generate a load from the constant pool. | ||||
12263 | if (isConstant) { | ||||
12264 | LLVM_DEBUG( | ||||
12265 |     dbgs() << "LowerBUILD_VECTOR: all elements are constant, use default " | ||||
12266 |               "expansion\n"); | ||||
12267 | return SDValue(); | ||||
12268 | } | ||||
12269 | |||||
12270 | // Detect patterns of a0,a1,a2,a3,b0,b1,b2,b3,c0,c1,c2,c3,d0,d1,d2,d3 from | ||||
12271 | // v4i32s. This is really a truncate, which we can construct out of (legal) | ||||
12272 | // concats and truncate nodes. | ||||
12273 | if (SDValue M = ReconstructTruncateFromBuildVector(Op, DAG)) | ||||
12274 | return M; | ||||
12275 | |||||
12276 | // Empirical tests suggest this is rarely worth it for vectors of length <= 2. | ||||
12277 | if (NumElts >= 4) { | ||||
12278 | if (SDValue shuffle = ReconstructShuffle(Op, DAG)) | ||||
12279 | return shuffle; | ||||
12280 | } | ||||
12281 | |||||
12282 | if (PreferDUPAndInsert) { | ||||
12283 | // First, build a constant vector with the common element. | ||||
12284 | SmallVector<SDValue, 8> Ops(NumElts, Value); | ||||
12285 | SDValue NewVector = LowerBUILD_VECTOR(DAG.getBuildVector(VT, dl, Ops), DAG); | ||||
12286 | // Next, insert the elements that do not match the common value. | ||||
12287 | for (unsigned I = 0; I < NumElts; ++I) | ||||
12288 | if (Op.getOperand(I) != Value) | ||||
12289 | NewVector = | ||||
12290 | DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, NewVector, | ||||
12291 | Op.getOperand(I), DAG.getConstant(I, dl, MVT::i64)); | ||||
12292 | |||||
12293 | return NewVector; | ||||
12294 | } | ||||
12295 | |||||
12296 | // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we | ||||
12297 | // know the default expansion would otherwise fall back on something even | ||||
12298 | // worse. For a vector with one or two non-undef values, that's | ||||
12299 | // scalar_to_vector for the elements followed by a shuffle (provided the | ||||
12300 | // shuffle is valid for the target) and materialization element by element | ||||
12301 | // on the stack followed by a load for everything else. | ||||
12302 | if (!isConstant && !usesOnlyOneValue) { | ||||
12303 | LLVM_DEBUG( | ||||
12304 |     dbgs() << "LowerBUILD_VECTOR: alternatives failed, creating sequence " | ||||
12305 |               "of INSERT_VECTOR_ELT\n"); | ||||
12306 | |||||
12307 | SDValue Vec = DAG.getUNDEF(VT); | ||||
12308 | SDValue Op0 = Op.getOperand(0); | ||||
12309 | unsigned i = 0; | ||||
12310 | |||||
12311 | // Use SCALAR_TO_VECTOR for lane zero to | ||||
12312 | // a) Avoid a RMW dependency on the full vector register, and | ||||
12313 | // b) Allow the register coalescer to fold away the copy if the | ||||
12314 | // value is already in an S or D register, and we're forced to emit an | ||||
12315 | // INSERT_SUBREG that we can't fold anywhere. | ||||
12316 | // | ||||
12317 | // We also allow types like i8 and i16 which are illegal scalar but legal | ||||
12318 | // vector element types. After type-legalization the inserted value is | ||||
12319 | // extended (i32) and it is safe to cast them to the vector type by ignoring | ||||
12320 | // the upper bits of the lowest lane (e.g. v8i8, v4i16). | ||||
12321 | if (!Op0.isUndef()) { | ||||
12322 | LLVM_DEBUG(dbgs() << "Creating node for op0, it is not undefined:\n"); | ||||
12323 | Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op0); | ||||
12324 | ++i; | ||||
12325 | } | ||||
12326 | LLVM_DEBUG(if (i < NumElts) dbgs() | ||||
12327 | << "Creating nodes for the other vector elements:\n";); | ||||
12328 | for (; i < NumElts; ++i) { | ||||
12329 | SDValue V = Op.getOperand(i); | ||||
12330 | if (V.isUndef()) | ||||
12331 | continue; | ||||
12332 | SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i64); | ||||
12333 | Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx); | ||||
12334 | } | ||||
12335 | return Vec; | ||||
12336 | } | ||||
12337 | |||||
12338 | LLVM_DEBUG( | ||||
12339 | dbgs() << "LowerBUILD_VECTOR: use default expansion, failed to find " | ||||
12340 | "better alternative\n"); | ||||
12341 | return SDValue(); | ||||
12342 | } | ||||
12343 | |||||
12344 | SDValue AArch64TargetLowering::LowerCONCAT_VECTORS(SDValue Op, | ||||
12345 | SelectionDAG &DAG) const { | ||||
12346 | if (useSVEForFixedLengthVectorVT(Op.getValueType(), | ||||
12347 | Subtarget->forceStreamingCompatibleSVE())) | ||||
12348 | return LowerFixedLengthConcatVectorsToSVE(Op, DAG); | ||||
12349 | |||||
12350 | assert(Op.getValueType().isScalableVector() && | ||||
12351 | isTypeLegal(Op.getValueType()) && | ||||
12352 | "Expected legal scalable vector type!"); | ||||
12353 | |||||
12354 | if (isTypeLegal(Op.getOperand(0).getValueType())) { | ||||
12355 | unsigned NumOperands = Op->getNumOperands(); | ||||
12356 | assert(NumOperands > 1 && isPowerOf2_32(NumOperands) && | ||||
12357 | "Unexpected number of operands in CONCAT_VECTORS"); | ||||
12358 | |||||
12359 | if (NumOperands == 2) | ||||
12360 | return Op; | ||||
12361 | |||||
12362 | // Concat each pair of subvectors and pack into the lower half of the array. | ||||
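// E.g. with four operands: (A, B, C, D) -> (A:B, C:D) -> (A:B:C:D), where
// each ":" denotes a two-operand CONCAT_VECTORS of the doubled type.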
12363 | SmallVector<SDValue> ConcatOps(Op->op_begin(), Op->op_end()); | ||||
12364 | while (ConcatOps.size() > 1) { | ||||
12365 | for (unsigned I = 0, E = ConcatOps.size(); I != E; I += 2) { | ||||
12366 | SDValue V1 = ConcatOps[I]; | ||||
12367 | SDValue V2 = ConcatOps[I + 1]; | ||||
12368 | EVT SubVT = V1.getValueType(); | ||||
12369 | EVT PairVT = SubVT.getDoubleNumVectorElementsVT(*DAG.getContext()); | ||||
12370 | ConcatOps[I / 2] = | ||||
12371 | DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), PairVT, V1, V2); | ||||
12372 | } | ||||
12373 | ConcatOps.resize(ConcatOps.size() / 2); | ||||
12374 | } | ||||
12375 | return ConcatOps[0]; | ||||
12376 | } | ||||
12377 | |||||
12378 | return SDValue(); | ||||
12379 | } | ||||
12380 | |||||
12381 | SDValue AArch64TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, | ||||
12382 | SelectionDAG &DAG) const { | ||||
12383 | assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT && "Unknown opcode!"); | ||||
12384 | |||||
12385 | if (useSVEForFixedLengthVectorVT(Op.getValueType(), | ||||
12386 | Subtarget->forceStreamingCompatibleSVE())) | ||||
12387 | return LowerFixedLengthInsertVectorElt(Op, DAG); | ||||
12388 | |||||
12389 | // Check for non-constant or out of range lane. | ||||
12390 | EVT VT = Op.getOperand(0).getValueType(); | ||||
12391 | |||||
12392 | if (VT.getScalarType() == MVT::i1) { | ||||
12393 | EVT VectorVT = getPromotedVTForPredicate(VT); | ||||
12394 | SDLoc DL(Op); | ||||
12395 | SDValue ExtendedVector = | ||||
12396 | DAG.getAnyExtOrTrunc(Op.getOperand(0), DL, VectorVT); | ||||
12397 | SDValue ExtendedValue = | ||||
12398 | DAG.getAnyExtOrTrunc(Op.getOperand(1), DL, | ||||
12399 | VectorVT.getScalarType().getSizeInBits() < 32 | ||||
12400 | ? MVT::i32 | ||||
12401 | : VectorVT.getScalarType()); | ||||
12402 | ExtendedVector = | ||||
12403 | DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VectorVT, ExtendedVector, | ||||
12404 | ExtendedValue, Op.getOperand(2)); | ||||
12405 | return DAG.getAnyExtOrTrunc(ExtendedVector, DL, VT); | ||||
12406 | } | ||||
12407 | |||||
12408 | ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Op.getOperand(2)); | ||||
12409 | if (!CI || CI->getZExtValue() >= VT.getVectorNumElements()) | ||||
12410 | return SDValue(); | ||||
12411 | |||||
12412 | // Insertion/extraction are legal for V128 types. | ||||
12413 | if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 || | ||||
12414 | VT == MVT::v2i64 || VT == MVT::v4f32 || VT == MVT::v2f64 || | ||||
12415 | VT == MVT::v8f16 || VT == MVT::v8bf16) | ||||
12416 | return Op; | ||||
12417 | |||||
12418 | if (VT != MVT::v8i8 && VT != MVT::v4i16 && VT != MVT::v2i32 && | ||||
12419 | VT != MVT::v1i64 && VT != MVT::v2f32 && VT != MVT::v4f16 && | ||||
12420 | VT != MVT::v4bf16) | ||||
12421 | return SDValue(); | ||||
12422 | |||||
12423 | // For V64 types, we perform insertion by expanding the value | ||||
12424 | // to a V128 type and perform the insertion on that. | ||||
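// E.g. an insert into v4i16 widens the source to v8i16 (undef upper half),
// performs the v8i16 INSERT_VECTOR_ELT, then narrows back to v4i16.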
12425 | SDLoc DL(Op); | ||||
12426 | SDValue WideVec = WidenVector(Op.getOperand(0), DAG); | ||||
12427 | EVT WideTy = WideVec.getValueType(); | ||||
12428 | |||||
12429 | SDValue Node = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, WideTy, WideVec, | ||||
12430 | Op.getOperand(1), Op.getOperand(2)); | ||||
12431 | // Re-narrow the resultant vector. | ||||
12432 | return NarrowVector(Node, DAG); | ||||
12433 | } | ||||
12434 | |||||
12435 | SDValue | ||||
12436 | AArch64TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, | ||||
12437 | SelectionDAG &DAG) const { | ||||
12438 | assert(Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Unknown opcode!"); | ||||
12439 | EVT VT = Op.getOperand(0).getValueType(); | ||||
12440 | |||||
12441 | if (VT.getScalarType() == MVT::i1) { | ||||
12442 | // We can't directly extract from an SVE predicate; extend it first. | ||||
12443 | // (This isn't the only possible lowering, but it's straightforward.) | ||||
12444 | EVT VectorVT = getPromotedVTForPredicate(VT); | ||||
12445 | SDLoc DL(Op); | ||||
12446 | SDValue Extend = | ||||
12447 | DAG.getNode(ISD::ANY_EXTEND, DL, VectorVT, Op.getOperand(0)); | ||||
12448 | MVT ExtractTy = VectorVT == MVT::nxv2i64 ? MVT::i64 : MVT::i32; | ||||
12449 | SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtractTy, | ||||
12450 | Extend, Op.getOperand(1)); | ||||
12451 | return DAG.getAnyExtOrTrunc(Extract, DL, Op.getValueType()); | ||||
12452 | } | ||||
12453 | |||||
12454 | if (useSVEForFixedLengthVectorVT(VT, | ||||
12455 | Subtarget->forceStreamingCompatibleSVE())) | ||||
12456 | return LowerFixedLengthExtractVectorElt(Op, DAG); | ||||
12457 | |||||
12458 | // Check for non-constant or out of range lane. | ||||
12459 | ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Op.getOperand(1)); | ||||
12460 | if (!CI || CI->getZExtValue() >= VT.getVectorNumElements()) | ||||
12461 | return SDValue(); | ||||
12462 | |||||
12463 | // Insertion/extraction are legal for V128 types. | ||||
12464 | if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 || | ||||
12465 | VT == MVT::v2i64 || VT == MVT::v4f32 || VT == MVT::v2f64 || | ||||
12466 | VT == MVT::v8f16 || VT == MVT::v8bf16) | ||||
12467 | return Op; | ||||
12468 | |||||
12469 | if (VT != MVT::v8i8 && VT != MVT::v4i16 && VT != MVT::v2i32 && | ||||
12470 | VT != MVT::v1i64 && VT != MVT::v2f32 && VT != MVT::v4f16 && | ||||
12471 | VT != MVT::v4bf16) | ||||
12472 | return SDValue(); | ||||
12473 | |||||
12474 | // For V64 types, we perform extraction by expanding the value | ||||
12475 | // to a V128 type and perform the extraction on that. | ||||
12476 | SDLoc DL(Op); | ||||
12477 | SDValue WideVec = WidenVector(Op.getOperand(0), DAG); | ||||
12478 | EVT WideTy = WideVec.getValueType(); | ||||
12479 | |||||
12480 | EVT ExtrTy = WideTy.getVectorElementType(); | ||||
12481 | if (ExtrTy == MVT::i16 || ExtrTy == MVT::i8) | ||||
12482 | ExtrTy = MVT::i32; | ||||
12483 | |||||
12484 | // For extractions, we just return the result directly. | ||||
12485 | return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtrTy, WideVec, | ||||
12486 | Op.getOperand(1)); | ||||
12487 | } | ||||
12488 | |||||
12489 | SDValue AArch64TargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op, | ||||
12490 | SelectionDAG &DAG) const { | ||||
12491 | assert(Op.getValueType().isFixedLengthVector() && | ||||
12492 | "Only cases that extract a fixed length vector are supported!"); | ||||
12493 | |||||
12494 | EVT InVT = Op.getOperand(0).getValueType(); | ||||
12495 | unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); | ||||
12496 | unsigned Size = Op.getValueSizeInBits(); | ||||
12497 | |||||
12498 | // If we don't have legal types yet, do nothing | ||||
12499 | if (!DAG.getTargetLoweringInfo().isTypeLegal(InVT)) | ||||
12500 | return SDValue(); | ||||
12501 | |||||
12502 | if (InVT.isScalableVector()) { | ||||
12503 | // This will be matched by custom code during ISelDAGToDAG. | ||||
12504 | if (Idx == 0 && isPackedVectorType(InVT, DAG)) | ||||
12505 | return Op; | ||||
12506 | |||||
12507 | return SDValue(); | ||||
12508 | } | ||||
12509 | |||||
12510 | // This will get lowered to an appropriate EXTRACT_SUBREG in ISel. | ||||
12511 | if (Idx == 0 && InVT.getSizeInBits() <= 128) | ||||
12512 | return Op; | ||||
12513 | |||||
12514 | // If this is extracting the upper 64-bits of a 128-bit vector, we match | ||||
12515 | // that directly. | ||||
12516 | if (Size == 64 && Idx * InVT.getScalarSizeInBits() == 64 && | ||||
12517 | InVT.getSizeInBits() == 128 && !Subtarget->forceStreamingCompatibleSVE()) | ||||
12518 | return Op; | ||||
12519 | |||||
12520 | if (useSVEForFixedLengthVectorVT(InVT, | ||||
12521 | Subtarget->forceStreamingCompatibleSVE())) { | ||||
12522 | SDLoc DL(Op); | ||||
12523 | |||||
12524 | EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT); | ||||
12525 | SDValue NewInVec = | ||||
12526 | convertToScalableVector(DAG, ContainerVT, Op.getOperand(0)); | ||||
12527 | |||||
12528 | SDValue Splice = DAG.getNode(ISD::VECTOR_SPLICE, DL, ContainerVT, NewInVec, | ||||
12529 | NewInVec, DAG.getConstant(Idx, DL, MVT::i64)); | ||||
12530 | return convertFromScalableVector(DAG, Op.getValueType(), Splice); | ||||
12531 | } | ||||
12532 | |||||
12533 | return SDValue(); | ||||
12534 | } | ||||
12535 | |||||
12536 | SDValue AArch64TargetLowering::LowerINSERT_SUBVECTOR(SDValue Op, | ||||
12537 | SelectionDAG &DAG) const { | ||||
12538 | assert(Op.getValueType().isScalableVector() && | ||||
12539 | "Only expect to lower inserts into scalable vectors!"); | ||||
12540 | |||||
12541 | EVT InVT = Op.getOperand(1).getValueType(); | ||||
12542 | unsigned Idx = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); | ||||
12543 | |||||
12544 | SDValue Vec0 = Op.getOperand(0); | ||||
12545 | SDValue Vec1 = Op.getOperand(1); | ||||
12546 | SDLoc DL(Op); | ||||
12547 | EVT VT = Op.getValueType(); | ||||
12548 | |||||
12549 | if (InVT.isScalableVector()) { | ||||
12550 | if (!isTypeLegal(VT)) | ||||
12551 | return SDValue(); | ||||
12552 | |||||
12553 | // Break down insert_subvector into simpler parts. | ||||
12554 | if (VT.getVectorElementType() == MVT::i1) { | ||||
12555 | unsigned NumElts = VT.getVectorMinNumElements(); | ||||
12556 | EVT HalfVT = VT.getHalfNumVectorElementsVT(*DAG.getContext()); | ||||
12557 | |||||
12558 | SDValue Lo, Hi; | ||||
12559 | Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, Vec0, | ||||
12560 | DAG.getVectorIdxConstant(0, DL)); | ||||
12561 | Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, Vec0, | ||||
12562 | DAG.getVectorIdxConstant(NumElts / 2, DL)); | ||||
12563 | if (Idx < (NumElts / 2)) { | ||||
12564 | SDValue NewLo = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, HalfVT, Lo, Vec1, | ||||
12565 | DAG.getVectorIdxConstant(Idx, DL)); | ||||
12566 | return DAG.getNode(AArch64ISD::UZP1, DL, VT, NewLo, Hi); | ||||
12567 | } else { | ||||
12568 | SDValue NewHi = | ||||
12569 | DAG.getNode(ISD::INSERT_SUBVECTOR, DL, HalfVT, Hi, Vec1, | ||||
12570 | DAG.getVectorIdxConstant(Idx - (NumElts / 2), DL)); | ||||
12571 | return DAG.getNode(AArch64ISD::UZP1, DL, VT, Lo, NewHi); | ||||
12572 | } | ||||
12573 | } | ||||
12574 | |||||
12575 | // Ensure the subvector is half the size of the main vector. | ||||
12576 | if (VT.getVectorElementCount() != (InVT.getVectorElementCount() * 2)) | ||||
12577 | return SDValue(); | ||||
12578 | |||||
12579 | // Here narrow and wide refer to the vector element types. After "casting", | ||||
12580 | // both vectors must have the same bit length, so because the subvector | ||||
12581 | // has fewer elements, those elements need to be bigger. | ||||
12582 | EVT NarrowVT = getPackedSVEVectorVT(VT.getVectorElementCount()); | ||||
12583 | EVT WideVT = getPackedSVEVectorVT(InVT.getVectorElementCount()); | ||||
12584 | |||||
12585 | // NOP cast operands to the largest legal vector of the same element count. | ||||
12586 | if (VT.isFloatingPoint()) { | ||||
12587 | Vec0 = getSVESafeBitCast(NarrowVT, Vec0, DAG); | ||||
12588 | Vec1 = getSVESafeBitCast(WideVT, Vec1, DAG); | ||||
12589 | } else { | ||||
12590 | // Legal integer vectors are already their largest so Vec0 is fine as is. | ||||
12591 | Vec1 = DAG.getNode(ISD::ANY_EXTEND, DL, WideVT, Vec1); | ||||
12592 | } | ||||
12593 | |||||
12594 | // To replace the top/bottom half of vector V with vector SubV we widen the | ||||
12595 | // preserved half of V, concatenate this to SubV (the order depending on the | ||||
12596 | // half being replaced) and then narrow the result. | ||||
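// Illustration for Idx == 0 (replacing the bottom half): the preserved top
// half of Vec0 is widened with UUNPKHI and recombined with the subvector
// via UZP1, roughly Narrow = UZP1(Vec1, UUNPKHI(Vec0)).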
12597 | SDValue Narrow; | ||||
12598 | if (Idx == 0) { | ||||
12599 | SDValue HiVec0 = DAG.getNode(AArch64ISD::UUNPKHI, DL, WideVT, Vec0); | ||||
12600 | Narrow = DAG.getNode(AArch64ISD::UZP1, DL, NarrowVT, Vec1, HiVec0); | ||||
12601 | } else { | ||||
12602 | assert(Idx == InVT.getVectorMinNumElements() && | ||||
12603 | "Invalid subvector index!"); | ||||
12604 | SDValue LoVec0 = DAG.getNode(AArch64ISD::UUNPKLO, DL, WideVT, Vec0); | ||||
12605 | Narrow = DAG.getNode(AArch64ISD::UZP1, DL, NarrowVT, LoVec0, Vec1); | ||||
12606 | } | ||||
12607 | |||||
12608 | return getSVESafeBitCast(VT, Narrow, DAG); | ||||
12609 | } | ||||
12610 | |||||
12611 | if (Idx == 0 && isPackedVectorType(VT, DAG)) { | ||||
12612 | // This will be matched by custom code during ISelDAGToDAG. | ||||
12613 | if (Vec0.isUndef()) | ||||
12614 | return Op; | ||||
12615 | |||||
12616 | std::optional<unsigned> PredPattern = | ||||
12617 | getSVEPredPatternFromNumElements(InVT.getVectorNumElements()); | ||||
12618 | auto PredTy = VT.changeVectorElementType(MVT::i1); | ||||
12619 | SDValue PTrue = getPTrue(DAG, DL, PredTy, *PredPattern); | ||||
12620 | SDValue ScalableVec1 = convertToScalableVector(DAG, VT, Vec1); | ||||
12621 | return DAG.getNode(ISD::VSELECT, DL, VT, PTrue, ScalableVec1, Vec0); | ||||
12622 | } | ||||
12623 | |||||
12624 | return SDValue(); | ||||
12625 | } | ||||
12626 | |||||
12627 | static bool isPow2Splat(SDValue Op, uint64_t &SplatVal, bool &Negated) { | ||||
12628 | if (Op.getOpcode() != AArch64ISD::DUP && | ||||
12629 | Op.getOpcode() != ISD::SPLAT_VECTOR && | ||||
12630 | Op.getOpcode() != ISD::BUILD_VECTOR) | ||||
12631 | return false; | ||||
12632 | |||||
12633 | if (Op.getOpcode() == ISD::BUILD_VECTOR && | ||||
12634 | !isAllConstantBuildVector(Op, SplatVal)) | ||||
12635 | return false; | ||||
12636 | |||||
12637 | if (Op.getOpcode() != ISD::BUILD_VECTOR && | ||||
12638 | !isa<ConstantSDNode>(Op->getOperand(0))) | ||||
12639 | return false; | ||||
12640 | |||||
12641 | SplatVal = Op->getConstantOperandVal(0); | ||||
12642 | if (Op.getValueType().getVectorElementType() != MVT::i64) | ||||
12643 | SplatVal = (int32_t)SplatVal; | ||||
12644 | |||||
12645 | Negated = false; | ||||
12646 | if (isPowerOf2_64(SplatVal)) | ||||
12647 | return true; | ||||
12648 | |||||
12649 | Negated = true; | ||||
12650 | if (isPowerOf2_64(-SplatVal)) { | ||||
12651 | SplatVal = -SplatVal; | ||||
12652 | return true; | ||||
12653 | } | ||||
12654 | |||||
12655 | return false; | ||||
12656 | } | ||||
12657 | |||||
12658 | SDValue AArch64TargetLowering::LowerDIV(SDValue Op, SelectionDAG &DAG) const { | ||||
12659 | EVT VT = Op.getValueType(); | ||||
12660 | SDLoc dl(Op); | ||||
12661 | |||||
12662 | if (useSVEForFixedLengthVectorVT(VT, /*OverrideNEON=*/true)) | ||||
12663 | return LowerFixedLengthVectorIntDivideToSVE(Op, DAG); | ||||
12664 | |||||
12665 | assert(VT.isScalableVector() && "Expected a scalable vector."); | ||||
12666 | |||||
12667 | bool Signed = Op.getOpcode() == ISD::SDIV; | ||||
12668 | unsigned PredOpcode = Signed ? AArch64ISD::SDIV_PRED : AArch64ISD::UDIV_PRED; | ||||
12669 | |||||
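// A signed divide by (+/-)2^K maps onto the predicated SVE ASRD
// (arithmetic-shift-right-for-divide) instruction; roughly,
// sdiv x, 8 -> asrd x, #3 and sdiv x, -8 -> neg(asrd x, #3).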
12670 | bool Negated; | ||||
12671 | uint64_t SplatVal; | ||||
12672 | if (Signed && isPow2Splat(Op.getOperand(1), SplatVal, Negated)) { | ||||
12673 | SDValue Pg = getPredicateForScalableVector(DAG, dl, VT); | ||||
12674 | SDValue Res = | ||||
12675 | DAG.getNode(AArch64ISD::SRAD_MERGE_OP1, dl, VT, Pg, Op->getOperand(0), | ||||
12676 | DAG.getTargetConstant(Log2_64(SplatVal), dl, MVT::i32)); | ||||
12677 | if (Negated) | ||||
12678 | Res = DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(0, dl, VT), Res); | ||||
12679 | |||||
12680 | return Res; | ||||
12681 | } | ||||
12682 | |||||
12683 | if (VT == MVT::nxv4i32 || VT == MVT::nxv2i64) | ||||
12684 | return LowerToPredicatedOp(Op, DAG, PredOpcode); | ||||
12685 | |||||
12686 | // SVE doesn't have i8 and i16 DIV operations; widen them to 32-bit | ||||
12687 | // operations, and truncate the result. | ||||
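// E.g. for nxv16i8: unpack both operands into lo/hi nxv8i16 halves, divide
// the halves (each of which is widened once more, down this same path, to
// nxv4i32), then UZP1 the narrowed results back into a single vector.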
12688 | EVT WidenedVT; | ||||
12689 | if (VT == MVT::nxv16i8) | ||||
12690 | WidenedVT = MVT::nxv8i16; | ||||
12691 | else if (VT == MVT::nxv8i16) | ||||
12692 | WidenedVT = MVT::nxv4i32; | ||||
12693 | else | ||||
12694 | llvm_unreachable("Unexpected Custom DIV operation"); | ||||
12695 | |||||
12696 | unsigned UnpkLo = Signed ? AArch64ISD::SUNPKLO : AArch64ISD::UUNPKLO; | ||||
12697 | unsigned UnpkHi = Signed ? AArch64ISD::SUNPKHI : AArch64ISD::UUNPKHI; | ||||
12698 | SDValue Op0Lo = DAG.getNode(UnpkLo, dl, WidenedVT, Op.getOperand(0)); | ||||
12699 | SDValue Op1Lo = DAG.getNode(UnpkLo, dl, WidenedVT, Op.getOperand(1)); | ||||
12700 | SDValue Op0Hi = DAG.getNode(UnpkHi, dl, WidenedVT, Op.getOperand(0)); | ||||
12701 | SDValue Op1Hi = DAG.getNode(UnpkHi, dl, WidenedVT, Op.getOperand(1)); | ||||
12702 | SDValue ResultLo = DAG.getNode(Op.getOpcode(), dl, WidenedVT, Op0Lo, Op1Lo); | ||||
12703 | SDValue ResultHi = DAG.getNode(Op.getOpcode(), dl, WidenedVT, Op0Hi, Op1Hi); | ||||
12704 | return DAG.getNode(AArch64ISD::UZP1, dl, VT, ResultLo, ResultHi); | ||||
12705 | } | ||||
12706 | |||||
12707 | bool AArch64TargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const { | ||||
12708 | // Currently no fixed length shuffles that require SVE are legal. | ||||
12709 | if (useSVEForFixedLengthVectorVT(VT, | ||||
12710 | Subtarget->forceStreamingCompatibleSVE())) | ||||
12711 | return false; | ||||
12712 | |||||
12713 | if (VT.getVectorNumElements() == 4 && | ||||
12714 | (VT.is128BitVector() || VT.is64BitVector())) { | ||||
12715 | unsigned Cost = getPerfectShuffleCost(M); | ||||
12716 | if (Cost <= 1) | ||||
12717 | return true; | ||||
12718 | } | ||||
12719 | |||||
12720 | bool DummyBool; | ||||
12721 | int DummyInt; | ||||
12722 | unsigned DummyUnsigned; | ||||
12723 | |||||
12724 | return (ShuffleVectorSDNode::isSplatMask(&M[0], VT) || isREVMask(M, VT, 64) || | ||||
12725 | isREVMask(M, VT, 32) || isREVMask(M, VT, 16) || | ||||
12726 | isEXTMask(M, VT, DummyBool, DummyUnsigned) || | ||||
12727 | // isTBLMask(M, VT) || // FIXME: Port TBL support from ARM. | ||||
12728 | isTRNMask(M, VT, DummyUnsigned) || isUZPMask(M, VT, DummyUnsigned) || | ||||
12729 | isZIPMask(M, VT, DummyUnsigned) || | ||||
12730 | isTRN_v_undef_Mask(M, VT, DummyUnsigned) || | ||||
12731 | isUZP_v_undef_Mask(M, VT, DummyUnsigned) || | ||||
12732 | isZIP_v_undef_Mask(M, VT, DummyUnsigned) || | ||||
12733 | isINSMask(M, VT.getVectorNumElements(), DummyBool, DummyInt) || | ||||
12734 | isConcatMask(M, VT, VT.getSizeInBits() == 128)); | ||||
12735 | } | ||||
12736 | |||||
12737 | bool AArch64TargetLowering::isVectorClearMaskLegal(ArrayRef<int> M, | ||||
12738 | EVT VT) const { | ||||
12739 | // Just delegate to the generic legality, clear masks aren't special. | ||||
12740 | return isShuffleMaskLegal(M, VT); | ||||
12741 | } | ||||
12742 | |||||
12743 | /// getVShiftImm - Check if this is a valid build_vector for the immediate | ||||
12744 | /// operand of a vector shift operation, where all the elements of the | ||||
12745 | /// build_vector must have the same constant integer value. | ||||
12746 | static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) { | ||||
12747 | // Ignore bit_converts. | ||||
12748 | while (Op.getOpcode() == ISD::BITCAST) | ||||
12749 | Op = Op.getOperand(0); | ||||
12750 | BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); | ||||
12751 | APInt SplatBits, SplatUndef; | ||||
12752 | unsigned SplatBitSize; | ||||
12753 | bool HasAnyUndefs; | ||||
12754 | if (!BVN || !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, | ||||
12755 | HasAnyUndefs, ElementBits) || | ||||
12756 | SplatBitSize > ElementBits) | ||||
12757 | return false; | ||||
12758 | Cnt = SplatBits.getSExtValue(); | ||||
12759 | return true; | ||||
12760 | } | ||||
12761 | |||||
12762 | /// isVShiftLImm - Check if this is a valid build_vector for the immediate | ||||
12763 | /// operand of a vector shift left operation. That value must be in the range: | ||||
12764 | /// 0 <= Value < ElementBits for a left shift; or | ||||
12765 | /// 0 <= Value <= ElementBits for a long left shift. | ||||
12766 | static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) { | ||||
12767 | assert(VT.isVector() && "vector shift count is not a vector type"); | ||||
12768 | int64_t ElementBits = VT.getScalarSizeInBits(); | ||||
12769 | if (!getVShiftImm(Op, ElementBits, Cnt)) | ||||
12770 | return false; | ||||
12771 | return (Cnt >= 0 && (isLong ? Cnt - 1 : Cnt) < ElementBits); | ||||
12772 | } | ||||
12773 | |||||
12774 | /// isVShiftRImm - Check if this is a valid build_vector for the immediate | ||||
12775 | /// operand of a vector shift right operation. The value must be in the range: | ||||
12776 | /// 1 <= Value <= ElementBits for a right shift; or 1 <= Value <= ElementBits/2 for a narrow right shift. | ||||
12777 | static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, int64_t &Cnt) { | ||||
12778 | assert(VT.isVector() && "vector shift count is not a vector type"); | ||||
12779 | int64_t ElementBits = VT.getScalarSizeInBits(); | ||||
12780 | if (!getVShiftImm(Op, ElementBits, Cnt)) | ||||
12781 | return false; | ||||
12782 | return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits / 2 : ElementBits)); | ||||
12783 | } | ||||
12784 | |||||
12785 | SDValue AArch64TargetLowering::LowerTRUNCATE(SDValue Op, | ||||
12786 | SelectionDAG &DAG) const { | ||||
12787 | EVT VT = Op.getValueType(); | ||||
12788 | |||||
12789 | if (VT.getScalarType() == MVT::i1) { | ||||
12790 | // Lower i1 truncate to `(x & 1) != 0`. | ||||
12791 | SDLoc dl(Op); | ||||
12792 | EVT OpVT = Op.getOperand(0).getValueType(); | ||||
12793 | SDValue Zero = DAG.getConstant(0, dl, OpVT); | ||||
12794 | SDValue One = DAG.getConstant(1, dl, OpVT); | ||||
12795 | SDValue And = DAG.getNode(ISD::AND, dl, OpVT, Op.getOperand(0), One); | ||||
12796 | return DAG.getSetCC(dl, VT, And, Zero, ISD::SETNE); | ||||
12797 | } | ||||
12798 | |||||
12799 | if (!VT.isVector() || VT.isScalableVector()) | ||||
12800 | return SDValue(); | ||||
12801 | |||||
12802 | if (useSVEForFixedLengthVectorVT(Op.getOperand(0).getValueType(), | ||||
12803 | Subtarget->forceStreamingCompatibleSVE())) | ||||
12804 | return LowerFixedLengthVectorTruncateToSVE(Op, DAG); | ||||
12805 | |||||
12806 | return SDValue(); | ||||
12807 | } | ||||
12808 | |||||
12809 | SDValue AArch64TargetLowering::LowerVectorSRA_SRL_SHL(SDValue Op, | ||||
12810 | SelectionDAG &DAG) const { | ||||
12811 | EVT VT = Op.getValueType(); | ||||
12812 | SDLoc DL(Op); | ||||
12813 | int64_t Cnt; | ||||
12814 | |||||
12815 | if (!Op.getOperand(1).getValueType().isVector()) | ||||
12816 | return Op; | ||||
12817 | unsigned EltSize = VT.getScalarSizeInBits(); | ||||
12818 | |||||
12819 | switch (Op.getOpcode()) { | ||||
12820 | case ISD::SHL: | ||||
12821 | if (VT.isScalableVector() || | ||||
12822 | useSVEForFixedLengthVectorVT(VT, | ||||
12823 | Subtarget->forceStreamingCompatibleSVE())) | ||||
12824 | return LowerToPredicatedOp(Op, DAG, AArch64ISD::SHL_PRED); | ||||
12825 | |||||
12826 | if (isVShiftLImm(Op.getOperand(1), VT, false, Cnt) && Cnt < EltSize) | ||||
12827 | return DAG.getNode(AArch64ISD::VSHL, DL, VT, Op.getOperand(0), | ||||
12828 | DAG.getConstant(Cnt, DL, MVT::i32)); | ||||
12829 | return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, | ||||
12830 | DAG.getConstant(Intrinsic::aarch64_neon_ushl, DL, | ||||
12831 | MVT::i32), | ||||
12832 | Op.getOperand(0), Op.getOperand(1)); | ||||
12833 | case ISD::SRA: | ||||
12834 | case ISD::SRL: | ||||
12835 | if (VT.isScalableVector() || | ||||
12836 | useSVEForFixedLengthVectorVT( | ||||
12837 | VT, Subtarget->forceStreamingCompatibleSVE())) { | ||||
12838 | unsigned Opc = Op.getOpcode() == ISD::SRA ? AArch64ISD::SRA_PRED | ||||
12839 | : AArch64ISD::SRL_PRED; | ||||
12840 | return LowerToPredicatedOp(Op, DAG, Opc); | ||||
12841 | } | ||||
12842 | |||||
12843 | // Right shift immediate | ||||
12844 | if (isVShiftRImm(Op.getOperand(1), VT, false, Cnt) && Cnt < EltSize) { | ||||
12845 | unsigned Opc = | ||||
12846 | (Op.getOpcode() == ISD::SRA) ? AArch64ISD::VASHR : AArch64ISD::VLSHR; | ||||
12847 | return DAG.getNode(Opc, DL, VT, Op.getOperand(0), | ||||
12848 | DAG.getConstant(Cnt, DL, MVT::i32)); | ||||
12849 | } | ||||
12850 | |||||
12851 | // Right shift register. Note that there is no shift-right-register | ||||
12852 | // instruction, but the shift-left-register instruction takes a signed | ||||
12853 | // value, where negative amounts specify a right shift. | ||||
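// E.g. (srl x, y) becomes ushl(x, sub(0, y)), and (sra x, y) becomes
// sshl(x, sub(0, y)).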
12854 | unsigned Opc = (Op.getOpcode() == ISD::SRA) ? Intrinsic::aarch64_neon_sshl | ||||
12855 | : Intrinsic::aarch64_neon_ushl; | ||||
12856 | // Negate the shift amount. | ||||
12857 | SDValue NegShift = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), | ||||
12858 | Op.getOperand(1)); | ||||
12859 | SDValue NegShiftLeft = | ||||
12860 | DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, | ||||
12861 | DAG.getConstant(Opc, DL, MVT::i32), Op.getOperand(0), | ||||
12862 | NegShift); | ||||
12863 | return NegShiftLeft; | ||||
12864 | } | ||||
12865 | |||||
12866 | llvm_unreachable("unexpected shift opcode"); | ||||
12867 | } | ||||
12868 | |||||
12869 | static SDValue EmitVectorComparison(SDValue LHS, SDValue RHS, | ||||
12870 | AArch64CC::CondCode CC, bool NoNans, EVT VT, | ||||
12871 | const SDLoc &dl, SelectionDAG &DAG) { | ||||
12872 | EVT SrcVT = LHS.getValueType(); | ||||
12873 | assert(VT.getSizeInBits() == SrcVT.getSizeInBits() && | ||||
12874 | "function only supposed to emit natural comparisons"); | ||||
12875 | |||||
12876 | BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(RHS.getNode()); | ||||
12877 | APInt CnstBits(VT.getSizeInBits(), 0); | ||||
12878 | APInt UndefBits(VT.getSizeInBits(), 0); | ||||
12879 | bool IsCnst = BVN && resolveBuildVector(BVN, CnstBits, UndefBits); | ||||
12880 | bool IsZero = IsCnst && (CnstBits == 0); | ||||
12881 | |||||
12882 | if (SrcVT.getVectorElementType().isFloatingPoint()) { | ||||
12883 | switch (CC) { | ||||
12884 | default: | ||||
12885 | return SDValue(); | ||||
12886 | case AArch64CC::NE: { | ||||
12887 | SDValue Fcmeq; | ||||
12888 | if (IsZero) | ||||
12889 | Fcmeq = DAG.getNode(AArch64ISD::FCMEQz, dl, VT, LHS); | ||||
12890 | else | ||||
12891 | Fcmeq = DAG.getNode(AArch64ISD::FCMEQ, dl, VT, LHS, RHS); | ||||
12892 | return DAG.getNOT(dl, Fcmeq, VT); | ||||
12893 | } | ||||
12894 | case AArch64CC::EQ: | ||||
12895 | if (IsZero) | ||||
12896 | return DAG.getNode(AArch64ISD::FCMEQz, dl, VT, LHS); | ||||
12897 | return DAG.getNode(AArch64ISD::FCMEQ, dl, VT, LHS, RHS); | ||||
12898 | case AArch64CC::GE: | ||||
12899 | if (IsZero) | ||||
12900 | return DAG.getNode(AArch64ISD::FCMGEz, dl, VT, LHS); | ||||
12901 | return DAG.getNode(AArch64ISD::FCMGE, dl, VT, LHS, RHS); | ||||
12902 | case AArch64CC::GT: | ||||
12903 | if (IsZero) | ||||
12904 | return DAG.getNode(AArch64ISD::FCMGTz, dl, VT, LHS); | ||||
12905 | return DAG.getNode(AArch64ISD::FCMGT, dl, VT, LHS, RHS); | ||||
12906 | case AArch64CC::LE: | ||||
12907 | if (!NoNans) | ||||
12908 | return SDValue(); | ||||
12909 | // If we ignore NaNs then we can use the LS implementation. | ||||
12910 | [[fallthrough]]; | ||||
12911 | case AArch64CC::LS: | ||||
12912 | if (IsZero) | ||||
12913 | return DAG.getNode(AArch64ISD::FCMLEz, dl, VT, LHS); | ||||
12914 | return DAG.getNode(AArch64ISD::FCMGE, dl, VT, RHS, LHS); | ||||
12915 | case AArch64CC::LT: | ||||
12916 | if (!NoNans) | ||||
12917 | return SDValue(); | ||||
12918 | // If we ignore NaNs then we can use the MI implementation. | ||||
12919 | [[fallthrough]]; | ||||
12920 | case AArch64CC::MI: | ||||
12921 | if (IsZero) | ||||
12922 | return DAG.getNode(AArch64ISD::FCMLTz, dl, VT, LHS); | ||||
12923 | return DAG.getNode(AArch64ISD::FCMGT, dl, VT, RHS, LHS); | ||||
12924 | } | ||||
12925 | } | ||||
12926 | |||||
12927 | switch (CC) { | ||||
12928 | default: | ||||
12929 | return SDValue(); | ||||
12930 | case AArch64CC::NE: { | ||||
12931 | SDValue Cmeq; | ||||
12932 | if (IsZero) | ||||
12933 | Cmeq = DAG.getNode(AArch64ISD::CMEQz, dl, VT, LHS); | ||||
12934 | else | ||||
12935 | Cmeq = DAG.getNode(AArch64ISD::CMEQ, dl, VT, LHS, RHS); | ||||
12936 | return DAG.getNOT(dl, Cmeq, VT); | ||||
12937 | } | ||||
12938 | case AArch64CC::EQ: | ||||
12939 | if (IsZero) | ||||
12940 | return DAG.getNode(AArch64ISD::CMEQz, dl, VT, LHS); | ||||
12941 | return DAG.getNode(AArch64ISD::CMEQ, dl, VT, LHS, RHS); | ||||
12942 | case AArch64CC::GE: | ||||
12943 | if (IsZero) | ||||
12944 | return DAG.getNode(AArch64ISD::CMGEz, dl, VT, LHS); | ||||
12945 | return DAG.getNode(AArch64ISD::CMGE, dl, VT, LHS, RHS); | ||||
12946 | case AArch64CC::GT: | ||||
12947 | if (IsZero) | ||||
12948 | return DAG.getNode(AArch64ISD::CMGTz, dl, VT, LHS); | ||||
12949 | return DAG.getNode(AArch64ISD::CMGT, dl, VT, LHS, RHS); | ||||
12950 | case AArch64CC::LE: | ||||
12951 | if (IsZero) | ||||
12952 | return DAG.getNode(AArch64ISD::CMLEz, dl, VT, LHS); | ||||
12953 | return DAG.getNode(AArch64ISD::CMGE, dl, VT, RHS, LHS); | ||||
12954 | case AArch64CC::LS: | ||||
12955 | return DAG.getNode(AArch64ISD::CMHS, dl, VT, RHS, LHS); | ||||
12956 | case AArch64CC::LO: | ||||
12957 | return DAG.getNode(AArch64ISD::CMHI, dl, VT, RHS, LHS); | ||||
12958 | case AArch64CC::LT: | ||||
12959 | if (IsZero) | ||||
12960 | return DAG.getNode(AArch64ISD::CMLTz, dl, VT, LHS); | ||||
12961 | return DAG.getNode(AArch64ISD::CMGT, dl, VT, RHS, LHS); | ||||
12962 | case AArch64CC::HI: | ||||
12963 | return DAG.getNode(AArch64ISD::CMHI, dl, VT, LHS, RHS); | ||||
12964 | case AArch64CC::HS: | ||||
12965 | return DAG.getNode(AArch64ISD::CMHS, dl, VT, LHS, RHS); | ||||
12966 | } | ||||
12967 | } | ||||
12968 | |||||
12969 | SDValue AArch64TargetLowering::LowerVSETCC(SDValue Op, | ||||
12970 | SelectionDAG &DAG) const { | ||||
12971 | if (Op.getValueType().isScalableVector()) | ||||
12972 | return LowerToPredicatedOp(Op, DAG, AArch64ISD::SETCC_MERGE_ZERO); | ||||
12973 | |||||
12974 | if (useSVEForFixedLengthVectorVT(Op.getOperand(0).getValueType(), | ||||
12975 | Subtarget->forceStreamingCompatibleSVE())) | ||||
12976 | return LowerFixedLengthVectorSetccToSVE(Op, DAG); | ||||
12977 | |||||
12978 | ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); | ||||
12979 | SDValue LHS = Op.getOperand(0); | ||||
12980 | SDValue RHS = Op.getOperand(1); | ||||
12981 | EVT CmpVT = LHS.getValueType().changeVectorElementTypeToInteger(); | ||||
12982 | SDLoc dl(Op); | ||||
12983 | |||||
12984 | if (LHS.getValueType().getVectorElementType().isInteger()) { | ||||
12985 | assert(LHS.getValueType() == RHS.getValueType()); | ||||
12986 | AArch64CC::CondCode AArch64CC = changeIntCCToAArch64CC(CC); | ||||
12987 | SDValue Cmp = | ||||
12988 | EmitVectorComparison(LHS, RHS, AArch64CC, false, CmpVT, dl, DAG); | ||||
12989 | return DAG.getSExtOrTrunc(Cmp, dl, Op.getValueType()); | ||||
12990 | } | ||||
12991 | |||||
12992 | const bool FullFP16 = DAG.getSubtarget<AArch64Subtarget>().hasFullFP16(); | ||||
12993 | |||||
12994 | // Make v4f16 (only) fcmp operations utilise vector instructions; | ||||
12995 | // v8f16 support will be a little more complicated. | ||||
12996 | if (!FullFP16 && LHS.getValueType().getVectorElementType() == MVT::f16) { | ||||
12997 | if (LHS.getValueType().getVectorNumElements() == 4) { | ||||
12998 | LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::v4f32, LHS); | ||||
12999 | RHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::v4f32, RHS); | ||||
13000 | SDValue NewSetcc = DAG.getSetCC(dl, MVT::v4i16, LHS, RHS, CC); | ||||
13001 | DAG.ReplaceAllUsesWith(Op, NewSetcc); | ||||
13002 | CmpVT = MVT::v4i32; | ||||
13003 | } else | ||||
13004 | return SDValue(); | ||||
13005 | } | ||||
13006 | |||||
13007 | assert((!FullFP16 && LHS.getValueType().getVectorElementType() != MVT::f16) || | ||||
13008 | LHS.getValueType().getVectorElementType() != MVT::f128); | ||||
13009 | |||||
13010 | // Unfortunately, the mapping of LLVM FP CC's onto AArch64 CC's isn't totally | ||||
13011 | // clean. Some of them require two branches to implement. | ||||
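// E.g. an ordered not-equal ("one") compare has no single AArch64 vector
// condition and is emitted as the OR of a less-than and a greater-than
// compare (CC1 and CC2 below).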
13012 | AArch64CC::CondCode CC1, CC2; | ||||
13013 | bool ShouldInvert; | ||||
13014 | changeVectorFPCCToAArch64CC(CC, CC1, CC2, ShouldInvert); | ||||
13015 | |||||
13016 | bool NoNaNs = getTargetMachine().Options.NoNaNsFPMath || Op->getFlags().hasNoNaNs(); | ||||
13017 | SDValue Cmp = | ||||
13018 | EmitVectorComparison(LHS, RHS, CC1, NoNaNs, CmpVT, dl, DAG); | ||||
13019 | if (!Cmp.getNode()) | ||||
13020 | return SDValue(); | ||||
13021 | |||||
13022 | if (CC2 != AArch64CC::AL) { | ||||
13023 | SDValue Cmp2 = | ||||
13024 | EmitVectorComparison(LHS, RHS, CC2, NoNaNs, CmpVT, dl, DAG); | ||||
13025 | if (!Cmp2.getNode()) | ||||
13026 | return SDValue(); | ||||
13027 | |||||
13028 | Cmp = DAG.getNode(ISD::OR, dl, CmpVT, Cmp, Cmp2); | ||||
13029 | } | ||||
13030 | |||||
13031 | Cmp = DAG.getSExtOrTrunc(Cmp, dl, Op.getValueType()); | ||||
13032 | |||||
13033 | if (ShouldInvert) | ||||
13034 | Cmp = DAG.getNOT(dl, Cmp, Cmp.getValueType()); | ||||
13035 | |||||
13036 | return Cmp; | ||||
13037 | } | ||||
13038 | |||||
13039 | static SDValue getReductionSDNode(unsigned Op, SDLoc DL, SDValue ScalarOp, | ||||
13040 | SelectionDAG &DAG) { | ||||
13041 | SDValue VecOp = ScalarOp.getOperand(0); | ||||
13042 | auto Rdx = DAG.getNode(Op, DL, VecOp.getSimpleValueType(), VecOp); | ||||
13043 | return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ScalarOp.getValueType(), Rdx, | ||||
13044 | DAG.getConstant(0, DL, MVT::i64)); | ||||
13045 | } | ||||
13046 | |||||
13047 | SDValue AArch64TargetLowering::LowerVECREDUCE(SDValue Op, | ||||
13048 | SelectionDAG &DAG) const { | ||||
13049 | SDValue Src = Op.getOperand(0); | ||||
13050 | |||||
13051 | // Try to lower fixed length reductions to SVE. | ||||
13052 | EVT SrcVT = Src.getValueType(); | ||||
13053 | bool OverrideNEON = Subtarget->forceStreamingCompatibleSVE() || | ||||
13054 | Op.getOpcode() == ISD::VECREDUCE_AND || | ||||
13055 | Op.getOpcode() == ISD::VECREDUCE_OR || | ||||
13056 | Op.getOpcode() == ISD::VECREDUCE_XOR || | ||||
13057 | Op.getOpcode() == ISD::VECREDUCE_FADD || | ||||
13058 | (Op.getOpcode() != ISD::VECREDUCE_ADD && | ||||
13059 | SrcVT.getVectorElementType() == MVT::i64); | ||||
13060 | if (SrcVT.isScalableVector() || | ||||
13061 | useSVEForFixedLengthVectorVT( | ||||
13062 | SrcVT, OverrideNEON && Subtarget->useSVEForFixedLengthVectors())) { | ||||
13063 | |||||
13064 | if (SrcVT.getVectorElementType() == MVT::i1) | ||||
13065 | return LowerPredReductionToSVE(Op, DAG); | ||||
13066 | |||||
13067 | switch (Op.getOpcode()) { | ||||
13068 | case ISD::VECREDUCE_ADD: | ||||
13069 | return LowerReductionToSVE(AArch64ISD::UADDV_PRED, Op, DAG); | ||||
13070 | case ISD::VECREDUCE_AND: | ||||
13071 | return LowerReductionToSVE(AArch64ISD::ANDV_PRED, Op, DAG); | ||||
13072 | case ISD::VECREDUCE_OR: | ||||
13073 | return LowerReductionToSVE(AArch64ISD::ORV_PRED, Op, DAG); | ||||
13074 | case ISD::VECREDUCE_SMAX: | ||||
13075 | return LowerReductionToSVE(AArch64ISD::SMAXV_PRED, Op, DAG); | ||||
13076 | case ISD::VECREDUCE_SMIN: | ||||
13077 | return LowerReductionToSVE(AArch64ISD::SMINV_PRED, Op, DAG); | ||||
13078 | case ISD::VECREDUCE_UMAX: | ||||
13079 | return LowerReductionToSVE(AArch64ISD::UMAXV_PRED, Op, DAG); | ||||
13080 | case ISD::VECREDUCE_UMIN: | ||||
13081 | return LowerReductionToSVE(AArch64ISD::UMINV_PRED, Op, DAG); | ||||
13082 | case ISD::VECREDUCE_XOR: | ||||
13083 | return LowerReductionToSVE(AArch64ISD::EORV_PRED, Op, DAG); | ||||
13084 | case ISD::VECREDUCE_FADD: | ||||
13085 | return LowerReductionToSVE(AArch64ISD::FADDV_PRED, Op, DAG); | ||||
13086 | case ISD::VECREDUCE_FMAX: | ||||
13087 | return LowerReductionToSVE(AArch64ISD::FMAXNMV_PRED, Op, DAG); | ||||
13088 | case ISD::VECREDUCE_FMIN: | ||||
13089 | return LowerReductionToSVE(AArch64ISD::FMINNMV_PRED, Op, DAG); | ||||
13090 | default: | ||||
13091 | llvm_unreachable("Unhandled fixed length reduction"); | ||||
13092 | } | ||||
13093 | } | ||||
13094 | |||||
13095 | // Lower NEON reductions. | ||||
13096 | SDLoc dl(Op); | ||||
13097 | switch (Op.getOpcode()) { | ||||
13098 | case ISD::VECREDUCE_ADD: | ||||
13099 | return getReductionSDNode(AArch64ISD::UADDV, dl, Op, DAG); | ||||
13100 | case ISD::VECREDUCE_SMAX: | ||||
13101 | return getReductionSDNode(AArch64ISD::SMAXV, dl, Op, DAG); | ||||
13102 | case ISD::VECREDUCE_SMIN: | ||||
13103 | return getReductionSDNode(AArch64ISD::SMINV, dl, Op, DAG); | ||||
13104 | case ISD::VECREDUCE_UMAX: | ||||
13105 | return getReductionSDNode(AArch64ISD::UMAXV, dl, Op, DAG); | ||||
13106 | case ISD::VECREDUCE_UMIN: | ||||
13107 | return getReductionSDNode(AArch64ISD::UMINV, dl, Op, DAG); | ||||
13108 | case ISD::VECREDUCE_FMAX: { | ||||
13109 | return DAG.getNode( | ||||
13110 | ISD::INTRINSIC_WO_CHAIN, dl, Op.getValueType(), | ||||
13111 | DAG.getConstant(Intrinsic::aarch64_neon_fmaxnmv, dl, MVT::i32), | ||||
13112 | Src); | ||||
13113 | } | ||||
13114 | case ISD::VECREDUCE_FMIN: { | ||||
13115 | return DAG.getNode( | ||||
13116 | ISD::INTRINSIC_WO_CHAIN, dl, Op.getValueType(), | ||||
13117 | DAG.getConstant(Intrinsic::aarch64_neon_fminnmv, dl, MVT::i32), | ||||
13118 | Src); | ||||
13119 | } | ||||
13120 | default: | ||||
13121 | llvm_unreachable("Unhandled reduction"); | ||||
13122 | } | ||||
13123 | } | ||||
13124 | |||||
13125 | SDValue AArch64TargetLowering::LowerATOMIC_LOAD_SUB(SDValue Op, | ||||
13126 | SelectionDAG &DAG) const { | ||||
13127 | auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>(); | ||||
13128 | if (!Subtarget.hasLSE() && !Subtarget.outlineAtomics()) | ||||
13129 | return SDValue(); | ||||
13130 | |||||
13131 | // LSE has an atomic load-add instruction, but not a load-sub. | ||||
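// I.e. "atomicrmw sub p, x" is emitted as an atomic load-add of (0 - x).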
13132 | SDLoc dl(Op); | ||||
13133 | MVT VT = Op.getSimpleValueType(); | ||||
13134 | SDValue RHS = Op.getOperand(2); | ||||
13135 | AtomicSDNode *AN = cast<AtomicSDNode>(Op.getNode()); | ||||
13136 | RHS = DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(0, dl, VT), RHS); | ||||
13137 | return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl, AN->getMemoryVT(), | ||||
13138 | Op.getOperand(0), Op.getOperand(1), RHS, | ||||
13139 | AN->getMemOperand()); | ||||
13140 | } | ||||
13141 | |||||
13142 | SDValue AArch64TargetLowering::LowerATOMIC_LOAD_AND(SDValue Op, | ||||
13143 | SelectionDAG &DAG) const { | ||||
13144 | auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>(); | ||||
13145 | if (!Subtarget.hasLSE() && !Subtarget.outlineAtomics()) | ||||
13146 | return SDValue(); | ||||
13147 | |||||
13148 | // LSE has an atomic load-clear instruction, but not a load-and. | ||||
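// I.e. "atomicrmw and p, x" is emitted as an atomic load-clear of ~x,
// since the load-clear operation computes mem &= ~operand.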
13149 | SDLoc dl(Op); | ||||
13150 | MVT VT = Op.getSimpleValueType(); | ||||
13151 | SDValue RHS = Op.getOperand(2); | ||||
13152 | AtomicSDNode *AN = cast<AtomicSDNode>(Op.getNode()); | ||||
13153 | RHS = DAG.getNode(ISD::XOR, dl, VT, DAG.getConstant(-1ULL, dl, VT), RHS); | ||||
13154 | return DAG.getAtomic(ISD::ATOMIC_LOAD_CLR, dl, AN->getMemoryVT(), | ||||
13155 | Op.getOperand(0), Op.getOperand(1), RHS, | ||||
13156 | AN->getMemOperand()); | ||||
13157 | } | ||||
13158 | |||||
13159 | SDValue AArch64TargetLowering::LowerWindowsDYNAMIC_STACKALLOC( | ||||
13160 | SDValue Op, SDValue Chain, SDValue &Size, SelectionDAG &DAG) const { | ||||
13161 | SDLoc dl(Op); | ||||
13162 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); | ||||
13163 | SDValue Callee = DAG.getTargetExternalSymbol(Subtarget->getChkStkName(), | ||||
13164 | PtrVT, 0); | ||||
13165 | |||||
13166 | const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo(); | ||||
13167 | const uint32_t *Mask = TRI->getWindowsStackProbePreservedMask(); | ||||
13168 | if (Subtarget->hasCustomCallingConv()) | ||||
13169 | TRI->UpdateCustomCallPreservedMask(DAG.getMachineFunction(), &Mask); | ||||
13170 | |||||
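// The Windows stack-probe helper expects the allocation size in X15 in
// units of 16 bytes, hence the shift right by 4 here and the compensating
// shift left by 4 after the call.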
13171 | Size = DAG.getNode(ISD::SRL, dl, MVT::i64, Size, | ||||
13172 | DAG.getConstant(4, dl, MVT::i64)); | ||||
13173 | Chain = DAG.getCopyToReg(Chain, dl, AArch64::X15, Size, SDValue()); | ||||
13174 | Chain = | ||||
13175 | DAG.getNode(AArch64ISD::CALL, dl, DAG.getVTList(MVT::Other, MVT::Glue), | ||||
13176 | Chain, Callee, DAG.getRegister(AArch64::X15, MVT::i64), | ||||
13177 | DAG.getRegisterMask(Mask), Chain.getValue(1)); | ||||
13178 | // To match the actual intent better, we should read the output from X15 here | ||||
13179 | // again (instead of potentially spilling it to the stack), but rereading Size | ||||
13180 | // from X15 here doesn't work at -O0, since it thinks that X15 is undefined | ||||
13181 | // here. | ||||
13182 | |||||
13183 | Size = DAG.getNode(ISD::SHL, dl, MVT::i64, Size, | ||||
13184 | DAG.getConstant(4, dl, MVT::i64)); | ||||
13185 | return Chain; | ||||
13186 | } | ||||
13187 | |||||
13188 | SDValue | ||||
13189 | AArch64TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, | ||||
13190 | SelectionDAG &DAG) const { | ||||
13191 | assert(Subtarget->isTargetWindows() && | ||||
13192 | "Only Windows alloca probing supported"); | ||||
13193 | SDLoc dl(Op); | ||||
13194 | // Get the inputs. | ||||
13195 | SDNode *Node = Op.getNode(); | ||||
13196 | SDValue Chain = Op.getOperand(0); | ||||
13197 | SDValue Size = Op.getOperand(1); | ||||
13198 | MaybeAlign Align = | ||||
13199 | cast<ConstantSDNode>(Op.getOperand(2))->getMaybeAlignValue(); | ||||
13200 | EVT VT = Node->getValueType(0); | ||||
13201 | |||||
13202 | if (DAG.getMachineFunction().getFunction().hasFnAttribute( | ||||
13203 | "no-stack-arg-probe")) { | ||||
13204 | SDValue SP = DAG.getCopyFromReg(Chain, dl, AArch64::SP, MVT::i64); | ||||
13205 | Chain = SP.getValue(1); | ||||
13206 | SP = DAG.getNode(ISD::SUB, dl, MVT::i64, SP, Size); | ||||
13207 | if (Align) | ||||
13208 | SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0), | ||||
13209 | DAG.getConstant(-(uint64_t)Align->value(), dl, VT)); | ||||
13210 | Chain = DAG.getCopyToReg(Chain, dl, AArch64::SP, SP); | ||||
13211 | SDValue Ops[2] = {SP, Chain}; | ||||
13212 | return DAG.getMergeValues(Ops, dl); | ||||
13213 | } | ||||
13214 | |||||
13215 | Chain = DAG.getCALLSEQ_START(Chain, 0, 0, dl); | ||||
13216 | |||||
13217 | Chain = LowerWindowsDYNAMIC_STACKALLOC(Op, Chain, Size, DAG); | ||||
13218 | |||||
13219 | SDValue SP = DAG.getCopyFromReg(Chain, dl, AArch64::SP, MVT::i64); | ||||
13220 | Chain = SP.getValue(1); | ||||
13221 | SP = DAG.getNode(ISD::SUB, dl, MVT::i64, SP, Size); | ||||
13222 | if (Align) | ||||
13223 | SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0), | ||||
13224 | DAG.getConstant(-(uint64_t)Align->value(), dl, VT)); | ||||
13225 | Chain = DAG.getCopyToReg(Chain, dl, AArch64::SP, SP); | ||||
13226 | |||||
13227 | Chain = DAG.getCALLSEQ_END(Chain, 0, 0, SDValue(), dl); | ||||
13228 | |||||
13229 | SDValue Ops[2] = {SP, Chain}; | ||||
13230 | return DAG.getMergeValues(Ops, dl); | ||||
13231 | } | ||||
13232 | |||||
13233 | SDValue AArch64TargetLowering::LowerVSCALE(SDValue Op, | ||||
13234 | SelectionDAG &DAG) const { | ||||
13235 | EVT VT = Op.getValueType(); | ||||
13236 | assert(VT != MVT::i64 && "Expected illegal VSCALE node"); | ||||
13237 | |||||
13238 | SDLoc DL(Op); | ||||
13239 | APInt MulImm = cast<ConstantSDNode>(Op.getOperand(0))->getAPIntValue(); | ||||
13240 | return DAG.getZExtOrTrunc(DAG.getVScale(DL, MVT::i64, MulImm.sext(64)), DL, | ||||
13241 | VT); | ||||
13242 | } | ||||
13243 | |||||
13244 | /// Set the IntrinsicInfo for the `aarch64_sve_st<N>` intrinsics. | ||||
13245 | template <unsigned NumVecs> | ||||
13246 | static bool | ||||
13247 | setInfoSVEStN(const AArch64TargetLowering &TLI, const DataLayout &DL, | ||||
13248 | AArch64TargetLowering::IntrinsicInfo &Info, const CallInst &CI) { | ||||
13249 | Info.opc = ISD::INTRINSIC_VOID; | ||||
13250 | // Retrieve EC from first vector argument. | ||||
13251 | const EVT VT = TLI.getMemValueType(DL, CI.getArgOperand(0)->getType()); | ||||
13252 | ElementCount EC = VT.getVectorElementCount(); | ||||
13253 | #ifndef NDEBUG | ||||
13254 | // Check the assumption that all input vectors are the same type. | ||||
13255 | for (unsigned I = 0; I < NumVecs; ++I) | ||||
13256 | assert(VT == TLI.getMemValueType(DL, CI.getArgOperand(I)->getType()) && | ||||
13257 | "Invalid type."); | ||||
13258 | #endif | ||||
13259 | // memVT is `NumVecs * VT`. | ||||
13260 | Info.memVT = EVT::getVectorVT(CI.getType()->getContext(), VT.getScalarType(), | ||||
13261 | EC * NumVecs); | ||||
13262 | Info.ptrVal = CI.getArgOperand(CI.arg_size() - 1); | ||||
13263 | Info.offset = 0; | ||||
13264 | Info.align.reset(); | ||||
13265 | Info.flags = MachineMemOperand::MOStore; | ||||
13266 | return true; | ||||
13267 | } | ||||
13268 | |||||
13269 | /// getTgtMemIntrinsic - Represent NEON load and store intrinsics as | ||||
13270 | /// MemIntrinsicNodes. The associated MachineMemOperands record the alignment | ||||
13271 | /// specified in the intrinsic calls. | ||||
13272 | bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, | ||||
13273 | const CallInst &I, | ||||
13274 | MachineFunction &MF, | ||||
13275 | unsigned Intrinsic) const { | ||||
13276 | auto &DL = I.getModule()->getDataLayout(); | ||||
13277 | switch (Intrinsic) { | ||||
13278 | case Intrinsic::aarch64_sve_st2: | ||||
13279 | return setInfoSVEStN<2>(*this, DL, Info, I); | ||||
13280 | case Intrinsic::aarch64_sve_st3: | ||||
13281 | return setInfoSVEStN<3>(*this, DL, Info, I); | ||||
13282 | case Intrinsic::aarch64_sve_st4: | ||||
13283 | return setInfoSVEStN<4>(*this, DL, Info, I); | ||||
13284 | case Intrinsic::aarch64_neon_ld2: | ||||
13285 | case Intrinsic::aarch64_neon_ld3: | ||||
13286 | case Intrinsic::aarch64_neon_ld4: | ||||
13287 | case Intrinsic::aarch64_neon_ld1x2: | ||||
13288 | case Intrinsic::aarch64_neon_ld1x3: | ||||
13289 | case Intrinsic::aarch64_neon_ld1x4: | ||||
13290 | case Intrinsic::aarch64_neon_ld2lane: | ||||
13291 | case Intrinsic::aarch64_neon_ld3lane: | ||||
13292 | case Intrinsic::aarch64_neon_ld4lane: | ||||
13293 | case Intrinsic::aarch64_neon_ld2r: | ||||
13294 | case Intrinsic::aarch64_neon_ld3r: | ||||
13295 | case Intrinsic::aarch64_neon_ld4r: { | ||||
13296 | Info.opc = ISD::INTRINSIC_W_CHAIN; | ||||
13297 | // Conservatively set memVT to the entire set of vectors loaded. | ||||
13298 | uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64; | ||||
13299 | Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); | ||||
13300 | Info.ptrVal = I.getArgOperand(I.arg_size() - 1); | ||||
13301 | Info.offset = 0; | ||||
13302 | Info.align.reset(); | ||||
13303 | // volatile loads with NEON intrinsics not supported | ||||
13304 | Info.flags = MachineMemOperand::MOLoad; | ||||
13305 | return true; | ||||
13306 | } | ||||
13307 | case Intrinsic::aarch64_neon_st2: | ||||
13308 | case Intrinsic::aarch64_neon_st3: | ||||
13309 | case Intrinsic::aarch64_neon_st4: | ||||
13310 | case Intrinsic::aarch64_neon_st1x2: | ||||
13311 | case Intrinsic::aarch64_neon_st1x3: | ||||
13312 | case Intrinsic::aarch64_neon_st1x4: | ||||
13313 | case Intrinsic::aarch64_neon_st2lane: | ||||
13314 | case Intrinsic::aarch64_neon_st3lane: | ||||
13315 | case Intrinsic::aarch64_neon_st4lane: { | ||||
13316 | Info.opc = ISD::INTRINSIC_VOID; | ||||
13317 | // Conservatively set memVT to the entire set of vectors stored. | ||||
13318 | unsigned NumElts = 0; | ||||
13319 | for (const Value *Arg : I.args()) { | ||||
13320 | Type *ArgTy = Arg->getType(); | ||||
13321 | if (!ArgTy->isVectorTy()) | ||||
13322 | break; | ||||
13323 | NumElts += DL.getTypeSizeInBits(ArgTy) / 64; | ||||
13324 | } | ||||
13325 | Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); | ||||
13326 | Info.ptrVal = I.getArgOperand(I.arg_size() - 1); | ||||
13327 | Info.offset = 0; | ||||
13328 | Info.align.reset(); | ||||
13329 | // volatile stores with NEON intrinsics not supported | ||||
13330 | Info.flags = MachineMemOperand::MOStore; | ||||
13331 | return true; | ||||
13332 | } | ||||
13333 | case Intrinsic::aarch64_ldaxr: | ||||
13334 | case Intrinsic::aarch64_ldxr: { | ||||
13335 | Type *ValTy = I.getParamElementType(0); | ||||
13336 | Info.opc = ISD::INTRINSIC_W_CHAIN; | ||||
13337 | Info.memVT = MVT::getVT(ValTy); | ||||
13338 | Info.ptrVal = I.getArgOperand(0); | ||||
13339 | Info.offset = 0; | ||||
13340 | Info.align = DL.getABITypeAlign(ValTy); | ||||
13341 | Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile; | ||||
13342 | return true; | ||||
13343 | } | ||||
13344 | case Intrinsic::aarch64_stlxr: | ||||
13345 | case Intrinsic::aarch64_stxr: { | ||||
13346 | Type *ValTy = I.getParamElementType(1); | ||||
13347 | Info.opc = ISD::INTRINSIC_W_CHAIN; | ||||
13348 | Info.memVT = MVT::getVT(ValTy); | ||||
13349 | Info.ptrVal = I.getArgOperand(1); | ||||
13350 | Info.offset = 0; | ||||
13351 | Info.align = DL.getABITypeAlign(ValTy); | ||||
13352 | Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile; | ||||
13353 | return true; | ||||
13354 | } | ||||
13355 | case Intrinsic::aarch64_ldaxp: | ||||
13356 | case Intrinsic::aarch64_ldxp: | ||||
13357 | Info.opc = ISD::INTRINSIC_W_CHAIN; | ||||
13358 | Info.memVT = MVT::i128; | ||||
13359 | Info.ptrVal = I.getArgOperand(0); | ||||
13360 | Info.offset = 0; | ||||
13361 | Info.align = Align(16); | ||||
13362 | Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile; | ||||
13363 | return true; | ||||
13364 | case Intrinsic::aarch64_stlxp: | ||||
13365 | case Intrinsic::aarch64_stxp: | ||||
13366 | Info.opc = ISD::INTRINSIC_W_CHAIN; | ||||
13367 | Info.memVT = MVT::i128; | ||||
13368 | Info.ptrVal = I.getArgOperand(2); | ||||
13369 | Info.offset = 0; | ||||
13370 | Info.align = Align(16); | ||||
13371 | Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile; | ||||
13372 | return true; | ||||
13373 | case Intrinsic::aarch64_sve_ldnt1: { | ||||
13374 | Type *ElTy = cast<VectorType>(I.getType())->getElementType(); | ||||
13375 | Info.opc = ISD::INTRINSIC_W_CHAIN; | ||||
13376 | Info.memVT = MVT::getVT(I.getType()); | ||||
13377 | Info.ptrVal = I.getArgOperand(1); | ||||
13378 | Info.offset = 0; | ||||
13379 | Info.align = DL.getABITypeAlign(ElTy); | ||||
13380 | Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MONonTemporal; | ||||
13381 | return true; | ||||
13382 | } | ||||
13383 | case Intrinsic::aarch64_sve_stnt1: { | ||||
13384 | Type *ElTy = | ||||
13385 | cast<VectorType>(I.getArgOperand(0)->getType())->getElementType(); | ||||
13386 | Info.opc = ISD::INTRINSIC_W_CHAIN; | ||||
13387 | Info.memVT = MVT::getVT(I.getOperand(0)->getType()); | ||||
13388 | Info.ptrVal = I.getArgOperand(2); | ||||
13389 | Info.offset = 0; | ||||
13390 | Info.align = DL.getABITypeAlign(ElTy); | ||||
13391 | Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MONonTemporal; | ||||
13392 | return true; | ||||
13393 | } | ||||
13394 | case Intrinsic::aarch64_mops_memset_tag: { | ||||
13395 | Value *Dst = I.getArgOperand(0); | ||||
13396 | Value *Val = I.getArgOperand(1); | ||||
13397 | Info.opc = ISD::INTRINSIC_W_CHAIN; | ||||
13398 | Info.memVT = MVT::getVT(Val->getType()); | ||||
13399 | Info.ptrVal = Dst; | ||||
13400 | Info.offset = 0; | ||||
13401 | Info.align = I.getParamAlign(0).valueOrOne(); | ||||
13402 | Info.flags = MachineMemOperand::MOStore; | ||||
13403 | // The size of the memory being operated on is unknown at this point | ||||
13404 | Info.size = MemoryLocation::UnknownSize; | ||||
13405 | return true; | ||||
13406 | } | ||||
13407 | default: | ||||
13408 | break; | ||||
13409 | } | ||||
13410 | |||||
13411 | return false; | ||||
13412 | } | ||||
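// For illustration (assumed IR): a call like
//   %v = call i64 @llvm.aarch64.ldaxr.p0(ptr elementtype(i32) %p)
// takes the aarch64_ldaxr case above and is modelled as a volatile 32-bit
// load: memVT = i32, ptrVal = %p, flags = MOLoad | MOVolatile.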
13413 | |||||
13414 | bool AArch64TargetLowering::shouldReduceLoadWidth(SDNode *Load, | ||||
13415 | ISD::LoadExtType ExtTy, | ||||
13416 | EVT NewVT) const { | ||||
13417 | // TODO: This may be worth removing. Check regression tests for diffs. | ||||
13418 | if (!TargetLoweringBase::shouldReduceLoadWidth(Load, ExtTy, NewVT)) | ||||
13419 | return false; | ||||
13420 | |||||
13421 | // If we're reducing the load width in order to avoid having to use an extra | ||||
13422 | // instruction to do extension then it's probably a good idea. | ||||
13423 | if (ExtTy != ISD::NON_EXTLOAD) | ||||
13424 | return true; | ||||
13425 | // Don't reduce load width if it would prevent us from combining a shift into | ||||
13426 | // the offset. | ||||
13427 | MemSDNode *Mem = dyn_cast<MemSDNode>(Load); | ||||
13428 | assert(Mem); | ||||
13429 | const SDValue &Base = Mem->getBasePtr(); | ||||
13430 | if (Base.getOpcode() == ISD::ADD && | ||||
13431 | Base.getOperand(1).getOpcode() == ISD::SHL && | ||||
13432 | Base.getOperand(1).hasOneUse() && | ||||
13433 | Base.getOperand(1).getOperand(1).getOpcode() == ISD::Constant) { | ||||
13434 | // It's unknown whether a scalable vector has a power-of-2 bitwidth. | ||||
13435 | if (Mem->getMemoryVT().isScalableVector()) | ||||
13436 | return false; | ||||
13437 | // The shift can be combined if it matches the size of the value being | ||||
13438 | // loaded (and so reducing the width would make it not match). | ||||
13439 | uint64_t ShiftAmount = Base.getOperand(1).getConstantOperandVal(1); | ||||
13440 | uint64_t LoadBytes = Mem->getMemoryVT().getSizeInBits()/8; | ||||
13441 | if (ShiftAmount == Log2_32(LoadBytes)) | ||||
13442 | return false; | ||||
13443 | } | ||||
13444 | // We have no reason to disallow reducing the load width, so allow it. | ||||
13445 | return true; | ||||
13446 | } | ||||
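// Sketch of the fold this protects (illustrative, names assumed): for
//   %p = getelementptr i64, ptr %base, i64 %idx
//   %v = load i64, ptr %p
// the address is (add %base (shl %idx, 3)); the shift amount 3 equals
// log2(8 loaded bytes) and folds into "ldr x0, [x0, x1, lsl #3]". Narrowing
// the load to i32 would make log2(LoadBytes) == 2 and break that fold.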
13447 | |||||
13448 | // Truncations from a 64-bit GPR to a 32-bit GPR are free. | ||||
13449 | bool AArch64TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const { | ||||
13450 | if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) | ||||
13451 | return false; | ||||
13452 | uint64_t NumBits1 = Ty1->getPrimitiveSizeInBits().getFixedSize(); | ||||
13453 | uint64_t NumBits2 = Ty2->getPrimitiveSizeInBits().getFixedSize(); | ||||
13454 | return NumBits1 > NumBits2; | ||||
13455 | } | ||||
13456 | bool AArch64TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const { | ||||
13457 | if (VT1.isVector() || VT2.isVector() || !VT1.isInteger() || !VT2.isInteger()) | ||||
13458 | return false; | ||||
13459 | uint64_t NumBits1 = VT1.getFixedSizeInBits(); | ||||
13460 | uint64_t NumBits2 = VT2.getFixedSizeInBits(); | ||||
13461 | return NumBits1 > NumBits2; | ||||
13462 | } | ||||
13463 | |||||
13464 | /// Check if it is profitable to hoist instruction in then/else to if. | ||||
13465 | /// Not profitable if I and its user can form an FMA instruction | ||||
13466 | /// because we prefer FMSUB/FMADD. | ||||
13467 | bool AArch64TargetLowering::isProfitableToHoist(Instruction *I) const { | ||||
13468 | if (I->getOpcode() != Instruction::FMul) | ||||
13469 | return true; | ||||
13470 | |||||
13471 | if (!I->hasOneUse()) | ||||
13472 | return true; | ||||
13473 | |||||
13474 | Instruction *User = I->user_back(); | ||||
13475 | |||||
13476 | if (!(User->getOpcode() == Instruction::FSub || | ||||
13477 | User->getOpcode() == Instruction::FAdd)) | ||||
13478 | return true; | ||||
13479 | |||||
13480 | const TargetOptions &Options = getTargetMachine().Options; | ||||
13481 | const Function *F = I->getFunction(); | ||||
13482 | const DataLayout &DL = F->getParent()->getDataLayout(); | ||||
13483 | Type *Ty = User->getOperand(0)->getType(); | ||||
13484 | |||||
13485 | return !(isFMAFasterThanFMulAndFAdd(*F, Ty) && | ||||
13486 | isOperationLegalOrCustom(ISD::FMA, getValueType(DL, Ty)) && | ||||
13487 | (Options.AllowFPOpFusion == FPOpFusion::Fast || | ||||
13488 | Options.UnsafeFPMath)); | ||||
13489 | } | ||||
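// Illustrative IR for the case kept un-hoisted (assumed, with fast FP fusion
// enabled):
//   %m = fmul double %a, %b
//   %s = fsub double %c, %m
// selects to a single FMSUB, so hoisting %m away from %s is not profitable.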
13490 | |||||
13491 | // All 32-bit GPR operations implicitly zero the high-half of the corresponding | ||||
13492 | // 64-bit GPR. | ||||
13493 | bool AArch64TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const { | ||||
13494 | if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) | ||||
13495 | return false; | ||||
13496 | unsigned NumBits1 = Ty1->getPrimitiveSizeInBits(); | ||||
13497 | unsigned NumBits2 = Ty2->getPrimitiveSizeInBits(); | ||||
13498 | return NumBits1 == 32 && NumBits2 == 64; | ||||
13499 | } | ||||
13500 | bool AArch64TargetLowering::isZExtFree(EVT VT1, EVT VT2) const { | ||||
13501 | if (VT1.isVector() || VT2.isVector() || !VT1.isInteger() || !VT2.isInteger()) | ||||
13502 | return false; | ||||
13503 | unsigned NumBits1 = VT1.getSizeInBits(); | ||||
13504 | unsigned NumBits2 = VT2.getSizeInBits(); | ||||
13505 | return NumBits1 == 32 && NumBits2 == 64; | ||||
13506 | } | ||||
13507 | |||||
13508 | bool AArch64TargetLowering::isZExtFree(SDValue Val, EVT VT2) const { | ||||
13509 | EVT VT1 = Val.getValueType(); | ||||
13510 | if (isZExtFree(VT1, VT2)) { | ||||
13511 | return true; | ||||
13512 | } | ||||
13513 | |||||
13514 | if (Val.getOpcode() != ISD::LOAD) | ||||
13515 | return false; | ||||
13516 | |||||
13517 | // 8-, 16-, and 32-bit integer loads all implicitly zero-extend. | ||||
13518 | return (VT1.isSimple() && !VT1.isVector() && VT1.isInteger() && | ||||
13519 | VT2.isSimple() && !VT2.isVector() && VT2.isInteger() && | ||||
13520 | VT1.getSizeInBits() <= 32); | ||||
13521 | } | ||||
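// Illustrative: "ldrb w0, [x1]" and "add w0, w1, w2" both write a W register,
// which zeroes bits [63:32] of the corresponding X register, so a later
// (zext i32 ... to i64) costs no extra instruction.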
13522 | |||||
13523 | bool AArch64TargetLowering::isExtFreeImpl(const Instruction *Ext) const { | ||||
13524 | if (isa<FPExtInst>(Ext)) | ||||
13525 | return false; | ||||
13526 | |||||
13527 | // Vector types are not free. | ||||
13528 | if (Ext->getType()->isVectorTy()) | ||||
13529 | return false; | ||||
13530 | |||||
13531 | for (const Use &U : Ext->uses()) { | ||||
13532 | // The extension is free if we can fold it with a left shift in an | ||||
13533 | // addressing mode or an arithmetic operation: add, sub, and cmp. | ||||
13534 | |||||
13535 | // Is there a shift? | ||||
13536 | const Instruction *Instr = cast<Instruction>(U.getUser()); | ||||
13537 | |||||
13538 | // Is this a constant shift? | ||||
13539 | switch (Instr->getOpcode()) { | ||||
13540 | case Instruction::Shl: | ||||
13541 | if (!isa<ConstantInt>(Instr->getOperand(1))) | ||||
13542 | return false; | ||||
13543 | break; | ||||
13544 | case Instruction::GetElementPtr: { | ||||
13545 | gep_type_iterator GTI = gep_type_begin(Instr); | ||||
13546 | auto &DL = Ext->getModule()->getDataLayout(); | ||||
13547 | std::advance(GTI, U.getOperandNo()-1); | ||||
13548 | Type *IdxTy = GTI.getIndexedType(); | ||||
13549 | // This extension will end up with a shift because of the scaling factor. | ||||
13550 | // 8-bit sized types have a scaling factor of 1, thus a shift amount of 0. | ||||
13551 | // Get the shift amount based on the scaling factor: | ||||
13552 | // log2(sizeof(IdxTy)) - log2(8). | ||||
13553 | uint64_t ShiftAmt = | ||||
13554 | countTrailingZeros(DL.getTypeStoreSizeInBits(IdxTy).getFixedSize()) - 3; | ||||
13555 | // Is the constant foldable in the shift of the addressing mode? | ||||
13556 | // I.e., shift amount is between 1 and 4 inclusive. | ||||
13557 | if (ShiftAmt == 0 || ShiftAmt > 4) | ||||
13558 | return false; | ||||
13559 | break; | ||||
13560 | } | ||||
13561 | case Instruction::Trunc: | ||||
13562 | // Check if this is a noop. | ||||
13563 | // trunc(sext ty1 to ty2) to ty1. | ||||
13564 | if (Instr->getType() == Ext->getOperand(0)->getType()) | ||||
13565 | continue; | ||||
13566 | [[fallthrough]]; | ||||
13567 | default: | ||||
13568 | return false; | ||||
13569 | } | ||||
13570 | |||||
13571 | // At this point we can use the bfm family, so this extension is free | ||||
13572 | // for that use. | ||||
13573 | } | ||||
13574 | return true; | ||||
13575 | } | ||||
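// Illustrative free extension (names assumed): for
//   %e = sext i32 %i to i64
//   %p = getelementptr i32, ptr %base, i64 %e
// ShiftAmt is log2(4 bytes) = 2, which folds into the addressing mode as
// "ldr w0, [x0, w1, sxtw #2]", so the sext is treated as free.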
13576 | |||||
13577 | static bool isSplatShuffle(Value *V) { | ||||
13578 | if (auto *Shuf = dyn_cast<ShuffleVectorInst>(V)) | ||||
13579 | return all_equal(Shuf->getShuffleMask()); | ||||
13580 | return false; | ||||
13581 | } | ||||
13582 | |||||
13583 | /// Check if both Op1 and Op2 are shufflevector extracts of either the lower | ||||
13584 | /// or upper half of the vector elements. | ||||
13585 | static bool areExtractShuffleVectors(Value *Op1, Value *Op2, | ||||
13586 | bool AllowSplat = false) { | ||||
13587 | auto areTypesHalfed = [](Value *FullV, Value *HalfV) { | ||||
13588 | auto *FullTy = FullV->getType(); | ||||
13589 | auto *HalfTy = HalfV->getType(); | ||||
13590 | return FullTy->getPrimitiveSizeInBits().getFixedSize() == | ||||
13591 | 2 * HalfTy->getPrimitiveSizeInBits().getFixedSize(); | ||||
13592 | }; | ||||
13593 | |||||
13594 | auto extractHalf = [](Value *FullV, Value *HalfV) { | ||||
13595 | auto *FullVT = cast<FixedVectorType>(FullV->getType()); | ||||
13596 | auto *HalfVT = cast<FixedVectorType>(HalfV->getType()); | ||||
13597 | return FullVT->getNumElements() == 2 * HalfVT->getNumElements(); | ||||
13598 | }; | ||||
13599 | |||||
13600 | ArrayRef<int> M1, M2; | ||||
13601 | Value *S1Op1 = nullptr, *S2Op1 = nullptr; | ||||
13602 | if (!match(Op1, m_Shuffle(m_Value(S1Op1), m_Undef(), m_Mask(M1))) || | ||||
13603 | !match(Op2, m_Shuffle(m_Value(S2Op1), m_Undef(), m_Mask(M2)))) | ||||
13604 | return false; | ||||
13605 | |||||
13606 | // If we allow splats, set S1Op1/S2Op1 to nullptr for the relevant arg so that | ||||
13607 | // it is not checked as an extract below. | ||||
13608 | if (AllowSplat && isSplatShuffle(Op1)) | ||||
13609 | S1Op1 = nullptr; | ||||
13610 | if (AllowSplat && isSplatShuffle(Op2)) | ||||
13611 | S2Op1 = nullptr; | ||||
13612 | |||||
13613 | // Check that the operands are half as wide as the result and we extract | ||||
13614 | // half of the elements of the input vectors. | ||||
13615 | if ((S1Op1 && (!areTypesHalfed(S1Op1, Op1) || !extractHalf(S1Op1, Op1))) || | ||||
13616 | (S2Op1 && (!areTypesHalfed(S2Op1, Op2) || !extractHalf(S2Op1, Op2)))) | ||||
13617 | return false; | ||||
13618 | |||||
13619 | // Check the mask extracts either the lower or upper half of vector | ||||
13620 | // elements. | ||||
13621 | int M1Start = 0; | ||||
13622 | int M2Start = 0; | ||||
13623 | int NumElements = cast<FixedVectorType>(Op1->getType())->getNumElements() * 2; | ||||
13624 | if ((S1Op1 && | ||||
13625 | !ShuffleVectorInst::isExtractSubvectorMask(M1, NumElements, M1Start)) || | ||||
13626 | (S2Op1 && | ||||
13627 | !ShuffleVectorInst::isExtractSubvectorMask(M2, NumElements, M2Start))) | ||||
13628 | return false; | ||||
13629 | |||||
13630 | if ((M1Start != 0 && M1Start != (NumElements / 2)) || | ||||
13631 | (M2Start != 0 && M2Start != (NumElements / 2))) | ||||
13632 | return false; | ||||
13633 | if (S1Op1 && S2Op1 && M1Start != M2Start) | ||||
13634 | return false; | ||||
13635 | |||||
13636 | return true; | ||||
13637 | } | ||||
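// Illustrative accepted pair (assumed <8 x i16> inputs): both operands must
// extract the same half, e.g.
//   %a = shufflevector <8 x i16> %x, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
//   %b = shufflevector <8 x i16> %y, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
// (upper halves, as smull2/umull2 consume); extracts of different halves are
// rejected unless one side is a splat and AllowSplat is set.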
13638 | |||||
13639 | /// Check if Ext1 and Ext2 are extends of the same type, doubling the bitwidth | ||||
13640 | /// of the vector elements. | ||||
13641 | static bool areExtractExts(Value *Ext1, Value *Ext2) { | ||||
13642 | auto areExtDoubled = [](Instruction *Ext) { | ||||
13643 | return Ext->getType()->getScalarSizeInBits() == | ||||
13644 | 2 * Ext->getOperand(0)->getType()->getScalarSizeInBits(); | ||||
13645 | }; | ||||
13646 | |||||
13647 | if (!match(Ext1, m_ZExtOrSExt(m_Value())) || | ||||
13648 | !match(Ext2, m_ZExtOrSExt(m_Value())) || | ||||
13649 | !areExtDoubled(cast<Instruction>(Ext1)) || | ||||
13650 | !areExtDoubled(cast<Instruction>(Ext2))) | ||||
13651 | return false; | ||||
13652 | |||||
13653 | return true; | ||||
13654 | } | ||||
13655 | |||||
13656 | /// Check if Op could be used with vmull_high_p64 intrinsic. | ||||
13657 | static bool isOperandOfVmullHighP64(Value *Op) { | ||||
13658 | Value *VectorOperand = nullptr; | ||||
13659 | ConstantInt *ElementIndex = nullptr; | ||||
13660 | return match(Op, m_ExtractElt(m_Value(VectorOperand), | ||||
13661 | m_ConstantInt(ElementIndex))) && | ||||
13662 | ElementIndex->getValue() == 1 && | ||||
13663 | isa<FixedVectorType>(VectorOperand->getType()) && | ||||
13664 | cast<FixedVectorType>(VectorOperand->getType())->getNumElements() == 2; | ||||
13665 | } | ||||
13666 | |||||
13667 | /// Check if Op1 and Op2 could be used with vmull_high_p64 intrinsic. | ||||
13668 | static bool areOperandsOfVmullHighP64(Value *Op1, Value *Op2) { | ||||
13669 | return isOperandOfVmullHighP64(Op1) && isOperandOfVmullHighP64(Op2); | ||||
13670 | } | ||||
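// Illustrative operand shape accepted here (assumed): each multiplicand is
//   %h = extractelement <2 x i64> %v, i64 1
// i.e. lane 1 of a 128-bit vector, which is exactly what pmull2 reads.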
13671 | |||||
13672 | /// Check if sinking \p I's operands to I's basic block is profitable, because | ||||
13673 | /// the operands can be folded into a target instruction, e.g. | ||||
13674 | /// shufflevector extracts and/or sext/zext can be folded into (u,s)subl(2). | ||||
13675 | bool AArch64TargetLowering::shouldSinkOperands( | ||||
13676 | Instruction *I, SmallVectorImpl<Use *> &Ops) const { | ||||
13677 | if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { | ||||
13678 | switch (II->getIntrinsicID()) { | ||||
13679 | case Intrinsic::aarch64_neon_smull: | ||||
13680 | case Intrinsic::aarch64_neon_umull: | ||||
13681 | if (areExtractShuffleVectors(II->getOperand(0), II->getOperand(1), | ||||
13682 | /*AllowSplat=*/true)) { | ||||
13683 | Ops.push_back(&II->getOperandUse(0)); | ||||
13684 | Ops.push_back(&II->getOperandUse(1)); | ||||
13685 | return true; | ||||
13686 | } | ||||
13687 | [[fallthrough]]; | ||||
13688 | |||||
13689 | case Intrinsic::fma: | ||||
13690 | if (isa<VectorType>(I->getType()) && | ||||
13691 | cast<VectorType>(I->getType())->getElementType()->isHalfTy() && | ||||
13692 | !Subtarget->hasFullFP16()) | ||||
13693 | return false; | ||||
13694 | [[fallthrough]]; | ||||
13695 | case Intrinsic::aarch64_neon_sqdmull: | ||||
13696 | case Intrinsic::aarch64_neon_sqdmulh: | ||||
13697 | case Intrinsic::aarch64_neon_sqrdmulh: | ||||
13698 | // Sink splats for index lane variants | ||||
13699 | if (isSplatShuffle(II->getOperand(0))) | ||||
13700 | Ops.push_back(&II->getOperandUse(0)); | ||||
13701 | if (isSplatShuffle(II->getOperand(1))) | ||||
13702 | Ops.push_back(&II->getOperandUse(1)); | ||||
13703 | return !Ops.empty(); | ||||
13704 | case Intrinsic::aarch64_sve_ptest_first: | ||||
13705 | case Intrinsic::aarch64_sve_ptest_last: | ||||
13706 | if (auto *IIOp = dyn_cast<IntrinsicInst>(II->getOperand(0))) | ||||
13707 | if (IIOp->getIntrinsicID() == Intrinsic::aarch64_sve_ptrue) | ||||
13708 | Ops.push_back(&II->getOperandUse(0)); | ||||
13709 | return !Ops.empty(); | ||||
13710 | case Intrinsic::aarch64_sme_write_horiz: | ||||
13711 | case Intrinsic::aarch64_sme_write_vert: | ||||
13712 | case Intrinsic::aarch64_sme_writeq_horiz: | ||||
13713 | case Intrinsic::aarch64_sme_writeq_vert: { | ||||
13714 | auto *Idx = dyn_cast<Instruction>(II->getOperand(1)); | ||||
13715 | if (!Idx || Idx->getOpcode() != Instruction::Add) | ||||
13716 | return false; | ||||
13717 | Ops.push_back(&II->getOperandUse(1)); | ||||
13718 | return true; | ||||
13719 | } | ||||
13720 | case Intrinsic::aarch64_sme_read_horiz: | ||||
13721 | case Intrinsic::aarch64_sme_read_vert: | ||||
13722 | case Intrinsic::aarch64_sme_readq_horiz: | ||||
13723 | case Intrinsic::aarch64_sme_readq_vert: | ||||
13724 | case Intrinsic::aarch64_sme_ld1b_vert: | ||||
13725 | case Intrinsic::aarch64_sme_ld1h_vert: | ||||
13726 | case Intrinsic::aarch64_sme_ld1w_vert: | ||||
13727 | case Intrinsic::aarch64_sme_ld1d_vert: | ||||
13728 | case Intrinsic::aarch64_sme_ld1q_vert: | ||||
13729 | case Intrinsic::aarch64_sme_st1b_vert: | ||||
13730 | case Intrinsic::aarch64_sme_st1h_vert: | ||||
13731 | case Intrinsic::aarch64_sme_st1w_vert: | ||||
13732 | case Intrinsic::aarch64_sme_st1d_vert: | ||||
13733 | case Intrinsic::aarch64_sme_st1q_vert: | ||||
13734 | case Intrinsic::aarch64_sme_ld1b_horiz: | ||||
13735 | case Intrinsic::aarch64_sme_ld1h_horiz: | ||||
13736 | case Intrinsic::aarch64_sme_ld1w_horiz: | ||||
13737 | case Intrinsic::aarch64_sme_ld1d_horiz: | ||||
13738 | case Intrinsic::aarch64_sme_ld1q_horiz: | ||||
13739 | case Intrinsic::aarch64_sme_st1b_horiz: | ||||
13740 | case Intrinsic::aarch64_sme_st1h_horiz: | ||||
13741 | case Intrinsic::aarch64_sme_st1w_horiz: | ||||
13742 | case Intrinsic::aarch64_sme_st1d_horiz: | ||||
13743 | case Intrinsic::aarch64_sme_st1q_horiz: { | ||||
13744 | auto *Idx = dyn_cast<Instruction>(II->getOperand(3)); | ||||
13745 | if (!Idx || Idx->getOpcode() != Instruction::Add) | ||||
13746 | return false; | ||||
13747 | Ops.push_back(&II->getOperandUse(3)); | ||||
13748 | return true; | ||||
13749 | } | ||||
13750 | case Intrinsic::aarch64_neon_pmull: | ||||
13751 | if (!areExtractShuffleVectors(II->getOperand(0), II->getOperand(1))) | ||||
13752 | return false; | ||||
13753 | Ops.push_back(&II->getOperandUse(0)); | ||||
13754 | Ops.push_back(&II->getOperandUse(1)); | ||||
13755 | return true; | ||||
13756 | case Intrinsic::aarch64_neon_pmull64: | ||||
13757 | if (!areOperandsOfVmullHighP64(II->getArgOperand(0), | ||||
13758 | II->getArgOperand(1))) | ||||
13759 | return false; | ||||
13760 | Ops.push_back(&II->getArgOperandUse(0)); | ||||
13761 | Ops.push_back(&II->getArgOperandUse(1)); | ||||
13762 | return true; | ||||
13763 | default: | ||||
13764 | return false; | ||||
13765 | } | ||||
13766 | } | ||||
13767 | |||||
13768 | if (!I->getType()->isVectorTy()) | ||||
13769 | return false; | ||||
13770 | |||||
13771 | switch (I->getOpcode()) { | ||||
13772 | case Instruction::Sub: | ||||
13773 | case Instruction::Add: { | ||||
13774 | if (!areExtractExts(I->getOperand(0), I->getOperand(1))) | ||||
13775 | return false; | ||||
13776 | |||||
13777 | // If the exts' operands extract either the lower or upper elements, we | ||||
13778 | // can sink them too. | ||||
13779 | auto Ext1 = cast<Instruction>(I->getOperand(0)); | ||||
13780 | auto Ext2 = cast<Instruction>(I->getOperand(1)); | ||||
13781 | if (areExtractShuffleVectors(Ext1->getOperand(0), Ext2->getOperand(0))) { | ||||
13782 | Ops.push_back(&Ext1->getOperandUse(0)); | ||||
13783 | Ops.push_back(&Ext2->getOperandUse(0)); | ||||
13784 | } | ||||
13785 | |||||
13786 | Ops.push_back(&I->getOperandUse(0)); | ||||
13787 | Ops.push_back(&I->getOperandUse(1)); | ||||
13788 | |||||
13789 | return true; | ||||
13790 | } | ||||
13791 | case Instruction::Mul: { | ||||
13792 | bool IsProfitable = false; | ||||
13793 | for (auto &Op : I->operands()) { | ||||
13794 | // Make sure we are not already sinking this operand | ||||
13795 | if (any_of(Ops, [&](Use *U) { return U->get() == Op; })) | ||||
13796 | continue; | ||||
13797 | |||||
13798 | ShuffleVectorInst *Shuffle = dyn_cast<ShuffleVectorInst>(Op); | ||||
13799 | |||||
13800 | // If the Shuffle is a splat and the operand is a zext/sext, sinking the | ||||
13801 | // operand and the s/zext can help create indexed s/umull. This is | ||||
13802 | // especially useful to prevent i64 mul being scalarized. | ||||
13803 | if (Shuffle && isSplatShuffle(Shuffle) && | ||||
13804 | match(Shuffle->getOperand(0), m_ZExtOrSExt(m_Value()))) { | ||||
13805 | Ops.push_back(&Shuffle->getOperandUse(0)); | ||||
13806 | Ops.push_back(&Op); | ||||
13807 | IsProfitable = true; | ||||
13808 | continue; | ||||
13809 | } | ||||
13810 | |||||
13811 | if (!Shuffle || !Shuffle->isZeroEltSplat()) | ||||
13812 | continue; | ||||
13813 | |||||
13814 | Value *ShuffleOperand = Shuffle->getOperand(0); | ||||
13815 | InsertElementInst *Insert = dyn_cast<InsertElementInst>(ShuffleOperand); | ||||
13816 | if (!Insert) | ||||
13817 | continue; | ||||
13818 | |||||
13819 | Instruction *OperandInstr = dyn_cast<Instruction>(Insert->getOperand(1)); | ||||
13820 | if (!OperandInstr) | ||||
13821 | continue; | ||||
13822 | |||||
13823 | ConstantInt *ElementConstant = | ||||
13824 | dyn_cast<ConstantInt>(Insert->getOperand(2)); | ||||
13825 | // Check that the insertelement is inserting into element 0 | ||||
13826 | if (!ElementConstant || ElementConstant->getZExtValue() != 0) | ||||
13827 | continue; | ||||
13828 | |||||
13829 | unsigned Opcode = OperandInstr->getOpcode(); | ||||
13830 | if (Opcode != Instruction::SExt && Opcode != Instruction::ZExt) | ||||
13831 | continue; | ||||
13832 | |||||
13833 | Ops.push_back(&Shuffle->getOperandUse(0)); | ||||
13834 | Ops.push_back(&Op); | ||||
13835 | IsProfitable = true; | ||||
13836 | } | ||||
13837 | |||||
13838 | return IsProfitable; | ||||
13839 | } | ||||
13840 | default: | ||||
13841 | return false; | ||||
13842 | } | ||||
13843 | return false; | ||||
13844 | } | ||||
13845 | |||||
13846 | static void createTblShuffleForZExt(ZExtInst *ZExt, bool IsLittleEndian) { | ||||
13847 | Value *Op = ZExt->getOperand(0); | ||||
13848 | auto *SrcTy = dyn_cast<FixedVectorType>(Op->getType()); | ||||
13849 | auto *DstTy = dyn_cast<FixedVectorType>(ZExt->getType()); | ||||
13850 | unsigned NumElts = SrcTy->getNumElements(); | ||||
| |||||
13851 | IRBuilder<> Builder(ZExt); | ||||
13852 | SmallVector<int> Mask(4 * NumElts, NumElts); | ||||
13853 | // Create a mask that selects <0,0,0,Op[i]> for each i32 lane of the result to | ||||
13854 | // replace the original ZExt. This can later be lowered to a set of tbl | ||||
13855 | // instructions. | ||||
13856 | for (unsigned i = 0; i < NumElts; i++) { | ||||
13857 | if (IsLittleEndian) | ||||
13858 | Mask[i * 4] = i; | ||||
13859 | else | ||||
13860 | Mask[i * 4 + 3] = i; | ||||
13861 | } | ||||
13862 | |||||
13863 | auto *FirstEltZero = Builder.CreateInsertElement( | ||||
13864 | PoisonValue::get(SrcTy), Builder.getInt8(0), uint64_t(0)); | ||||
13865 | Value *Result = Builder.CreateShuffleVector(Op, FirstEltZero, Mask); | ||||
13866 | Result = Builder.CreateBitCast(Result, DstTy); | ||||
13867 | ZExt->replaceAllUsesWith(Result); | ||||
13868 | ZExt->eraseFromParent(); | ||||
13869 | } | ||||
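// Illustrative little-endian expansion (assumed input): for
//   %z = zext <8 x i8> %x to <8 x i32>
// the 32-entry mask defaults to index 8 (the zero byte inserted into the
// second shuffle source), with Mask[i*4] = i, so each i32 lane holds the
// bytes <Op[i], 0, 0, 0>; the shuffle result is bitcast from <32 x i8> to
// <8 x i32>. On big-endian targets the source byte goes in lane i*4+3.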
13870 | |||||
13871 | static void createTblForTrunc(TruncInst *TI, bool IsLittleEndian) { | ||||
13872 | IRBuilder<> Builder(TI); | ||||
13873 | SmallVector<Value *> Parts; | ||||
13874 | Type *VecTy = FixedVectorType::get(Builder.getInt8Ty(), 16); | ||||
13875 | Parts.push_back(Builder.CreateBitCast( | ||||
13876 | Builder.CreateShuffleVector(TI->getOperand(0), {0, 1, 2, 3}), VecTy)); | ||||
13877 | Parts.push_back(Builder.CreateBitCast( | ||||
13878 | Builder.CreateShuffleVector(TI->getOperand(0), {4, 5, 6, 7}), VecTy)); | ||||
13879 | |||||
13880 | Intrinsic::ID TblID = Intrinsic::aarch64_neon_tbl2; | ||||
13881 | unsigned NumElements = cast<FixedVectorType>(TI->getType())->getNumElements(); | ||||
13882 | if (NumElements == 16) { | ||||
13883 | Parts.push_back(Builder.CreateBitCast( | ||||
13884 | Builder.CreateShuffleVector(TI->getOperand(0), {8, 9, 10, 11}), VecTy)); | ||||
13885 | Parts.push_back(Builder.CreateBitCast( | ||||
13886 | Builder.CreateShuffleVector(TI->getOperand(0), {12, 13, 14, 15}), | ||||
13887 | VecTy)); | ||||
13888 | TblID = Intrinsic::aarch64_neon_tbl4; | ||||
13889 | } | ||||
13890 | SmallVector<Constant *, 16> MaskConst; | ||||
13891 | for (unsigned Idx = 0; Idx < NumElements * 4; Idx += 4) | ||||
13892 | MaskConst.push_back( | ||||
13893 | ConstantInt::get(Builder.getInt8Ty(), IsLittleEndian ? Idx : Idx + 3)); | ||||
13894 | |||||
13895 | for (unsigned Idx = NumElements * 4; Idx < 64; Idx += 4) | ||||
13896 | MaskConst.push_back(ConstantInt::get(Builder.getInt8Ty(), 255)); | ||||
13897 | |||||
13898 | Parts.push_back(ConstantVector::get(MaskConst)); | ||||
13899 | auto *F = | ||||
13900 | Intrinsic::getDeclaration(TI->getModule(), TblID, Parts[0]->getType()); | ||||
13901 | Value *Res = Builder.CreateCall(F, Parts); | ||||
13902 | |||||
13903 | if (NumElements == 8) | ||||
13904 | Res = Builder.CreateShuffleVector(Res, {0, 1, 2, 3, 4, 5, 6, 7}); | ||||
13905 | TI->replaceAllUsesWith(Res); | ||||
13906 | TI->eraseFromParent(); | ||||
13907 | } | ||||
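// Illustrative expansion (assumed input): for
//   %t = trunc <8 x i32> %x to <8 x i8>
// %x is split into two <4 x i32> halves, each bitcast to <16 x i8>, and one
// tbl2 call selects byte 0 of every 32-bit lane (byte 3 on big-endian);
// mask entries of 255 are out of range and read as zero, and the final
// shuffle narrows the <16 x i8> tbl result back to 8 elements.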
13908 | |||||
13909 | bool AArch64TargetLowering::optimizeExtendOrTruncateConversion(Instruction *I, | ||||
13910 | Loop *L) const { | ||||
13911 | // Try to optimize conversions using tbl. This requires materializing constant | ||||
13912 | // index vectors, which can increase code size and add loads. Skip the | ||||
13913 | // transform unless the conversion is in a loop block guaranteed to execute | ||||
13914 | // and we are not optimizing for size. | ||||
13915 | Function *F = I->getParent()->getParent(); | ||||
13916 | if (!L || L->getHeader() != I->getParent() || F->hasMinSize() || | ||||
| |||||
13917 | F->hasOptSize()) | ||||
13918 | return false; | ||||
13919 | |||||
13920 | auto *SrcTy = dyn_cast<FixedVectorType>(I->getOperand(0)->getType()); | ||||
13921 | auto *DstTy = dyn_cast<FixedVectorType>(I->getType()); | ||||
13922 | if (!SrcTy || !DstTy) | ||||
13923 | return false; | ||||
13924 | |||||
13925 | // Convert 'zext <(8|16) x i8> %x to <(8|16) x i32>' to a shuffle that can be | ||||
13926 | // lowered to either 2 or 4 tbl instructions to insert the original i8 | ||||
13927 | // elements into i32 lanes. | ||||
13928 | auto *ZExt = dyn_cast<ZExtInst>(I); | ||||
13929 | if (ZExt && | ||||
13930 | SrcTy->getElementType()->isIntegerTy(8) && | ||||
13931 | DstTy->getElementType()->isIntegerTy(32)) { | ||||
13932 | createTblShuffleForZExt(ZExt, Subtarget->isLittleEndian()); | ||||
13933 | return true; | ||||
13934 | } | ||||
13935 | |||||
13936 | auto *UIToFP = dyn_cast<UIToFPInst>(I); | ||||
13937 | if (UIToFP && | ||||
13938 | (SrcTy->getNumElements() == 8 || SrcTy->getNumElements() == 16) && | ||||
13939 | SrcTy->getElementType()->isIntegerTy(8) && | ||||
13940 | DstTy->getElementType()->isFloatTy()) { | ||||
13941 | IRBuilder<> Builder(I); | ||||
13942 | auto *ZExt = cast<ZExtInst>( | ||||
13943 | Builder.CreateZExt(I->getOperand(0), VectorType::getInteger(DstTy))); | ||||
13944 | auto *UI = Builder.CreateUIToFP(ZExt, DstTy); | ||||
13945 | I->replaceAllUsesWith(UI); | ||||
13946 | I->eraseFromParent(); | ||||
13947 | createTblShuffleForZExt(ZExt, Subtarget->isLittleEndian()); | ||||
13948 | return true; | ||||
13949 | } | ||||
13950 | |||||
13951 | // Convert 'fptoui <(8|16) x float> to <(8|16) x i8>' to a wide fptoui | ||||
13952 | // followed by a truncate lowered using tbl.4. | ||||
13953 | auto *FPToUI = dyn_cast<FPToUIInst>(I); | ||||
13954 | if (FPToUI && | ||||
13955 | (SrcTy->getNumElements() == 8 || SrcTy->getNumElements() == 16) && | ||||
13956 | SrcTy->getElementType()->isFloatTy() && | ||||
13957 | DstTy->getElementType()->isIntegerTy(8)) { | ||||
13958 | IRBuilder<> Builder(I); | ||||
13959 | auto *WideConv = Builder.CreateFPToUI(FPToUI->getOperand(0), | ||||
13960 | VectorType::getInteger(SrcTy)); | ||||
13961 | auto *TruncI = Builder.CreateTrunc(WideConv, DstTy); | ||||
13962 | I->replaceAllUsesWith(TruncI); | ||||
13963 | I->eraseFromParent(); | ||||
13964 | createTblForTrunc(cast<TruncInst>(TruncI), Subtarget->isLittleEndian()); | ||||
13965 | return true; | ||||
13966 | } | ||||
13967 | |||||
13968 | // Convert 'trunc <(8|16) x i32> %x to <(8|16) x i8>' to a single tbl.4 | ||||
13969 | // instruction selecting the lowest 8 bits per lane of the input interpreted | ||||
13970 | // as 2 or 4 <4 x i32> vectors. | ||||
13971 | auto *TI = dyn_cast<TruncInst>(I); | ||||
13972 | if (TI && (SrcTy->getNumElements() == 8 || SrcTy->getNumElements() == 16) && | ||||
13973 | SrcTy->getElementType()->isIntegerTy(32) && | ||||
13974 | DstTy->getElementType()->isIntegerTy(8)) { | ||||
13975 | createTblForTrunc(TI, Subtarget->isLittleEndian()); | ||||
13976 | return true; | ||||
13977 | } | ||||
13978 | |||||
13979 | return false; | ||||
13980 | } | ||||
13981 | |||||
13982 | bool AArch64TargetLowering::hasPairedLoad(EVT LoadedType, | ||||
13983 | Align &RequiredAligment) const { | ||||
13984 | if (!LoadedType.isSimple() || | ||||
13985 | (!LoadedType.isInteger() && !LoadedType.isFloatingPoint())) | ||||
13986 | return false; | ||||
13987 | // Cyclone supports unaligned accesses. | ||||
13988 | RequiredAligment = Align(1); | ||||
13989 | unsigned NumBits = LoadedType.getSizeInBits(); | ||||
13990 | return NumBits == 32 || NumBits == 64; | ||||
13991 | } | ||||
13992 | |||||
13993 | /// A helper function for determining the number of interleaved accesses we | ||||
13994 | /// will generate when lowering accesses of the given type. | ||||
13995 | unsigned AArch64TargetLowering::getNumInterleavedAccesses( | ||||
13996 | VectorType *VecTy, const DataLayout &DL, bool UseScalable) const { | ||||
13997 | unsigned VecSize = 128; | ||||
13998 | if (UseScalable) | ||||
13999 | VecSize = std::max(Subtarget->getMinSVEVectorSizeInBits(), 128u); | ||||
14000 | return std::max<unsigned>(1, (DL.getTypeSizeInBits(VecTy) + 127) / VecSize); | ||||
14001 | } | ||||
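// Worked example (illustrative): a <16 x i32> access is 512 bits, so with
// 128-bit NEON vectors this returns (512 + 127) / 128 = 4 accesses; with
// UseScalable and a 256-bit minimum SVE vector length it returns 2.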
14002 | |||||
14003 | MachineMemOperand::Flags | ||||
14004 | AArch64TargetLowering::getTargetMMOFlags(const Instruction &I) const { | ||||
14005 | if (Subtarget->getProcFamily() == AArch64Subtarget::Falkor && | ||||
14006 | I.getMetadata(FALKOR_STRIDED_ACCESS_MD) != nullptr) | ||||
14007 | return MOStridedAccess; | ||||
14008 | return MachineMemOperand::MONone; | ||||
14009 | } | ||||
14010 | |||||
14011 | bool AArch64TargetLowering::isLegalInterleavedAccessType( | ||||
14012 | VectorType *VecTy, const DataLayout &DL, bool &UseScalable) const { | ||||
14013 | |||||
14014 | unsigned VecSize = DL.getTypeSizeInBits(VecTy); | ||||
14015 | unsigned ElSize = DL.getTypeSizeInBits(VecTy->getElementType()); | ||||
14016 | unsigned NumElements = cast<FixedVectorType>(VecTy)->getNumElements(); | ||||
14017 | |||||
14018 | UseScalable = false; | ||||
14019 | |||||
14020 | // Ensure that the predicate for this number of elements is available. | ||||
14021 | if (Subtarget->hasSVE() && !getSVEPredPatternFromNumElements(NumElements)) | ||||
14022 | return false; | ||||
14023 | |||||
14024 | // Ensure the number of vector elements is greater than 1. | ||||
14025 | if (NumElements < 2) | ||||
14026 | return false; | ||||
14027 | |||||
14028 | // Ensure the element type is legal. | ||||
14029 | if (ElSize != 8 && ElSize != 16 && ElSize != 32 && ElSize != 64) | ||||
14030 | return false; | ||||
14031 | |||||
14032 | if (Subtarget->forceStreamingCompatibleSVE() || | ||||
14033 | (Subtarget->useSVEForFixedLengthVectors() && | ||||
14034 | (VecSize % Subtarget->getMinSVEVectorSizeInBits() == 0 || | ||||
14035 | (VecSize < Subtarget->getMinSVEVectorSizeInBits() && | ||||
14036 | isPowerOf2_32(NumElements) && VecSize > 128)))) { | ||||
14037 | UseScalable = true; | ||||
14038 | return true; | ||||
14039 | } | ||||
14040 | |||||
14041 | // Ensure the total vector size is 64 or a multiple of 128. Types larger than | ||||
14042 | // 128 will be split into multiple interleaved accesses. | ||||
14043 | return VecSize == 64 || VecSize % 128 == 0; | ||||
14044 | } | ||||
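// Illustrative outcomes on a plain NEON target (no scalable path taken):
// <4 x i32> (128 bits) is legal, <16 x i32> (512 bits) is legal but will be
// split into four accesses, and <3 x i32> (96 bits) is rejected by the final
// size check.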
14045 | |||||
14046 | static ScalableVectorType *getSVEContainerIRType(FixedVectorType *VTy) { | ||||
14047 | if (VTy->getElementType() == Type::getDoubleTy(VTy->getContext())) | ||||
14048 | return ScalableVectorType::get(VTy->getElementType(), 2); | ||||
14049 | |||||
14050 | if (VTy->getElementType() == Type::getFloatTy(VTy->getContext())) | ||||
14051 | return ScalableVectorType::get(VTy->getElementType(), 4); | ||||
14052 | |||||
14053 | if (VTy->getElementType() == Type::getBFloatTy(VTy->getContext())) | ||||
14054 | return ScalableVectorType::get(VTy->getElementType(), 8); | ||||
14055 | |||||
14056 | if (VTy->getElementType() == Type::getHalfTy(VTy->getContext())) | ||||
14057 | return ScalableVectorType::get(VTy->getElementType(), 8); | ||||
14058 | |||||
14059 | if (VTy->getElementType() == Type::getInt64Ty(VTy->getContext())) | ||||
14060 | return ScalableVectorType::get(VTy->getElementType(), 2); | ||||
14061 | |||||
14062 | if (VTy->getElementType() == Type::getInt32Ty(VTy->getContext())) | ||||
14063 | return ScalableVectorType::get(VTy->getElementType(), 4); | ||||
14064 | |||||
14065 | if (VTy->getElementType() == Type::getInt16Ty(VTy->getContext())) | ||||
14066 | return ScalableVectorType::get(VTy->getElementType(), 8); | ||||
14067 | |||||
14068 | if (VTy->getElementType() == Type::getInt8Ty(VTy->getContext())) | ||||
14069 | return ScalableVectorType::get(VTy->getElementType(), 16); | ||||
14070 | |||||
14071 | llvm_unreachable("Cannot handle input vector type"); | ||||
14072 | } | ||||
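// Illustrative mappings (from the chain above): <2 x double> and <2 x i64>
// map to <vscale x 2 x ...>, <4 x float> and <4 x i32> to <vscale x 4 x ...>,
// and <16 x i8> to <vscale x 16 x i8>; each fixed type is widened to the
// scalable container that fills one 128-bit SVE granule.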
14073 | |||||
14074 | /// Lower an interleaved load into a ldN intrinsic. | ||||
14075 | /// | ||||
14076 | /// E.g. Lower an interleaved load (Factor = 2): | ||||
14077 | /// %wide.vec = load <8 x i32>, <8 x i32>* %ptr | ||||
14078 | /// %v0 = shuffle %wide.vec, undef, <0, 2, 4, 6> ; Extract even elements | ||||
14079 | /// %v1 = shuffle %wide.vec, undef, <1, 3, 5, 7> ; Extract odd elements | ||||
14080 | /// | ||||
14081 | /// Into: | ||||
14082 | /// %ld2 = { <4 x i32>, <4 x i32> } call llvm.aarch64.neon.ld2(%ptr) | ||||
14083 | /// %vec0 = extractelement { <4 x i32>, <4 x i32> } %ld2, i32 0 | ||||
14084 | /// %vec1 = extractelement { <4 x i32>, <4 x i32> } %ld2, i32 1 | ||||
14085 | bool AArch64TargetLowering::lowerInterleavedLoad( | ||||
14086 | LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles, | ||||
14087 | ArrayRef<unsigned> Indices, unsigned Factor) const { | ||||
14088 | assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() && | ||||
14089 | "Invalid interleave factor"); | ||||
14090 | assert(!Shuffles.empty() && "Empty shufflevector input"); | ||||
14091 | assert(Shuffles.size() == Indices.size() && | ||||
14092 | "Unmatched number of shufflevectors and indices"); | ||||
14093 | |||||
14094 | const DataLayout &DL = LI->getModule()->getDataLayout(); | ||||
14095 | |||||
14096 | VectorType *VTy = Shuffles[0]->getType(); | ||||
14097 | |||||
14098 | // Skip if we do not have NEON and skip illegal vector types. We can | ||||
14099 | // "legalize" wide vector types into multiple interleaved accesses as long as | ||||
14100 | // the vector types are divisible by 128. | ||||
14101 | bool UseScalable; | ||||
14102 | if (!Subtarget->hasNEON() || | ||||
14103 | !isLegalInterleavedAccessType(VTy, DL, UseScalable)) | ||||
14104 | return false; | ||||
14105 | |||||
14106 | unsigned NumLoads = getNumInterleavedAccesses(VTy, DL, UseScalable); | ||||
14107 | |||||
14108 | auto *FVTy = cast<FixedVectorType>(VTy); | ||||
14109 | |||||
14110 | // A pointer vector can not be the return type of the ldN intrinsics. Need to | ||||
14111 | // load integer vectors first and then convert to pointer vectors. | ||||
14112 | Type *EltTy = FVTy->getElementType(); | ||||
14113 | if (EltTy->isPointerTy()) | ||||
14114 | FVTy = | ||||
14115 | FixedVectorType::get(DL.getIntPtrType(EltTy), FVTy->getNumElements()); | ||||
14116 | |||||
14117 | // If we're going to generate more than one load, reset the sub-vector type | ||||
14118 | // to something legal. | ||||
14119 | FVTy = FixedVectorType::get(FVTy->getElementType(), | ||||
14120 | FVTy->getNumElements() / NumLoads); | ||||
14121 | |||||
14122 | auto *LDVTy = | ||||
14123 | UseScalable ? cast<VectorType>(getSVEContainerIRType(FVTy)) : FVTy; | ||||
14124 | |||||
14125 | IRBuilder<> Builder(LI); | ||||
14126 | |||||
14127 | // The base address of the load. | ||||
14128 | Value *BaseAddr = LI->getPointerOperand(); | ||||
14129 | |||||
14130 | if (NumLoads > 1) { | ||||
14131 | // We will compute the pointer operand of each load from the original base | ||||
14132 | // address using GEPs. Cast the base address to a pointer to the scalar | ||||
14133 | // element type. | ||||
14134 | BaseAddr = Builder.CreateBitCast( | ||||
14135 | BaseAddr, | ||||
14136 | LDVTy->getElementType()->getPointerTo(LI->getPointerAddressSpace())); | ||||
14137 | } | ||||
14138 | |||||
14139 | Type *PtrTy = | ||||
14140 | UseScalable | ||||
14141 | ? LDVTy->getElementType()->getPointerTo(LI->getPointerAddressSpace()) | ||||
14142 | : LDVTy->getPointerTo(LI->getPointerAddressSpace()); | ||||
14143 | Type *PredTy = VectorType::get(Type::getInt1Ty(LDVTy->getContext()), | ||||
14144 | LDVTy->getElementCount()); | ||||
14145 | |||||
14146 | static const Intrinsic::ID SVELoadIntrs[3] = { | ||||
14147 | Intrinsic::aarch64_sve_ld2_sret, Intrinsic::aarch64_sve_ld3_sret, | ||||
14148 | Intrinsic::aarch64_sve_ld4_sret}; | ||||
14149 | static const Intrinsic::ID NEONLoadIntrs[3] = {Intrinsic::aarch64_neon_ld2, | ||||
14150 | Intrinsic::aarch64_neon_ld3, | ||||
14151 | Intrinsic::aarch64_neon_ld4}; | ||||
14152 | Function *LdNFunc; | ||||
14153 | if (UseScalable) | ||||
14154 | LdNFunc = Intrinsic::getDeclaration(LI->getModule(), | ||||
14155 | SVELoadIntrs[Factor - 2], {LDVTy}); | ||||
14156 | else | ||||
14157 | LdNFunc = Intrinsic::getDeclaration( | ||||
14158 | LI->getModule(), NEONLoadIntrs[Factor - 2], {LDVTy, PtrTy}); | ||||
14159 | |||||
14160 | // Holds sub-vectors extracted from the load intrinsic return values. The | ||||
14161 | // sub-vectors are associated with the shufflevector instructions they will | ||||
14162 | // replace. | ||||
14163 | DenseMap<ShuffleVectorInst *, SmallVector<Value *, 4>> SubVecs; | ||||
14164 | |||||
14165 | Value *PTrue = nullptr; | ||||
14166 | if (UseScalable) { | ||||
14167 | std::optional<unsigned> PgPattern = | ||||
14168 | getSVEPredPatternFromNumElements(FVTy->getNumElements()); | ||||
14169 | if (Subtarget->getMinSVEVectorSizeInBits() == | ||||
14170 | Subtarget->getMaxSVEVectorSizeInBits() && | ||||
14171 | Subtarget->getMinSVEVectorSizeInBits() == DL.getTypeSizeInBits(FVTy)) | ||||
14172 | PgPattern = AArch64SVEPredPattern::all; | ||||
14173 | |||||
14174 | auto *PTruePat = | ||||
14175 | ConstantInt::get(Type::getInt32Ty(LDVTy->getContext()), *PgPattern); | ||||
14176 | PTrue = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_ptrue, {PredTy}, | ||||
14177 | {PTruePat}); | ||||
14178 | } | ||||
14179 | |||||
14180 | for (unsigned LoadCount = 0; LoadCount < NumLoads; ++LoadCount) { | ||||
14181 | |||||
14182 | // If we're generating more than one load, compute the base address of | ||||
14183 | // subsequent loads as an offset from the previous. | ||||
14184 | if (LoadCount > 0) | ||||
14185 | BaseAddr = Builder.CreateConstGEP1_32(LDVTy->getElementType(), BaseAddr, | ||||
14186 | FVTy->getNumElements() * Factor); | ||||
14187 | |||||
14188 | CallInst *LdN; | ||||
14189 | if (UseScalable) | ||||
14190 | LdN = Builder.CreateCall( | ||||
14191 | LdNFunc, {PTrue, Builder.CreateBitCast(BaseAddr, PtrTy)}, "ldN"); | ||||
14192 | else | ||||
14193 | LdN = Builder.CreateCall(LdNFunc, Builder.CreateBitCast(BaseAddr, PtrTy), | ||||
14194 | "ldN"); | ||||
14195 | |||||
14196 | // Extract and store the sub-vectors returned by the load intrinsic. | ||||
14197 | for (unsigned i = 0; i < Shuffles.size(); i++) { | ||||
14198 | ShuffleVectorInst *SVI = Shuffles[i]; | ||||
14199 | unsigned Index = Indices[i]; | ||||
14200 | |||||
14201 | Value *SubVec = Builder.CreateExtractValue(LdN, Index); | ||||
14202 | |||||
14203 | if (UseScalable) | ||||
14204 | SubVec = Builder.CreateExtractVector( | ||||
14205 | FVTy, SubVec, | ||||
14206 | ConstantInt::get(Type::getInt64Ty(VTy->getContext()), 0)); | ||||
14207 | |||||
14208 | // Convert the integer vector to pointer vector if the element is pointer. | ||||
14209 | if (EltTy->isPointerTy()) | ||||
14210 | SubVec = Builder.CreateIntToPtr( | ||||
14211 | SubVec, FixedVectorType::get(SVI->getType()->getElementType(), | ||||
14212 | FVTy->getNumElements())); | ||||
14213 | |||||
14214 | SubVecs[SVI].push_back(SubVec); | ||||
14215 | } | ||||
14216 | } | ||||
14217 | |||||
14218 | // Replace uses of the shufflevector instructions with the sub-vectors | ||||
14219 | // returned by the load intrinsic. If a shufflevector instruction is | ||||
14220 | // associated with more than one sub-vector, those sub-vectors will be | ||||
14221 | // concatenated into a single wide vector. | ||||
14222 | for (ShuffleVectorInst *SVI : Shuffles) { | ||||
14223 | auto &SubVec = SubVecs[SVI]; | ||||
14224 | auto *WideVec = | ||||
14225 | SubVec.size() > 1 ? concatenateVectors(Builder, SubVec) : SubVec[0]; | ||||
14226 | SVI->replaceAllUsesWith(WideVec); | ||||
14227 | } | ||||
14228 | |||||
14229 | return true; | ||||
14230 | } | ||||
14231 | |||||
14232 | /// Lower an interleaved store into a stN intrinsic. | ||||
14233 | /// | ||||
14234 | /// E.g. Lower an interleaved store (Factor = 3): | ||||
14235 | /// %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1, | ||||
14236 | /// <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> | ||||
14237 | /// store <12 x i32> %i.vec, <12 x i32>* %ptr | ||||
14238 | /// | ||||
14239 | /// Into: | ||||
14240 | /// %sub.v0 = shuffle <8 x i32> %v0, <8 x i32> v1, <0, 1, 2, 3> | ||||
14241 | /// %sub.v1 = shuffle <8 x i32> %v0, <8 x i32> v1, <4, 5, 6, 7> | ||||
14242 | /// %sub.v2 = shuffle <8 x i32> %v0, <8 x i32> v1, <8, 9, 10, 11> | ||||
14243 | /// call void llvm.aarch64.neon.st3(%sub.v0, %sub.v1, %sub.v2, %ptr) | ||||
14244 | /// | ||||
14245 | /// Note that the new shufflevectors will be removed and we'll only generate one | ||||
14246 | /// st3 instruction in CodeGen. | ||||
14247 | /// | ||||
14248 | /// Example for a more general valid mask (Factor 3). Lower: | ||||
14249 | /// %i.vec = shuffle <32 x i32> %v0, <32 x i32> %v1, | ||||
14250 | /// <4, 32, 16, 5, 33, 17, 6, 34, 18, 7, 35, 19> | ||||
14251 | /// store <12 x i32> %i.vec, <12 x i32>* %ptr | ||||
14252 | /// | ||||
14253 | /// Into: | ||||
14254 | /// %sub.v0 = shuffle <32 x i32> %v0, <32 x i32> v1, <4, 5, 6, 7> | ||||
14255 | /// %sub.v1 = shuffle <32 x i32> %v0, <32 x i32> v1, <32, 33, 34, 35> | ||||
14256 | /// %sub.v2 = shuffle <32 x i32> %v0, <32 x i32> v1, <16, 17, 18, 19> | ||||
14257 | /// call void llvm.aarch64.neon.st3(%sub.v0, %sub.v1, %sub.v2, %ptr) | ||||
14258 | bool AArch64TargetLowering::lowerInterleavedStore(StoreInst *SI, | ||||
14259 | ShuffleVectorInst *SVI, | ||||
14260 | unsigned Factor) const { | ||||
14261 | // Skip if streaming compatible SVE is enabled, because it generates invalid | ||||
14262 | // code in streaming mode when SVE length is not specified. | ||||
14263 | if (Subtarget->forceStreamingCompatibleSVE()) | ||||
14264 | return false; | ||||
14265 | |||||
14266 | assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() && | ||||
14267 | "Invalid interleave factor"); | ||||
14268 | |||||
14269 | auto *VecTy = cast<FixedVectorType>(SVI->getType()); | ||||
14270 | assert(VecTy->getNumElements() % Factor == 0 && "Invalid interleaved store"); | ||||
14271 | |||||
14272 | unsigned LaneLen = VecTy->getNumElements() / Factor; | ||||
14273 | Type *EltTy = VecTy->getElementType(); | ||||
14274 | auto *SubVecTy = FixedVectorType::get(EltTy, LaneLen); | ||||
14275 | |||||
14276 | const DataLayout &DL = SI->getModule()->getDataLayout(); | ||||
14277 | bool UseScalable; | ||||
14278 | |||||
14279 | // Skip if we do not have NEON and skip illegal vector types. We can | ||||
14280 | // "legalize" wide vector types into multiple interleaved accesses as long as | ||||
14281 | // the vector types are divisible by 128. | ||||
14282 | if (!Subtarget->hasNEON() || | ||||
14283 | !isLegalInterleavedAccessType(SubVecTy, DL, UseScalable)) | ||||
14284 | return false; | ||||
14285 | |||||
14286 | unsigned NumStores = getNumInterleavedAccesses(SubVecTy, DL, UseScalable); | ||||
14287 | |||||
14288 | Value *Op0 = SVI->getOperand(0); | ||||
14289 | Value *Op1 = SVI->getOperand(1); | ||||
14290 | IRBuilder<> Builder(SI); | ||||
14291 | |||||
14292 | // StN intrinsics don't support pointer vectors as arguments. Convert pointer | ||||
14293 | // vectors to integer vectors. | ||||
14294 | if (EltTy->isPointerTy()) { | ||||
14295 | Type *IntTy = DL.getIntPtrType(EltTy); | ||||
14296 | unsigned NumOpElts = | ||||
14297 | cast<FixedVectorType>(Op0->getType())->getNumElements(); | ||||
14298 | |||||
14299 | // Convert to the corresponding integer vector. | ||||
14300 | auto *IntVecTy = FixedVectorType::get(IntTy, NumOpElts); | ||||
14301 | Op0 = Builder.CreatePtrToInt(Op0, IntVecTy); | ||||
14302 | Op1 = Builder.CreatePtrToInt(Op1, IntVecTy); | ||||
14303 | |||||
14304 | SubVecTy = FixedVectorType::get(IntTy, LaneLen); | ||||
14305 | } | ||||
14306 | |||||
14307 | // If we're going to generate more than one store, reset the lane length | ||||
14308 | // and sub-vector type to something legal. | ||||
14309 | LaneLen /= NumStores; | ||||
14310 | SubVecTy = FixedVectorType::get(SubVecTy->getElementType(), LaneLen); | ||||
14311 | |||||
14312 | auto *STVTy = UseScalable ? cast<VectorType>(getSVEContainerIRType(SubVecTy)) | ||||
14313 | : SubVecTy; | ||||
14314 | |||||
14315 | // The base address of the store. | ||||
14316 | Value *BaseAddr = SI->getPointerOperand(); | ||||
14317 | |||||
14318 | if (NumStores > 1) { | ||||
14319 | // We will compute the pointer operand of each store from the original base | ||||
14320 | // address using GEPs. Cast the base address to a pointer to the scalar | ||||
14321 | // element type. | ||||
14322 | BaseAddr = Builder.CreateBitCast( | ||||
14323 | BaseAddr, | ||||
14324 | SubVecTy->getElementType()->getPointerTo(SI->getPointerAddressSpace())); | ||||
14325 | } | ||||
14326 | |||||
14327 | auto Mask = SVI->getShuffleMask(); | ||||
14328 | |||||
14329 | // Bail out if all of the mask indices are out of range: if the mask is | ||||
14330 | // `undef` or `poison`, `Mask` may be a vector of -1s, and using it would | ||||
14331 | // cause an out-of-bounds read later. | ||||
14332 | if (llvm::all_of(Mask, [](int Idx) { return Idx == UndefMaskElem; })) { | ||||
14333 | return false; | ||||
14334 | } | ||||
14335 | |||||
14336 | Type *PtrTy = | ||||
14337 | UseScalable | ||||
14338 | ? STVTy->getElementType()->getPointerTo(SI->getPointerAddressSpace()) | ||||
14339 | : STVTy->getPointerTo(SI->getPointerAddressSpace()); | ||||
14340 | Type *PredTy = VectorType::get(Type::getInt1Ty(STVTy->getContext()), | ||||
14341 | STVTy->getElementCount()); | ||||
14342 | |||||
14343 | static const Intrinsic::ID SVEStoreIntrs[3] = {Intrinsic::aarch64_sve_st2, | ||||
14344 | Intrinsic::aarch64_sve_st3, | ||||
14345 | Intrinsic::aarch64_sve_st4}; | ||||
14346 | static const Intrinsic::ID NEONStoreIntrs[3] = {Intrinsic::aarch64_neon_st2, | ||||
14347 | Intrinsic::aarch64_neon_st3, | ||||
14348 | Intrinsic::aarch64_neon_st4}; | ||||
14349 | Function *StNFunc; | ||||
14350 | if (UseScalable) | ||||
14351 | StNFunc = Intrinsic::getDeclaration(SI->getModule(), | ||||
14352 | SVEStoreIntrs[Factor - 2], {STVTy}); | ||||
14353 | else | ||||
14354 | StNFunc = Intrinsic::getDeclaration( | ||||
14355 | SI->getModule(), NEONStoreIntrs[Factor - 2], {STVTy, PtrTy}); | ||||
14356 | |||||
14357 | Value *PTrue = nullptr; | ||||
14358 | if (UseScalable) { | ||||
14359 | std::optional<unsigned> PgPattern = | ||||
14360 | getSVEPredPatternFromNumElements(SubVecTy->getNumElements()); | ||||
14361 | if (Subtarget->getMinSVEVectorSizeInBits() == | ||||
14362 | Subtarget->getMaxSVEVectorSizeInBits() && | ||||
14363 | Subtarget->getMinSVEVectorSizeInBits() == | ||||
14364 | DL.getTypeSizeInBits(SubVecTy)) | ||||
14365 | PgPattern = AArch64SVEPredPattern::all; | ||||
14366 | |||||
14367 | auto *PTruePat = | ||||
14368 | ConstantInt::get(Type::getInt32Ty(STVTy->getContext()), *PgPattern); | ||||
14369 | PTrue = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_ptrue, {PredTy}, | ||||
14370 | {PTruePat}); | ||||
14371 | } | ||||
14372 | |||||
14373 | for (unsigned StoreCount = 0; StoreCount < NumStores; ++StoreCount) { | ||||
14374 | |||||
14375 | SmallVector<Value *, 5> Ops; | ||||
14376 | |||||
14377 | // Split the shufflevector operands into sub vectors for the new stN call. | ||||
14378 | for (unsigned i = 0; i < Factor; i++) { | ||||
14379 | Value *Shuffle; | ||||
14380 | unsigned IdxI = StoreCount * LaneLen * Factor + i; | ||||
14381 | if (Mask[IdxI] >= 0) { | ||||
14382 | Shuffle = Builder.CreateShuffleVector( | ||||
14383 | Op0, Op1, createSequentialMask(Mask[IdxI], LaneLen, 0)); | ||||
14384 | } else { | ||||
14385 | unsigned StartMask = 0; | ||||
14386 | for (unsigned j = 1; j < LaneLen; j++) { | ||||
14387 | unsigned IdxJ = StoreCount * LaneLen * Factor + j * Factor + i; | ||||
14388 | if (Mask[IdxJ] >= 0) { | ||||
14389 | StartMask = Mask[IdxJ] - j; | ||||
14390 | break; | ||||
14391 | } | ||||
14392 | } | ||||
14393 | // Note: Filling undef gaps with arbitrary elements is ok, since | ||||
14394 | // those elements were going to be written anyway (as undefs). | ||||
14395 | // In the case of all undefs we default to using elements from 0. | ||||
14396 | // Note: StartMask cannot be negative; that is checked in | ||||
14397 | // isReInterleaveMask. | ||||
14398 | Shuffle = Builder.CreateShuffleVector( | ||||
14399 | Op0, Op1, createSequentialMask(StartMask, LaneLen, 0)); | ||||
14400 | } | ||||
14401 | |||||
14402 | if (UseScalable) | ||||
14403 | Shuffle = Builder.CreateInsertVector( | ||||
14404 | STVTy, UndefValue::get(STVTy), Shuffle, | ||||
14405 | ConstantInt::get(Type::getInt64Ty(STVTy->getContext()), 0)); | ||||
14406 | |||||
14407 | Ops.push_back(Shuffle); | ||||
14408 | } | ||||
14409 | |||||
14410 | if (UseScalable) | ||||
14411 | Ops.push_back(PTrue); | ||||
14412 | |||||
14413 | // If we are generating more than one store, we compute the base address | ||||
14414 | // of each subsequent store as an offset from the previous one. | ||||
14415 | if (StoreCount > 0) | ||||
14416 | BaseAddr = Builder.CreateConstGEP1_32(SubVecTy->getElementType(), | ||||
14417 | BaseAddr, LaneLen * Factor); | ||||
14418 | |||||
14419 | Ops.push_back(Builder.CreateBitCast(BaseAddr, PtrTy)); | ||||
14420 | Builder.CreateCall(StNFunc, Ops); | ||||
14421 | } | ||||
14422 | return true; | ||||
14423 | } | ||||
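      | | ||||
      | // Illustrative sketch of the rewrite above (hypothetical IR; intrinsic | ||||
      | // name mangling elided). An interleaving shuffle feeding a wide store: | ||||
      | //   %iv = shufflevector <4 x i32> %a, <4 x i32> %b, | ||||
      | //                       <8 x i32> <i32 0, i32 4, i32 1, i32 5, | ||||
      | //                                  i32 2, i32 6, i32 3, i32 7> | ||||
      | //   store <8 x i32> %iv, <8 x i32>* %base | ||||
      | // becomes, for Factor == 2 on NEON, roughly: | ||||
      | //   call void @llvm.aarch64.neon.st2(<4 x i32> %a, <4 x i32> %b, ... %base) | ||||
      | // which selects to a single "st2 {v0.4s, v1.4s}, [x0]" instruction. | ||||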
14424 | |||||
14425 | EVT AArch64TargetLowering::getOptimalMemOpType( | ||||
14426 | const MemOp &Op, const AttributeList &FuncAttributes) const { | ||||
14427 | bool CanImplicitFloat = !FuncAttributes.hasFnAttr(Attribute::NoImplicitFloat); | ||||
14428 | bool CanUseNEON = Subtarget->hasNEON() && CanImplicitFloat; | ||||
14429 | bool CanUseFP = Subtarget->hasFPARMv8() && CanImplicitFloat; | ||||
14430 | // Only use AdvSIMD to implement memset of 32 bytes and above: below that, | ||||
14431 | // it would take one instruction to materialize the v2i64 zero and one store | ||||
14432 | // (with a restrictive addressing mode), so just do i64 stores. | ||||
14433 | bool IsSmallMemset = Op.isMemset() && Op.size() < 32; | ||||
14434 | auto AlignmentIsAcceptable = [&](EVT VT, Align AlignCheck) { | ||||
14435 | if (Op.isAligned(AlignCheck)) | ||||
14436 | return true; | ||||
14437 | unsigned Fast; | ||||
14438 | return allowsMisalignedMemoryAccesses(VT, 0, Align(1), | ||||
14439 | MachineMemOperand::MONone, &Fast) && | ||||
14440 | Fast; | ||||
14441 | }; | ||||
14442 | |||||
14443 | if (CanUseNEON && Op.isMemset() && !IsSmallMemset && | ||||
14444 | AlignmentIsAcceptable(MVT::v16i8, Align(16))) | ||||
14445 | return MVT::v16i8; | ||||
14446 | if (CanUseFP && !IsSmallMemset && AlignmentIsAcceptable(MVT::f128, Align(16))) | ||||
14447 | return MVT::f128; | ||||
14448 | if (Op.size() >= 8 && AlignmentIsAcceptable(MVT::i64, Align(8))) | ||||
14449 | return MVT::i64; | ||||
14450 | if (Op.size() >= 4 && AlignmentIsAcceptable(MVT::i32, Align(4))) | ||||
14451 | return MVT::i32; | ||||
14452 | return MVT::Other; | ||||
14453 | } | ||||
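      | | ||||
      | // For example, a 64-byte memset of a 16-byte-aligned buffer would be | ||||
      | // emitted as v16i8 stores when NEON is available, but falls back to i64 | ||||
      | // stores when the function is marked noimplicitfloat. | ||||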
14454 | |||||
14455 | LLT AArch64TargetLowering::getOptimalMemOpLLT( | ||||
14456 | const MemOp &Op, const AttributeList &FuncAttributes) const { | ||||
14457 | bool CanImplicitFloat = !FuncAttributes.hasFnAttr(Attribute::NoImplicitFloat); | ||||
14458 | bool CanUseNEON = Subtarget->hasNEON() && CanImplicitFloat; | ||||
14459 | bool CanUseFP = Subtarget->hasFPARMv8() && CanImplicitFloat; | ||||
14460 | // Only use AdvSIMD to implement memset of 32 bytes and above: below that, | ||||
14461 | // it would take one instruction to materialize the v2i64 zero and one store | ||||
14462 | // (with a restrictive addressing mode), so just do i64 stores. | ||||
14463 | bool IsSmallMemset = Op.isMemset() && Op.size() < 32; | ||||
14464 | auto AlignmentIsAcceptable = [&](EVT VT, Align AlignCheck) { | ||||
14465 | if (Op.isAligned(AlignCheck)) | ||||
14466 | return true; | ||||
14467 | unsigned Fast; | ||||
14468 | return allowsMisalignedMemoryAccesses(VT, 0, Align(1), | ||||
14469 | MachineMemOperand::MONone, &Fast) && | ||||
14470 | Fast; | ||||
14471 | }; | ||||
14472 | |||||
14473 | if (CanUseNEON && Op.isMemset() && !IsSmallMemset && | ||||
14474 | AlignmentIsAcceptable(MVT::v2i64, Align(16))) | ||||
14475 | return LLT::fixed_vector(2, 64); | ||||
14476 | if (CanUseFP && !IsSmallMemset && AlignmentIsAcceptable(MVT::f128, Align(16))) | ||||
14477 | return LLT::scalar(128); | ||||
14478 | if (Op.size() >= 8 && AlignmentIsAcceptable(MVT::i64, Align(8))) | ||||
14479 | return LLT::scalar(64); | ||||
14480 | if (Op.size() >= 4 && AlignmentIsAcceptable(MVT::i32, Align(4))) | ||||
14481 | return LLT::scalar(32); | ||||
14482 | return LLT(); | ||||
14483 | } | ||||
14484 | |||||
14485 | // 12-bit optionally shifted immediates are legal for adds. | ||||
14486 | bool AArch64TargetLowering::isLegalAddImmediate(int64_t Immed) const { | ||||
14487 | if (Immed == std::numeric_limits<int64_t>::min()) { | ||||
14488 | LLVM_DEBUG(dbgs() << "Illegal add imm " << Immeddo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("aarch64-lower")) { dbgs() << "Illegal add imm " << Immed << ": avoid UB for INT64_MIN\n"; } } while (false ) | ||||
14489 | << ": avoid UB for INT64_MIN\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("aarch64-lower")) { dbgs() << "Illegal add imm " << Immed << ": avoid UB for INT64_MIN\n"; } } while (false ); | ||||
14490 | return false; | ||||
14491 | } | ||||
14492 | // Same encoding for add/sub, just flip the sign. | ||||
14493 | Immed = std::abs(Immed); | ||||
14494 | bool IsLegal = ((Immed >> 12) == 0 || | ||||
14495 | ((Immed & 0xfff) == 0 && Immed >> 24 == 0)); | ||||
14496 | LLVM_DEBUG(dbgs() << "Is " << Immeddo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("aarch64-lower")) { dbgs() << "Is " << Immed << " legal add imm: " << (IsLegal ? "yes" : "no") << "\n"; } } while (false) | ||||
14497 | << " legal add imm: " << (IsLegal ? "yes" : "no") << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("aarch64-lower")) { dbgs() << "Is " << Immed << " legal add imm: " << (IsLegal ? "yes" : "no") << "\n"; } } while (false); | ||||
14498 | return IsLegal; | ||||
14499 | } | ||||
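      | | ||||
      | // For example, 4095 (0xfff) fits the 12-bit field, and 0x1000 is encodable | ||||
      | // in the shifted form ("add x0, x1, #1, lsl #12"), but 0x1001 would need | ||||
      | // two instructions and is rejected. | ||||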
14500 | |||||
14501 | // Return false to prevent folding | ||||
14502 | // (mul (add x, c1), c2) -> (add (mul x, c2), c2*c1) in DAGCombine, | ||||
14503 | // if the folding leads to worse code. | ||||
14504 | bool AArch64TargetLowering::isMulAddWithConstProfitable( | ||||
14505 | SDValue AddNode, SDValue ConstNode) const { | ||||
14506 | // Let the DAGCombiner decide for vector types and large types. | ||||
14507 | const EVT VT = AddNode.getValueType(); | ||||
14508 | if (VT.isVector() || VT.getScalarSizeInBits() > 64) | ||||
14509 | return true; | ||||
14510 | |||||
14511 | // Folding is worse if c1 is a legal add immediate while c1*c2 is not, and | ||||
14512 | // c1*c2 has to be composed with at least two instructions. | ||||
14513 | const ConstantSDNode *C1Node = cast<ConstantSDNode>(AddNode.getOperand(1)); | ||||
14514 | const ConstantSDNode *C2Node = cast<ConstantSDNode>(ConstNode); | ||||
14515 | const int64_t C1 = C1Node->getSExtValue(); | ||||
14516 | const APInt C1C2 = C1Node->getAPIntValue() * C2Node->getAPIntValue(); | ||||
14517 | if (!isLegalAddImmediate(C1) || isLegalAddImmediate(C1C2.getSExtValue())) | ||||
14518 | return true; | ||||
14519 | SmallVector<AArch64_IMM::ImmInsnModel, 4> Insn; | ||||
14520 | AArch64_IMM::expandMOVImm(C1C2.getZExtValue(), VT.getSizeInBits(), Insn); | ||||
14521 | if (Insn.size() > 1) | ||||
14522 | return false; | ||||
14523 | |||||
14524 | // Default to true and let the DAGCombiner decide. | ||||
14525 | return true; | ||||
14526 | } | ||||
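      | | ||||
      | // For example, in (x + 0xfff000) * 3 the constant 0xfff000 is a legal | ||||
      | // (shifted) add immediate, but 0xfff000 * 3 == 0x2ffd000 would need a | ||||
      | // MOVZ+MOVK pair, so we return false and keep the add-then-mul form. | ||||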
14527 | |||||
14528 | // Integer comparisons are implemented with ADDS/SUBS, so the range of valid | ||||
14529 | // immediates is the same as for an add or a sub. | ||||
14530 | bool AArch64TargetLowering::isLegalICmpImmediate(int64_t Immed) const { | ||||
14531 | return isLegalAddImmediate(Immed); | ||||
14532 | } | ||||
14533 | |||||
14534 | /// isLegalAddressingMode - Return true if the addressing mode represented | ||||
14535 | /// by AM is legal for this target, for a load/store of the specified type. | ||||
14536 | bool AArch64TargetLowering::isLegalAddressingMode(const DataLayout &DL, | ||||
14537 | const AddrMode &AM, Type *Ty, | ||||
14538 | unsigned AS, Instruction *I) const { | ||||
14539 | // AArch64 has five basic addressing modes: | ||||
14540 | // reg | ||||
14541 | // reg + 9-bit signed offset | ||||
14542 | // reg + SIZE_IN_BYTES * 12-bit unsigned offset | ||||
14543 | // reg1 + reg2 | ||||
14544 | // reg + SIZE_IN_BYTES * reg | ||||
14545 | |||||
14546 | // No global is ever allowed as a base. | ||||
14547 | if (AM.BaseGV) | ||||
14548 | return false; | ||||
14549 | |||||
14550 | // No reg+reg+imm addressing. | ||||
14551 | if (AM.HasBaseReg && AM.BaseOffs && AM.Scale) | ||||
14552 | return false; | ||||
14553 | |||||
14554 | // FIXME: Update this method to support scalable addressing modes. | ||||
14555 | if (isa<ScalableVectorType>(Ty)) { | ||||
14556 | uint64_t VecElemNumBytes = | ||||
14557 | DL.getTypeSizeInBits(cast<VectorType>(Ty)->getElementType()) / 8; | ||||
14558 | return AM.HasBaseReg && !AM.BaseOffs && | ||||
14559 | (AM.Scale == 0 || (uint64_t)AM.Scale == VecElemNumBytes); | ||||
14560 | } | ||||
14561 | |||||
14562 | // check reg + imm case: | ||||
14563 | // i.e., reg + 0, reg + imm9, reg + SIZE_IN_BYTES * uimm12 | ||||
14564 | uint64_t NumBytes = 0; | ||||
14565 | if (Ty->isSized()) { | ||||
14566 | uint64_t NumBits = DL.getTypeSizeInBits(Ty); | ||||
14567 | NumBytes = NumBits / 8; | ||||
14568 | if (!isPowerOf2_64(NumBits)) | ||||
14569 | NumBytes = 0; | ||||
14570 | } | ||||
14571 | |||||
14572 | if (!AM.Scale) { | ||||
14573 | int64_t Offset = AM.BaseOffs; | ||||
14574 | |||||
14575 | // 9-bit signed offset | ||||
14576 | if (isInt<9>(Offset)) | ||||
14577 | return true; | ||||
14578 | |||||
14579 | // 12-bit unsigned offset | ||||
14580 | unsigned shift = Log2_64(NumBytes); | ||||
14581 | if (NumBytes && Offset > 0 && (Offset / NumBytes) <= (1LL << 12) - 1 && | ||||
14582 | // Must be a multiple of NumBytes (NumBytes is a power of 2) | ||||
14583 | (Offset >> shift) << shift == Offset) | ||||
14584 | return true; | ||||
14585 | return false; | ||||
14586 | } | ||||
14587 | |||||
14588 | // Check reg1 + SIZE_IN_BYTES * reg2 and reg1 + reg2 | ||||
14589 | |||||
14590 | return AM.Scale == 1 || (AM.Scale > 0 && (uint64_t)AM.Scale == NumBytes); | ||||
14591 | } | ||||
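      | | ||||
      | // For illustration, for an 8-byte access the five modes correspond | ||||
      | // roughly to: | ||||
      | //   ldr x0, [x1]             ; reg | ||||
      | //   ldur x0, [x1, #-17]      ; reg + 9-bit signed offset | ||||
      | //   ldr x0, [x1, #4088]      ; reg + 8 * 12-bit unsigned offset | ||||
      | //   ldr x0, [x1, x2]         ; reg1 + reg2 | ||||
      | //   ldr x0, [x1, x2, lsl #3] ; reg + 8 * reg | ||||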
14592 | |||||
14593 | bool AArch64TargetLowering::shouldConsiderGEPOffsetSplit() const { | ||||
14594 | // Consider splitting large offset of struct or array. | ||||
14595 | return true; | ||||
14596 | } | ||||
14597 | |||||
14598 | bool AArch64TargetLowering::isFMAFasterThanFMulAndFAdd( | ||||
14599 | const MachineFunction &MF, EVT VT) const { | ||||
14600 | VT = VT.getScalarType(); | ||||
14601 | |||||
14602 | if (!VT.isSimple()) | ||||
14603 | return false; | ||||
14604 | |||||
14605 | switch (VT.getSimpleVT().SimpleTy) { | ||||
14606 | case MVT::f16: | ||||
14607 | return Subtarget->hasFullFP16(); | ||||
14608 | case MVT::f32: | ||||
14609 | case MVT::f64: | ||||
14610 | return true; | ||||
14611 | default: | ||||
14612 | break; | ||||
14613 | } | ||||
14614 | |||||
14615 | return false; | ||||
14616 | } | ||||
14617 | |||||
14618 | bool AArch64TargetLowering::isFMAFasterThanFMulAndFAdd(const Function &F, | ||||
14619 | Type *Ty) const { | ||||
14620 | switch (Ty->getScalarType()->getTypeID()) { | ||||
14621 | case Type::FloatTyID: | ||||
14622 | case Type::DoubleTyID: | ||||
14623 | return true; | ||||
14624 | default: | ||||
14625 | return false; | ||||
14626 | } | ||||
14627 | } | ||||
14628 | |||||
14629 | bool AArch64TargetLowering::generateFMAsInMachineCombiner( | ||||
14630 | EVT VT, CodeGenOpt::Level OptLevel) const { | ||||
14631 | return (OptLevel >= CodeGenOpt::Aggressive) && !VT.isScalableVector() && | ||||
14632 | !useSVEForFixedLengthVectorVT(VT); | ||||
14633 | } | ||||
14634 | |||||
14635 | const MCPhysReg * | ||||
14636 | AArch64TargetLowering::getScratchRegisters(CallingConv::ID) const { | ||||
14637 | // LR is a callee-save register, but we must treat it as clobbered by any call | ||||
14638 | // site. Hence we include LR in the scratch registers, which are in turn added | ||||
14639 | // as implicit-defs for stackmaps and patchpoints. | ||||
14640 | static const MCPhysReg ScratchRegs[] = { | ||||
14641 | AArch64::X16, AArch64::X17, AArch64::LR, 0 | ||||
14642 | }; | ||||
14643 | return ScratchRegs; | ||||
14644 | } | ||||
14645 | |||||
14646 | bool | ||||
14647 | AArch64TargetLowering::isDesirableToCommuteWithShift(const SDNode *N, | ||||
14648 | CombineLevel Level) const { | ||||
14649 | assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA || | ||||
14650 |         N->getOpcode() == ISD::SRL) && | ||||
14651 |        "Expected shift op"); | ||||
14652 | |||||
14653 | SDValue ShiftLHS = N->getOperand(0); | ||||
14654 | EVT VT = N->getValueType(0); | ||||
14655 | |||||
14656 | // If ShiftLHS is unsigned bit extraction: ((x >> C) & mask), then do not | ||||
14657 | // combine it with shift 'N' to let it be lowered to UBFX except: | ||||
14658 | // ((x >> C) & mask) << C. | ||||
14659 | if (ShiftLHS.getOpcode() == ISD::AND && (VT == MVT::i32 || VT == MVT::i64) && | ||||
14660 | isa<ConstantSDNode>(ShiftLHS.getOperand(1))) { | ||||
14661 | uint64_t TruncMask = ShiftLHS.getConstantOperandVal(1); | ||||
14662 | if (isMask_64(TruncMask)) { | ||||
14663 | SDValue AndLHS = ShiftLHS.getOperand(0); | ||||
14664 | if (AndLHS.getOpcode() == ISD::SRL) { | ||||
14665 | if (auto *SRLC = dyn_cast<ConstantSDNode>(AndLHS.getOperand(1))) { | ||||
14666 | if (N->getOpcode() == ISD::SHL) | ||||
14667 | if (auto *SHLC = dyn_cast<ConstantSDNode>(N->getOperand(1))) | ||||
14668 | return SRLC->getZExtValue() == SHLC->getZExtValue(); | ||||
14669 | return false; | ||||
14670 | } | ||||
14671 | } | ||||
14672 | } | ||||
14673 | } | ||||
14674 | return true; | ||||
14675 | } | ||||
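      | | ||||
      | // For example, ((x >> 4) & 0xff) << 2 is left alone so the SRL/AND pair | ||||
      | // can still become a UBFX, whereas ((x >> 4) & 0xff) << 4 may be commuted | ||||
      | // since the matching shift amounts cancel. | ||||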
14676 | |||||
14677 | bool AArch64TargetLowering::isDesirableToCommuteXorWithShift( | ||||
14678 | const SDNode *N) const { | ||||
14679 | assert(N->getOpcode() == ISD::XOR && | ||||
14680 |        (N->getOperand(0).getOpcode() == ISD::SHL || | ||||
14681 |         N->getOperand(0).getOpcode() == ISD::SRL) && | ||||
14682 |        "Expected XOR(SHIFT) pattern"); | ||||
14683 | |||||
14684 | // Only commute if the entire NOT mask is a hidden shifted mask. | ||||
14685 | auto *XorC = dyn_cast<ConstantSDNode>(N->getOperand(1)); | ||||
14686 | auto *ShiftC = dyn_cast<ConstantSDNode>(N->getOperand(0).getOperand(1)); | ||||
14687 | if (XorC && ShiftC) { | ||||
14688 | unsigned MaskIdx, MaskLen; | ||||
14689 | if (XorC->getAPIntValue().isShiftedMask(MaskIdx, MaskLen)) { | ||||
14690 | unsigned ShiftAmt = ShiftC->getZExtValue(); | ||||
14691 | unsigned BitWidth = N->getValueType(0).getScalarSizeInBits(); | ||||
14692 | if (N->getOperand(0).getOpcode() == ISD::SHL) | ||||
14693 | return MaskIdx == ShiftAmt && MaskLen == (BitWidth - ShiftAmt); | ||||
14694 | return MaskIdx == 0 && MaskLen == (BitWidth - ShiftAmt); | ||||
14695 | } | ||||
14696 | } | ||||
14697 | |||||
14698 | return false; | ||||
14699 | } | ||||
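      | | ||||
      | // For example, on i32, xor (shl x, 8), 0xffffff00 has a NOT mask that is | ||||
      | // a shifted mask with MaskIdx == 8 and MaskLen == 24, so the xor can be | ||||
      | // commuted to apply to x before the shift. | ||||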
14700 | |||||
14701 | bool AArch64TargetLowering::shouldFoldConstantShiftPairToMask( | ||||
14702 | const SDNode *N, CombineLevel Level) const { | ||||
14703 | assert(((N->getOpcode() == ISD::SHL && | ||||
14704 |          N->getOperand(0).getOpcode() == ISD::SRL) || | ||||
14705 |         (N->getOpcode() == ISD::SRL && | ||||
14706 |          N->getOperand(0).getOpcode() == ISD::SHL)) && | ||||
14707 |        "Expected shift-shift mask"); | ||||
14708 | // Don't allow multiuse shift folding with the same shift amount. | ||||
14709 | if (!N->getOperand(0)->hasOneUse()) | ||||
14710 | return false; | ||||
14711 | |||||
14712 | // Only fold srl(shl(x,c1),c2) iff C1 >= C2 to prevent loss of UBFX patterns. | ||||
14713 | EVT VT = N->getValueType(0); | ||||
14714 | if (N->getOpcode() == ISD::SRL && (VT == MVT::i32 || VT == MVT::i64)) { | ||||
14715 | auto *C1 = dyn_cast<ConstantSDNode>(N->getOperand(0).getOperand(1)); | ||||
14716 | auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1)); | ||||
14717 | return (!C1 || !C2 || C1->getZExtValue() >= C2->getZExtValue()); | ||||
14718 | } | ||||
14719 | |||||
14720 | return true; | ||||
14721 | } | ||||
14722 | |||||
14723 | bool AArch64TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, | ||||
14724 | Type *Ty) const { | ||||
14725 | assert(Ty->isIntegerTy()); | ||||
14726 | |||||
14727 | unsigned BitSize = Ty->getPrimitiveSizeInBits(); | ||||
14728 | if (BitSize == 0) | ||||
14729 | return false; | ||||
14730 | |||||
14731 | int64_t Val = Imm.getSExtValue(); | ||||
14732 | if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, BitSize)) | ||||
14733 | return true; | ||||
14734 | |||||
14735 | if ((int64_t)Val < 0) | ||||
14736 | Val = ~Val; | ||||
14737 | if (BitSize == 32) | ||||
14738 | Val &= (1LL << 32) - 1; | ||||
14739 | |||||
14740 | unsigned LZ = countLeadingZeros((uint64_t)Val); | ||||
14741 | unsigned Shift = (63 - LZ) / 16; | ||||
14742 | // MOVZ is free so return true for one or fewer MOVK. | ||||
14743 | return Shift < 3; | ||||
14744 | } | ||||
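      | | ||||
      | // For example, 0x12345678 is materialized as MOVZ #0x5678 followed by | ||||
      | // MOVK #0x1234, lsl #16, which we consider cheaper than a load. | ||||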
14745 | |||||
14746 | bool AArch64TargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, | ||||
14747 | unsigned Index) const { | ||||
14748 | if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT)) | ||||
14749 | return false; | ||||
14750 | |||||
14751 | return (Index == 0 || Index == ResVT.getVectorMinNumElements()); | ||||
14752 | } | ||||
14753 | |||||
14754 | /// Turn vector tests of the signbit in the form of: | ||||
14755 | /// xor (sra X, elt_size(X)-1), -1 | ||||
14756 | /// into: | ||||
14757 | /// cmge X, X, #0 | ||||
14758 | static SDValue foldVectorXorShiftIntoCmp(SDNode *N, SelectionDAG &DAG, | ||||
14759 | const AArch64Subtarget *Subtarget) { | ||||
14760 | EVT VT = N->getValueType(0); | ||||
14761 | if (!Subtarget->hasNEON() || !VT.isVector()) | ||||
14762 | return SDValue(); | ||||
14763 | |||||
14764 | // There must be an arithmetic shift right before the xor, and the xor must | ||||
14765 | // be a 'not' operation (xor with all-ones). | ||||
14766 | SDValue Shift = N->getOperand(0); | ||||
14767 | SDValue Ones = N->getOperand(1); | ||||
14768 | if (Shift.getOpcode() != AArch64ISD::VASHR || !Shift.hasOneUse() || | ||||
14769 | !ISD::isBuildVectorAllOnes(Ones.getNode())) | ||||
14770 | return SDValue(); | ||||
14771 | |||||
14772 | // The shift should be smearing the sign bit across each vector element. | ||||
14773 | auto *ShiftAmt = dyn_cast<ConstantSDNode>(Shift.getOperand(1)); | ||||
14774 | EVT ShiftEltTy = Shift.getValueType().getVectorElementType(); | ||||
14775 | if (!ShiftAmt || ShiftAmt->getZExtValue() != ShiftEltTy.getSizeInBits() - 1) | ||||
14776 | return SDValue(); | ||||
14777 | |||||
14778 | return DAG.getNode(AArch64ISD::CMGEz, SDLoc(N), VT, Shift.getOperand(0)); | ||||
14779 | } | ||||
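      | | ||||
      | // For example, on v4i32 the xor (vashr x, #31), splat(-1) pattern becomes | ||||
      | // a single "cmge v0.4s, v1.4s, #0" (register names illustrative). | ||||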
14780 | |||||
14781 | // Given a vecreduce_add node, detect the below pattern and convert it to the | ||||
14782 | // node sequence with UABDL, [S|U]ABD and UADDLP. | ||||
14783 | // | ||||
14784 | // i32 vecreduce_add( | ||||
14785 | // v16i32 abs( | ||||
14786 | // v16i32 sub( | ||||
14787 | // v16i32 [sign|zero]_extend(v16i8 a), v16i32 [sign|zero]_extend(v16i8 b)))) | ||||
14788 | // =================> | ||||
14789 | // i32 vecreduce_add( | ||||
14790 | // v4i32 UADDLP( | ||||
14791 | // v8i16 add( | ||||
14792 | // v8i16 zext( | ||||
14793 | // v8i8 [S|U]ABD low8:v16i8 a, low8:v16i8 b | ||||
14794 | // v8i16 zext( | ||||
14795 | // v8i8 [S|U]ABD high8:v16i8 a, high8:v16i8 b | ||||
14796 | static SDValue performVecReduceAddCombineWithUADDLP(SDNode *N, | ||||
14797 | SelectionDAG &DAG) { | ||||
14798 | // Assumed i32 vecreduce_add | ||||
14799 | if (N->getValueType(0) != MVT::i32) | ||||
14800 | return SDValue(); | ||||
14801 | |||||
14802 | SDValue VecReduceOp0 = N->getOperand(0); | ||||
14803 | unsigned Opcode = VecReduceOp0.getOpcode(); | ||||
14804 | // Assumed v16i32 abs | ||||
14805 | if (Opcode != ISD::ABS || VecReduceOp0->getValueType(0) != MVT::v16i32) | ||||
14806 | return SDValue(); | ||||
14807 | |||||
14808 | SDValue ABS = VecReduceOp0; | ||||
14809 | // Assumed v16i32 sub | ||||
14810 | if (ABS->getOperand(0)->getOpcode() != ISD::SUB || | ||||
14811 | ABS->getOperand(0)->getValueType(0) != MVT::v16i32) | ||||
14812 | return SDValue(); | ||||
14813 | |||||
14814 | SDValue SUB = ABS->getOperand(0); | ||||
14815 | unsigned Opcode0 = SUB->getOperand(0).getOpcode(); | ||||
14816 | unsigned Opcode1 = SUB->getOperand(1).getOpcode(); | ||||
14817 | // Assumed v16i32 type | ||||
14818 | if (SUB->getOperand(0)->getValueType(0) != MVT::v16i32 || | ||||
14819 | SUB->getOperand(1)->getValueType(0) != MVT::v16i32) | ||||
14820 | return SDValue(); | ||||
14821 | |||||
14822 | // Assumed zext or sext | ||||
14823 | bool IsZExt = false; | ||||
14824 | if (Opcode0 == ISD::ZERO_EXTEND && Opcode1 == ISD::ZERO_EXTEND) { | ||||
14825 | IsZExt = true; | ||||
14826 | } else if (Opcode0 == ISD::SIGN_EXTEND && Opcode1 == ISD::SIGN_EXTEND) { | ||||
14827 | IsZExt = false; | ||||
14828 | } else | ||||
14829 | return SDValue(); | ||||
14830 | |||||
14831 | SDValue EXT0 = SUB->getOperand(0); | ||||
14832 | SDValue EXT1 = SUB->getOperand(1); | ||||
14833 | // Assumed zext's operand has v16i8 type | ||||
14834 | if (EXT0->getOperand(0)->getValueType(0) != MVT::v16i8 || | ||||
14835 | EXT1->getOperand(0)->getValueType(0) != MVT::v16i8) | ||||
14836 | return SDValue(); | ||||
14837 | |||||
14838 | // Pattern is detected. Let's convert it to a sequence of nodes. | ||||
14839 | SDLoc DL(N); | ||||
14840 | |||||
14841 | // First, create the node pattern of UABD/SABD. | ||||
14842 | SDValue UABDHigh8Op0 = | ||||
14843 | DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8, EXT0->getOperand(0), | ||||
14844 | DAG.getConstant(8, DL, MVT::i64)); | ||||
14845 | SDValue UABDHigh8Op1 = | ||||
14846 | DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8, EXT1->getOperand(0), | ||||
14847 | DAG.getConstant(8, DL, MVT::i64)); | ||||
14848 | SDValue UABDHigh8 = DAG.getNode(IsZExt ? ISD::ABDU : ISD::ABDS, DL, MVT::v8i8, | ||||
14849 | UABDHigh8Op0, UABDHigh8Op1); | ||||
14850 | SDValue UABDL = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, UABDHigh8); | ||||
14851 | |||||
14852 | // Second, create the node pattern of UABAL. | ||||
14853 | SDValue UABDLo8Op0 = | ||||
14854 | DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8, EXT0->getOperand(0), | ||||
14855 | DAG.getConstant(0, DL, MVT::i64)); | ||||
14856 | SDValue UABDLo8Op1 = | ||||
14857 | DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8, EXT1->getOperand(0), | ||||
14858 | DAG.getConstant(0, DL, MVT::i64)); | ||||
14859 | SDValue UABDLo8 = DAG.getNode(IsZExt ? ISD::ABDU : ISD::ABDS, DL, MVT::v8i8, | ||||
14860 | UABDLo8Op0, UABDLo8Op1); | ||||
14861 | SDValue ZExtUABD = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, UABDLo8); | ||||
14862 | SDValue UABAL = DAG.getNode(ISD::ADD, DL, MVT::v8i16, UABDL, ZExtUABD); | ||||
14863 | |||||
14864 | // Third, create the node of UADDLP. | ||||
14865 | SDValue UADDLP = DAG.getNode(AArch64ISD::UADDLP, DL, MVT::v4i32, UABAL); | ||||
14866 | |||||
14867 | // Fourth, create the node of VECREDUCE_ADD. | ||||
14868 | return DAG.getNode(ISD::VECREDUCE_ADD, DL, MVT::i32, UADDLP); | ||||
14869 | } | ||||
14870 | |||||
14871 | // Turn a v8i8/v16i8 extended vecreduce into a udot/sdot and vecreduce | ||||
14872 | // vecreduce.add(ext(A)) to vecreduce.add(DOT(zero, A, one)) | ||||
14873 | // vecreduce.add(mul(ext(A), ext(B))) to vecreduce.add(DOT(zero, A, B)) | ||||
14874 | static SDValue performVecReduceAddCombine(SDNode *N, SelectionDAG &DAG, | ||||
14875 | const AArch64Subtarget *ST) { | ||||
14876 | if (!ST->hasDotProd()) | ||||
14877 | return performVecReduceAddCombineWithUADDLP(N, DAG); | ||||
14878 | |||||
14879 | SDValue Op0 = N->getOperand(0); | ||||
14880 | if (N->getValueType(0) != MVT::i32 || | ||||
14881 | Op0.getValueType().getVectorElementType() != MVT::i32) | ||||
14882 | return SDValue(); | ||||
14883 | |||||
14884 | unsigned ExtOpcode = Op0.getOpcode(); | ||||
14885 | SDValue A = Op0; | ||||
14886 | SDValue B; | ||||
14887 | if (ExtOpcode == ISD::MUL) { | ||||
14888 | A = Op0.getOperand(0); | ||||
14889 | B = Op0.getOperand(1); | ||||
14890 | if (A.getOpcode() != B.getOpcode() || | ||||
14891 | A.getOperand(0).getValueType() != B.getOperand(0).getValueType()) | ||||
14892 | return SDValue(); | ||||
14893 | ExtOpcode = A.getOpcode(); | ||||
14894 | } | ||||
14895 | if (ExtOpcode != ISD::ZERO_EXTEND && ExtOpcode != ISD::SIGN_EXTEND) | ||||
14896 | return SDValue(); | ||||
14897 | |||||
14898 | EVT Op0VT = A.getOperand(0).getValueType(); | ||||
14899 | if (Op0VT != MVT::v8i8 && Op0VT != MVT::v16i8) | ||||
14900 | return SDValue(); | ||||
14901 | |||||
14902 | SDLoc DL(Op0); | ||||
14903 | // For non-MLA reductions, B can be set to 1. For MLA we take the operand | ||||
14904 | // of B's extend. | ||||
14905 | if (!B) | ||||
14906 | B = DAG.getConstant(1, DL, Op0VT); | ||||
14907 | else | ||||
14908 | B = B.getOperand(0); | ||||
14909 | |||||
14910 | SDValue Zeros = | ||||
14911 | DAG.getConstant(0, DL, Op0VT == MVT::v8i8 ? MVT::v2i32 : MVT::v4i32); | ||||
14912 | auto DotOpcode = | ||||
14913 | (ExtOpcode == ISD::ZERO_EXTEND) ? AArch64ISD::UDOT : AArch64ISD::SDOT; | ||||
14914 | SDValue Dot = DAG.getNode(DotOpcode, DL, Zeros.getValueType(), Zeros, | ||||
14915 | A.getOperand(0), B); | ||||
14916 | return DAG.getNode(ISD::VECREDUCE_ADD, DL, N->getValueType(0), Dot); | ||||
14917 | } | ||||
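      | | ||||
      | // Illustrative sketch (hypothetical IR): with +dotprod, | ||||
      | //   %e = zext <16 x i8> %a to <16 x i32> | ||||
      | //   %r = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %e) | ||||
      | // is rewritten as a UDOT of %a against a splat of 1 accumulating into | ||||
      | // zero, followed by a v4i32 vecreduce.add of the UDOT result. | ||||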
14918 | |||||
14919 | // Given an (integer) vecreduce, we know the order of the inputs does not | ||||
14920 | // matter. We can convert UADDV(add(zext(extract_lo(x)), zext(extract_hi(x)))) | ||||
14921 | // into UADDV(UADDLP(x)). This can also happen through an extra add, where we | ||||
14922 | // transform UADDV(add(y, add(zext(extract_lo(x)), zext(extract_hi(x))))). | ||||
14923 | static SDValue performUADDVCombine(SDNode *N, SelectionDAG &DAG) { | ||||
14924 | auto DetectAddExtract = [&](SDValue A) { | ||||
14925 | // Look for add(zext(extract_lo(x)), zext(extract_hi(x))), returning | ||||
14926 | // UADDLP(x) if found. | ||||
14927 | if (A.getOpcode() != ISD::ADD) | ||||
14928 | return SDValue(); | ||||
14929 | EVT VT = A.getValueType(); | ||||
14930 | SDValue Op0 = A.getOperand(0); | ||||
14931 | SDValue Op1 = A.getOperand(1); | ||||
14932 | if (Op0.getOpcode() != Op1.getOpcode() || | ||||
14933 | (Op0.getOpcode() != ISD::ZERO_EXTEND && | ||||
14934 | Op0.getOpcode() != ISD::SIGN_EXTEND)) | ||||
14935 | return SDValue(); | ||||
14936 | SDValue Ext0 = Op0.getOperand(0); | ||||
14937 | SDValue Ext1 = Op1.getOperand(0); | ||||
14938 | if (Ext0.getOpcode() != ISD::EXTRACT_SUBVECTOR || | ||||
14939 | Ext1.getOpcode() != ISD::EXTRACT_SUBVECTOR || | ||||
14940 | Ext0.getOperand(0) != Ext1.getOperand(0)) | ||||
14941 | return SDValue(); | ||||
14942 | // Check that the source type has twice as many elements as the add's type, | ||||
14943 | // and that the extracts read the upper and lower halves of the same source. | ||||
14944 | if (Ext0.getOperand(0).getValueType().getVectorNumElements() != | ||||
14945 | VT.getVectorNumElements() * 2) | ||||
14946 | return SDValue(); | ||||
14947 | if ((Ext0.getConstantOperandVal(1) != 0 && | ||||
14948 | Ext1.getConstantOperandVal(1) != VT.getVectorNumElements()) && | ||||
14949 | (Ext1.getConstantOperandVal(1) != 0 && | ||||
14950 | Ext0.getConstantOperandVal(1) != VT.getVectorNumElements())) | ||||
14951 | return SDValue(); | ||||
14952 | unsigned Opcode = Op0.getOpcode() == ISD::ZERO_EXTEND ? AArch64ISD::UADDLP | ||||
14953 | : AArch64ISD::SADDLP; | ||||
14954 | return DAG.getNode(Opcode, SDLoc(A), VT, Ext0.getOperand(0)); | ||||
14955 | }; | ||||
14956 | |||||
14957 | SDValue A = N->getOperand(0); | ||||
14958 | if (SDValue R = DetectAddExtract(A)) | ||||
14959 | return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), R); | ||||
14960 | if (A.getOpcode() == ISD::ADD) { | ||||
14961 | if (SDValue R = DetectAddExtract(A.getOperand(0))) | ||||
14962 | return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), | ||||
14963 | DAG.getNode(ISD::ADD, SDLoc(A), A.getValueType(), R, | ||||
14964 | A.getOperand(1))); | ||||
14965 | if (SDValue R = DetectAddExtract(A.getOperand(1))) | ||||
14966 | return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), | ||||
14967 | DAG.getNode(ISD::ADD, SDLoc(A), A.getValueType(), R, | ||||
14968 | A.getOperand(0))); | ||||
14969 | } | ||||
14970 | return SDValue(); | ||||
14971 | } | ||||
14972 | |||||
14973 | |||||
14974 | static SDValue performXorCombine(SDNode *N, SelectionDAG &DAG, | ||||
14975 | TargetLowering::DAGCombinerInfo &DCI, | ||||
14976 | const AArch64Subtarget *Subtarget) { | ||||
14977 | if (DCI.isBeforeLegalizeOps()) | ||||
14978 | return SDValue(); | ||||
14979 | |||||
14980 | return foldVectorXorShiftIntoCmp(N, DAG, Subtarget); | ||||
14981 | } | ||||
14982 | |||||
14983 | SDValue | ||||
14984 | AArch64TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor, | ||||
14985 | SelectionDAG &DAG, | ||||
14986 | SmallVectorImpl<SDNode *> &Created) const { | ||||
14987 | AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes(); | ||||
14988 | if (isIntDivCheap(N->getValueType(0), Attr)) | ||||
14989 | return SDValue(N,0); // Lower SDIV as SDIV | ||||
14990 | |||||
14991 | EVT VT = N->getValueType(0); | ||||
14992 | |||||
14993 | // For scalable and fixed-length vector types, mark the division as cheap | ||||
14994 | // so it is handled much later. This lets us handle larger-than-legal types. | ||||
14995 | if (VT.isScalableVector() || Subtarget->useSVEForFixedLengthVectors()) | ||||
14996 | return SDValue(N, 0); | ||||
14997 | |||||
14998 | // fold (sdiv X, pow2) | ||||
14999 | if ((VT != MVT::i32 && VT != MVT::i64) || | ||||
15000 | !(Divisor.isPowerOf2() || Divisor.isNegatedPowerOf2())) | ||||
15001 | return SDValue(); | ||||
15002 | |||||
15003 | SDLoc DL(N); | ||||
15004 | SDValue N0 = N->getOperand(0); | ||||
15005 | unsigned Lg2 = Divisor.countTrailingZeros(); | ||||
15006 | SDValue Zero = DAG.getConstant(0, DL, VT); | ||||
15007 | SDValue Pow2MinusOne = DAG.getConstant((1ULL << Lg2) - 1, DL, VT); | ||||
15008 | |||||
15009 | // Add (N0 < 0) ? Pow2 - 1 : 0; | ||||
15010 | SDValue CCVal; | ||||
15011 | SDValue Cmp = getAArch64Cmp(N0, Zero, ISD::SETLT, CCVal, DAG, DL); | ||||
15012 | SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, Pow2MinusOne); | ||||
15013 | SDValue CSel = DAG.getNode(AArch64ISD::CSEL, DL, VT, Add, N0, CCVal, Cmp); | ||||
15014 | |||||
15015 | Created.push_back(Cmp.getNode()); | ||||
15016 | Created.push_back(Add.getNode()); | ||||
15017 | Created.push_back(CSel.getNode()); | ||||
15018 | |||||
15019 | // Divide by pow2. | ||||
15020 | SDValue SRA = | ||||
15021 | DAG.getNode(ISD::SRA, DL, VT, CSel, DAG.getConstant(Lg2, DL, MVT::i64)); | ||||
15022 | |||||
15023 | // If we're dividing by a positive value, we're done. Otherwise, we must | ||||
15024 | // negate the result. | ||||
15025 | if (Divisor.isNonNegative()) | ||||
15026 | return SRA; | ||||
15027 | |||||
15028 | Created.push_back(SRA.getNode()); | ||||
15029 | return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), SRA); | ||||
15030 | } | ||||
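      | | ||||
      | // Illustrative result for "x sdiv 8" (register allocation hypothetical): | ||||
      | //   cmp  x0, #0 | ||||
      | //   add  x8, x0, #7 | ||||
      | //   csel x8, x8, x0, lt | ||||
      | //   asr  x0, x8, #3 | ||||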
15031 | |||||
15032 | SDValue | ||||
15033 | AArch64TargetLowering::BuildSREMPow2(SDNode *N, const APInt &Divisor, | ||||
15034 | SelectionDAG &DAG, | ||||
15035 | SmallVectorImpl<SDNode *> &Created) const { | ||||
15036 | AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes(); | ||||
15037 | if (isIntDivCheap(N->getValueType(0), Attr)) | ||||
15038 | return SDValue(N, 0); // Lower SREM as SREM | ||||
15039 | |||||
15040 | EVT VT = N->getValueType(0); | ||||
15041 | |||||
15042 | // For scalable and fixed-length vector types, mark the remainder as cheap | ||||
15043 | // so it is handled much later. This lets us handle larger-than-legal types. | ||||
15044 | if (VT.isScalableVector() || Subtarget->useSVEForFixedLengthVectors()) | ||||
15045 | return SDValue(N, 0); | ||||
15046 | |||||
15047 | // fold (srem X, pow2) | ||||
15048 | if ((VT != MVT::i32 && VT != MVT::i64) || | ||||
15049 | !(Divisor.isPowerOf2() || Divisor.isNegatedPowerOf2())) | ||||
15050 | return SDValue(); | ||||
15051 | |||||
15052 | unsigned Lg2 = Divisor.countTrailingZeros(); | ||||
15053 | if (Lg2 == 0) | ||||
15054 | return SDValue(); | ||||
15055 | |||||
15056 | SDLoc DL(N); | ||||
15057 | SDValue N0 = N->getOperand(0); | ||||
15058 | SDValue Pow2MinusOne = DAG.getConstant((1ULL << Lg2) - 1, DL, VT); | ||||
15059 | SDValue Zero = DAG.getConstant(0, DL, VT); | ||||
15060 | SDValue CCVal, CSNeg; | ||||
15061 | if (Lg2 == 1) { | ||||
15062 | SDValue Cmp = getAArch64Cmp(N0, Zero, ISD::SETGE, CCVal, DAG, DL); | ||||
15063 | SDValue And = DAG.getNode(ISD::AND, DL, VT, N0, Pow2MinusOne); | ||||
15064 | CSNeg = DAG.getNode(AArch64ISD::CSNEG, DL, VT, And, And, CCVal, Cmp); | ||||
15065 | |||||
15066 | Created.push_back(Cmp.getNode()); | ||||
15067 | Created.push_back(And.getNode()); | ||||
15068 | } else { | ||||
15069 | SDValue CCVal = DAG.getConstant(AArch64CC::MI, DL, MVT_CC); | ||||
15070 | SDVTList VTs = DAG.getVTList(VT, MVT::i32); | ||||
15071 | |||||
15072 | SDValue Negs = DAG.getNode(AArch64ISD::SUBS, DL, VTs, Zero, N0); | ||||
15073 | SDValue AndPos = DAG.getNode(ISD::AND, DL, VT, N0, Pow2MinusOne); | ||||
15074 | SDValue AndNeg = DAG.getNode(ISD::AND, DL, VT, Negs, Pow2MinusOne); | ||||
15075 | CSNeg = DAG.getNode(AArch64ISD::CSNEG, DL, VT, AndPos, AndNeg, CCVal, | ||||
15076 | Negs.getValue(1)); | ||||
15077 | |||||
15078 | Created.push_back(Negs.getNode()); | ||||
15079 | Created.push_back(AndPos.getNode()); | ||||
15080 | Created.push_back(AndNeg.getNode()); | ||||
15081 | } | ||||
15082 | |||||
15083 | return CSNeg; | ||||
15084 | } | ||||
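      | | ||||
      | // Illustrative result for "x srem 8" (register allocation hypothetical): | ||||
      | //   negs  x8, x0            ; 0 - x, setting flags | ||||
      | //   and   x9, x0, #7 | ||||
      | //   and   x8, x8, #7 | ||||
      | //   csneg x0, x9, x8, mi    ; x & 7 if x > 0, else -((-x) & 7) | ||||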
15085 | |||||
15086 | static std::optional<unsigned> IsSVECntIntrinsic(SDValue S) { | ||||
15087 | switch(getIntrinsicID(S.getNode())) { | ||||
15088 | default: | ||||
15089 | break; | ||||
15090 | case Intrinsic::aarch64_sve_cntb: | ||||
15091 | return 8; | ||||
15092 | case Intrinsic::aarch64_sve_cnth: | ||||
15093 | return 16; | ||||
15094 | case Intrinsic::aarch64_sve_cntw: | ||||
15095 | return 32; | ||||
15096 | case Intrinsic::aarch64_sve_cntd: | ||||
15097 | return 64; | ||||
15098 | } | ||||
15099 | return {}; | ||||
15100 | } | ||||
15101 | |||||
15102 | /// Calculates what the pre-extend type is, based on the extension | ||||
15103 | /// operation node provided by \p Extend. | ||||
15104 | /// | ||||
15105 | /// In the case that \p Extend is a SIGN_EXTEND or a ZERO_EXTEND, the | ||||
15106 | /// pre-extend type is pulled directly from the operand, while other extend | ||||
15107 | /// operations need a bit more inspection to get this information. | ||||
15108 | /// | ||||
15109 | /// \param Extend The SDNode from the DAG that represents the extend operation | ||||
15110 | /// | ||||
15111 | /// \returns The type representing the \p Extend source type, or \p MVT::Other | ||||
15112 | /// if no valid type can be determined | ||||
15113 | static EVT calculatePreExtendType(SDValue Extend) { | ||||
15114 | switch (Extend.getOpcode()) { | ||||
15115 | case ISD::SIGN_EXTEND: | ||||
15116 | case ISD::ZERO_EXTEND: | ||||
15117 | return Extend.getOperand(0).getValueType(); | ||||
15118 | case ISD::AssertSext: | ||||
15119 | case ISD::AssertZext: | ||||
15120 | case ISD::SIGN_EXTEND_INREG: { | ||||
15121 | VTSDNode *TypeNode = dyn_cast<VTSDNode>(Extend.getOperand(1)); | ||||
15122 | if (!TypeNode) | ||||
15123 | return MVT::Other; | ||||
15124 | return TypeNode->getVT(); | ||||
15125 | } | ||||
15126 | case ISD::AND: { | ||||
15127 | ConstantSDNode *Constant = | ||||
15128 | dyn_cast<ConstantSDNode>(Extend.getOperand(1).getNode()); | ||||
15129 | if (!Constant) | ||||
15130 | return MVT::Other; | ||||
15131 | |||||
15132 | uint32_t Mask = Constant->getZExtValue(); | ||||
15133 | |||||
15134 | if (Mask == UCHAR_MAX) | ||||
15135 |   return MVT::i8; | ||||
15136 | else if (Mask == USHRT_MAX) | ||||
15137 |   return MVT::i16; | ||||
15138 | else if (Mask == UINT_MAX) | ||||
15139 | return MVT::i32; | ||||
15140 | |||||
15141 | return MVT::Other; | ||||
15142 | } | ||||
15143 | default: | ||||
15144 | return MVT::Other; | ||||
15145 | } | ||||
15146 | } | ||||
15147 | |||||
15148 | /// Combines a buildvector(sext/zext) or shuffle(sext/zext, undef) node pattern | ||||
15149 | /// into sext/zext(buildvector) or sext/zext(shuffle) making use of the vector | ||||
15150 | /// SExt/ZExt rather than the scalar SExt/ZExt | ||||
15151 | static SDValue performBuildShuffleExtendCombine(SDValue BV, SelectionDAG &DAG) { | ||||
15152 | EVT VT = BV.getValueType(); | ||||
15153 | if (BV.getOpcode() != ISD::BUILD_VECTOR && | ||||
15154 | BV.getOpcode() != ISD::VECTOR_SHUFFLE) | ||||
15155 | return SDValue(); | ||||
15156 | |||||
15157 | // Use the first item in the buildvector/shuffle to get the size of the | ||||
15158 | // extend, and make sure it looks valid. | ||||
15159 | SDValue Extend = BV->getOperand(0); | ||||
15160 | unsigned ExtendOpcode = Extend.getOpcode(); | ||||
15161 | bool IsSExt = ExtendOpcode == ISD::SIGN_EXTEND || | ||||
15162 | ExtendOpcode == ISD::SIGN_EXTEND_INREG || | ||||
15163 | ExtendOpcode == ISD::AssertSext; | ||||
15164 | if (!IsSExt && ExtendOpcode != ISD::ZERO_EXTEND && | ||||
15165 | ExtendOpcode != ISD::AssertZext && ExtendOpcode != ISD::AND) | ||||
15166 | return SDValue(); | ||||
15167 | // Shuffle inputs are vectors; limit to SIGN_EXTEND and ZERO_EXTEND to | ||||
15168 | // ensure calculatePreExtendType will work without issue. | ||||
15169 | if (BV.getOpcode() == ISD::VECTOR_SHUFFLE && | ||||
15170 | ExtendOpcode != ISD::SIGN_EXTEND && ExtendOpcode != ISD::ZERO_EXTEND) | ||||
15171 | return SDValue(); | ||||
15172 | |||||
15173 | // Restrict valid pre-extend data type | ||||
15174 | EVT PreExtendType = calculatePreExtendType(Extend); | ||||
15175 | if (PreExtendType == MVT::Other || | ||||
15176 | PreExtendType.getScalarSizeInBits() != VT.getScalarSizeInBits() / 2) | ||||
15177 | return SDValue(); | ||||
15178 | |||||
15179 | // Make sure all other operands are equally extended | ||||
15180 | for (SDValue Op : drop_begin(BV->ops())) { | ||||
15181 | if (Op.isUndef()) | ||||
15182 | continue; | ||||
15183 | unsigned Opc = Op.getOpcode(); | ||||
15184 | bool OpcIsSExt = Opc == ISD::SIGN_EXTEND || Opc == ISD::SIGN_EXTEND_INREG || | ||||
15185 | Opc == ISD::AssertSext; | ||||
15186 | if (OpcIsSExt != IsSExt || calculatePreExtendType(Op) != PreExtendType) | ||||
15187 | return SDValue(); | ||||
15188 | } | ||||
15189 | |||||
15190 | SDValue NBV; | ||||
15191 | SDLoc DL(BV); | ||||
15192 | if (BV.getOpcode() == ISD::BUILD_VECTOR) { | ||||
15193 | EVT PreExtendVT = VT.changeVectorElementType(PreExtendType); | ||||
15194 | EVT PreExtendLegalType = | ||||
15195 | PreExtendType.getScalarSizeInBits() < 32 ? MVT::i32 : PreExtendType; | ||||
15196 | SmallVector<SDValue, 8> NewOps; | ||||
15197 | for (SDValue Op : BV->ops()) | ||||
15198 | NewOps.push_back(Op.isUndef() ? DAG.getUNDEF(PreExtendLegalType) | ||||
15199 | : DAG.getAnyExtOrTrunc(Op.getOperand(0), DL, | ||||
15200 | PreExtendLegalType)); | ||||
15201 | NBV = DAG.getNode(ISD::BUILD_VECTOR, DL, PreExtendVT, NewOps); | ||||
15202 | } else { // BV.getOpcode() == ISD::VECTOR_SHUFFLE | ||||
15203 | EVT PreExtendVT = VT.changeVectorElementType(PreExtendType.getScalarType()); | ||||
15204 | NBV = DAG.getVectorShuffle(PreExtendVT, DL, BV.getOperand(0).getOperand(0), | ||||
15205 | BV.getOperand(1).isUndef() | ||||
15206 | ? DAG.getUNDEF(PreExtendVT) | ||||
15207 | : BV.getOperand(1).getOperand(0), | ||||
15208 | cast<ShuffleVectorSDNode>(BV)->getMask()); | ||||
15209 | } | ||||
15210 | return DAG.getNode(IsSExt ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, DL, VT, NBV); | ||||
15211 | } | ||||
15212 | |||||
15213 | /// Combines a mul(dup(sext/zext)) node pattern into mul(sext/zext(dup)) | ||||
15214 | /// making use of the vector SExt/ZExt rather than the scalar SExt/ZExt | ||||
15215 | static SDValue performMulVectorExtendCombine(SDNode *Mul, SelectionDAG &DAG) { | ||||
15216 | // If the value type isn't a vector, none of the operands are going to be dups | ||||
15217 | EVT VT = Mul->getValueType(0); | ||||
15218 | if (VT != MVT::v8i16 && VT != MVT::v4i32 && VT != MVT::v2i64) | ||||
15219 | return SDValue(); | ||||
15220 | |||||
15221 | SDValue Op0 = performBuildShuffleExtendCombine(Mul->getOperand(0), DAG); | ||||
15222 | SDValue Op1 = performBuildShuffleExtendCombine(Mul->getOperand(1), DAG); | ||||
15223 | |||||
15224 | // Neither operand has been changed; don't make any further changes. | ||||
15225 | if (!Op0 && !Op1) | ||||
15226 | return SDValue(); | ||||
15227 | |||||
15228 | SDLoc DL(Mul); | ||||
15229 | return DAG.getNode(Mul->getOpcode(), DL, VT, Op0 ? Op0 : Mul->getOperand(0), | ||||
15230 | Op1 ? Op1 : Mul->getOperand(1)); | ||||
15231 | } | ||||
15232 | |||||
15233 | // Combine v4i32 Mul(And(Srl(X, 15), 0x10001), 0xffff) -> v8i16 CMLTz | ||||
15234 | // Same for other types with equivalent constants. | ||||
15235 | static SDValue performMulVectorCmpZeroCombine(SDNode *N, SelectionDAG &DAG) { | ||||
15236 | EVT VT = N->getValueType(0); | ||||
15237 | if (VT != MVT::v2i64 && VT != MVT::v1i64 && VT != MVT::v2i32 && | ||||
15238 | VT != MVT::v4i32 && VT != MVT::v4i16 && VT != MVT::v8i16) | ||||
15239 | return SDValue(); | ||||
15240 | if (N->getOperand(0).getOpcode() != ISD::AND || | ||||
15241 | N->getOperand(0).getOperand(0).getOpcode() != ISD::SRL) | ||||
15242 | return SDValue(); | ||||
15243 | |||||
15244 | SDValue And = N->getOperand(0); | ||||
15245 | SDValue Srl = And.getOperand(0); | ||||
15246 | |||||
15247 | APInt V1, V2, V3; | ||||
15248 | if (!ISD::isConstantSplatVector(N->getOperand(1).getNode(), V1) || | ||||
15249 | !ISD::isConstantSplatVector(And.getOperand(1).getNode(), V2) || | ||||
15250 | !ISD::isConstantSplatVector(Srl.getOperand(1).getNode(), V3)) | ||||
15251 | return SDValue(); | ||||
15252 | |||||
15253 | unsigned HalfSize = VT.getScalarSizeInBits() / 2; | ||||
15254 | if (!V1.isMask(HalfSize) || V2 != (1ULL | 1ULL << HalfSize) || | ||||
15255 | V3 != (HalfSize - 1)) | ||||
15256 | return SDValue(); | ||||
15257 | |||||
15258 | EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), | ||||
15259 | EVT::getIntegerVT(*DAG.getContext(), HalfSize), | ||||
15260 | VT.getVectorElementCount() * 2); | ||||
15261 | |||||
15262 | SDLoc DL(N); | ||||
15263 | SDValue In = DAG.getNode(AArch64ISD::NVCAST, DL, HalfVT, Srl.getOperand(0)); | ||||
15264 | SDValue CM = DAG.getNode(AArch64ISD::CMLTz, DL, HalfVT, In); | ||||
15265 | return DAG.getNode(AArch64ISD::NVCAST, DL, VT, CM); | ||||
15266 | } | ||||
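      | | ||||
      | // Each wide lane computes ((x >> 15) & 0x10001) * 0xffff, which sets a | ||||
      | // 16-bit half to 0xffff exactly when that half's sign bit was set -- the | ||||
      | // same result as CMLT #0 on the value reinterpreted as v8i16. | ||||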
15267 | |||||
15268 | static SDValue performMulCombine(SDNode *N, SelectionDAG &DAG, | ||||
15269 | TargetLowering::DAGCombinerInfo &DCI, | ||||
15270 | const AArch64Subtarget *Subtarget) { | ||||
15271 | |||||
15272 | if (SDValue Ext = performMulVectorExtendCombine(N, DAG)) | ||||
15273 | return Ext; | ||||
15274 | if (SDValue Ext = performMulVectorCmpZeroCombine(N, DAG)) | ||||
15275 | return Ext; | ||||
15276 | |||||
15277 | if (DCI.isBeforeLegalizeOps()) | ||||
15278 | return SDValue(); | ||||
15279 | |||||
15280 | // Canonicalize X*(Y+1) -> X*Y+X and (X+1)*Y -> X*Y+Y, | ||||
15281 | // and in MachineCombiner pass, add+mul will be combined into madd. | ||||
15282 | // Similarly, X*(1-Y) -> X - X*Y and (1-Y)*X -> X - Y*X. | ||||
15283 | SDLoc DL(N); | ||||
15284 | EVT VT = N->getValueType(0); | ||||
15285 | SDValue N0 = N->getOperand(0); | ||||
15286 | SDValue N1 = N->getOperand(1); | ||||
15287 | SDValue MulOper; | ||||
15288 | unsigned AddSubOpc; | ||||
15289 | |||||
15290 | auto IsAddSubWith1 = [&](SDValue V) -> bool { | ||||
15291 | AddSubOpc = V->getOpcode(); | ||||
15292 | if ((AddSubOpc == ISD::ADD || AddSubOpc == ISD::SUB) && V->hasOneUse()) { | ||||
15293 | SDValue Opnd = V->getOperand(1); | ||||
15294 | MulOper = V->getOperand(0); | ||||
15295 | if (AddSubOpc == ISD::SUB) | ||||
15296 | std::swap(Opnd, MulOper); | ||||
15297 | if (auto C = dyn_cast<ConstantSDNode>(Opnd)) | ||||
15298 | return C->isOne(); | ||||
15299 | } | ||||
15300 | return false; | ||||
15301 | }; | ||||
15302 | |||||
15303 | if (IsAddSubWith1(N0)) { | ||||
15304 | SDValue MulVal = DAG.getNode(ISD::MUL, DL, VT, N1, MulOper); | ||||
15305 | return DAG.getNode(AddSubOpc, DL, VT, N1, MulVal); | ||||
15306 | } | ||||
15307 | |||||
15308 | if (IsAddSubWith1(N1)) { | ||||
15309 | SDValue MulVal = DAG.getNode(ISD::MUL, DL, VT, N0, MulOper); | ||||
15310 | return DAG.getNode(AddSubOpc, DL, VT, N0, MulVal); | ||||
15311 | } | ||||
15312 | |||||
15313 | // The below optimizations require a constant RHS. | ||||
15314 | if (!isa<ConstantSDNode>(N1)) | ||||
15315 | return SDValue(); | ||||
15316 | |||||
15317 | ConstantSDNode *C = cast<ConstantSDNode>(N1); | ||||
15318 | const APInt &ConstValue = C->getAPIntValue(); | ||||
15319 | |||||
15320 | // Allow the scaling to be folded into the `cnt` instruction by preventing | ||||
15321 | // the scaling from being obscured here. This makes pattern matching easier. | ||||
15322 | if (IsSVECntIntrinsic(N0) || | ||||
15323 | (N0->getOpcode() == ISD::TRUNCATE && | ||||
15324 | (IsSVECntIntrinsic(N0->getOperand(0))))) | ||||
15325 | if (ConstValue.sge(1) && ConstValue.sle(16)) | ||||
15326 | return SDValue(); | ||||
15327 | |||||
15328 | // Multiplication of a power of two plus/minus one can be done more | ||||
15329 | // cheaply as a shift+add/sub. For now, this is true unilaterally. If | ||||
15330 | // future CPUs have a cheaper MADD instruction, this may need to be | ||||
15331 | // gated on a subtarget feature. For Cyclone, 32-bit MADD is 4 cycles and | ||||
15332 | // 64-bit is 5 cycles, so this is always a win. | ||||
15333 | // More aggressively, some multiplications N0 * C can be lowered to | ||||
15334 | // shift+add+shift if the constant C = A * B where A = 2^N + 1 and B = 2^M, | ||||
15335 | // e.g. 6=3*2=(2+1)*2, 45=(1+4)*(1+8) | ||||
15336 | // TODO: lower more cases. | ||||
15337 | |||||
15338 | // TrailingZeroes is used to test if the mul can be lowered to | ||||
15339 | // shift+add+shift. | ||||
15340 | unsigned TrailingZeroes = ConstValue.countTrailingZeros(); | ||||
15341 | if (TrailingZeroes) { | ||||
15342 | // Conservatively do not lower to shift+add+shift if the mul might be | ||||
15343 | // folded into smul or umul. | ||||
15344 | if (N0->hasOneUse() && (isSignExtended(N0.getNode(), DAG) || | ||||
15345 | isZeroExtended(N0.getNode(), DAG))) | ||||
15346 | return SDValue(); | ||||
15347 | // Conservatively do not lower to shift+add+shift if the mul might be | ||||
15348 | // folded into madd or msub. | ||||
15349 | if (N->hasOneUse() && (N->use_begin()->getOpcode() == ISD::ADD || | ||||
15350 | N->use_begin()->getOpcode() == ISD::SUB)) | ||||
15351 | return SDValue(); | ||||
15352 | } | ||||
15353 | // Use ShiftedConstValue instead of ConstValue to support both shift+add/sub | ||||
15354 | // and shift+add+shift. | ||||
15355 | APInt ShiftedConstValue = ConstValue.ashr(TrailingZeroes); | ||||
15356 | unsigned ShiftAmt; | ||||
15357 | |||||
15358 | auto Shl = [&](SDValue N0, unsigned N1) { | ||||
15359 | SDValue RHS = DAG.getConstant(N1, DL, MVT::i64); | ||||
15360 | return DAG.getNode(ISD::SHL, DL, VT, N0, RHS); | ||||
15361 | }; | ||||
15362 | auto Add = [&](SDValue N0, SDValue N1) { | ||||
15363 | return DAG.getNode(ISD::ADD, DL, VT, N0, N1); | ||||
15364 | }; | ||||
15365 | auto Sub = [&](SDValue N0, SDValue N1) { | ||||
15366 | return DAG.getNode(ISD::SUB, DL, VT, N0, N1); | ||||
15367 | }; | ||||
15368 | auto Negate = [&](SDValue N) { | ||||
15369 | SDValue Zero = DAG.getConstant(0, DL, VT); | ||||
15370 | return DAG.getNode(ISD::SUB, DL, VT, Zero, N); | ||||
15371 | }; | ||||
15372 | |||||
15373 | // Can the constant C be decomposed into (1+2^M1)*(1+2^N1)? E.g., C = 45 | ||||
15374 | // equals (1+4)*(1+8); we don't decompose it into (1+2)*(16-1), as the | ||||
15375 | // (2^N - 1) form can't be executed in a single instruction. | ||||
15376 | auto isPowPlusPlusConst = [](APInt C, APInt &M, APInt &N) { | ||||
15377 | unsigned BitWidth = C.getBitWidth(); | ||||
15378 | for (unsigned i = 1; i < BitWidth / 2; i++) { | ||||
15379 | APInt Rem; | ||||
15380 | APInt X(BitWidth, (1 << i) + 1); | ||||
15381 | APInt::sdivrem(C, X, N, Rem); | ||||
15382 | APInt NVMinus1 = N - 1; | ||||
15383 | if (Rem == 0 && NVMinus1.isPowerOf2()) { | ||||
15384 | M = X; | ||||
15385 | return true; | ||||
15386 | } | ||||
15387 | } | ||||
15388 | return false; | ||||
15389 | }; | ||||
15390 | |||||
15391 | if (ConstValue.isNonNegative()) { | ||||
15392 | // (mul x, (2^N + 1) * 2^M) => (shl (add (shl x, N), x), M) | ||||
15393 | // (mul x, 2^N - 1) => (sub (shl x, N), x) | ||||
15394 | // (mul x, (2^(N-M) - 1) * 2^M) => (sub (shl x, N), (shl x, M)) | ||||
15395 | // (mul x, (2^M + 1) * (2^N + 1)) | ||||
15396 | // => MV = (add (shl x, M), x); (add (shl MV, N), MV) | ||||
15397 | APInt SCVMinus1 = ShiftedConstValue - 1; | ||||
15398 | APInt SCVPlus1 = ShiftedConstValue + 1; | ||||
15399 | APInt CVPlus1 = ConstValue + 1; | ||||
15400 | APInt CVM, CVN; | ||||
15401 | if (SCVMinus1.isPowerOf2()) { | ||||
15402 | ShiftAmt = SCVMinus1.logBase2(); | ||||
15403 | return Shl(Add(Shl(N0, ShiftAmt), N0), TrailingZeroes); | ||||
15404 | } else if (CVPlus1.isPowerOf2()) { | ||||
15405 | ShiftAmt = CVPlus1.logBase2(); | ||||
15406 | return Sub(Shl(N0, ShiftAmt), N0); | ||||
15407 | } else if (SCVPlus1.isPowerOf2()) { | ||||
15408 | ShiftAmt = SCVPlus1.logBase2() + TrailingZeroes; | ||||
15409 | return Sub(Shl(N0, ShiftAmt), Shl(N0, TrailingZeroes)); | ||||
15410 | } else if (Subtarget->hasLSLFast() && | ||||
15411 | isPowPlusPlusConst(ConstValue, CVM, CVN)) { | ||||
15412 | APInt CVMMinus1 = CVM - 1; | ||||
15413 | APInt CVNMinus1 = CVN - 1; | ||||
15414 | unsigned ShiftM1 = CVMMinus1.logBase2(); | ||||
15415 | unsigned ShiftN1 = CVNMinus1.logBase2(); | ||||
15416 | // LSLFast implies that shifts of up to 3 places are fast | ||||
15417 | if (ShiftM1 <= 3 && ShiftN1 <= 3) { | ||||
15418 | SDValue MVal = Add(Shl(N0, ShiftM1), N0); | ||||
15419 | return Add(Shl(MVal, ShiftN1), MVal); | ||||
15420 | } | ||||
15421 | } | ||||
15422 | } else { | ||||
15423 | // (mul x, -(2^N - 1)) => (sub x, (shl x, N)) | ||||
15424 | // (mul x, -(2^N + 1)) => - (add (shl x, N), x) | ||||
15425 | // (mul x, -(2^(N-M) - 1) * 2^M) => (sub (shl x, M), (shl x, N)) | ||||
15426 | APInt SCVPlus1 = -ShiftedConstValue + 1; | ||||
15427 | APInt CVNegPlus1 = -ConstValue + 1; | ||||
15428 | APInt CVNegMinus1 = -ConstValue - 1; | ||||
15429 | if (CVNegPlus1.isPowerOf2()) { | ||||
15430 | ShiftAmt = CVNegPlus1.logBase2(); | ||||
15431 | return Sub(N0, Shl(N0, ShiftAmt)); | ||||
15432 | } else if (CVNegMinus1.isPowerOf2()) { | ||||
15433 | ShiftAmt = CVNegMinus1.logBase2(); | ||||
15434 | return Negate(Add(Shl(N0, ShiftAmt), N0)); | ||||
15435 | } else if (SCVPlus1.isPowerOf2()) { | ||||
15436 | ShiftAmt = SCVPlus1.logBase2() + TrailingZeroes; | ||||
15437 | return Sub(Shl(N0, TrailingZeroes), Shl(N0, ShiftAmt)); | ||||
15438 | } | ||||
15439 | } | ||||
15440 | |||||
15441 | return SDValue(); | ||||
15442 | } | ||||
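// A minimal sketch of the decompositions above, using plain unsigned
// arithmetic (the value 7 below is illustrative, not from this file):
// 45 = (1+4)*(1+8), so MVal = (x<<2)+x gives x*5 and (MVal<<3)+MVal gives
// x*45 -- two shift-plus-add pairs instead of a MUL plus a constant move.
static_assert((((7u << 2) + 7u) << 3) + ((7u << 2) + 7u) == 7u * 45u,
              "x*45 == (((x<<2)+x)<<3) + ((x<<2)+x), checked for x == 7");
// Likewise 24 = (2^1+1)*2^3, so x*24 becomes (shl (add (shl x, 1), x), 3).
static_assert((((7u << 1) + 7u) << 3) == 7u * 24u,
              "x*24 == ((x<<1)+x)<<3, checked for x == 7");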
15443 | |||||
15444 | static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N, | ||||
15445 | SelectionDAG &DAG) { | ||||
15446 | // Take advantage of vector comparisons producing 0 or -1 in each lane to | ||||
15447 | // optimize away the operation when it's applied to a constant. | ||||
15448 | // | ||||
15449 | // The general transformation is: | ||||
15450 | // UNARYOP(AND(VECTOR_CMP(x,y), constant)) --> | ||||
15451 | // AND(VECTOR_CMP(x,y), constant2) | ||||
15452 | // constant2 = UNARYOP(constant) | ||||
15453 | |||||
15454 | // Early exit if this isn't a vector operation, the operand of the | ||||
15455 | // unary operation isn't a bitwise AND, or if the sizes of the operations | ||||
15456 | // aren't the same. | ||||
15457 | EVT VT = N->getValueType(0); | ||||
15458 | if (!VT.isVector() || N->getOperand(0)->getOpcode() != ISD::AND || | ||||
15459 | N->getOperand(0)->getOperand(0)->getOpcode() != ISD::SETCC || | ||||
15460 | VT.getSizeInBits() != N->getOperand(0)->getValueType(0).getSizeInBits()) | ||||
15461 | return SDValue(); | ||||
15462 | |||||
15463 | // Now check that the other operand of the AND is a constant. We could | ||||
15464 | // make the transformation for non-constant splats as well, but it's unclear | ||||
15465 | // that would be a benefit as it would not eliminate any operations, just | ||||
15466 | // perform one more step in scalar code before moving to the vector unit. | ||||
15467 | if (BuildVectorSDNode *BV = | ||||
15468 | dyn_cast<BuildVectorSDNode>(N->getOperand(0)->getOperand(1))) { | ||||
15469 | // Bail out if the vector isn't a constant. | ||||
15470 | if (!BV->isConstant()) | ||||
15471 | return SDValue(); | ||||
15472 | |||||
15473 | // Everything checks out. Build up the new and improved node. | ||||
15474 | SDLoc DL(N); | ||||
15475 | EVT IntVT = BV->getValueType(0); | ||||
15476 | // Create a new constant of the appropriate type for the transformed | ||||
15477 | // DAG. | ||||
15478 | SDValue SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0)); | ||||
15479 | // The AND node needs bitcasts to/from an integer vector type around it. | ||||
15480 | SDValue MaskConst = DAG.getNode(ISD::BITCAST, DL, IntVT, SourceConst); | ||||
15481 | SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT, | ||||
15482 | N->getOperand(0)->getOperand(0), MaskConst); | ||||
15483 | SDValue Res = DAG.getNode(ISD::BITCAST, DL, VT, NewAnd); | ||||
15484 | return Res; | ||||
15485 | } | ||||
15486 | |||||
15487 | return SDValue(); | ||||
15488 | } | ||||
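// Lane-wise sketch of the transform above (the helper below is illustrative,
// not part of this file): each compare lane is 0 or all-ones, so
// AND(lane, c) is either 0 or c, and the unary op can be precomputed on the
// constant; int-to-fp of 0 is 0.0f, whose bit pattern is all zeros.
constexpr float CmpAndToFp(bool LaneSet, int C) {
  return static_cast<float>((LaneSet ? ~0 : 0) & C); // UNARYOP(AND(lane, c))
}
static_assert(CmpAndToFp(true, 5) == 5.0f && CmpAndToFp(false, 5) == 0.0f,
              "the result is always UNARYOP(c) or UNARYOP(0)");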
15489 | |||||
15490 | static SDValue performIntToFpCombine(SDNode *N, SelectionDAG &DAG, | ||||
15491 | const AArch64Subtarget *Subtarget) { | ||||
15492 | // First try to optimize away the conversion when it's conditionally from | ||||
15493 | // a constant. Vectors only. | ||||
15494 | if (SDValue Res = performVectorCompareAndMaskUnaryOpCombine(N, DAG)) | ||||
15495 | return Res; | ||||
15496 | |||||
15497 | EVT VT = N->getValueType(0); | ||||
15498 | if (VT != MVT::f32 && VT != MVT::f64) | ||||
15499 | return SDValue(); | ||||
15500 | |||||
15501 | // Only optimize when the source and destination types have the same width. | ||||
15502 | if (VT.getSizeInBits() != N->getOperand(0).getValueSizeInBits()) | ||||
15503 | return SDValue(); | ||||
15504 | |||||
15505 | // If the result of an integer load is only used by an integer-to-float | ||||
15506 | // conversion, use an FP load and an AdvSIMD scalar {S|U}CVTF instead. | ||||
15507 | // This eliminates an "integer-to-vector-move" UOP and improves throughput. | ||||
15508 | SDValue N0 = N->getOperand(0); | ||||
15509 | if (Subtarget->hasNEON() && ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() && | ||||
15510 | // Do not change the width of a volatile load. | ||||
15511 | !cast<LoadSDNode>(N0)->isVolatile()) { | ||||
15512 | LoadSDNode *LN0 = cast<LoadSDNode>(N0); | ||||
15513 | SDValue Load = DAG.getLoad(VT, SDLoc(N), LN0->getChain(), LN0->getBasePtr(), | ||||
15514 | LN0->getPointerInfo(), LN0->getAlign(), | ||||
15515 | LN0->getMemOperand()->getFlags()); | ||||
15516 | |||||
15517 | // Make sure successors of the original load stay after it by updating them | ||||
15518 | // to use the new Chain. | ||||
15519 | DAG.ReplaceAllUsesOfValueWith(SDValue(LN0, 1), Load.getValue(1)); | ||||
15520 | |||||
15521 | unsigned Opcode = | ||||
15522 | (N->getOpcode() == ISD::SINT_TO_FP) ? AArch64ISD::SITOF : AArch64ISD::UITOF; | ||||
15523 | return DAG.getNode(Opcode, SDLoc(N), VT, Load); | ||||
15524 | } | ||||
15525 | |||||
15526 | return SDValue(); | ||||
15527 | } | ||||
15528 | |||||
15529 | /// Fold a floating-point multiply by power of two into floating-point to | ||||
15530 | /// fixed-point conversion. | ||||
15531 | static SDValue performFpToIntCombine(SDNode *N, SelectionDAG &DAG, | ||||
15532 | TargetLowering::DAGCombinerInfo &DCI, | ||||
15533 | const AArch64Subtarget *Subtarget) { | ||||
15534 | if (!Subtarget->hasNEON() || Subtarget->forceStreamingCompatibleSVE()) | ||||
15535 | return SDValue(); | ||||
15536 | |||||
15537 | if (!N->getValueType(0).isSimple()) | ||||
15538 | return SDValue(); | ||||
15539 | |||||
15540 | SDValue Op = N->getOperand(0); | ||||
15541 | if (!Op.getValueType().isSimple() || Op.getOpcode() != ISD::FMUL) | ||||
15542 | return SDValue(); | ||||
15543 | |||||
15544 | if (!Op.getValueType().is64BitVector() && !Op.getValueType().is128BitVector()) | ||||
15545 | return SDValue(); | ||||
15546 | |||||
15547 | SDValue ConstVec = Op->getOperand(1); | ||||
15548 | if (!isa<BuildVectorSDNode>(ConstVec)) | ||||
15549 | return SDValue(); | ||||
15550 | |||||
15551 | MVT FloatTy = Op.getSimpleValueType().getVectorElementType(); | ||||
15552 | uint32_t FloatBits = FloatTy.getSizeInBits(); | ||||
15553 | if (FloatBits != 32 && FloatBits != 64 && | ||||
15554 | (FloatBits != 16 || !Subtarget->hasFullFP16())) | ||||
15555 | return SDValue(); | ||||
15556 | |||||
15557 | MVT IntTy = N->getSimpleValueType(0).getVectorElementType(); | ||||
15558 | uint32_t IntBits = IntTy.getSizeInBits(); | ||||
15559 | if (IntBits != 16 && IntBits != 32 && IntBits != 64) | ||||
15560 | return SDValue(); | ||||
15561 | |||||
15562 | // Avoid conversions where iN is larger than the float (e.g., float -> i64). | ||||
15563 | if (IntBits > FloatBits) | ||||
15564 | return SDValue(); | ||||
15565 | |||||
15566 | BitVector UndefElements; | ||||
15567 | BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec); | ||||
15568 | int32_t Bits = IntBits == 64 ? 64 : 32; | ||||
15569 | int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, Bits + 1); | ||||
15570 | if (C == -1 || C == 0 || C > Bits) | ||||
15571 | return SDValue(); | ||||
15572 | |||||
15573 | EVT ResTy = Op.getValueType().changeVectorElementTypeToInteger(); | ||||
15574 | if (!DAG.getTargetLoweringInfo().isTypeLegal(ResTy)) | ||||
15575 | return SDValue(); | ||||
15576 | |||||
15577 | if (N->getOpcode() == ISD::FP_TO_SINT_SAT || | ||||
15578 | N->getOpcode() == ISD::FP_TO_UINT_SAT) { | ||||
15579 | EVT SatVT = cast<VTSDNode>(N->getOperand(1))->getVT(); | ||||
15580 | if (SatVT.getScalarSizeInBits() != IntBits || IntBits != FloatBits) | ||||
15581 | return SDValue(); | ||||
15582 | } | ||||
15583 | |||||
15584 | SDLoc DL(N); | ||||
15585 | bool IsSigned = (N->getOpcode() == ISD::FP_TO_SINT || | ||||
15586 | N->getOpcode() == ISD::FP_TO_SINT_SAT); | ||||
15587 | unsigned IntrinsicOpcode = IsSigned ? Intrinsic::aarch64_neon_vcvtfp2fxs | ||||
15588 | : Intrinsic::aarch64_neon_vcvtfp2fxu; | ||||
15589 | SDValue FixConv = | ||||
15590 | DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, ResTy, | ||||
15591 | DAG.getConstant(IntrinsicOpcode, DL, MVT::i32), | ||||
15592 | Op->getOperand(0), DAG.getConstant(C, DL, MVT::i32)); | ||||
15593 | // We can handle smaller integers by generating an extra trunc. | ||||
15594 | if (IntBits < FloatBits) | ||||
15595 | FixConv = DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), FixConv); | ||||
15596 | |||||
15597 | return FixConv; | ||||
15598 | } | ||||
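// Scalar sketch of the fold above: multiplying by 2^C before a float->int
// conversion is exactly a fixed-point convert with C fractional bits, which
// is what the vcvtfp2fxs/vcvtfp2fxu intrinsics encode (the values below are
// illustrative only).
static_assert(static_cast<int>(2.5f * 16.0f) == 40,
              "fp_to_sint(x * 2^4) == fixed-point convert with 4 frac bits");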
15599 | |||||
15600 | /// Fold a floating-point divide by power of two into fixed-point to | ||||
15601 | /// floating-point conversion. | ||||
15602 | static SDValue performFDivCombine(SDNode *N, SelectionDAG &DAG, | ||||
15603 | TargetLowering::DAGCombinerInfo &DCI, | ||||
15604 | const AArch64Subtarget *Subtarget) { | ||||
15605 | if (!Subtarget->hasNEON()) | ||||
15606 | return SDValue(); | ||||
15607 | |||||
15608 | SDValue Op = N->getOperand(0); | ||||
15609 | unsigned Opc = Op->getOpcode(); | ||||
15610 | if (!Op.getValueType().isVector() || !Op.getValueType().isSimple() || | ||||
15611 | !Op.getOperand(0).getValueType().isSimple() || | ||||
15612 | (Opc != ISD::SINT_TO_FP && Opc != ISD::UINT_TO_FP)) | ||||
15613 | return SDValue(); | ||||
15614 | |||||
15615 | SDValue ConstVec = N->getOperand(1); | ||||
15616 | if (!isa<BuildVectorSDNode>(ConstVec)) | ||||
15617 | return SDValue(); | ||||
15618 | |||||
15619 | MVT IntTy = Op.getOperand(0).getSimpleValueType().getVectorElementType(); | ||||
15620 | int32_t IntBits = IntTy.getSizeInBits(); | ||||
15621 | if (IntBits != 16 && IntBits != 32 && IntBits != 64) | ||||
15622 | return SDValue(); | ||||
15623 | |||||
15624 | MVT FloatTy = N->getSimpleValueType(0).getVectorElementType(); | ||||
15625 | int32_t FloatBits = FloatTy.getSizeInBits(); | ||||
15626 | if (FloatBits != 32 && FloatBits != 64) | ||||
15627 | return SDValue(); | ||||
15628 | |||||
15629 | // Avoid conversions where iN is larger than the float (e.g., i64 -> float). | ||||
15630 | if (IntBits > FloatBits) | ||||
15631 | return SDValue(); | ||||
15632 | |||||
15633 | BitVector UndefElements; | ||||
15634 | BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec); | ||||
15635 | int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, FloatBits + 1); | ||||
15636 | if (C == -1 || C == 0 || C > FloatBits) | ||||
15637 | return SDValue(); | ||||
15638 | |||||
15639 | MVT ResTy; | ||||
15640 | unsigned NumLanes = Op.getValueType().getVectorNumElements(); | ||||
15641 | switch (NumLanes) { | ||||
15642 | default: | ||||
15643 | return SDValue(); | ||||
15644 | case 2: | ||||
15645 | ResTy = FloatBits == 32 ? MVT::v2i32 : MVT::v2i64; | ||||
15646 | break; | ||||
15647 | case 4: | ||||
15648 | ResTy = FloatBits == 32 ? MVT::v4i32 : MVT::v4i64; | ||||
15649 | break; | ||||
15650 | } | ||||
15651 | |||||
15652 | if (ResTy == MVT::v4i64 && DCI.isBeforeLegalizeOps()) | ||||
15653 | return SDValue(); | ||||
15654 | |||||
15655 | SDLoc DL(N); | ||||
15656 | SDValue ConvInput = Op.getOperand(0); | ||||
15657 | bool IsSigned = Opc == ISD::SINT_TO_FP; | ||||
15658 | if (IntBits < FloatBits) | ||||
15659 | ConvInput = DAG.getNode(IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, DL, | ||||
15660 | ResTy, ConvInput); | ||||
15661 | |||||
15662 | unsigned IntrinsicOpcode = IsSigned ? Intrinsic::aarch64_neon_vcvtfxs2fp | ||||
15663 | : Intrinsic::aarch64_neon_vcvtfxu2fp; | ||||
15664 | return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(), | ||||
15665 | DAG.getConstant(IntrinsicOpcode, DL, MVT::i32), ConvInput, | ||||
15666 | DAG.getConstant(C, DL, MVT::i32)); | ||||
15667 | } | ||||
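// Scalar sketch of the dual fold above: dividing by 2^C after an int->float
// conversion matches a fixed-point convert with C fractional bits, as
// encoded by vcvtfxs2fp/vcvtfxu2fp (illustrative values only).
static_assert(static_cast<float>(40) / 16.0f == 2.5f,
              "sint_to_fp(x) / 2^4 == fixed-point convert with 4 frac bits");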
15668 | |||||
15669 | /// An EXTR instruction is made up of two shifts, ORed together. This helper | ||||
15670 | /// searches for and classifies those shifts. | ||||
15671 | static bool findEXTRHalf(SDValue N, SDValue &Src, uint32_t &ShiftAmount, | ||||
15672 | bool &FromHi) { | ||||
15673 | if (N.getOpcode() == ISD::SHL) | ||||
15674 | FromHi = false; | ||||
15675 | else if (N.getOpcode() == ISD::SRL) | ||||
15676 | FromHi = true; | ||||
15677 | else | ||||
15678 | return false; | ||||
15679 | |||||
15680 | if (!isa<ConstantSDNode>(N.getOperand(1))) | ||||
15681 | return false; | ||||
15682 | |||||
15683 | ShiftAmount = N->getConstantOperandVal(1); | ||||
15684 | Src = N->getOperand(0); | ||||
15685 | return true; | ||||
15686 | } | ||||
15687 | |||||
15688 | /// EXTR instruction extracts a contiguous chunk of bits from two existing | ||||
15689 | /// registers viewed as a high/low pair. This function looks for the pattern: | ||||
15690 | /// <tt>(or (shl VAL1, \#N), (srl VAL2, \#RegWidth-N))</tt> and replaces it | ||||
15691 | /// with an EXTR. Can't quite be done in TableGen because the two immediates | ||||
15692 | /// aren't independent. | ||||
15693 | static SDValue tryCombineToEXTR(SDNode *N, | ||||
15694 | TargetLowering::DAGCombinerInfo &DCI) { | ||||
15695 | SelectionDAG &DAG = DCI.DAG; | ||||
15696 | SDLoc DL(N); | ||||
15697 | EVT VT = N->getValueType(0); | ||||
15698 | |||||
15699 | assert(N->getOpcode() == ISD::OR && "Unexpected root"); | ||||
15700 | |||||
15701 | if (VT != MVT::i32 && VT != MVT::i64) | ||||
15702 | return SDValue(); | ||||
15703 | |||||
15704 | SDValue LHS; | ||||
15705 | uint32_t ShiftLHS = 0; | ||||
15706 | bool LHSFromHi = false; | ||||
15707 | if (!findEXTRHalf(N->getOperand(0), LHS, ShiftLHS, LHSFromHi)) | ||||
15708 | return SDValue(); | ||||
15709 | |||||
15710 | SDValue RHS; | ||||
15711 | uint32_t ShiftRHS = 0; | ||||
15712 | bool RHSFromHi = false; | ||||
15713 | if (!findEXTRHalf(N->getOperand(1), RHS, ShiftRHS, RHSFromHi)) | ||||
15714 | return SDValue(); | ||||
15715 | |||||
15716 | // If they're both trying to come from the high part of the register, they're | ||||
15717 | // not really an EXTR. | ||||
15718 | if (LHSFromHi == RHSFromHi) | ||||
15719 | return SDValue(); | ||||
15720 | |||||
15721 | if (ShiftLHS + ShiftRHS != VT.getSizeInBits()) | ||||
15722 | return SDValue(); | ||||
15723 | |||||
15724 | if (LHSFromHi) { | ||||
15725 | std::swap(LHS, RHS); | ||||
15726 | std::swap(ShiftLHS, ShiftRHS); | ||||
15727 | } | ||||
15728 | |||||
15729 | return DAG.getNode(AArch64ISD::EXTR, DL, VT, LHS, RHS, | ||||
15730 | DAG.getConstant(ShiftRHS, DL, MVT::i64)); | ||||
15731 | } | ||||
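// A minimal sketch of the matched pattern, assuming 32-bit operands (the
// helper name is illustrative): the two shift amounts must sum to the
// register width, so the OR selects a contiguous 32-bit window over the
// (Hi:Lo) pair -- one EXTR, or a ROR when both sources are the same.
constexpr unsigned ExtrWindow(unsigned Hi, unsigned Lo, unsigned N) {
  return (Hi << N) | (Lo >> (32 - N)); // valid for 0 < N < 32
}
static_assert(ExtrWindow(0x12345678u, 0x9abcdef0u, 8) == 0x3456789au,
              "EXTR extracts a contiguous chunk across the register pair");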
15732 | |||||
15733 | static SDValue tryCombineToBSL(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, | ||||
15734 | const AArch64TargetLowering &TLI) { | ||||
15735 | EVT VT = N->getValueType(0); | ||||
15736 | SelectionDAG &DAG = DCI.DAG; | ||||
15737 | SDLoc DL(N); | ||||
15738 | |||||
15739 | if (!VT.isVector()) | ||||
15740 | return SDValue(); | ||||
15741 | |||||
15742 | // The combining code currently only works for NEON vectors. In particular, | ||||
15743 | // it does not work for SVE when dealing with vectors wider than 128 bits. | ||||
15744 | // It also doesn't work for streaming mode because it would generate BSL | ||||
15745 | // instructions that are invalid in streaming mode. | ||||
15746 | if (TLI.useSVEForFixedLengthVectorVT( | ||||
15747 | VT, | ||||
15748 | DAG.getSubtarget<AArch64Subtarget>().forceStreamingCompatibleSVE())) | ||||
15749 | return SDValue(); | ||||
15750 | |||||
15751 | SDValue N0 = N->getOperand(0); | ||||
15752 | if (N0.getOpcode() != ISD::AND) | ||||
15753 | return SDValue(); | ||||
15754 | |||||
15755 | SDValue N1 = N->getOperand(1); | ||||
15756 | if (N1.getOpcode() != ISD::AND) | ||||
15757 | return SDValue(); | ||||
15758 | |||||
15759 | // InstCombine does (not (neg a)) => (add a -1). | ||||
15760 | // Try: (or (and (neg a) b) (and (add a -1) c)) => (bsl (neg a) b c) | ||||
15761 | // Loop over all combinations of AND operands. | ||||
15762 | for (int i = 1; i >= 0; --i) { | ||||
15763 | for (int j = 1; j >= 0; --j) { | ||||
15764 | SDValue O0 = N0->getOperand(i); | ||||
15765 | SDValue O1 = N1->getOperand(j); | ||||
15766 | SDValue Sub, Add, SubSibling, AddSibling; | ||||
15767 | |||||
15768 | // Find a SUB and an ADD operand, one from each AND. | ||||
15769 | if (O0.getOpcode() == ISD::SUB && O1.getOpcode() == ISD::ADD) { | ||||
15770 | Sub = O0; | ||||
15771 | Add = O1; | ||||
15772 | SubSibling = N0->getOperand(1 - i); | ||||
15773 | AddSibling = N1->getOperand(1 - j); | ||||
15774 | } else if (O0.getOpcode() == ISD::ADD && O1.getOpcode() == ISD::SUB) { | ||||
15775 | Add = O0; | ||||
15776 | Sub = O1; | ||||
15777 | AddSibling = N0->getOperand(1 - i); | ||||
15778 | SubSibling = N1->getOperand(1 - j); | ||||
15779 | } else | ||||
15780 | continue; | ||||
15781 | |||||
15782 | if (!ISD::isBuildVectorAllZeros(Sub.getOperand(0).getNode())) | ||||
15783 | continue; | ||||
15784 | |||||
15785 | // The all-ones constant is always the right-hand operand of the ADD. | ||||
15786 | if (!ISD::isBuildVectorAllOnes(Add.getOperand(1).getNode())) | ||||
15787 | continue; | ||||
15788 | |||||
15789 | if (Sub.getOperand(1) != Add.getOperand(0)) | ||||
15790 | continue; | ||||
15791 | |||||
15792 | return DAG.getNode(AArch64ISD::BSP, DL, VT, Sub, SubSibling, AddSibling); | ||||
15793 | } | ||||
15794 | } | ||||
15795 | |||||
15796 | // (or (and a b) (and (not a) c)) => (bsl a b c) | ||||
15797 | // We only have to look for constant vectors here since the general, variable | ||||
15798 | // case can be handled in TableGen. | ||||
15799 | unsigned Bits = VT.getScalarSizeInBits(); | ||||
15800 | uint64_t BitMask = Bits == 64 ? -1ULL : ((1ULL << Bits) - 1); | ||||
15801 | for (int i = 1; i >= 0; --i) | ||||
15802 | for (int j = 1; j >= 0; --j) { | ||||
15803 | BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(i)); | ||||
15804 | BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(j)); | ||||
15805 | if (!BVN0 || !BVN1) | ||||
15806 | continue; | ||||
15807 | |||||
15808 | bool FoundMatch = true; | ||||
15809 | for (unsigned k = 0; k < VT.getVectorNumElements(); ++k) { | ||||
15810 | ConstantSDNode *CN0 = dyn_cast<ConstantSDNode>(BVN0->getOperand(k)); | ||||
15811 | ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(BVN1->getOperand(k)); | ||||
15812 | if (!CN0 || !CN1 || | ||||
15813 | CN0->getZExtValue() != (BitMask & ~CN1->getZExtValue())) { | ||||
15814 | FoundMatch = false; | ||||
15815 | break; | ||||
15816 | } | ||||
15817 | } | ||||
15818 | |||||
15819 | if (FoundMatch) | ||||
15820 | return DAG.getNode(AArch64ISD::BSP, DL, VT, SDValue(BVN0, 0), | ||||
15821 | N0->getOperand(1 - i), N1->getOperand(1 - j)); | ||||
15822 | } | ||||
15823 | |||||
15824 | return SDValue(); | ||||
15825 | } | ||||
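// Bitwise sketch of BSL/BSP semantics the combine relies on (illustrative
// helper, not part of this file): lanes where the mask is set come from the
// first operand, the rest from the second.
constexpr unsigned Bsl(unsigned Mask, unsigned B, unsigned C) {
  return (B & Mask) | (C & ~Mask);
}
static_assert(Bsl(0x00ff00ffu, 0x11111111u, 0x22222222u) == 0x22112211u,
              "(or (and b mask) (and c (not mask))) == bsl(mask, b, c)");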
15826 | |||||
15827 | // Given a tree of and/or(csel(0, 1, cc0), csel(0, 1, cc1)), we may be able to | ||||
15828 | // convert to csel(ccmp(.., cc0)), depending on cc1: | ||||
15829 | |||||
15830 | // (AND (CSET cc0 cmp0) (CSET cc1 (CMP x1 y1))) | ||||
15831 | // => | ||||
15832 | // (CSET cc1 (CCMP x1 y1 !cc1 cc0 cmp0)) | ||||
15833 | // | ||||
15834 | // (OR (CSET cc0 cmp0) (CSET cc1 (CMP x1 y1))) | ||||
15835 | // => | ||||
15836 | // (CSET cc1 (CCMP x1 y1 cc1 !cc0 cmp0)) | ||||
15837 | static SDValue performANDORCSELCombine(SDNode *N, SelectionDAG &DAG) { | ||||
15838 | EVT VT = N->getValueType(0); | ||||
15839 | SDValue CSel0 = N->getOperand(0); | ||||
15840 | SDValue CSel1 = N->getOperand(1); | ||||
15841 | |||||
15842 | if (CSel0.getOpcode() != AArch64ISD::CSEL || | ||||
15843 | CSel1.getOpcode() != AArch64ISD::CSEL) | ||||
15844 | return SDValue(); | ||||
15845 | |||||
15846 | if (!CSel0->hasOneUse() || !CSel1->hasOneUse()) | ||||
15847 | return SDValue(); | ||||
15848 | |||||
15849 | if (!isNullConstant(CSel0.getOperand(0)) || | ||||
15850 | !isOneConstant(CSel0.getOperand(1)) || | ||||
15851 | !isNullConstant(CSel1.getOperand(0)) || | ||||
15852 | !isOneConstant(CSel1.getOperand(1))) | ||||
15853 | return SDValue(); | ||||
15854 | |||||
15855 | SDValue Cmp0 = CSel0.getOperand(3); | ||||
15856 | SDValue Cmp1 = CSel1.getOperand(3); | ||||
15857 | AArch64CC::CondCode CC0 = (AArch64CC::CondCode)CSel0.getConstantOperandVal(2); | ||||
15858 | AArch64CC::CondCode CC1 = (AArch64CC::CondCode)CSel1.getConstantOperandVal(2); | ||||
15859 | if (!Cmp0->hasOneUse() || !Cmp1->hasOneUse()) | ||||
15860 | return SDValue(); | ||||
15861 | if (Cmp1.getOpcode() != AArch64ISD::SUBS && | ||||
15862 | Cmp0.getOpcode() == AArch64ISD::SUBS) { | ||||
15863 | std::swap(Cmp0, Cmp1); | ||||
15864 | std::swap(CC0, CC1); | ||||
15865 | } | ||||
15866 | |||||
15867 | if (Cmp1.getOpcode() != AArch64ISD::SUBS) | ||||
15868 | return SDValue(); | ||||
15869 | |||||
15870 | SDLoc DL(N); | ||||
15871 | SDValue CCmp, Condition; | ||||
15872 | unsigned NZCV; | ||||
15873 | |||||
15874 | if (N->getOpcode() == ISD::AND) { | ||||
15875 | AArch64CC::CondCode InvCC0 = AArch64CC::getInvertedCondCode(CC0); | ||||
15876 | Condition = DAG.getConstant(InvCC0, DL, MVT_CC); | ||||
15877 | NZCV = AArch64CC::getNZCVToSatisfyCondCode(CC1); | ||||
15878 | } else { | ||||
15879 | AArch64CC::CondCode InvCC1 = AArch64CC::getInvertedCondCode(CC1); | ||||
15880 | Condition = DAG.getConstant(CC0, DL, MVT_CC); | ||||
15881 | NZCV = AArch64CC::getNZCVToSatisfyCondCode(InvCC1); | ||||
15882 | } | ||||
15883 | |||||
15884 | SDValue NZCVOp = DAG.getConstant(NZCV, DL, MVT::i32); | ||||
15885 | |||||
15886 | auto *Op1 = dyn_cast<ConstantSDNode>(Cmp1.getOperand(1)); | ||||
15887 | if (Op1 && Op1->getAPIntValue().isNegative() && | ||||
15888 | Op1->getAPIntValue().sgt(-32)) { | ||||
15889 | // CCMP accepts a constant in the range [0, 31], so | ||||
15890 | // if Op1 is a constant in the range [-31, -1], we | ||||
15891 | // can select CCMN instead to avoid the extra mov. | ||||
15892 | SDValue AbsOp1 = | ||||
15893 | DAG.getConstant(Op1->getAPIntValue().abs(), DL, Op1->getValueType(0)); | ||||
15894 | CCmp = DAG.getNode(AArch64ISD::CCMN, DL, MVT_CC, Cmp1.getOperand(0), AbsOp1, | ||||
15895 | NZCVOp, Condition, Cmp0); | ||||
15896 | } else { | ||||
15897 | CCmp = DAG.getNode(AArch64ISD::CCMP, DL, MVT_CC, Cmp1.getOperand(0), | ||||
15898 | Cmp1.getOperand(1), NZCVOp, Condition, Cmp0); | ||||
15899 | } | ||||
15900 | return DAG.getNode(AArch64ISD::CSEL, DL, VT, CSel0.getOperand(0), | ||||
15901 | CSel0.getOperand(1), DAG.getConstant(CC1, DL, MVT::i32), | ||||
15902 | CCmp); | ||||
15903 | } | ||||
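// An illustrative source-level shape for the combine above; the asm is
// roughly what the CCMP fold produces (exact codegen depends on the
// surrounding IR):
//   cmp  w0, #0
//   ccmp w1, #3, #4, eq  // compare b only if a == 0; else NZCV=0b0100, GT fails
//   cset w0, gt
inline int BothConds(int A, int B) { return A == 0 && B > 3; }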
15904 | |||||
15905 | static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, | ||||
15906 | const AArch64Subtarget *Subtarget, | ||||
15907 | const AArch64TargetLowering &TLI) { | ||||
15908 | SelectionDAG &DAG = DCI.DAG; | ||||
15909 | EVT VT = N->getValueType(0); | ||||
15910 | |||||
15911 | if (SDValue R = performANDORCSELCombine(N, DAG)) | ||||
15912 | return R; | ||||
15913 | |||||
15914 | if (!DAG.getTargetLoweringInfo().isTypeLegal(VT)) | ||||
15915 | return SDValue(); | ||||
15916 | |||||
15917 | // Attempt to form an EXTR from (or (shl VAL1, #N), (srl VAL2, #RegWidth-N)) | ||||
15918 | if (SDValue Res = tryCombineToEXTR(N, DCI)) | ||||
15919 | return Res; | ||||
15920 | |||||
15921 | if (SDValue Res = tryCombineToBSL(N, DCI, TLI)) | ||||
15922 | return Res; | ||||
15923 | |||||
15924 | return SDValue(); | ||||
15925 | } | ||||
15926 | |||||
15927 | static bool isConstantSplatVectorMaskForType(SDNode *N, EVT MemVT) { | ||||
15928 | if (!MemVT.getVectorElementType().isSimple()) | ||||
15929 | return false; | ||||
15930 | |||||
15931 | uint64_t MaskForTy = 0ull; | ||||
15932 | switch (MemVT.getVectorElementType().getSimpleVT().SimpleTy) { | ||||
15933 | case MVT::i8: | ||||
15934 | MaskForTy = 0xffull; | ||||
15935 | break; | ||||
15936 | case MVT::i16: | ||||
15937 | MaskForTy = 0xffffull; | ||||
15938 | break; | ||||
15939 | case MVT::i32: | ||||
15940 | MaskForTy = 0xffffffffull; | ||||
15941 | break; | ||||
15942 | default: | ||||
15943 | return false; | ||||
15944 | break; | ||||
15945 | } | ||||
15946 | |||||
15947 | if (N->getOpcode() == AArch64ISD::DUP || N->getOpcode() == ISD::SPLAT_VECTOR) | ||||
15948 | if (auto *Op0 = dyn_cast<ConstantSDNode>(N->getOperand(0))) | ||||
15949 | return Op0->getAPIntValue().getLimitedValue() == MaskForTy; | ||||
15950 | |||||
15951 | return false; | ||||
15952 | } | ||||
15953 | |||||
15954 | static SDValue performSVEAndCombine(SDNode *N, | ||||
15955 | TargetLowering::DAGCombinerInfo &DCI) { | ||||
15956 | if (DCI.isBeforeLegalizeOps()) | ||||
15957 | return SDValue(); | ||||
15958 | |||||
15959 | SelectionDAG &DAG = DCI.DAG; | ||||
15960 | SDValue Src = N->getOperand(0); | ||||
15961 | unsigned Opc = Src->getOpcode(); | ||||
15962 | |||||
15963 | // Zero/any extend of an unsigned unpack | ||||
15964 | if (Opc == AArch64ISD::UUNPKHI || Opc == AArch64ISD::UUNPKLO) { | ||||
15965 | SDValue UnpkOp = Src->getOperand(0); | ||||
15966 | SDValue Dup = N->getOperand(1); | ||||
15967 | |||||
15968 | if (Dup.getOpcode() != ISD::SPLAT_VECTOR) | ||||
15969 | return SDValue(); | ||||
15970 | |||||
15971 | SDLoc DL(N); | ||||
15972 | ConstantSDNode *C = dyn_cast<ConstantSDNode>(Dup->getOperand(0)); | ||||
15973 | if (!C) | ||||
15974 | return SDValue(); | ||||
15975 | |||||
15976 | uint64_t ExtVal = C->getZExtValue(); | ||||
15977 | |||||
15978 | // If the mask is fully covered by the unpack, we don't need to push | ||||
15979 | // a new AND onto the operand | ||||
15980 | EVT EltTy = UnpkOp->getValueType(0).getVectorElementType(); | ||||
15981 | if ((ExtVal == 0xFF && EltTy == MVT::i8) || | ||||
15982 | (ExtVal == 0xFFFF && EltTy == MVT::i16) || | ||||
15983 | (ExtVal == 0xFFFFFFFF && EltTy == MVT::i32)) | ||||
15984 | return Src; | ||||
15985 | |||||
15986 | // Truncate to prevent a DUP with an overly wide constant. | ||||
15987 | APInt Mask = C->getAPIntValue().trunc(EltTy.getSizeInBits()); | ||||
15988 | |||||
15989 | // Otherwise, make sure we propagate the AND to the operand | ||||
15990 | // of the unpack | ||||
15991 | Dup = DAG.getNode(ISD::SPLAT_VECTOR, DL, UnpkOp->getValueType(0), | ||||
15992 | DAG.getConstant(Mask.zextOrTrunc(32), DL, MVT::i32)); | ||||
15993 | |||||
15994 | SDValue And = DAG.getNode(ISD::AND, DL, | ||||
15995 | UnpkOp->getValueType(0), UnpkOp, Dup); | ||||
15996 | |||||
15997 | return DAG.getNode(Opc, DL, N->getValueType(0), And); | ||||
15998 | } | ||||
15999 | |||||
16000 | if (!EnableCombineMGatherIntrinsics) | ||||
16001 | return SDValue(); | ||||
16002 | |||||
16003 | SDValue Mask = N->getOperand(1); | ||||
16004 | |||||
16005 | if (!Src.hasOneUse()) | ||||
16006 | return SDValue(); | ||||
16007 | |||||
16008 | EVT MemVT; | ||||
16009 | |||||
16010 | // SVE load instructions perform an implicit zero-extend, which makes them | ||||
16011 | // perfect candidates for combining. | ||||
16012 | switch (Opc) { | ||||
16013 | case AArch64ISD::LD1_MERGE_ZERO: | ||||
16014 | case AArch64ISD::LDNF1_MERGE_ZERO: | ||||
16015 | case AArch64ISD::LDFF1_MERGE_ZERO: | ||||
16016 | MemVT = cast<VTSDNode>(Src->getOperand(3))->getVT(); | ||||
16017 | break; | ||||
16018 | case AArch64ISD::GLD1_MERGE_ZERO: | ||||
16019 | case AArch64ISD::GLD1_SCALED_MERGE_ZERO: | ||||
16020 | case AArch64ISD::GLD1_SXTW_MERGE_ZERO: | ||||
16021 | case AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO: | ||||
16022 | case AArch64ISD::GLD1_UXTW_MERGE_ZERO: | ||||
16023 | case AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO: | ||||
16024 | case AArch64ISD::GLD1_IMM_MERGE_ZERO: | ||||
16025 | case AArch64ISD::GLDFF1_MERGE_ZERO: | ||||
16026 | case AArch64ISD::GLDFF1_SCALED_MERGE_ZERO: | ||||
16027 | case AArch64ISD::GLDFF1_SXTW_MERGE_ZERO: | ||||
16028 | case AArch64ISD::GLDFF1_SXTW_SCALED_MERGE_ZERO: | ||||
16029 | case AArch64ISD::GLDFF1_UXTW_MERGE_ZERO: | ||||
16030 | case AArch64ISD::GLDFF1_UXTW_SCALED_MERGE_ZERO: | ||||
16031 | case AArch64ISD::GLDFF1_IMM_MERGE_ZERO: | ||||
16032 | case AArch64ISD::GLDNT1_MERGE_ZERO: | ||||
16033 | MemVT = cast<VTSDNode>(Src->getOperand(4))->getVT(); | ||||
16034 | break; | ||||
16035 | default: | ||||
16036 | return SDValue(); | ||||
16037 | } | ||||
16038 | |||||
16039 | if (isConstantSplatVectorMaskForType(Mask.getNode(), MemVT)) | ||||
16040 | return Src; | ||||
16041 | |||||
16042 | return SDValue(); | ||||
16043 | } | ||||
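// Width sketch of the unpack case above: UUNPKLO/UUNPKHI already zero-extend
// each element, so ANDing the widened lanes with the source element type's
// full mask (e.g. 0xFF for i8 data) changes nothing and the AND is dropped.
static_assert((0x00abu & 0xffu) == 0xabu,
              "zero-extended i8 lanes are unchanged by AND with 0xFF");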
16044 | |||||
16045 | static SDValue performANDCombine(SDNode *N, | ||||
16046 | TargetLowering::DAGCombinerInfo &DCI) { | ||||
16047 | SelectionDAG &DAG = DCI.DAG; | ||||
16048 | SDValue LHS = N->getOperand(0); | ||||
16049 | SDValue RHS = N->getOperand(1); | ||||
16050 | EVT VT = N->getValueType(0); | ||||
16051 | |||||
16052 | if (SDValue R = performANDORCSELCombine(N, DAG)) | ||||
16053 | return R; | ||||
16054 | |||||
16055 | if (!DAG.getTargetLoweringInfo().isTypeLegal(VT)) | ||||
16056 | return SDValue(); | ||||
16057 | |||||
16058 | if (VT.isScalableVector()) | ||||
16059 | return performSVEAndCombine(N, DCI); | ||||
16060 | |||||
16061 | // The combining code below works only for NEON vectors. In particular, it | ||||
16062 | // does not work for SVE when dealing with vectors wider than 128 bits. | ||||
16063 | if (!VT.is64BitVector() && !VT.is128BitVector()) | ||||
16064 | return SDValue(); | ||||
16065 | |||||
16066 | BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(RHS.getNode()); | ||||
16067 | if (!BVN) | ||||
16068 | return SDValue(); | ||||
16069 | |||||
16070 | // AND does not accept an immediate, so check if we can use a BIC immediate | ||||
16071 | // instruction instead. We do this here instead of using a (and x, (mvni imm)) | ||||
16072 | // pattern in isel, because some immediates may be lowered to the preferred | ||||
16073 | // (and x, (movi imm)) form, even though an mvni representation also exists. | ||||
16074 | APInt DefBits(VT.getSizeInBits(), 0); | ||||
16075 | APInt UndefBits(VT.getSizeInBits(), 0); | ||||
16076 | if (resolveBuildVector(BVN, DefBits, UndefBits)) { | ||||
16077 | SDValue NewOp; | ||||
16078 | |||||
16079 | DefBits = ~DefBits; | ||||
16080 | if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::BICi, SDValue(N, 0), DAG, | ||||
16081 | DefBits, &LHS)) || | ||||
16082 | (NewOp = tryAdvSIMDModImm16(AArch64ISD::BICi, SDValue(N, 0), DAG, | ||||
16083 | DefBits, &LHS))) | ||||
16084 | return NewOp; | ||||
16085 | |||||
16086 | UndefBits = ~UndefBits; | ||||
16087 | if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::BICi, SDValue(N, 0), DAG, | ||||
16088 | UndefBits, &LHS)) || | ||||
16089 | (NewOp = tryAdvSIMDModImm16(AArch64ISD::BICi, SDValue(N, 0), DAG, | ||||
16090 | UndefBits, &LHS))) | ||||
16091 | return NewOp; | ||||
16092 | } | ||||
16093 | |||||
16094 | return SDValue(); | ||||
16095 | } | ||||
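// Sketch of why the BIC form helps: vector AND has no immediate form, but
// BIC (vector, immediate) encodes the complemented mask directly, e.g.
// clearing the top byte of each 32-bit lane can be a single
// "bic v0.4s, #0xff, lsl #24" instead of an AND with a materialized splat.
static_assert((0xdeadbeefu & ~(0xffu << 24)) == 0x00adbeefu,
              "and x, 0x00ffffff == bic x, (0xff << 24)");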
16096 | |||||
16097 | static bool hasPairwiseAdd(unsigned Opcode, EVT VT, bool FullFP16) { | ||||
16098 | switch (Opcode) { | ||||
16099 | case ISD::STRICT_FADD: | ||||
16100 | case ISD::FADD: | ||||
16101 | return (FullFP16 && VT == MVT::f16) || VT == MVT::f32 || VT == MVT::f64; | ||||
16102 | case ISD::ADD: | ||||
16103 | return VT == MVT::i64; | ||||
16104 | default: | ||||
16105 | return false; | ||||
16106 | } | ||||
16107 | } | ||||
16108 | |||||
16109 | static SDValue getPTest(SelectionDAG &DAG, EVT VT, SDValue Pg, SDValue Op, | ||||
16110 | AArch64CC::CondCode Cond); | ||||
16111 | |||||
16112 | static bool isPredicateCCSettingOp(SDValue N) { | ||||
16113 | if ((N.getOpcode() == ISD::SETCC) || | ||||
16114 | (N.getOpcode() == ISD::INTRINSIC_WO_CHAIN && | ||||
16115 | (N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilege || | ||||
16116 | N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilegt || | ||||
16117 | N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilehi || | ||||
16118 | N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilehs || | ||||
16119 | N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilele || | ||||
16120 | N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilelo || | ||||
16121 | N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilels || | ||||
16122 | N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilelt || | ||||
16123 | // get_active_lane_mask is lowered to a whilelo instruction. | ||||
16124 | N.getConstantOperandVal(0) == Intrinsic::get_active_lane_mask))) | ||||
16125 | return true; | ||||
16126 | |||||
16127 | return false; | ||||
16128 | } | ||||
16129 | |||||
16130 | // Materialize : i1 = extract_vector_elt t37, Constant:i64<0> | ||||
16131 | // ... into: "ptrue p, all" + PTEST | ||||
16132 | static SDValue | ||||
16133 | performFirstTrueTestVectorCombine(SDNode *N, | ||||
16134 | TargetLowering::DAGCombinerInfo &DCI, | ||||
16135 | const AArch64Subtarget *Subtarget) { | ||||
16136 | assert(N->getOpcode() == ISD::EXTRACT_VECTOR_ELT); | ||||
16137 | // Make sure PTEST can be legalised with illegal types. | ||||
16138 | if (!Subtarget->hasSVE() || DCI.isBeforeLegalize()) | ||||
16139 | return SDValue(); | ||||
16140 | |||||
16141 | SDValue N0 = N->getOperand(0); | ||||
16142 | EVT VT = N0.getValueType(); | ||||
16143 | |||||
16144 | if (!VT.isScalableVector() || VT.getVectorElementType() != MVT::i1 || | ||||
16145 | !isNullConstant(N->getOperand(1))) | ||||
16146 | return SDValue(); | ||||
16147 | |||||
16148 | // Restrict the DAG combine to only cases where we're extracting from a | ||||
16149 | // flag-setting operation. | ||||
16150 | if (!isPredicateCCSettingOp(N0)) | ||||
16151 | return SDValue(); | ||||
16152 | |||||
16153 | // Extracts of lane 0 for SVE can be expressed as PTEST(Op, FIRST) ? 1 : 0 | ||||
16154 | SelectionDAG &DAG = DCI.DAG; | ||||
16155 | SDValue Pg = getPTrue(DAG, SDLoc(N), VT, AArch64SVEPredPattern::all); | ||||
16156 | return getPTest(DAG, N->getValueType(0), Pg, N0, AArch64CC::FIRST_ACTIVE); | ||||
16157 | } | ||||
16158 | |||||
16159 | // Materialize : Idx = (add (mul vscale, NumEls), -1) | ||||
16160 | // i1 = extract_vector_elt t37, Constant:i64<Idx> | ||||
16161 | // ... into: "ptrue p, all" + PTEST | ||||
16162 | static SDValue | ||||
16163 | performLastTrueTestVectorCombine(SDNode *N, | ||||
16164 | TargetLowering::DAGCombinerInfo &DCI, | ||||
16165 | const AArch64Subtarget *Subtarget) { | ||||
16166 | assert(N->getOpcode() == ISD::EXTRACT_VECTOR_ELT); | ||||
16167 | // Make sure PTEST can be legalised with illegal types. | ||||
16168 | if (!Subtarget->hasSVE() || DCI.isBeforeLegalize()) | ||||
16169 | return SDValue(); | ||||
16170 | |||||
16171 | SDValue N0 = N->getOperand(0); | ||||
16172 | EVT OpVT = N0.getValueType(); | ||||
16173 | |||||
16174 | if (!OpVT.isScalableVector() || OpVT.getVectorElementType() != MVT::i1) | ||||
16175 | return SDValue(); | ||||
16176 | |||||
16177 | // Idx == (add (mul vscale, NumEls), -1) | ||||
16178 | SDValue Idx = N->getOperand(1); | ||||
16179 | if (Idx.getOpcode() != ISD::ADD || !isAllOnesConstant(Idx.getOperand(1))) | ||||
16180 | return SDValue(); | ||||
16181 | |||||
16182 | SDValue VS = Idx.getOperand(0); | ||||
16183 | if (VS.getOpcode() != ISD::VSCALE) | ||||
16184 | return SDValue(); | ||||
16185 | |||||
16186 | unsigned NumEls = OpVT.getVectorElementCount().getKnownMinValue(); | ||||
16187 | if (VS.getConstantOperandVal(0) != NumEls) | ||||
16188 | return SDValue(); | ||||
16189 | |||||
16190 | // Extracts of lane EC-1 for SVE can be expressed as PTEST(Op, LAST) ? 1 : 0 | ||||
16191 | SelectionDAG &DAG = DCI.DAG; | ||||
16192 | SDValue Pg = getPTrue(DAG, SDLoc(N), OpVT, AArch64SVEPredPattern::all); | ||||
16193 | return getPTest(DAG, N->getValueType(0), Pg, N0, AArch64CC::LAST_ACTIVE); | ||||
16194 | } | ||||
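// Illustrative IR/asm shape for the two PTEST combines above (hedged; the
// exact output depends on the surrounding code):
//   %m = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 %i, i64 %n)
//   %b = extractelement <vscale x 4 x i1> %m, i64 0
// lowers to a flag test rather than a predicate-lane move:
//   whilelo p0.s, x0, x1
//   cset    w0, mi       // FIRST_ACTIVE: the first lane of p0 is set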
16195 | |||||
16196 | static SDValue | ||||
16197 | performExtractVectorEltCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, | ||||
16198 | const AArch64Subtarget *Subtarget) { | ||||
16199 | assert(N->getOpcode() == ISD::EXTRACT_VECTOR_ELT); | ||||
16200 | if (SDValue Res = performFirstTrueTestVectorCombine(N, DCI, Subtarget)) | ||||
16201 | return Res; | ||||
16202 | if (SDValue Res = performLastTrueTestVectorCombine(N, DCI, Subtarget)) | ||||
16203 | return Res; | ||||
16204 | |||||
16205 | SelectionDAG &DAG = DCI.DAG; | ||||
16206 | SDValue N0 = N->getOperand(0), N1 = N->getOperand(1); | ||||
16207 | ConstantSDNode *ConstantN1 = dyn_cast<ConstantSDNode>(N1); | ||||
16208 | |||||
16209 | EVT VT = N->getValueType(0); | ||||
16210 | const bool FullFP16 = DAG.getSubtarget<AArch64Subtarget>().hasFullFP16(); | ||||
16211 | bool IsStrict = N0->isStrictFPOpcode(); | ||||
16212 | |||||
16213 | // extract(dup x) -> x | ||||
16214 | if (N0.getOpcode() == AArch64ISD::DUP) | ||||
16215 | return DAG.getZExtOrTrunc(N0.getOperand(0), SDLoc(N), VT); | ||||
16216 | |||||
16217 | // Rewrite for pairwise fadd pattern | ||||
16218 | // (f32 (extract_vector_elt | ||||
16219 | // (fadd (vXf32 Other) | ||||
16220 | // (vector_shuffle (vXf32 Other) undef <1,X,...> )) 0)) | ||||
16221 | // -> | ||||
16222 | // (f32 (fadd (extract_vector_elt (vXf32 Other) 0) | ||||
16223 | // (extract_vector_elt (vXf32 Other) 1)) | ||||
16224 | // For strict_fadd we need to make sure the old strict_fadd can be deleted, so | ||||
16225 | // we can only do this when it's used only by the extract_vector_elt. | ||||
16226 | if (ConstantN1 && ConstantN1->getZExtValue() == 0 && | ||||
16227 | hasPairwiseAdd(N0->getOpcode(), VT, FullFP16) && | ||||
16228 | (!IsStrict || N0.hasOneUse())) { | ||||
16229 | SDLoc DL(N0); | ||||
16230 | SDValue N00 = N0->getOperand(IsStrict ? 1 : 0); | ||||
16231 | SDValue N01 = N0->getOperand(IsStrict ? 2 : 1); | ||||
16232 | |||||
16233 | ShuffleVectorSDNode *Shuffle = dyn_cast<ShuffleVectorSDNode>(N01); | ||||
16234 | SDValue Other = N00; | ||||
16235 | |||||
16236 | // And handle the commutative case. | ||||
16237 | if (!Shuffle) { | ||||
16238 | Shuffle = dyn_cast<ShuffleVectorSDNode>(N00); | ||||
16239 | Other = N01; | ||||
16240 | } | ||||
16241 | |||||
16242 | if (Shuffle && Shuffle->getMaskElt(0) == 1 && | ||||
16243 | Other == Shuffle->getOperand(0)) { | ||||
16244 | SDValue Extract1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Other, | ||||
16245 | DAG.getConstant(0, DL, MVT::i64)); | ||||
16246 | SDValue Extract2 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Other, | ||||
16247 | DAG.getConstant(1, DL, MVT::i64)); | ||||
16248 | if (!IsStrict) | ||||
16249 | return DAG.getNode(N0->getOpcode(), DL, VT, Extract1, Extract2); | ||||
16250 | |||||
16251 | // For strict_fadd we need uses of the final extract_vector to be replaced | ||||
16252 | // with the strict_fadd, but we also need uses of the chain output of the | ||||
16253 | // original strict_fadd to use the chain output of the new strict_fadd as | ||||
16254 | // otherwise it may not be deleted. | ||||
16255 | SDValue Ret = DAG.getNode(N0->getOpcode(), DL, | ||||
16256 | {VT, MVT::Other}, | ||||
16257 | {N0->getOperand(0), Extract1, Extract2}); | ||||
16258 | DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Ret); | ||||
16259 | DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Ret.getValue(1)); | ||||
16260 | return SDValue(N, 0); | ||||
16261 | } | ||||
16262 | } | ||||
16263 | |||||
16264 | return SDValue(); | ||||
16265 | } | ||||
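// Scalar sketch of the pairwise rewrite above: lane 0 of
// fadd(v, shuffle(v, <1,...>)) is just v[0] + v[1], i.e. one FADDP on the
// low pair instead of a full-width FADD plus a lane extract (illustrative
// helper below).
constexpr float PairwiseLo(const float (&V)[4]) { return V[0] + V[1]; }
static_assert(PairwiseLo({1.0f, 2.0f, 3.0f, 4.0f}) == 3.0f,
              "extract(fadd(v, shuffle(v, <1,...>)), 0) == v[0] + v[1]");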
16266 | |||||
16267 | static SDValue performConcatVectorsCombine(SDNode *N, | ||||
16268 | TargetLowering::DAGCombinerInfo &DCI, | ||||
16269 | SelectionDAG &DAG) { | ||||
16270 | SDLoc dl(N); | ||||
16271 | EVT VT = N->getValueType(0); | ||||
16272 | SDValue N0 = N->getOperand(0), N1 = N->getOperand(1); | ||||
16273 | unsigned N0Opc = N0->getOpcode(), N1Opc = N1->getOpcode(); | ||||
16274 | |||||
16275 | if (VT.isScalableVector()) | ||||
16276 | return SDValue(); | ||||
16277 | |||||
16278 | // Optimize concat_vectors of truncated vectors, where the intermediate | ||||
16279 | // type is illegal, to avoid said illegality, e.g., | ||||
16280 | // (v4i16 (concat_vectors (v2i16 (truncate (v2i64))), | ||||
16281 | // (v2i16 (truncate (v2i64))))) | ||||
16282 | // -> | ||||
16283 | // (v4i16 (truncate (vector_shuffle (v4i32 (bitcast (v2i64))), | ||||
16284 | // (v4i32 (bitcast (v2i64))), | ||||
16285 | // <0, 2, 4, 6>))) | ||||
16286 | // This isn't really target-specific, but ISD::TRUNCATE legality isn't keyed | ||||
16287 | // on both input and result type, so we might generate worse code. | ||||
16288 | // On AArch64 we know it's fine for v2i64->v4i16 and v4i32->v8i8. | ||||
16289 | if (N->getNumOperands() == 2 && N0Opc == ISD::TRUNCATE && | ||||
16290 | N1Opc == ISD::TRUNCATE) { | ||||
16291 | SDValue N00 = N0->getOperand(0); | ||||
16292 | SDValue N10 = N1->getOperand(0); | ||||
16293 | EVT N00VT = N00.getValueType(); | ||||
16294 | |||||
16295 | if (N00VT == N10.getValueType() && | ||||
16296 | (N00VT == MVT::v2i64 || N00VT == MVT::v4i32) && | ||||
16297 | N00VT.getScalarSizeInBits() == 4 * VT.getScalarSizeInBits()) { | ||||
16298 | MVT MidVT = (N00VT == MVT::v2i64 ? MVT::v4i32 : MVT::v8i16); | ||||
16299 | SmallVector<int, 8> Mask(MidVT.getVectorNumElements()); | ||||
16300 | for (size_t i = 0; i < Mask.size(); ++i) | ||||
16301 | Mask[i] = i * 2; | ||||
16302 | return DAG.getNode(ISD::TRUNCATE, dl, VT, | ||||
16303 | DAG.getVectorShuffle( | ||||
16304 | MidVT, dl, | ||||
16305 | DAG.getNode(ISD::BITCAST, dl, MidVT, N00), | ||||
16306 | DAG.getNode(ISD::BITCAST, dl, MidVT, N10), Mask)); | ||||
16307 | } | ||||
16308 | } | ||||
16309 | |||||
16310 | if (N->getOperand(0).getValueType() == MVT::v4i8) { | ||||
16311 | // If we have a concat of v4i8 loads, convert them to a buildvector of f32 | ||||
16312 | // loads to prevent having to go through the v4i8 load legalization that | ||||
16313 | // needs to extend each element into a larger type. | ||||
16314 | if (N->getNumOperands() % 2 == 0 && all_of(N->op_values(), [](SDValue V) { | ||||
16315 | if (V.getValueType() != MVT::v4i8) | ||||
16316 | return false; | ||||
16317 | if (V.isUndef()) | ||||
16318 | return true; | ||||
16319 | LoadSDNode *LD = dyn_cast<LoadSDNode>(V); | ||||
16320 | return LD && V.hasOneUse() && LD->isSimple() && !LD->isIndexed() && | ||||
16321 | LD->getExtensionType() == ISD::NON_EXTLOAD; | ||||
16322 | })) { | ||||
16323 | EVT NVT = | ||||
16324 | EVT::getVectorVT(*DAG.getContext(), MVT::f32, N->getNumOperands()); | ||||
16325 | SmallVector<SDValue> Ops; | ||||
16326 | |||||
16327 | for (unsigned i = 0; i < N->getNumOperands(); i++) { | ||||
16328 | SDValue V = N->getOperand(i); | ||||
16329 | if (V.isUndef()) | ||||
16330 | Ops.push_back(DAG.getUNDEF(MVT::f32)); | ||||
16331 | else { | ||||
16332 | LoadSDNode *LD = cast<LoadSDNode>(V); | ||||
16333 | SDValue NewLoad = | ||||
16334 | DAG.getLoad(MVT::f32, dl, LD->getChain(), LD->getBasePtr(), | ||||
16335 | LD->getMemOperand()); | ||||
16336 | DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLoad.getValue(1)); | ||||
16337 | Ops.push_back(NewLoad); | ||||
16338 | } | ||||
16339 | } | ||||
16340 | return DAG.getBitcast(N->getValueType(0), | ||||
16341 | DAG.getBuildVector(NVT, dl, Ops)); | ||||
16342 | } | ||||
16343 | } | ||||
16344 | |||||
16345 | // Canonicalise concat_vectors to replace concatenations of truncated nots | ||||
16346 | // with nots of concatenated truncates. This in some cases allows for multiple | ||||
16347 | // redundant negations to be eliminated. | ||||
16348 | // (concat_vectors (v4i16 (truncate (not (v4i32)))), | ||||
16349 | // (v4i16 (truncate (not (v4i32))))) | ||||
16350 | // -> | ||||
16351 | // (not (concat_vectors (v4i16 (truncate (v4i32))), | ||||
16352 | // (v4i16 (truncate (v4i32))))) | ||||
16353 | if (N->getNumOperands() == 2 && N0Opc == ISD::TRUNCATE && | ||||
16354 | N1Opc == ISD::TRUNCATE && N->isOnlyUserOf(N0.getNode()) && | ||||
16355 | N->isOnlyUserOf(N1.getNode())) { | ||||
16356 | auto isBitwiseVectorNegate = [](SDValue V) { | ||||
16357 | return V->getOpcode() == ISD::XOR && | ||||
16358 | ISD::isConstantSplatVectorAllOnes(V.getOperand(1).getNode()); | ||||
16359 | }; | ||||
16360 | SDValue N00 = N0->getOperand(0); | ||||
16361 | SDValue N10 = N1->getOperand(0); | ||||
16362 | if (isBitwiseVectorNegate(N00) && N0->isOnlyUserOf(N00.getNode()) && | ||||
16363 | isBitwiseVectorNegate(N10) && N1->isOnlyUserOf(N10.getNode())) { | ||||
16364 | return DAG.getNOT( | ||||
16365 | dl, | ||||
16366 | DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, | ||||
16367 | DAG.getNode(ISD::TRUNCATE, dl, N0.getValueType(), | ||||
16368 | N00->getOperand(0)), | ||||
16369 | DAG.getNode(ISD::TRUNCATE, dl, N1.getValueType(), | ||||
16370 | N10->getOperand(0))), | ||||
16371 | VT); | ||||
16372 | } | ||||
16373 | } | ||||
16374 | |||||
16375 | // Wait till after everything is legalized to try this. That way we have | ||||
16376 | // legal vector types and such. | ||||
16377 | if (DCI.isBeforeLegalizeOps()) | ||||
16378 | return SDValue(); | ||||
16379 | |||||
16380 | // Optimise concat_vectors of two [us]avgceils or [us]avgfloors that use | ||||
16381 | // extracted subvectors from the same original vectors. Combine these into a | ||||
16382 | // single avg that operates on the two original vectors. | ||||
16383 | // avgceil is the target-independent name for rhadd; avgfloor is a hadd. | ||||
16384 | // Example: | ||||
16385 | // (concat_vectors (v8i8 (avgceils (extract_subvector (v16i8 OpA, <0>), | ||||
16386 | // extract_subvector (v16i8 OpB, <0>))), | ||||
16387 | // (v8i8 (avgceils (extract_subvector (v16i8 OpA, <8>), | ||||
16388 | // extract_subvector (v16i8 OpB, <8>))))) | ||||
16389 | // -> | ||||
16390 | // (v16i8(avgceils(v16i8 OpA, v16i8 OpB))) | ||||
16391 | if (N->getNumOperands() == 2 && N0Opc == N1Opc && | ||||
16392 | (N0Opc == ISD::AVGCEILU || N0Opc == ISD::AVGCEILS || | ||||
16393 | N0Opc == ISD::AVGFLOORU || N0Opc == ISD::AVGFLOORS)) { | ||||
16394 | SDValue N00 = N0->getOperand(0); | ||||
16395 | SDValue N01 = N0->getOperand(1); | ||||
16396 | SDValue N10 = N1->getOperand(0); | ||||
16397 | SDValue N11 = N1->getOperand(1); | ||||
16398 | |||||
16399 | EVT N00VT = N00.getValueType(); | ||||
16400 | EVT N10VT = N10.getValueType(); | ||||
16401 | |||||
16402 | if (N00->getOpcode() == ISD::EXTRACT_SUBVECTOR && | ||||
16403 | N01->getOpcode() == ISD::EXTRACT_SUBVECTOR && | ||||
16404 | N10->getOpcode() == ISD::EXTRACT_SUBVECTOR && | ||||
16405 | N11->getOpcode() == ISD::EXTRACT_SUBVECTOR && N00VT == N10VT) { | ||||
16406 | SDValue N00Source = N00->getOperand(0); | ||||
16407 | SDValue N01Source = N01->getOperand(0); | ||||
16408 | SDValue N10Source = N10->getOperand(0); | ||||
16409 | SDValue N11Source = N11->getOperand(0); | ||||
16410 | |||||
16411 | if (N00Source == N10Source && N01Source == N11Source && | ||||
16412 | N00Source.getValueType() == VT && N01Source.getValueType() == VT) { | ||||
16413 | assert(N0.getValueType() == N1.getValueType()); | ||||
16414 | |||||
16415 | uint64_t N00Index = N00.getConstantOperandVal(1); | ||||
16416 | uint64_t N01Index = N01.getConstantOperandVal(1); | ||||
16417 | uint64_t N10Index = N10.getConstantOperandVal(1); | ||||
16418 | uint64_t N11Index = N11.getConstantOperandVal(1); | ||||
16419 | |||||
16420 | if (N00Index == N01Index && N10Index == N11Index && N00Index == 0 && | ||||
16421 | N10Index == N00VT.getVectorNumElements()) | ||||
16422 | return DAG.getNode(N0Opc, dl, VT, N00Source, N01Source); | ||||
16423 | } | ||||
16424 | } | ||||
16425 | } | ||||
16426 | |||||
16427 | // If we see a (concat_vectors (v1x64 A), (v1x64 A)) it's really a vector | ||||
16428 | // splat. The indexed instructions are going to be expecting a DUPLANE64, so | ||||
16429 | // canonicalise to that. | ||||
16430 | if (N->getNumOperands() == 2 && N0 == N1 && VT.getVectorNumElements() == 2) { | ||||
16431 | assert(VT.getScalarSizeInBits() == 64); | ||||
16432 | return DAG.getNode(AArch64ISD::DUPLANE64, dl, VT, WidenVector(N0, DAG), | ||||
16433 | DAG.getConstant(0, dl, MVT::i64)); | ||||
16434 | } | ||||
16435 | |||||
16436 | // Canonicalise concat_vectors so that the right-hand vector has as few | ||||
16437 | // bit-casts as possible before its real operation. The primary matching | ||||
16438 | // destination for these operations will be the narrowing "2" instructions, | ||||
16439 | // which depend on the operation being performed on this right-hand vector. | ||||
16440 | // For example, | ||||
16441 | // (concat_vectors LHS, (v1i64 (bitconvert (v4i16 RHS)))) | ||||
16442 | // becomes | ||||
16443 | // (bitconvert (concat_vectors (v4i16 (bitconvert LHS)), RHS)) | ||||
16444 | |||||
16445 | if (N->getNumOperands() != 2 || N1Opc != ISD::BITCAST) | ||||
16446 | return SDValue(); | ||||
16447 | SDValue RHS = N1->getOperand(0); | ||||
16448 | MVT RHSTy = RHS.getValueType().getSimpleVT(); | ||||
16449 | // If the RHS is not a vector, this is not the pattern we're looking for. | ||||
16450 | if (!RHSTy.isVector()) | ||||
16451 | return SDValue(); | ||||
16452 | |||||
16453 | LLVM_DEBUG( | ||||
16454 | dbgs() << "aarch64-lower: concat_vectors bitcast simplification\n"); | ||||
16455 | |||||
16456 | MVT ConcatTy = MVT::getVectorVT(RHSTy.getVectorElementType(), | ||||
16457 | RHSTy.getVectorNumElements() * 2); | ||||
16458 | return DAG.getNode(ISD::BITCAST, dl, VT, | ||||
16459 | DAG.getNode(ISD::CONCAT_VECTORS, dl, ConcatTy, | ||||
16460 | DAG.getNode(ISD::BITCAST, dl, RHSTy, N0), | ||||
16461 | RHS)); | ||||
16462 | } | ||||
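// Scalar sketch of the truncate/concat rewrite above, assuming little-endian
// lanes: bitcasting v2i64 to v4i32 and keeping the even-indexed elements
// (mask <0, 2, 4, 6>) picks the low i32 of each i64, so truncating those
// i32s to i16 matches truncating the original i64s directly.
static_assert(static_cast<unsigned short>(
                  static_cast<unsigned>(0x1122334455667788ull)) == 0x7788u,
              "i64 -> i16 truncation == i64 -> low i32 -> i16");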
16463 | |||||
16464 | static SDValue | ||||
16465 | performExtractSubvectorCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, | ||||
16466 | SelectionDAG &DAG) { | ||||
16467 | if (DCI.isBeforeLegalizeOps()) | ||||
16468 | return SDValue(); | ||||
16469 | |||||
16470 | EVT VT = N->getValueType(0); | ||||
16471 | if (!VT.isScalableVector() || VT.getVectorElementType() != MVT::i1) | ||||
16472 | return SDValue(); | ||||
16473 | |||||
16474 | SDValue V = N->getOperand(0); | ||||
16475 | |||||
16476 | // NOTE: This combine exists in DAGCombiner, but that version's legality check | ||||
16477 | // blocks this combine because the non-const case requires custom lowering. | ||||
16478 | // | ||||
16479 | // ty1 extract_vector(ty2 splat(const))) -> ty1 splat(const) | ||||
16480 | if (V.getOpcode() == ISD::SPLAT_VECTOR) | ||||
16481 | if (isa<ConstantSDNode>(V.getOperand(0))) | ||||
16482 | return DAG.getNode(ISD::SPLAT_VECTOR, SDLoc(N), VT, V.getOperand(0)); | ||||
16483 | |||||
16484 | return SDValue(); | ||||
16485 | } | ||||
16486 | |||||
16487 | static SDValue | ||||
16488 | performInsertSubvectorCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, | ||||
16489 | SelectionDAG &DAG) { | ||||
16490 | SDLoc DL(N); | ||||
16491 | SDValue Vec = N->getOperand(0); | ||||
16492 | SDValue SubVec = N->getOperand(1); | ||||
16493 | uint64_t IdxVal = N->getConstantOperandVal(2); | ||||
16494 | EVT VecVT = Vec.getValueType(); | ||||
16495 | EVT SubVT = SubVec.getValueType(); | ||||
16496 | |||||
16497 | // Only do this for legal fixed vector types. | ||||
16498 | if (!VecVT.isFixedLengthVector() || | ||||
16499 | !DAG.getTargetLoweringInfo().isTypeLegal(VecVT) || | ||||
16500 | !DAG.getTargetLoweringInfo().isTypeLegal(SubVT)) | ||||
16501 | return SDValue(); | ||||
16502 | |||||
16503 | // Ignore widening patterns. | ||||
16504 | if (IdxVal == 0 && Vec.isUndef()) | ||||
16505 | return SDValue(); | ||||
16506 | |||||
16507 | // Subvector must be half the width and an "aligned" insertion. | ||||
16508 | unsigned NumSubElts = SubVT.getVectorNumElements(); | ||||
16509 | if ((SubVT.getSizeInBits() * 2) != VecVT.getSizeInBits() || | ||||
16510 | (IdxVal != 0 && IdxVal != NumSubElts)) | ||||
16511 | return SDValue(); | ||||
16512 | |||||
16513 | // Fold insert_subvector -> concat_vectors | ||||
16514 | // insert_subvector(Vec,Sub,lo) -> concat_vectors(Sub,extract(Vec,hi)) | ||||
16515 | // insert_subvector(Vec,Sub,hi) -> concat_vectors(extract(Vec,lo),Sub) | ||||
16516 | SDValue Lo, Hi; | ||||
16517 | if (IdxVal == 0) { | ||||
16518 | Lo = SubVec; | ||||
16519 | Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, Vec, | ||||
16520 | DAG.getVectorIdxConstant(NumSubElts, DL)); | ||||
16521 | } else { | ||||
16522 | Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, Vec, | ||||
16523 | DAG.getVectorIdxConstant(0, DL)); | ||||
16524 | Hi = SubVec; | ||||
  }
  return DAG.getNode(ISD::CONCAT_VECTORS, DL, VecVT, Lo, Hi);
}

static SDValue tryCombineFixedPointConvert(SDNode *N,
                                           TargetLowering::DAGCombinerInfo &DCI,
                                           SelectionDAG &DAG) {
  // Wait until after everything is legalized to try this. That way we have
  // legal vector types and such.
  if (DCI.isBeforeLegalizeOps())
    return SDValue();
  // Transform a scalar conversion of a value from a lane extract into a
  // lane extract of a vector conversion. E.g., from foo1 to foo2:
  // double foo1(int64x2_t a) { return vcvtd_n_f64_s64(a[1], 9); }
  // double foo2(int64x2_t a) { return vcvtq_n_f64_s64(a, 9)[1]; }
  //
  // The second form interacts better with instruction selection and the
  // register allocator to avoid cross-class register copies that aren't
  // coalescable due to a lane reference.

  // Check the operand and see if it originates from a lane extract.
  SDValue Op1 = N->getOperand(1);
  if (Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
    return SDValue();

  // Yep, no additional predication needed. Perform the transform.
  SDValue IID = N->getOperand(0);
  SDValue Shift = N->getOperand(2);
  SDValue Vec = Op1.getOperand(0);
  SDValue Lane = Op1.getOperand(1);
  EVT ResTy = N->getValueType(0);
  EVT VecResTy;
  SDLoc DL(N);

  // The vector width should be 128 bits by the time we get here, even
  // if it started as 64 bits (the extract_vector handling will have
  // done so). Bail if it is not.
  if (Vec.getValueSizeInBits() != 128)
    return SDValue();

  if (Vec.getValueType() == MVT::v4i32)
    VecResTy = MVT::v4f32;
  else if (Vec.getValueType() == MVT::v2i64)
    VecResTy = MVT::v2f64;
  else
    return SDValue();

  SDValue Convert =
      DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VecResTy, IID, Vec, Shift);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResTy, Convert, Lane);
}

// AArch64 high-vector "long" operations are formed by performing the non-high
// version on an extract_subvector of each operand which gets the high half:
//
// (longop2 LHS, RHS) == (longop (extract_high LHS), (extract_high RHS))
//
// However, there are cases which don't have an extract_high explicitly, but
// have another operation that can be made compatible with one for free. For
// example:
//
// (dupv64 scalar) --> (extract_high (dup128 scalar))
//
// This routine does the actual conversion of such DUPs, once outer routines
// have determined that everything else is in order.
// It also supports immediate DUP-like nodes (MOVI/MVNI), which we can fold
// similarly here.
static SDValue tryExtendDUPToExtractHigh(SDValue N, SelectionDAG &DAG) {
  MVT VT = N.getSimpleValueType();
  if (N.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
      N.getConstantOperandVal(1) == 0)
    N = N.getOperand(0);

  switch (N.getOpcode()) {
  case AArch64ISD::DUP:
  case AArch64ISD::DUPLANE8:
  case AArch64ISD::DUPLANE16:
  case AArch64ISD::DUPLANE32:
  case AArch64ISD::DUPLANE64:
  case AArch64ISD::MOVI:
  case AArch64ISD::MOVIshift:
  case AArch64ISD::MOVIedit:
  case AArch64ISD::MOVImsl:
  case AArch64ISD::MVNIshift:
  case AArch64ISD::MVNImsl:
    break;
  default:
    // FMOV could be supported, but isn't very useful, as it would only occur
    // if you passed a bitcast floating-point immediate to an eligible long
    // integer op (addl, smull, ...).
    return SDValue();
  }

  if (!VT.is64BitVector())
    return SDValue();

  SDLoc DL(N);
  unsigned NumElems = VT.getVectorNumElements();
  if (N.getValueType().is64BitVector()) {
    MVT ElementTy = VT.getVectorElementType();
    MVT NewVT = MVT::getVectorVT(ElementTy, NumElems * 2);
    N = DAG.getNode(N->getOpcode(), DL, NewVT, N->ops());
  }

  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, N,
                     DAG.getConstant(NumElems, DL, MVT::i64));
}

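// Returns true if N is effectively an extract of the high half of a vector:
// an EXTRACT_SUBVECTOR (looking through a possible BITCAST) whose start index
// is half the element count of the fixed-length source vector.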
static bool isEssentiallyExtractHighSubvector(SDValue N) {
  if (N.getOpcode() == ISD::BITCAST)
    N = N.getOperand(0);
  if (N.getOpcode() != ISD::EXTRACT_SUBVECTOR)
    return false;
  if (N.getOperand(0).getValueType().isScalableVector())
    return false;
  return cast<ConstantSDNode>(N.getOperand(1))->getAPIntValue() ==
         N.getOperand(0).getValueType().getVectorNumElements() / 2;
}

/// Helper structure to keep track of ISD::SET_CC operands.
struct GenericSetCCInfo {
  const SDValue *Opnd0;
  const SDValue *Opnd1;
  ISD::CondCode CC;
};

/// Helper structure to keep track of a SET_CC lowered into AArch64 code.
struct AArch64SetCCInfo {
  const SDValue *Cmp;
  AArch64CC::CondCode CC;
};

/// Helper structure to keep track of SetCC information.
union SetCCInfo {
  GenericSetCCInfo Generic;
  AArch64SetCCInfo AArch64;
};

/// Helper structure to be able to read SetCC information. If the IsAArch64
/// field is set to true, Info is an AArch64SetCCInfo; otherwise Info is a
/// GenericSetCCInfo.
struct SetCCInfoAndKind {
  SetCCInfo Info;
  bool IsAArch64;
};

/// Check whether or not \p Op is a SET_CC operation, either a generic or an
/// AArch64 lowered one.
/// \p SetCCInfo is filled accordingly.
/// \post SetCCInfo is meaningful only when this function returns true.
/// \return True when Op is a kind of SET_CC operation.
static bool isSetCC(SDValue Op, SetCCInfoAndKind &SetCCInfo) {
  // If this is a setcc, this is straightforward.
  if (Op.getOpcode() == ISD::SETCC) {
    SetCCInfo.Info.Generic.Opnd0 = &Op.getOperand(0);
    SetCCInfo.Info.Generic.Opnd1 = &Op.getOperand(1);
    SetCCInfo.Info.Generic.CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
    SetCCInfo.IsAArch64 = false;
    return true;
  }
  // Otherwise, check if this is a matching csel instruction.
  // In other words:
  // - csel 1, 0, cc
  // - csel 0, 1, !cc
  if (Op.getOpcode() != AArch64ISD::CSEL)
    return false;
  // Set the information about the operands.
  // TODO: we want the operands of the Cmp not the csel
  SetCCInfo.Info.AArch64.Cmp = &Op.getOperand(3);
  SetCCInfo.IsAArch64 = true;
  SetCCInfo.Info.AArch64.CC = static_cast<AArch64CC::CondCode>(
      cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());

  // Check that the operands match the constraints:
  // (1) Both operands must be constants.
  // (2) One must be 1 and the other must be 0.
  ConstantSDNode *TValue = dyn_cast<ConstantSDNode>(Op.getOperand(0));
  ConstantSDNode *FValue = dyn_cast<ConstantSDNode>(Op.getOperand(1));

  // Check (1).
  if (!TValue || !FValue)
    return false;

  // Check (2).
  if (!TValue->isOne()) {
    // Update the comparison when we are interested in !cc.
    std::swap(TValue, FValue);
    SetCCInfo.Info.AArch64.CC =
        AArch64CC::getInvertedCondCode(SetCCInfo.Info.AArch64.CC);
  }
  return TValue->isOne() && FValue->isZero();
}

// Returns true if Op is setcc or zext of setcc.
static bool isSetCCOrZExtSetCC(const SDValue &Op, SetCCInfoAndKind &Info) {
  if (isSetCC(Op, Info))
    return true;
  return ((Op.getOpcode() == ISD::ZERO_EXTEND) &&
          isSetCC(Op->getOperand(0), Info));
}

// The folding we want to perform is:
// (add x, [zext] (setcc cc ...) )
// -->
// (csel x, (add x, 1), !cc ...)
//
// The latter will get matched to a CSINC instruction.
static SDValue performSetccAddFolding(SDNode *Op, SelectionDAG &DAG) {
  assert(Op && Op->getOpcode() == ISD::ADD && "Unexpected operation!");
  SDValue LHS = Op->getOperand(0);
  SDValue RHS = Op->getOperand(1);
  SetCCInfoAndKind InfoAndKind;

  // If both operands are a SET_CC, then we don't want to perform this
  // folding and create another csel as this results in more instructions
  // (and higher register usage).
  if (isSetCCOrZExtSetCC(LHS, InfoAndKind) &&
      isSetCCOrZExtSetCC(RHS, InfoAndKind))
    return SDValue();

  // If neither operand is a SET_CC, give up.
  if (!isSetCCOrZExtSetCC(LHS, InfoAndKind)) {
    std::swap(LHS, RHS);
    if (!isSetCCOrZExtSetCC(LHS, InfoAndKind))
      return SDValue();
  }

  // FIXME: This could be generalized to work for FP comparisons.
  EVT CmpVT = InfoAndKind.IsAArch64
                  ? InfoAndKind.Info.AArch64.Cmp->getOperand(0).getValueType()
                  : InfoAndKind.Info.Generic.Opnd0->getValueType();
  if (CmpVT != MVT::i32 && CmpVT != MVT::i64)
    return SDValue();

  SDValue CCVal;
  SDValue Cmp;
  SDLoc dl(Op);
  if (InfoAndKind.IsAArch64) {
    CCVal = DAG.getConstant(
        AArch64CC::getInvertedCondCode(InfoAndKind.Info.AArch64.CC), dl,
        MVT::i32);
    Cmp = *InfoAndKind.Info.AArch64.Cmp;
  } else
    Cmp = getAArch64Cmp(
        *InfoAndKind.Info.Generic.Opnd0, *InfoAndKind.Info.Generic.Opnd1,
        ISD::getSetCCInverse(InfoAndKind.Info.Generic.CC, CmpVT), CCVal, DAG,
        dl);

  EVT VT = Op->getValueType(0);
  LHS = DAG.getNode(ISD::ADD, dl, VT, RHS, DAG.getConstant(1, dl, VT));
  return DAG.getNode(AArch64ISD::CSEL, dl, VT, RHS, LHS, CCVal, Cmp);
}

// ADD(UADDV a, UADDV b) --> UADDV(ADD a, b)
static SDValue performAddUADDVCombine(SDNode *N, SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);
  // Only handle scalar-integer typed ADDs.
  if (N->getOpcode() != ISD::ADD || !VT.isScalarInteger())
    return SDValue();

  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  if (LHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
      RHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT || LHS.getValueType() != VT)
    return SDValue();

  auto *LHSN1 = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
  auto *RHSN1 = dyn_cast<ConstantSDNode>(RHS->getOperand(1));
  if (!LHSN1 || LHSN1 != RHSN1 || !RHSN1->isZero())
    return SDValue();

  SDValue Op1 = LHS->getOperand(0);
  SDValue Op2 = RHS->getOperand(0);
  EVT OpVT1 = Op1.getValueType();
  EVT OpVT2 = Op2.getValueType();
  if (Op1.getOpcode() != AArch64ISD::UADDV || OpVT1 != OpVT2 ||
      Op2.getOpcode() != AArch64ISD::UADDV ||
      OpVT1.getVectorElementType() != VT)
    return SDValue();

  SDValue Val1 = Op1.getOperand(0);
  SDValue Val2 = Op2.getOperand(0);
  EVT ValVT = Val1->getValueType(0);
  SDLoc DL(N);
  SDValue AddVal = DAG.getNode(ISD::ADD, DL, ValVT, Val1, Val2);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
                     DAG.getNode(AArch64ISD::UADDV, DL, ValVT, AddVal),
                     DAG.getConstant(0, DL, MVT::i64));
}

/// Perform the scalar expression combine in the form of:
/// CSEL(c, 1, cc) + b => CSINC(b+c, b, cc)
/// CSNEG(c, -1, cc) + b => CSINC(b+c, b, cc)
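/// For example, add(x, csel(5, 1, gt, cmp)) becomes csinc(add(x, 5), x, gt,
/// cmp): both select x+5 when gt holds and x+1 otherwise.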
static SDValue performAddCSelIntoCSinc(SDNode *N, SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);
  if (!VT.isScalarInteger() || N->getOpcode() != ISD::ADD)
    return SDValue();

  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  // Handle commutativity.
  if (LHS.getOpcode() != AArch64ISD::CSEL &&
      LHS.getOpcode() != AArch64ISD::CSNEG) {
    std::swap(LHS, RHS);
    if (LHS.getOpcode() != AArch64ISD::CSEL &&
        LHS.getOpcode() != AArch64ISD::CSNEG) {
      return SDValue();
    }
  }

  if (!LHS.hasOneUse())
    return SDValue();

  AArch64CC::CondCode AArch64CC =
      static_cast<AArch64CC::CondCode>(LHS.getConstantOperandVal(2));

  // The CSEL should include a constant one operand, and the CSNEG should
  // include a one or negative-one operand.
  ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(LHS.getOperand(0));
  ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
  if (!CTVal || !CFVal)
    return SDValue();

  if (!(LHS.getOpcode() == AArch64ISD::CSEL &&
        (CTVal->isOne() || CFVal->isOne())) &&
      !(LHS.getOpcode() == AArch64ISD::CSNEG &&
        (CTVal->isOne() || CFVal->isAllOnes())))
    return SDValue();

  // Switch CSEL(1, c, cc) to CSEL(c, 1, !cc)
  if (LHS.getOpcode() == AArch64ISD::CSEL && CTVal->isOne() &&
      !CFVal->isOne()) {
    std::swap(CTVal, CFVal);
    AArch64CC = AArch64CC::getInvertedCondCode(AArch64CC);
  }

  SDLoc DL(N);
  // Switch CSNEG(1, c, cc) to CSNEG(-c, -1, !cc)
  if (LHS.getOpcode() == AArch64ISD::CSNEG && CTVal->isOne() &&
      !CFVal->isAllOnes()) {
    APInt C = -1 * CFVal->getAPIntValue();
    CTVal = cast<ConstantSDNode>(DAG.getConstant(C, DL, VT));
    CFVal = cast<ConstantSDNode>(DAG.getAllOnesConstant(DL, VT));
    AArch64CC = AArch64CC::getInvertedCondCode(AArch64CC);
  }

  // The fold might be neutral for larger constants, as the immediate needs
  // to be materialized in a register.
  APInt ADDC = CTVal->getAPIntValue();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!TLI.isLegalAddImmediate(ADDC.getSExtValue()))
    return SDValue();

  assert(((LHS.getOpcode() == AArch64ISD::CSEL && CFVal->isOne()) ||
          (LHS.getOpcode() == AArch64ISD::CSNEG && CFVal->isAllOnes())) &&
         "Unexpected constant value");

  SDValue NewNode = DAG.getNode(ISD::ADD, DL, VT, RHS, SDValue(CTVal, 0));
  SDValue CCVal = DAG.getConstant(AArch64CC, DL, MVT::i32);
  SDValue Cmp = LHS.getOperand(3);

  return DAG.getNode(AArch64ISD::CSINC, DL, VT, NewNode, RHS, CCVal, Cmp);
}

// ADD(UDOT(zero, x, y), A) --> UDOT(A, x, y)
static SDValue performAddDotCombine(SDNode *N, SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);
  if (N->getOpcode() != ISD::ADD)
    return SDValue();

  SDValue Dot = N->getOperand(0);
  SDValue A = N->getOperand(1);
  // Handle commutativity.
  auto isZeroDot = [](SDValue Dot) {
    return (Dot.getOpcode() == AArch64ISD::UDOT ||
            Dot.getOpcode() == AArch64ISD::SDOT) &&
           isZerosVector(Dot.getOperand(0).getNode());
  };
  if (!isZeroDot(Dot))
    std::swap(Dot, A);
  if (!isZeroDot(Dot))
    return SDValue();

  return DAG.getNode(Dot.getOpcode(), SDLoc(N), VT, A, Dot.getOperand(1),
                     Dot.getOperand(2));
}

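// Matches an integer negation written as (sub 0, x), plus a helper that
// builds one.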
static bool isNegatedInteger(SDValue Op) {
  return Op.getOpcode() == ISD::SUB && isNullConstant(Op.getOperand(0));
}

static SDValue getNegatedInteger(SDValue Op, SelectionDAG &DAG) {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Zero = DAG.getConstant(0, DL, VT);
  return DAG.getNode(ISD::SUB, DL, VT, Zero, Op);
}

// Try to fold
//
// (neg (csel X, Y)) -> (csel (neg X), (neg Y))
//
// The folding helps csel to be matched with csneg without generating
// redundant neg instruction, which includes negation of the csel expansion
// of abs node lowered by lowerABS.
static SDValue performNegCSelCombine(SDNode *N, SelectionDAG &DAG) {
  if (!isNegatedInteger(SDValue(N, 0)))
    return SDValue();

  SDValue CSel = N->getOperand(1);
  if (CSel.getOpcode() != AArch64ISD::CSEL || !CSel->hasOneUse())
    return SDValue();

  SDValue N0 = CSel.getOperand(0);
  SDValue N1 = CSel.getOperand(1);

  // If neither of them is a negation, the fold is not worth it: it would
  // introduce two additional negations while removing only one.
  if (!isNegatedInteger(N0) && !isNegatedInteger(N1))
    return SDValue();

  SDValue N0N = getNegatedInteger(N0, DAG);
  SDValue N1N = getNegatedInteger(N1, DAG);

  SDLoc DL(N);
  EVT VT = CSel.getValueType();
  return DAG.getNode(AArch64ISD::CSEL, DL, VT, N0N, N1N, CSel.getOperand(2),
                     CSel.getOperand(3));
}

// The basic add/sub long vector instructions have variants with "2" on the end
// which act on the high-half of their inputs. They are normally matched by
// patterns like:
//
// (add (zeroext (extract_high LHS)),
//      (zeroext (extract_high RHS)))
// -> uaddl2 vD, vN, vM
//
// However, if one of the extracts is something like a duplicate, this
// instruction can still be used profitably. This function puts the DAG into a
// more appropriate form for those patterns to trigger.
static SDValue performAddSubLongCombine(SDNode *N,
                                        TargetLowering::DAGCombinerInfo &DCI,
                                        SelectionDAG &DAG) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  MVT VT = N->getSimpleValueType(0);
  if (!VT.is128BitVector()) {
    if (N->getOpcode() == ISD::ADD)
      return performSetccAddFolding(N, DAG);
    return SDValue();
  }

  // Make sure both branches are extended in the same way.
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  if ((LHS.getOpcode() != ISD::ZERO_EXTEND &&
       LHS.getOpcode() != ISD::SIGN_EXTEND) ||
      LHS.getOpcode() != RHS.getOpcode())
    return SDValue();

  unsigned ExtType = LHS.getOpcode();

  // It's not worth doing if at least one of the inputs isn't already an
  // extract, but we don't know which it'll be so we have to try both.
  if (isEssentiallyExtractHighSubvector(LHS.getOperand(0))) {
    RHS = tryExtendDUPToExtractHigh(RHS.getOperand(0), DAG);
    if (!RHS.getNode())
      return SDValue();

    RHS = DAG.getNode(ExtType, SDLoc(N), VT, RHS);
  } else if (isEssentiallyExtractHighSubvector(RHS.getOperand(0))) {
    LHS = tryExtendDUPToExtractHigh(LHS.getOperand(0), DAG);
    if (!LHS.getNode())
      return SDValue();

    LHS = DAG.getNode(ExtType, SDLoc(N), VT, LHS);
  }

  return DAG.getNode(N->getOpcode(), SDLoc(N), VT, LHS, RHS);
}

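// Returns true if Op is a SUBS node whose integer result is unused, i.e. it
// only exists to set the condition flags and therefore acts as a compare.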
static bool isCMP(SDValue Op) {
  return Op.getOpcode() == AArch64ISD::SUBS &&
         !Op.getNode()->hasAnyUseOfValue(0);
}

// (CSEL 1 0 CC Cond) => CC
// (CSEL 0 1 CC Cond) => !CC
static std::optional<AArch64CC::CondCode> getCSETCondCode(SDValue Op) {
  if (Op.getOpcode() != AArch64ISD::CSEL)
    return std::nullopt;
  auto CC = static_cast<AArch64CC::CondCode>(Op.getConstantOperandVal(2));
  if (CC == AArch64CC::AL || CC == AArch64CC::NV)
    return std::nullopt;
  SDValue OpLHS = Op.getOperand(0);
  SDValue OpRHS = Op.getOperand(1);
  if (isOneConstant(OpLHS) && isNullConstant(OpRHS))
    return CC;
  if (isNullConstant(OpLHS) && isOneConstant(OpRHS))
    return getInvertedCondCode(CC);

  return std::nullopt;
}

// (ADC{S} l r (CMP (CSET HS carry) 1)) => (ADC{S} l r carry)
// (SBC{S} l r (CMP 0 (CSET LO carry))) => (SBC{S} l r carry)
static SDValue foldOverflowCheck(SDNode *Op, SelectionDAG &DAG, bool IsAdd) {
  SDValue CmpOp = Op->getOperand(2);
  if (!isCMP(CmpOp))
    return SDValue();

  if (IsAdd) {
    if (!isOneConstant(CmpOp.getOperand(1)))
      return SDValue();
  } else {
    if (!isNullConstant(CmpOp.getOperand(0)))
      return SDValue();
  }

  SDValue CsetOp = CmpOp->getOperand(IsAdd ? 0 : 1);
  auto CC = getCSETCondCode(CsetOp);
  if (CC != (IsAdd ? AArch64CC::HS : AArch64CC::LO))
    return SDValue();

  return DAG.getNode(Op->getOpcode(), SDLoc(Op), Op->getVTList(),
                     Op->getOperand(0), Op->getOperand(1),
                     CsetOp.getOperand(3));
}

// (ADC x 0 cond) => (CINC x HS cond)
static SDValue foldADCToCINC(SDNode *N, SelectionDAG &DAG) {
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  SDValue Cond = N->getOperand(2);

  if (!isNullConstant(RHS))
    return SDValue();

  EVT VT = N->getValueType(0);
  SDLoc DL(N);

  // (CINC x cc cond) <=> (CSINC x x !cc cond)
  SDValue CC = DAG.getConstant(AArch64CC::LO, DL, MVT::i32);
  return DAG.getNode(AArch64ISD::CSINC, DL, VT, LHS, LHS, CC, Cond);
}

// Transform vector add(zext i8 to i32, zext i8 to i32)
// into sext(add(zext(i8 to i16), zext(i8 to i16)) to i32)
// This allows extra uses of saddl/uaddl at the lower vector widths, and fewer
// extends.
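// For example, add(zext v8i8 to v8i32, zext v8i8 to v8i32) becomes
// sext(add(zext v8i8 to v8i16, zext v8i8 to v8i16) to v8i32), where the
// inner add can be selected as a single uaddl.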
static SDValue performVectorAddSubExtCombine(SDNode *N, SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);
  if (!VT.isFixedLengthVector() || VT.getSizeInBits() <= 128 ||
      (N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND &&
       N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND) ||
      (N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND &&
       N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND) ||
      N->getOperand(0).getOperand(0).getValueType() !=
          N->getOperand(1).getOperand(0).getValueType())
    return SDValue();

  SDValue N0 = N->getOperand(0).getOperand(0);
  SDValue N1 = N->getOperand(1).getOperand(0);
  EVT InVT = N0.getValueType();

  EVT S1 = InVT.getScalarType();
  EVT S2 = VT.getScalarType();
  if ((S2 == MVT::i32 && S1 == MVT::i8) ||
      (S2 == MVT::i64 && (S1 == MVT::i8 || S1 == MVT::i16))) {
    SDLoc DL(N);
    EVT HalfVT = EVT::getVectorVT(*DAG.getContext(),
                                  S2.getHalfSizedIntegerVT(*DAG.getContext()),
                                  VT.getVectorElementCount());
    SDValue NewN0 = DAG.getNode(N->getOperand(0).getOpcode(), DL, HalfVT, N0);
    SDValue NewN1 = DAG.getNode(N->getOperand(1).getOpcode(), DL, HalfVT, N1);
    SDValue NewOp = DAG.getNode(N->getOpcode(), DL, HalfVT, NewN0, NewN1);
    return DAG.getNode(ISD::SIGN_EXTEND, DL, VT, NewOp);
  }
  return SDValue();
}

static SDValue performBuildVectorCombine(SDNode *N,
                                         TargetLowering::DAGCombinerInfo &DCI,
                                         SelectionDAG &DAG) {
  SDLoc DL(N);
  EVT VT = N->getValueType(0);

  // A build vector of two extracted elements is equivalent to an
  // extract subvector where the inner vector is any-extended to the
  // extract_vector_elt VT.
  // (build_vector (extract_elt_iXX_to_i32 vec Idx+0)
  //               (extract_elt_iXX_to_i32 vec Idx+1))
  // => (extract_subvector (anyext_iXX_to_i32 vec) Idx)

  // For now, only consider the v2i32 case, which arises as a result of
  // legalization.
  if (VT != MVT::v2i32)
    return SDValue();

  SDValue Elt0 = N->getOperand(0), Elt1 = N->getOperand(1);
  // Reminder, EXTRACT_VECTOR_ELT has the effect of any-extending to its VT.
  if (Elt0->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
      Elt1->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
      // Constant index.
      isa<ConstantSDNode>(Elt0->getOperand(1)) &&
      isa<ConstantSDNode>(Elt1->getOperand(1)) &&
      // Both EXTRACT_VECTOR_ELT from same vector...
      Elt0->getOperand(0) == Elt1->getOperand(0) &&
      // ... and contiguous. First element's index +1 == second element's index.
      Elt0->getConstantOperandVal(1) + 1 == Elt1->getConstantOperandVal(1) &&
      // EXTRACT_SUBVECTOR requires that Idx be a constant multiple of
      // ResultType's known minimum vector length.
      Elt0->getConstantOperandVal(1) % VT.getVectorMinNumElements() == 0) {
    SDValue VecToExtend = Elt0->getOperand(0);
    EVT ExtVT = VecToExtend.getValueType().changeVectorElementType(MVT::i32);
    if (!DAG.getTargetLoweringInfo().isTypeLegal(ExtVT))
      return SDValue();

    SDValue SubvectorIdx =
        DAG.getVectorIdxConstant(Elt0->getConstantOperandVal(1), DL);

    SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, DL, ExtVT, VecToExtend);
    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i32, Ext,
                       SubvectorIdx);
  }

  return SDValue();
}

// Check whether a node is an extend or shift operand.
static bool isExtendOrShiftOperand(SDValue N) {
  unsigned Opcode = N.getOpcode();
  if (Opcode == ISD::SIGN_EXTEND || Opcode == ISD::SIGN_EXTEND_INREG ||
      Opcode == ISD::ZERO_EXTEND || Opcode == ISD::ANY_EXTEND) {
    EVT SrcVT;
    if (Opcode == ISD::SIGN_EXTEND_INREG)
      SrcVT = cast<VTSDNode>(N.getOperand(1))->getVT();
    else
      SrcVT = N.getOperand(0).getValueType();

    return SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8;
  } else if (Opcode == ISD::AND) {
    ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
    if (!CSD)
      return false;
    uint64_t AndMask = CSD->getZExtValue();
    return AndMask == 0xff || AndMask == 0xffff || AndMask == 0xffffffff;
  } else if (Opcode == ISD::SHL || Opcode == ISD::SRL || Opcode == ISD::SRA) {
    return isa<ConstantSDNode>(N.getOperand(1));
  }

  return false;
}

// (N - Y) + Z --> (Z - Y) + N
// when N is an extend or shift operand
static SDValue performAddCombineSubShift(SDNode *N, SDValue SUB, SDValue Z,
                                         SelectionDAG &DAG) {
  auto IsOneUseExtend = [](SDValue N) {
    return N.hasOneUse() && isExtendOrShiftOperand(N);
  };

  // DAGCombiner will revert the combination when Z is constant, causing an
  // infinite loop, so don't enable the combination when Z is a constant.
  // Likewise, if Z is a one-use extend or shift we can't do the optimization,
  // as it would fall into the same infinite loop.
  if (isa<ConstantSDNode>(Z) || IsOneUseExtend(Z))
    return SDValue();

  if (SUB.getOpcode() != ISD::SUB || !SUB.hasOneUse())
    return SDValue();

  SDValue Shift = SUB.getOperand(0);
  if (!IsOneUseExtend(Shift))
    return SDValue();

  SDLoc DL(N);
  EVT VT = N->getValueType(0);

  SDValue Y = SUB.getOperand(1);
  SDValue NewSub = DAG.getNode(ISD::SUB, DL, VT, Z, Y);
  return DAG.getNode(ISD::ADD, DL, VT, NewSub, Shift);
}

static SDValue performAddCombineForShiftedOperands(SDNode *N,
                                                   SelectionDAG &DAG) {
  // NOTE: Swapping LHS and RHS is not done for SUB, since SUB is not
  // commutative.
  if (N->getOpcode() != ISD::ADD)
    return SDValue();

  // Bail out when value type is not one of {i32, i64}, since AArch64 ADD with
  // shifted register is only available for i32 and i64.
  EVT VT = N->getValueType(0);
  if (VT != MVT::i32 && VT != MVT::i64)
    return SDValue();

  SDLoc DL(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  if (SDValue Val = performAddCombineSubShift(N, LHS, RHS, DAG))
    return Val;
  if (SDValue Val = performAddCombineSubShift(N, RHS, LHS, DAG))
    return Val;

  uint64_t LHSImm = 0, RHSImm = 0;
  // If both operands are shifted by an immediate and the shift amount is not
  // greater than 4 for one of them, swap LHS and RHS to put the operand with
  // the smaller shift amount on the RHS.
  //
  // On many AArch64 processors (Cortex A78, Neoverse N1/N2/V1, etc), ADD with
  // LSL shift (shift <= 4) has smaller latency and larger throughput than ADD
  // with LSL (shift > 4). For the rest of the processors, this is a no-op for
  // both performance and correctness.
  if (isOpcWithIntImmediate(LHS.getNode(), ISD::SHL, LHSImm) &&
      isOpcWithIntImmediate(RHS.getNode(), ISD::SHL, RHSImm) && LHSImm <= 4 &&
      RHSImm > 4 && LHS.hasOneUse())
    return DAG.getNode(ISD::ADD, DL, VT, RHS, LHS);

  return SDValue();
}

static SDValue performAddSubCombine(SDNode *N,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    SelectionDAG &DAG) {
  // Try to change sum of two reductions.
  if (SDValue Val = performAddUADDVCombine(N, DAG))
    return Val;
  if (SDValue Val = performAddDotCombine(N, DAG))
    return Val;
  if (SDValue Val = performAddCSelIntoCSinc(N, DAG))
    return Val;
  if (SDValue Val = performNegCSelCombine(N, DAG))
    return Val;
  if (SDValue Val = performVectorAddSubExtCombine(N, DAG))
    return Val;
  if (SDValue Val = performAddCombineForShiftedOperands(N, DAG))
    return Val;

  return performAddSubLongCombine(N, DCI, DAG);
}

// Massage DAGs which we can use the high-half "long" operations on into
// something isel will recognize better. E.g.
//
// (aarch64_neon_umull (extract_high vec) (dupv64 scalar)) -->
//   (aarch64_neon_umull (extract_high (v2i64 vec))
//                       (extract_high (v2i64 (dup128 scalar))))
//
static SDValue tryCombineLongOpWithDup(unsigned IID, SDNode *N,
                                       TargetLowering::DAGCombinerInfo &DCI,
                                       SelectionDAG &DAG) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  SDValue LHS = N->getOperand((IID == Intrinsic::not_intrinsic) ? 0 : 1);
  SDValue RHS = N->getOperand((IID == Intrinsic::not_intrinsic) ? 1 : 2);
  assert(LHS.getValueType().is64BitVector() &&
         RHS.getValueType().is64BitVector() &&
         "unexpected shape for long operation");

  // Either node could be a DUP, but it's not worth doing both of them (you'd
  // just as well use the non-high version) so look for a corresponding extract
  // operation on the other "wing".
  if (isEssentiallyExtractHighSubvector(LHS)) {
    RHS = tryExtendDUPToExtractHigh(RHS, DAG);
    if (!RHS.getNode())
      return SDValue();
  } else if (isEssentiallyExtractHighSubvector(RHS)) {
    LHS = tryExtendDUPToExtractHigh(LHS, DAG);
    if (!LHS.getNode())
      return SDValue();
  }

  if (IID == Intrinsic::not_intrinsic)
    return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), LHS, RHS);

  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SDLoc(N), N->getValueType(0),
                     N->getOperand(0), LHS, RHS);
}

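// Try to turn a NEON shift intrinsic with a constant (splat) shift amount
// into the equivalent AArch64ISD immediate-shift node, provided the amount
// is within range for the element width.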
static SDValue tryCombineShiftImm(unsigned IID, SDNode *N, SelectionDAG &DAG) {
  MVT ElemTy = N->getSimpleValueType(0).getScalarType();
  unsigned ElemBits = ElemTy.getSizeInBits();

  int64_t ShiftAmount;
  if (BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(2))) {
    APInt SplatValue, SplatUndef;
    unsigned SplatBitSize;
    bool HasAnyUndefs;
    if (!BVN->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
                              HasAnyUndefs, ElemBits) ||
        SplatBitSize != ElemBits)
      return SDValue();

    ShiftAmount = SplatValue.getSExtValue();
  } else if (ConstantSDNode *CVN = dyn_cast<ConstantSDNode>(N->getOperand(2))) {
    ShiftAmount = CVN->getSExtValue();
  } else
    return SDValue();

  unsigned Opcode;
  bool IsRightShift;
  switch (IID) {
  default:
    llvm_unreachable("Unknown shift intrinsic");
  case Intrinsic::aarch64_neon_sqshl:
    Opcode = AArch64ISD::SQSHL_I;
    IsRightShift = false;
    break;
  case Intrinsic::aarch64_neon_uqshl:
    Opcode = AArch64ISD::UQSHL_I;
    IsRightShift = false;
    break;
  case Intrinsic::aarch64_neon_srshl:
    Opcode = AArch64ISD::SRSHR_I;
    IsRightShift = true;
    break;
  case Intrinsic::aarch64_neon_urshl:
    Opcode = AArch64ISD::URSHR_I;
    IsRightShift = true;
    break;
  case Intrinsic::aarch64_neon_sqshlu:
    Opcode = AArch64ISD::SQSHLU_I;
    IsRightShift = false;
    break;
  case Intrinsic::aarch64_neon_sshl:
  case Intrinsic::aarch64_neon_ushl:
    // For positive shift amounts we can use SHL, as ushl/sshl perform a
    // regular left shift for positive shift amounts. Below, we only replace
    // the current node with VSHL if this condition is met.
    Opcode = AArch64ISD::VSHL;
    IsRightShift = false;
    break;
  }

  if (IsRightShift && ShiftAmount <= -1 && ShiftAmount >= -(int)ElemBits) {
    SDLoc dl(N);
    return DAG.getNode(Opcode, dl, N->getValueType(0), N->getOperand(1),
                       DAG.getConstant(-ShiftAmount, dl, MVT::i32));
  } else if (!IsRightShift && ShiftAmount >= 0 && ShiftAmount < ElemBits) {
    SDLoc dl(N);
    return DAG.getNode(Opcode, dl, N->getValueType(0), N->getOperand(1),
                       DAG.getConstant(ShiftAmount, dl, MVT::i32));
  }

  return SDValue();
}

// The CRC32[BH] instructions ignore the high bits of their data operand. Since
// the intrinsics must be legal and take an i32, this means there's almost
// certainly going to be a zext in the DAG which we can eliminate.
static SDValue tryCombineCRC32(unsigned Mask, SDNode *N, SelectionDAG &DAG) {
  SDValue AndN = N->getOperand(2);
  if (AndN.getOpcode() != ISD::AND)
    return SDValue();

  ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(AndN.getOperand(1));
  if (!CMask || CMask->getZExtValue() != Mask)
    return SDValue();

  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SDLoc(N), MVT::i32,
                     N->getOperand(0), N->getOperand(1), AndN.getOperand(0));
}

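// Lower an across-lanes reduction intrinsic to the corresponding AArch64ISD
// node (e.g. UADDV), which produces its result in lane 0 of a vector
// register, then extract element 0 as the scalar result.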
static SDValue combineAcrossLanesIntrinsic(unsigned Opc, SDNode *N,
                                           SelectionDAG &DAG) {
  SDLoc dl(N);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0),
                     DAG.getNode(Opc, dl,
                                 N->getOperand(1).getSimpleValueType(),
                                 N->getOperand(1)),
                     DAG.getConstant(0, dl, MVT::i64));
}

static SDValue LowerSVEIntrinsicIndex(SDNode *N, SelectionDAG &DAG) {
  SDLoc DL(N);
  SDValue Op1 = N->getOperand(1);
  SDValue Op2 = N->getOperand(2);
  EVT ScalarTy = Op2.getValueType();
  if ((ScalarTy == MVT::i8) || (ScalarTy == MVT::i16))
    ScalarTy = MVT::i32;

  // Lower index_vector(base, step) to mul(step, step_vector(1)) + splat(base).
  SDValue StepVector = DAG.getStepVector(DL, N->getValueType(0));
  SDValue Step = DAG.getNode(ISD::SPLAT_VECTOR, DL, N->getValueType(0), Op2);
  SDValue Mul = DAG.getNode(ISD::MUL, DL, N->getValueType(0), StepVector, Step);
  SDValue Base = DAG.getNode(ISD::SPLAT_VECTOR, DL, N->getValueType(0), Op1);
  return DAG.getNode(ISD::ADD, DL, N->getValueType(0), Mul, Base);
}

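// Lower the SVE dup intrinsic (predicated splat): sub-i32 scalars are first
// any-extended to i32, then a DUP_MERGE_PASSTHRU node is emitted with the
// predicate and passthru operands.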
static SDValue LowerSVEIntrinsicDUP(SDNode *N, SelectionDAG &DAG) {
  SDLoc dl(N);
  SDValue Scalar = N->getOperand(3);
  EVT ScalarTy = Scalar.getValueType();

  if ((ScalarTy == MVT::i8) || (ScalarTy == MVT::i16))
    Scalar = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Scalar);

  SDValue Passthru = N->getOperand(1);
  SDValue Pred = N->getOperand(2);
  return DAG.getNode(AArch64ISD::DUP_MERGE_PASSTHRU, dl, N->getValueType(0),
                     Pred, Scalar, Passthru);
}

static SDValue LowerSVEIntrinsicEXT(SDNode *N, SelectionDAG &DAG) {
  SDLoc dl(N);
  LLVMContext &Ctx = *DAG.getContext();
  EVT VT = N->getValueType(0);

  assert(VT.isScalableVector() && "Expected a scalable vector.");

  // Current lowering only supports the SVE-ACLE types.
  if (VT.getSizeInBits().getKnownMinSize() != AArch64::SVEBitsPerBlock)
    return SDValue();

  unsigned ElemSize = VT.getVectorElementType().getSizeInBits() / 8;
  unsigned ByteSize = VT.getSizeInBits().getKnownMinSize() / 8;
  EVT ByteVT =
      EVT::getVectorVT(Ctx, MVT::i8, ElementCount::getScalable(ByteSize));

  // Convert everything to the domain of EXT (i.e. bytes).
  SDValue Op0 = DAG.getNode(ISD::BITCAST, dl, ByteVT, N->getOperand(1));
  SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, ByteVT, N->getOperand(2));
  SDValue Op2 = DAG.getNode(ISD::MUL, dl, MVT::i32, N->getOperand(3),
                            DAG.getConstant(ElemSize, dl, MVT::i32));

  SDValue EXT = DAG.getNode(AArch64ISD::EXT, dl, ByteVT, Op0, Op1, Op2);
  return DAG.getNode(ISD::BITCAST, dl, VT, EXT);
}

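// For SVE wide compare intrinsics whose comparator is a splat of an
// immediate that fits the instruction's immediate form (signed [-16, 15],
// unsigned [0, 127]), emit a SETCC_MERGE_ZERO against a splat of that
// immediate instead of the wide compare.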
static SDValue tryConvertSVEWideCompare(SDNode *N, ISD::CondCode CC,
                                        TargetLowering::DAGCombinerInfo &DCI,
                                        SelectionDAG &DAG) {
  if (DCI.isBeforeLegalize())
    return SDValue();

  SDValue Comparator = N->getOperand(3);
  if (Comparator.getOpcode() == AArch64ISD::DUP ||
      Comparator.getOpcode() == ISD::SPLAT_VECTOR) {
    unsigned IID = getIntrinsicID(N);
    EVT VT = N->getValueType(0);
    EVT CmpVT = N->getOperand(2).getValueType();
    SDValue Pred = N->getOperand(1);
    SDValue Imm;
    SDLoc DL(N);

    switch (IID) {
    default:
      llvm_unreachable("Called with wrong intrinsic!");
      break;

    // Signed comparisons
    case Intrinsic::aarch64_sve_cmpeq_wide:
    case Intrinsic::aarch64_sve_cmpne_wide:
    case Intrinsic::aarch64_sve_cmpge_wide:
    case Intrinsic::aarch64_sve_cmpgt_wide:
    case Intrinsic::aarch64_sve_cmplt_wide:
    case Intrinsic::aarch64_sve_cmple_wide: {
      if (auto *CN = dyn_cast<ConstantSDNode>(Comparator.getOperand(0))) {
        int64_t ImmVal = CN->getSExtValue();
        if (ImmVal >= -16 && ImmVal <= 15)
          Imm = DAG.getConstant(ImmVal, DL, MVT::i32);
        else
          return SDValue();
      }
      break;
    }
    // Unsigned comparisons
    case Intrinsic::aarch64_sve_cmphs_wide:
    case Intrinsic::aarch64_sve_cmphi_wide:
    case Intrinsic::aarch64_sve_cmplo_wide:
    case Intrinsic::aarch64_sve_cmpls_wide: {
      if (auto *CN = dyn_cast<ConstantSDNode>(Comparator.getOperand(0))) {
        uint64_t ImmVal = CN->getZExtValue();
        if (ImmVal <= 127)
          Imm = DAG.getConstant(ImmVal, DL, MVT::i32);
        else
          return SDValue();
      }
      break;
    }
    }

    if (!Imm)
      return SDValue();

    SDValue Splat = DAG.getNode(ISD::SPLAT_VECTOR, DL, CmpVT, Imm);
    return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, DL, VT, Pred,
                       N->getOperand(2), Splat, DAG.getCondCode(CC));
  }

  return SDValue();
}

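// Materialize the boolean result of an SVE predicate test: emit a PTEST (or
// PTEST_ANY) of Op under the governing predicate Pg and turn the resulting
// condition flags into a 0/1 value of type VT via a CSEL.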
static SDValue getPTest(SelectionDAG &DAG, EVT VT, SDValue Pg, SDValue Op,
                        AArch64CC::CondCode Cond) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  SDLoc DL(Op);
  assert(Op.getValueType().isScalableVector() &&
         TLI.isTypeLegal(Op.getValueType()) &&
         "Expected legal scalable vector type!");
  assert(Op.getValueType() == Pg.getValueType() &&
         "Expected same type for PTEST operands");

  // Ensure target specific opcodes are using legal type.
  EVT OutVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
  SDValue TVal = DAG.getConstant(1, DL, OutVT);
  SDValue FVal = DAG.getConstant(0, DL, OutVT);

  // Ensure operands have type nxv16i1.
  if (Op.getValueType() != MVT::nxv16i1) {
    if ((Cond == AArch64CC::ANY_ACTIVE || Cond == AArch64CC::NONE_ACTIVE) &&
        isZeroingInactiveLanes(Op))
      Pg = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, MVT::nxv16i1, Pg);
    else
      Pg = getSVEPredicateBitCast(MVT::nxv16i1, Pg, DAG);
    Op = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, MVT::nxv16i1, Op);
  }

  // Set condition code (CC) flags.
  SDValue Test = DAG.getNode(
      Cond == AArch64CC::ANY_ACTIVE ? AArch64ISD::PTEST_ANY : AArch64ISD::PTEST,
      DL, MVT::Other, Pg, Op);

  // Convert CC to integer based on requested condition.
  // NOTE: Cond is inverted to promote CSEL's removal when it feeds a compare.
  SDValue CC = DAG.getConstant(getInvertedCondCode(Cond), DL, MVT::i32);
  SDValue Res = DAG.getNode(AArch64ISD::CSEL, DL, OutVT, FVal, TVal, CC, Test);
  return DAG.getZExtOrTrunc(Res, DL, VT);
}

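// Lower an SVE integer reduction intrinsic to the AArch64ISD reduction node
// Opc and extract the scalar result from lane 0.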
static SDValue combineSVEReductionInt(SDNode *N, unsigned Opc,
                                      SelectionDAG &DAG) {
  SDLoc DL(N);

  SDValue Pred = N->getOperand(1);
  SDValue VecToReduce = N->getOperand(2);

  // NOTE: The integer reduction's result type is not always linked to the
  // operand's element type so we construct it from the intrinsic's result
  // type.
  EVT ReduceVT = getPackedSVEVectorVT(N->getValueType(0));
  SDValue Reduce = DAG.getNode(Opc, DL, ReduceVT, Pred, VecToReduce);

  // SVE reductions set the whole vector register with the first element
  // containing the reduction result, which we'll now extract.
  SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, N->getValueType(0), Reduce,
                     Zero);
}

static SDValue combineSVEReductionFP(SDNode *N, unsigned Opc,
                                     SelectionDAG &DAG) {
  SDLoc DL(N);

  SDValue Pred = N->getOperand(1);
  SDValue VecToReduce = N->getOperand(2);

  EVT ReduceVT = VecToReduce.getValueType();
  SDValue Reduce = DAG.getNode(Opc, DL, ReduceVT, Pred, VecToReduce);

  // SVE reductions set the whole vector register with the first element
  // containing the reduction result, which we'll now extract.
  SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, N->getValueType(0), Reduce,
                     Zero);
}

static SDValue combineSVEReductionOrderedFP(SDNode *N, unsigned Opc,
                                            SelectionDAG &DAG) {
  SDLoc DL(N);

  SDValue Pred = N->getOperand(1);
  SDValue InitVal = N->getOperand(2);
  SDValue VecToReduce = N->getOperand(3);
  EVT ReduceVT = VecToReduce.getValueType();

  // Ordered reductions use the first lane of the result vector as the
  // reduction's initial value.
  SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
  InitVal = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ReduceVT,
                        DAG.getUNDEF(ReduceVT), InitVal, Zero);

  SDValue Reduce = DAG.getNode(Opc, DL, ReduceVT, Pred, InitVal, VecToReduce);

  // SVE reductions set the whole vector register with the first element
  // containing the reduction result, which we'll now extract.
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, N->getValueType(0), Reduce,
                     Zero);
}

static bool isAllInactivePredicate(SDValue N) {
  // Look through cast.
  while (N.getOpcode() == AArch64ISD::REINTERPRET_CAST)
    N = N.getOperand(0);

  return ISD::isConstantSplatVectorAllZeros(N.getNode());
}

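// Returns true when N is known to be an all-active SVE predicate: an
// all-ones splat, or a suitable "ptrue" whose pattern covers at least the
// element count implied by N's type.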
17629 | static bool isAllActivePredicate(SelectionDAG &DAG, SDValue N) { | ||||
17630 | unsigned NumElts = N.getValueType().getVectorMinNumElements(); | ||||
17631 | |||||
17632 | // Look through cast. | ||||
17633 | while (N.getOpcode() == AArch64ISD::REINTERPRET_CAST) { | ||||
17634 | N = N.getOperand(0); | ||||
17635 | // When reinterpreting from a type with fewer elements the "new" elements | ||||
17636 | // are not active, so bail if they're likely to be used. | ||||
17637 | if (N.getValueType().getVectorMinNumElements() < NumElts) | ||||
17638 | return false; | ||||
17639 | } | ||||
17640 | |||||
17641 | if (ISD::isConstantSplatVectorAllOnes(N.getNode())) | ||||
17642 | return true; | ||||
17643 | |||||
17644 | // "ptrue p.<ty>, all" can be considered all active when <ty> is the same size | ||||
17645 | // or smaller than the implicit element type represented by N. | ||||
17646 | // NOTE: A larger element count implies a smaller element type. | ||||
17647 | if (N.getOpcode() == AArch64ISD::PTRUE && | ||||
17648 | N.getConstantOperandVal(0) == AArch64SVEPredPattern::all) | ||||
17649 | return N.getValueType().getVectorMinNumElements() >= NumElts; | ||||
17650 | |||||
17651 | // If we're compiling for a specific vector-length, we can check if the | ||||
17652 | // pattern's VL equals that of the scalable vector at runtime. | ||||
17653 | if (N.getOpcode() == AArch64ISD::PTRUE) { | ||||
17654 | const auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>(); | ||||
17655 | unsigned MinSVESize = Subtarget.getMinSVEVectorSizeInBits(); | ||||
17656 | unsigned MaxSVESize = Subtarget.getMaxSVEVectorSizeInBits(); | ||||
17657 | if (MaxSVESize && MinSVESize == MaxSVESize) { | ||||
17658 | unsigned VScale = MaxSVESize / AArch64::SVEBitsPerBlock; | ||||
17659 | unsigned PatNumElts = | ||||
17660 | getNumElementsFromSVEPredPattern(N.getConstantOperandVal(0)); | ||||
17661 | return PatNumElts == (NumElts * VScale); | ||||
17662 | } | ||||
17663 | } | ||||
17664 | |||||
17665 | return false; | ||||
17666 | } | ||||
17667 | |||||
17668 | // If a merged operation has no inactive lanes we can relax it to a predicated | ||||
17669 | // or unpredicated operation, which potentially allows better isel (perhaps | ||||
17670 | // using immediate forms) or relaxing register reuse requirements. | ||||
17671 | static SDValue convertMergedOpToPredOp(SDNode *N, unsigned Opc, | ||||
17672 | SelectionDAG &DAG, bool UnpredOp = false, | ||||
17673 | bool SwapOperands = false) { | ||||
17674 | assert(N->getOpcode() == ISD::INTRINSIC_WO_CHAIN && "Expected intrinsic!")(static_cast <bool> (N->getOpcode() == ISD::INTRINSIC_WO_CHAIN && "Expected intrinsic!") ? void (0) : __assert_fail ("N->getOpcode() == ISD::INTRINSIC_WO_CHAIN && \"Expected intrinsic!\"" , "llvm/lib/Target/AArch64/AArch64ISelLowering.cpp", 17674, __extension__ __PRETTY_FUNCTION__)); | ||||
17675 | assert(N->getNumOperands() == 4 && "Expected 3 operand intrinsic!")(static_cast <bool> (N->getNumOperands() == 4 && "Expected 3 operand intrinsic!") ? void (0) : __assert_fail ( "N->getNumOperands() == 4 && \"Expected 3 operand intrinsic!\"" , "llvm/lib/Target/AArch64/AArch64ISelLowering.cpp", 17675, __extension__ __PRETTY_FUNCTION__)); | ||||
17676 | SDValue Pg = N->getOperand(1); | ||||
17677 | SDValue Op1 = N->getOperand(SwapOperands ? 3 : 2); | ||||
17678 | SDValue Op2 = N->getOperand(SwapOperands ? 2 : 3); | ||||
17679 | |||||
17680 | // The ISD way to specify an all-active predicate. | ||||
17681 | if (isAllActivePredicate(DAG, Pg)) { | ||||
17682 | if (UnpredOp) | ||||
17683 | return DAG.getNode(Opc, SDLoc(N), N->getValueType(0), Op1, Op2); | ||||
17684 | |||||
17685 | return DAG.getNode(Opc, SDLoc(N), N->getValueType(0), Pg, Op1, Op2); | ||||
17686 | } | ||||
17687 | |||||
17688 | // FUTURE: SplatVector(true) | ||||
17689 | return SDValue(); | ||||
17690 | } | ||||
17691 | |||||
17692 | static SDValue performIntrinsicCombine(SDNode *N, | ||||
17693 | TargetLowering::DAGCombinerInfo &DCI, | ||||
17694 | const AArch64Subtarget *Subtarget) { | ||||
17695 | SelectionDAG &DAG = DCI.DAG; | ||||
17696 | unsigned IID = getIntrinsicID(N); | ||||
17697 | switch (IID) { | ||||
17698 | default: | ||||
17699 | break; | ||||
17700 | case Intrinsic::get_active_lane_mask: { | ||||
17701 | SDValue Res = SDValue(); | ||||
17702 | EVT VT = N->getValueType(0); | ||||
17703 | if (VT.isFixedLengthVector()) { | ||||
17704 | // We can use the SVE whilelo instruction to lower this intrinsic by | ||||
17705 | // creating the appropriate sequence of scalable vector operations and | ||||
17706 | // then extracting a fixed-width subvector from the scalable vector. | ||||
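      | // For a v4i1 result the sequence is roughly: whilelo -> nxv4i1, | ||||
      | // sign_extend -> nxv4i32, extract_subvector -> v4i32, truncate -> v4i1. | ||||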
17707 | |||||
17708 | SDLoc DL(N); | ||||
17709 | SDValue ID = | ||||
17710 | DAG.getTargetConstant(Intrinsic::aarch64_sve_whilelo, DL, MVT::i64); | ||||
17711 | |||||
17712 | EVT WhileVT = EVT::getVectorVT( | ||||
17713 | *DAG.getContext(), MVT::i1, | ||||
17714 | ElementCount::getScalable(VT.getVectorNumElements())); | ||||
17715 | |||||
17716 | // Get promoted scalable vector VT, i.e. promote nxv4i1 -> nxv4i32. | ||||
17717 | EVT PromVT = getPromotedVTForPredicate(WhileVT); | ||||
17718 | |||||
17719 | // Get the fixed-width equivalent of PromVT for extraction. | ||||
17720 | EVT ExtVT = | ||||
17721 | EVT::getVectorVT(*DAG.getContext(), PromVT.getVectorElementType(), | ||||
17722 | VT.getVectorElementCount()); | ||||
17723 | |||||
17724 | Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, WhileVT, ID, | ||||
17725 | N->getOperand(1), N->getOperand(2)); | ||||
17726 | Res = DAG.getNode(ISD::SIGN_EXTEND, DL, PromVT, Res); | ||||
17727 | Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtVT, Res, | ||||
17728 | DAG.getConstant(0, DL, MVT::i64)); | ||||
17729 | Res = DAG.getNode(ISD::TRUNCATE, DL, VT, Res); | ||||
17730 | } | ||||
17731 | return Res; | ||||
17732 | } | ||||
17733 | case Intrinsic::aarch64_neon_vcvtfxs2fp: | ||||
17734 | case Intrinsic::aarch64_neon_vcvtfxu2fp: | ||||
17735 | return tryCombineFixedPointConvert(N, DCI, DAG); | ||||
17736 | case Intrinsic::aarch64_neon_saddv: | ||||
17737 | return combineAcrossLanesIntrinsic(AArch64ISD::SADDV, N, DAG); | ||||
17738 | case Intrinsic::aarch64_neon_uaddv: | ||||
17739 | return combineAcrossLanesIntrinsic(AArch64ISD::UADDV, N, DAG); | ||||
17740 | case Intrinsic::aarch64_neon_sminv: | ||||
17741 | return combineAcrossLanesIntrinsic(AArch64ISD::SMINV, N, DAG); | ||||
17742 | case Intrinsic::aarch64_neon_uminv: | ||||
17743 | return combineAcrossLanesIntrinsic(AArch64ISD::UMINV, N, DAG); | ||||
17744 | case Intrinsic::aarch64_neon_smaxv: | ||||
17745 | return combineAcrossLanesIntrinsic(AArch64ISD::SMAXV, N, DAG); | ||||
17746 | case Intrinsic::aarch64_neon_umaxv: | ||||
17747 | return combineAcrossLanesIntrinsic(AArch64ISD::UMAXV, N, DAG); | ||||
17748 | case Intrinsic::aarch64_neon_fmax: | ||||
17749 | return DAG.getNode(ISD::FMAXIMUM, SDLoc(N), N->getValueType(0), | ||||
17750 | N->getOperand(1), N->getOperand(2)); | ||||
17751 | case Intrinsic::aarch64_neon_fmin: | ||||
17752 | return DAG.getNode(ISD::FMINIMUM, SDLoc(N), N->getValueType(0), | ||||
17753 | N->getOperand(1), N->getOperand(2)); | ||||
17754 | case Intrinsic::aarch64_neon_fmaxnm: | ||||
17755 | return DAG.getNode(ISD::FMAXNUM, SDLoc(N), N->getValueType(0), | ||||
17756 | N->getOperand(1), N->getOperand(2)); | ||||
17757 | case Intrinsic::aarch64_neon_fminnm: | ||||
17758 | return DAG.getNode(ISD::FMINNUM, SDLoc(N), N->getValueType(0), | ||||
17759 | N->getOperand(1), N->getOperand(2)); | ||||
17760 | case Intrinsic::aarch64_neon_smull: | ||||
17761 | return DAG.getNode(AArch64ISD::SMULL, SDLoc(N), N->getValueType(0), | ||||
17762 | N->getOperand(1), N->getOperand(2)); | ||||
17763 | case Intrinsic::aarch64_neon_umull: | ||||
17764 | return DAG.getNode(AArch64ISD::UMULL, SDLoc(N), N->getValueType(0), | ||||
17765 | N->getOperand(1), N->getOperand(2)); | ||||
17766 | case Intrinsic::aarch64_neon_pmull: | ||||
17767 | return DAG.getNode(AArch64ISD::PMULL, SDLoc(N), N->getValueType(0), | ||||
17768 | N->getOperand(1), N->getOperand(2)); | ||||
17769 | case Intrinsic::aarch64_neon_sqdmull: | ||||
17770 | return tryCombineLongOpWithDup(IID, N, DCI, DAG); | ||||
17771 | case Intrinsic::aarch64_neon_sqshl: | ||||
17772 | case Intrinsic::aarch64_neon_uqshl: | ||||
17773 | case Intrinsic::aarch64_neon_sqshlu: | ||||
17774 | case Intrinsic::aarch64_neon_srshl: | ||||
17775 | case Intrinsic::aarch64_neon_urshl: | ||||
17776 | case Intrinsic::aarch64_neon_sshl: | ||||
17777 | case Intrinsic::aarch64_neon_ushl: | ||||
17778 | return tryCombineShiftImm(IID, N, DAG); | ||||
17779 | case Intrinsic::aarch64_crc32b: | ||||
17780 | case Intrinsic::aarch64_crc32cb: | ||||
17781 | return tryCombineCRC32(0xff, N, DAG); | ||||
17782 | case Intrinsic::aarch64_crc32h: | ||||
17783 | case Intrinsic::aarch64_crc32ch: | ||||
17784 | return tryCombineCRC32(0xffff, N, DAG); | ||||
17785 | case Intrinsic::aarch64_sve_saddv: | ||||
17786 | // There is no i64 version of SADDV because the sign is irrelevant. | ||||
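      | // (For i64 elements the result needs no widening, so the unsigned | ||||
      | // reduction produces the same value.) | ||||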
17787 | if (N->getOperand(2)->getValueType(0).getVectorElementType() == MVT::i64) | ||||
17788 | return combineSVEReductionInt(N, AArch64ISD::UADDV_PRED, DAG); | ||||
17789 | else | ||||
17790 | return combineSVEReductionInt(N, AArch64ISD::SADDV_PRED, DAG); | ||||
17791 | case Intrinsic::aarch64_sve_uaddv: | ||||
17792 | return combineSVEReductionInt(N, AArch64ISD::UADDV_PRED, DAG); | ||||
17793 | case Intrinsic::aarch64_sve_smaxv: | ||||
17794 | return combineSVEReductionInt(N, AArch64ISD::SMAXV_PRED, DAG); | ||||
17795 | case Intrinsic::aarch64_sve_umaxv: | ||||
17796 | return combineSVEReductionInt(N, AArch64ISD::UMAXV_PRED, DAG); | ||||
17797 | case Intrinsic::aarch64_sve_sminv: | ||||
17798 | return combineSVEReductionInt(N, AArch64ISD::SMINV_PRED, DAG); | ||||
17799 | case Intrinsic::aarch64_sve_uminv: | ||||
17800 | return combineSVEReductionInt(N, AArch64ISD::UMINV_PRED, DAG); | ||||
17801 | case Intrinsic::aarch64_sve_orv: | ||||
17802 | return combineSVEReductionInt(N, AArch64ISD::ORV_PRED, DAG); | ||||
17803 | case Intrinsic::aarch64_sve_eorv: | ||||
17804 | return combineSVEReductionInt(N, AArch64ISD::EORV_PRED, DAG); | ||||
17805 | case Intrinsic::aarch64_sve_andv: | ||||
17806 | return combineSVEReductionInt(N, AArch64ISD::ANDV_PRED, DAG); | ||||
17807 | case Intrinsic::aarch64_sve_index: | ||||
17808 | return LowerSVEIntrinsicIndex(N, DAG); | ||||
17809 | case Intrinsic::aarch64_sve_dup: | ||||
17810 | return LowerSVEIntrinsicDUP(N, DAG); | ||||
17811 | case Intrinsic::aarch64_sve_dup_x: | ||||
17812 | return DAG.getNode(ISD::SPLAT_VECTOR, SDLoc(N), N->getValueType(0), | ||||
17813 | N->getOperand(1)); | ||||
17814 | case Intrinsic::aarch64_sve_ext: | ||||
17815 | return LowerSVEIntrinsicEXT(N, DAG); | ||||
17816 | case Intrinsic::aarch64_sve_mul: | ||||
17817 | return convertMergedOpToPredOp(N, AArch64ISD::MUL_PRED, DAG); | ||||
17818 | case Intrinsic::aarch64_sve_smulh: | ||||
17819 | return convertMergedOpToPredOp(N, AArch64ISD::MULHS_PRED, DAG); | ||||
17820 | case Intrinsic::aarch64_sve_umulh: | ||||
17821 | return convertMergedOpToPredOp(N, AArch64ISD::MULHU_PRED, DAG); | ||||
17822 | case Intrinsic::aarch64_sve_smin: | ||||
17823 | return convertMergedOpToPredOp(N, AArch64ISD::SMIN_PRED, DAG); | ||||
17824 | case Intrinsic::aarch64_sve_umin: | ||||
17825 | return convertMergedOpToPredOp(N, AArch64ISD::UMIN_PRED, DAG); | ||||
17826 | case Intrinsic::aarch64_sve_smax: | ||||
17827 | return convertMergedOpToPredOp(N, AArch64ISD::SMAX_PRED, DAG); | ||||
17828 | case Intrinsic::aarch64_sve_umax: | ||||
17829 | return convertMergedOpToPredOp(N, AArch64ISD::UMAX_PRED, DAG); | ||||
17830 | case Intrinsic::aarch64_sve_lsl: | ||||
17831 | return convertMergedOpToPredOp(N, AArch64ISD::SHL_PRED, DAG); | ||||
17832 | case Intrinsic::aarch64_sve_lsr: | ||||
17833 | return convertMergedOpToPredOp(N, AArch64ISD::SRL_PRED, DAG); | ||||
17834 | case Intrinsic::aarch64_sve_asr: | ||||
17835 | return convertMergedOpToPredOp(N, AArch64ISD::SRA_PRED, DAG); | ||||
17836 | case Intrinsic::aarch64_sve_fadd: | ||||
17837 | return convertMergedOpToPredOp(N, AArch64ISD::FADD_PRED, DAG); | ||||
17838 | case Intrinsic::aarch64_sve_fsub: | ||||
17839 | return convertMergedOpToPredOp(N, AArch64ISD::FSUB_PRED, DAG); | ||||
17840 | case Intrinsic::aarch64_sve_fmul: | ||||
17841 | return convertMergedOpToPredOp(N, AArch64ISD::FMUL_PRED, DAG); | ||||
17842 | case Intrinsic::aarch64_sve_add: | ||||
17843 | return convertMergedOpToPredOp(N, ISD::ADD, DAG, true); | ||||
17844 | case Intrinsic::aarch64_sve_sub: | ||||
17845 | return convertMergedOpToPredOp(N, ISD::SUB, DAG, true); | ||||
17846 | case Intrinsic::aarch64_sve_subr: | ||||
17847 | return convertMergedOpToPredOp(N, ISD::SUB, DAG, true, true); | ||||
17848 | case Intrinsic::aarch64_sve_and: | ||||
17849 | return convertMergedOpToPredOp(N, ISD::AND, DAG, true); | ||||
17850 | case Intrinsic::aarch64_sve_bic: | ||||
17851 | return convertMergedOpToPredOp(N, AArch64ISD::BIC, DAG, true); | ||||
17852 | case Intrinsic::aarch64_sve_eor: | ||||
17853 | return convertMergedOpToPredOp(N, ISD::XOR, DAG, true); | ||||
17854 | case Intrinsic::aarch64_sve_orr: | ||||
17855 | return convertMergedOpToPredOp(N, ISD::OR, DAG, true); | ||||
17856 | case Intrinsic::aarch64_sve_sabd: | ||||
17857 | return convertMergedOpToPredOp(N, ISD::ABDS, DAG, true); | ||||
17858 | case Intrinsic::aarch64_sve_uabd: | ||||
17859 | return convertMergedOpToPredOp(N, ISD::ABDU, DAG, true); | ||||
17860 | case Intrinsic::aarch64_sve_sqadd: | ||||
17861 | return convertMergedOpToPredOp(N, ISD::SADDSAT, DAG, true); | ||||
17862 | case Intrinsic::aarch64_sve_sqsub: | ||||
17863 | return convertMergedOpToPredOp(N, ISD::SSUBSAT, DAG, true); | ||||
17864 | case Intrinsic::aarch64_sve_uqadd: | ||||
17865 | return convertMergedOpToPredOp(N, ISD::UADDSAT, DAG, true); | ||||
17866 | case Intrinsic::aarch64_sve_uqsub: | ||||
17867 | return convertMergedOpToPredOp(N, ISD::USUBSAT, DAG, true); | ||||
17868 | case Intrinsic::aarch64_sve_sqadd_x: | ||||
17869 | return DAG.getNode(ISD::SADDSAT, SDLoc(N), N->getValueType(0), | ||||
17870 | N->getOperand(1), N->getOperand(2)); | ||||
17871 | case Intrinsic::aarch64_sve_sqsub_x: | ||||
17872 | return DAG.getNode(ISD::SSUBSAT, SDLoc(N), N->getValueType(0), | ||||
17873 | N->getOperand(1), N->getOperand(2)); | ||||
17874 | case Intrinsic::aarch64_sve_uqadd_x: | ||||
17875 | return DAG.getNode(ISD::UADDSAT, SDLoc(N), N->getValueType(0), | ||||
17876 | N->getOperand(1), N->getOperand(2)); | ||||
17877 | case Intrinsic::aarch64_sve_uqsub_x: | ||||
17878 | return DAG.getNode(ISD::USUBSAT, SDLoc(N), N->getValueType(0), | ||||
17879 | N->getOperand(1), N->getOperand(2)); | ||||
17880 | case Intrinsic::aarch64_sve_asrd: | ||||
17881 | return DAG.getNode(AArch64ISD::SRAD_MERGE_OP1, SDLoc(N), N->getValueType(0), | ||||
17882 | N->getOperand(1), N->getOperand(2), N->getOperand(3)); | ||||
17883 | case Intrinsic::aarch64_sve_cmphs: | ||||
17884 | if (!N->getOperand(2).getValueType().isFloatingPoint()) | ||||
17885 | return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N), | ||||
17886 | N->getValueType(0), N->getOperand(1), N->getOperand(2), | ||||
17887 | N->getOperand(3), DAG.getCondCode(ISD::SETUGE)); | ||||
17888 | break; | ||||
17889 | case Intrinsic::aarch64_sve_cmphi: | ||||
17890 | if (!N->getOperand(2).getValueType().isFloatingPoint()) | ||||
17891 | return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N), | ||||
17892 | N->getValueType(0), N->getOperand(1), N->getOperand(2), | ||||
17893 | N->getOperand(3), DAG.getCondCode(ISD::SETUGT)); | ||||
17894 | break; | ||||
17895 | case Intrinsic::aarch64_sve_fcmpge: | ||||
17896 | case Intrinsic::aarch64_sve_cmpge: | ||||
17897 | return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N), | ||||
17898 | N->getValueType(0), N->getOperand(1), N->getOperand(2), | ||||
17899 | N->getOperand(3), DAG.getCondCode(ISD::SETGE)); | ||||
17900 | break; | ||||
17901 | case Intrinsic::aarch64_sve_fcmpgt: | ||||
17902 | case Intrinsic::aarch64_sve_cmpgt: | ||||
17903 | return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N), | ||||
17904 | N->getValueType(0), N->getOperand(1), N->getOperand(2), | ||||
17905 | N->getOperand(3), DAG.getCondCode(ISD::SETGT)); | ||||
17906 | break; | ||||
17907 | case Intrinsic::aarch64_sve_fcmpeq: | ||||
17908 | case Intrinsic::aarch64_sve_cmpeq: | ||||
17909 | return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N), | ||||
17910 | N->getValueType(0), N->getOperand(1), N->getOperand(2), | ||||
17911 | N->getOperand(3), DAG.getCondCode(ISD::SETEQ)); | ||||
17912 | break; | ||||
17913 | case Intrinsic::aarch64_sve_fcmpne: | ||||
17914 | case Intrinsic::aarch64_sve_cmpne: | ||||
17915 | return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N), | ||||
17916 | N->getValueType(0), N->getOperand(1), N->getOperand(2), | ||||
17917 | N->getOperand(3), DAG.getCondCode(ISD::SETNE)); | ||||
17918 | break; | ||||
17919 | case Intrinsic::aarch64_sve_fcmpuo: | ||||
17920 | return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N), | ||||
17921 | N->getValueType(0), N->getOperand(1), N->getOperand(2), | ||||
17922 | N->getOperand(3), DAG.getCondCode(ISD::SETUO)); | ||||
17923 | break; | ||||
17924 | case Intrinsic::aarch64_sve_fadda: | ||||
17925 | return combineSVEReductionOrderedFP(N, AArch64ISD::FADDA_PRED, DAG); | ||||
17926 | case Intrinsic::aarch64_sve_faddv: | ||||
17927 | return combineSVEReductionFP(N, AArch64ISD::FADDV_PRED, DAG); | ||||
17928 | case Intrinsic::aarch64_sve_fmaxnmv: | ||||
17929 | return combineSVEReductionFP(N, AArch64ISD::FMAXNMV_PRED, DAG); | ||||
17930 | case Intrinsic::aarch64_sve_fmaxv: | ||||
17931 | return combineSVEReductionFP(N, AArch64ISD::FMAXV_PRED, DAG); | ||||
17932 | case Intrinsic::aarch64_sve_fminnmv: | ||||
17933 | return combineSVEReductionFP(N, AArch64ISD::FMINNMV_PRED, DAG); | ||||
17934 | case Intrinsic::aarch64_sve_fminv: | ||||
17935 | return combineSVEReductionFP(N, AArch64ISD::FMINV_PRED, DAG); | ||||
17936 | case Intrinsic::aarch64_sve_sel: | ||||
17937 | return DAG.getNode(ISD::VSELECT, SDLoc(N), N->getValueType(0), | ||||
17938 | N->getOperand(1), N->getOperand(2), N->getOperand(3)); | ||||
17939 | case Intrinsic::aarch64_sve_cmpeq_wide: | ||||
17940 | return tryConvertSVEWideCompare(N, ISD::SETEQ, DCI, DAG); | ||||
17941 | case Intrinsic::aarch64_sve_cmpne_wide: | ||||
17942 | return tryConvertSVEWideCompare(N, ISD::SETNE, DCI, DAG); | ||||
17943 | case Intrinsic::aarch64_sve_cmpge_wide: | ||||
17944 | return tryConvertSVEWideCompare(N, ISD::SETGE, DCI, DAG); | ||||
17945 | case Intrinsic::aarch64_sve_cmpgt_wide: | ||||
17946 | return tryConvertSVEWideCompare(N, ISD::SETGT, DCI, DAG); | ||||
17947 | case Intrinsic::aarch64_sve_cmplt_wide: | ||||
17948 | return tryConvertSVEWideCompare(N, ISD::SETLT, DCI, DAG); | ||||
17949 | case Intrinsic::aarch64_sve_cmple_wide: | ||||
17950 | return tryConvertSVEWideCompare(N, ISD::SETLE, DCI, DAG); | ||||
17951 | case Intrinsic::aarch64_sve_cmphs_wide: | ||||
17952 | return tryConvertSVEWideCompare(N, ISD::SETUGE, DCI, DAG); | ||||
17953 | case Intrinsic::aarch64_sve_cmphi_wide: | ||||
17954 | return tryConvertSVEWideCompare(N, ISD::SETUGT, DCI, DAG); | ||||
17955 | case Intrinsic::aarch64_sve_cmplo_wide: | ||||
17956 | return tryConvertSVEWideCompare(N, ISD::SETULT, DCI, DAG); | ||||
17957 | case Intrinsic::aarch64_sve_cmpls_wide: | ||||
17958 | return tryConvertSVEWideCompare(N, ISD::SETULE, DCI, DAG); | ||||
17959 | case Intrinsic::aarch64_sve_ptest_any: | ||||
17960 | return getPTest(DAG, N->getValueType(0), N->getOperand(1), N->getOperand(2), | ||||
17961 | AArch64CC::ANY_ACTIVE); | ||||
17962 | case Intrinsic::aarch64_sve_ptest_first: | ||||
17963 | return getPTest(DAG, N->getValueType(0), N->getOperand(1), N->getOperand(2), | ||||
17964 | AArch64CC::FIRST_ACTIVE); | ||||
17965 | case Intrinsic::aarch64_sve_ptest_last: | ||||
17966 | return getPTest(DAG, N->getValueType(0), N->getOperand(1), N->getOperand(2), | ||||
17967 | AArch64CC::LAST_ACTIVE); | ||||
17968 | } | ||||
17969 | return SDValue(); | ||||
17970 | } | ||||
17971 | |||||
17972 | static bool isCheapToExtend(const SDValue &N) { | ||||
17973 | unsigned OC = N->getOpcode(); | ||||
17974 | return OC == ISD::LOAD || OC == ISD::MLOAD || | ||||
17975 | ISD::isConstantSplatVectorAllZeros(N.getNode()); | ||||
17976 | } | ||||
17977 | |||||
17978 | static SDValue | ||||
17979 | performSignExtendSetCCCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, | ||||
17980 | SelectionDAG &DAG) { | ||||
17981 | // If we have (sext (setcc A B)) and A and B are cheap to extend, | ||||
17982 | // we can move the sext into the arguments and have the same result. For | ||||
17983 | // example, if A and B are both loads, we can make those extending loads and | ||||
17984 | // avoid an extra instruction. This pattern appears often in VLS code | ||||
17985 | // generation where the inputs to the setcc have a different size to the | ||||
17986 | // instruction that wants to use the result of the setcc. | ||||
17987 | assert(N->getOpcode() == ISD::SIGN_EXTEND && | ||||
17988 | N->getOperand(0)->getOpcode() == ISD::SETCC); | ||||
17989 | const SDValue SetCC = N->getOperand(0); | ||||
17990 | |||||
17991 | const SDValue CCOp0 = SetCC.getOperand(0); | ||||
17992 | const SDValue CCOp1 = SetCC.getOperand(1); | ||||
17993 | if (!CCOp0->getValueType(0).isInteger() || | ||||
17994 | !CCOp1->getValueType(0).isInteger()) | ||||
17995 | return SDValue(); | ||||
17996 | |||||
17997 | ISD::CondCode Code = | ||||
17998 | cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get(); | ||||
17999 | |||||
18000 | ISD::NodeType ExtType = | ||||
18001 | isSignedIntSetCC(Code) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; | ||||
18002 | |||||
18003 | if (isCheapToExtend(SetCC.getOperand(0)) && | ||||
18004 | isCheapToExtend(SetCC.getOperand(1))) { | ||||
18005 | const SDValue Ext1 = | ||||
18006 | DAG.getNode(ExtType, SDLoc(N), N->getValueType(0), CCOp0); | ||||
18007 | const SDValue Ext2 = | ||||
18008 | DAG.getNode(ExtType, SDLoc(N), N->getValueType(0), CCOp1); | ||||
18009 | |||||
18010 | return DAG.getSetCC( | ||||
18011 | SDLoc(SetCC), N->getValueType(0), Ext1, Ext2, | ||||
18012 | cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get()); | ||||
18013 | } | ||||
18014 | |||||
18015 | return SDValue(); | ||||
18016 | } | ||||
18017 | |||||
18018 | static SDValue performExtendCombine(SDNode *N, | ||||
18019 | TargetLowering::DAGCombinerInfo &DCI, | ||||
18020 | SelectionDAG &DAG) { | ||||
18021 | // If we see something like (zext (sabd (extract_high ...), (DUP ...))) then | ||||
18022 | // we can convert that DUP into another extract_high (of a bigger DUP), which | ||||
18023 | // helps the backend to decide that an sabdl2 would be useful, saving a real | ||||
18024 | // extract_high operation. | ||||
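      | // Schematically, with the DUP widened to the larger type: | ||||
      | //   (zext (abdu (extract_high x), (dup y))) | ||||
      | //     -> (zext (abdu (extract_high x), (extract_high (dup y)))) | ||||
      | // which can then be selected as uabdl2 (or sabdl2 for the signed case). | ||||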
18025 | if (!DCI.isBeforeLegalizeOps() && N->getOpcode() == ISD::ZERO_EXTEND && | ||||
18026 | (N->getOperand(0).getOpcode() == ISD::ABDU || | ||||
18027 | N->getOperand(0).getOpcode() == ISD::ABDS)) { | ||||
18028 | SDNode *ABDNode = N->getOperand(0).getNode(); | ||||
18029 | SDValue NewABD = | ||||
18030 | tryCombineLongOpWithDup(Intrinsic::not_intrinsic, ABDNode, DCI, DAG); | ||||
18031 | if (!NewABD.getNode()) | ||||
18032 | return SDValue(); | ||||
18033 | |||||
18034 | return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), N->getValueType(0), NewABD); | ||||
18035 | } | ||||
18036 | |||||
18037 | if (N->getValueType(0).isFixedLengthVector() && | ||||
18038 | N->getOpcode() == ISD::SIGN_EXTEND && | ||||
18039 | N->getOperand(0)->getOpcode() == ISD::SETCC) | ||||
18040 | return performSignExtendSetCCCombine(N, DCI, DAG); | ||||
18041 | |||||
18042 | return SDValue(); | ||||
18043 | } | ||||
18044 | |||||
18045 | static SDValue splitStoreSplat(SelectionDAG &DAG, StoreSDNode &St, | ||||
18046 | SDValue SplatVal, unsigned NumVecElts) { | ||||
18047 | assert(!St.isTruncatingStore() && "cannot split truncating vector store"); | ||||
18048 | Align OrigAlignment = St.getAlign(); | ||||
18049 | unsigned EltOffset = SplatVal.getValueType().getSizeInBits() / 8; | ||||
18050 | |||||
18051 | // Create scalar stores. This is at least as good as the code sequence for a | ||||
18052 | // split unaligned store which is a dup.s, ext.b, and two stores. | ||||
18053 | // Most of the time the three stores should be replaced by store pair | ||||
18054 | // instructions (stp). | ||||
18055 | SDLoc DL(&St); | ||||
18056 | SDValue BasePtr = St.getBasePtr(); | ||||
18057 | uint64_t BaseOffset = 0; | ||||
18058 | |||||
18059 | const MachinePointerInfo &PtrInfo = St.getPointerInfo(); | ||||
18060 | SDValue NewST1 = | ||||
18061 | DAG.getStore(St.getChain(), DL, SplatVal, BasePtr, PtrInfo, | ||||
18062 | OrigAlignment, St.getMemOperand()->getFlags()); | ||||
18063 | |||||
18064 | // As this is in ISel, we will not merge this add, which may degrade results. | ||||
18065 | if (BasePtr->getOpcode() == ISD::ADD && | ||||
18066 | isa<ConstantSDNode>(BasePtr->getOperand(1))) { | ||||
18067 | BaseOffset = cast<ConstantSDNode>(BasePtr->getOperand(1))->getSExtValue(); | ||||
18068 | BasePtr = BasePtr->getOperand(0); | ||||
18069 | } | ||||
18070 | |||||
18071 | unsigned Offset = EltOffset; | ||||
18072 | while (--NumVecElts) { | ||||
18073 | Align Alignment = commonAlignment(OrigAlignment, Offset); | ||||
18074 | SDValue OffsetPtr = | ||||
18075 | DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr, | ||||
18076 | DAG.getConstant(BaseOffset + Offset, DL, MVT::i64)); | ||||
18077 | NewST1 = DAG.getStore(NewST1.getValue(0), DL, SplatVal, OffsetPtr, | ||||
18078 | PtrInfo.getWithOffset(Offset), Alignment, | ||||
18079 | St.getMemOperand()->getFlags()); | ||||
18080 | Offset += EltOffset; | ||||
18081 | } | ||||
18082 | return NewST1; | ||||
18083 | } | ||||
18084 | |||||
18085 | // Returns an SVE type that ContentTy can be trivially sign or zero extended | ||||
18086 | // into. | ||||
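      | // For example, nxv4i16 maps to the full-width container nxv4i32, while | ||||
      | // nxv16i8 already fills a 128-bit block and maps to itself. | ||||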
18087 | static MVT getSVEContainerType(EVT ContentTy) { | ||||
18088 | assert(ContentTy.isSimple() && "No SVE containers for extended types"); | ||||
18089 | |||||
18090 | switch (ContentTy.getSimpleVT().SimpleTy) { | ||||
18091 | default: | ||||
18092 | llvm_unreachable("No known SVE container for this MVT type")::llvm::llvm_unreachable_internal("No known SVE container for this MVT type" , "llvm/lib/Target/AArch64/AArch64ISelLowering.cpp", 18092); | ||||
18093 | case MVT::nxv2i8: | ||||
18094 | case MVT::nxv2i16: | ||||
18095 | case MVT::nxv2i32: | ||||
18096 | case MVT::nxv2i64: | ||||
18097 | case MVT::nxv2f32: | ||||
18098 | case MVT::nxv2f64: | ||||
18099 | return MVT::nxv2i64; | ||||
18100 | case MVT::nxv4i8: | ||||
18101 | case MVT::nxv4i16: | ||||
18102 | case MVT::nxv4i32: | ||||
18103 | case MVT::nxv4f32: | ||||
18104 | return MVT::nxv4i32; | ||||
18105 | case MVT::nxv8i8: | ||||
18106 | case MVT::nxv8i16: | ||||
18107 | case MVT::nxv8f16: | ||||
18108 | case MVT::nxv8bf16: | ||||
18109 | return MVT::nxv8i16; | ||||
18110 | case MVT::nxv16i8: | ||||
18111 | return MVT::nxv16i8; | ||||
18112 | } | ||||
18113 | } | ||||
18114 | |||||
18115 | static SDValue performLD1Combine(SDNode *N, SelectionDAG &DAG, unsigned Opc) { | ||||
18116 | SDLoc DL(N); | ||||
18117 | EVT VT = N->getValueType(0); | ||||
18118 | |||||
18119 | if (VT.getSizeInBits().getKnownMinSize() > AArch64::SVEBitsPerBlock) | ||||
18120 | return SDValue(); | ||||
18121 | |||||
18122 | EVT ContainerVT = VT; | ||||
18123 | if (ContainerVT.isInteger()) | ||||
18124 | ContainerVT = getSVEContainerType(ContainerVT); | ||||
18125 | |||||
18126 | SDVTList VTs = DAG.getVTList(ContainerVT, MVT::Other); | ||||
18127 | SDValue Ops[] = { N->getOperand(0), // Chain | ||||
18128 | N->getOperand(2), // Pg | ||||
18129 | N->getOperand(3), // Base | ||||
18130 | DAG.getValueType(VT) }; | ||||
18131 | |||||
18132 | SDValue Load = DAG.getNode(Opc, DL, VTs, Ops); | ||||
18133 | SDValue LoadChain = SDValue(Load.getNode(), 1); | ||||
18134 | |||||
18135 | if (ContainerVT.isInteger() && (VT != ContainerVT)) | ||||
18136 | Load = DAG.getNode(ISD::TRUNCATE, DL, VT, Load.getValue(0)); | ||||
18137 | |||||
18138 | return DAG.getMergeValues({ Load, LoadChain }, DL); | ||||
18139 | } | ||||
18140 | |||||
18141 | static SDValue performLDNT1Combine(SDNode *N, SelectionDAG &DAG) { | ||||
18142 | SDLoc DL(N); | ||||
18143 | EVT VT = N->getValueType(0); | ||||
18144 | EVT PtrTy = N->getOperand(3).getValueType(); | ||||
18145 | |||||
18146 | EVT LoadVT = VT; | ||||
18147 | if (VT.isFloatingPoint()) | ||||
18148 | LoadVT = VT.changeTypeToInteger(); | ||||
18149 | |||||
18150 | auto *MINode = cast<MemIntrinsicSDNode>(N); | ||||
18151 | SDValue PassThru = DAG.getConstant(0, DL, LoadVT); | ||||
18152 | SDValue L = DAG.getMaskedLoad(LoadVT, DL, MINode->getChain(), | ||||
18153 | MINode->getOperand(3), DAG.getUNDEF(PtrTy), | ||||
18154 | MINode->getOperand(2), PassThru, | ||||
18155 | MINode->getMemoryVT(), MINode->getMemOperand(), | ||||
18156 | ISD::UNINDEXED, ISD::NON_EXTLOAD, false); | ||||
18157 | |||||
18158 | if (VT.isFloatingPoint()) { | ||||
18159 | SDValue Ops[] = { DAG.getNode(ISD::BITCAST, DL, VT, L), L.getValue(1) }; | ||||
18160 | return DAG.getMergeValues(Ops, DL); | ||||
18161 | } | ||||
18162 | |||||
18163 | return L; | ||||
18164 | } | ||||
18165 | |||||
18166 | template <unsigned Opcode> | ||||
18167 | static SDValue performLD1ReplicateCombine(SDNode *N, SelectionDAG &DAG) { | ||||
18168 | static_assert(Opcode == AArch64ISD::LD1RQ_MERGE_ZERO || | ||||
18169 | Opcode == AArch64ISD::LD1RO_MERGE_ZERO, | ||||
18170 | "Unsupported opcode."); | ||||
18171 | SDLoc DL(N); | ||||
18172 | EVT VT = N->getValueType(0); | ||||
18173 | |||||
18174 | EVT LoadVT = VT; | ||||
18175 | if (VT.isFloatingPoint()) | ||||
18176 | LoadVT = VT.changeTypeToInteger(); | ||||
18177 | |||||
18178 | SDValue Ops[] = {N->getOperand(0), N->getOperand(2), N->getOperand(3)}; | ||||
18179 | SDValue Load = DAG.getNode(Opcode, DL, {LoadVT, MVT::Other}, Ops); | ||||
18180 | SDValue LoadChain = SDValue(Load.getNode(), 1); | ||||
18181 | |||||
18182 | if (VT.isFloatingPoint()) | ||||
18183 | Load = DAG.getNode(ISD::BITCAST, DL, VT, Load.getValue(0)); | ||||
18184 | |||||
18185 | return DAG.getMergeValues({Load, LoadChain}, DL); | ||||
18186 | } | ||||
18187 | |||||
18188 | static SDValue performST1Combine(SDNode *N, SelectionDAG &DAG) { | ||||
18189 | SDLoc DL(N); | ||||
18190 | SDValue Data = N->getOperand(2); | ||||
18191 | EVT DataVT = Data.getValueType(); | ||||
18192 | EVT HwSrcVt = getSVEContainerType(DataVT); | ||||
18193 | SDValue InputVT = DAG.getValueType(DataVT); | ||||
18194 | |||||
18195 | if (DataVT.isFloatingPoint()) | ||||
18196 | InputVT = DAG.getValueType(HwSrcVt); | ||||
18197 | |||||
18198 | SDValue SrcNew; | ||||
18199 | if (Data.getValueType().isFloatingPoint()) | ||||
18200 | SrcNew = DAG.getNode(ISD::BITCAST, DL, HwSrcVt, Data); | ||||
18201 | else | ||||
18202 | SrcNew = DAG.getNode(ISD::ANY_EXTEND, DL, HwSrcVt, Data); | ||||
18203 | |||||
18204 | SDValue Ops[] = { N->getOperand(0), // Chain | ||||
18205 | SrcNew, | ||||
18206 | N->getOperand(4), // Base | ||||
18207 | N->getOperand(3), // Pg | ||||
18208 | InputVT | ||||
18209 | }; | ||||
18210 | |||||
18211 | return DAG.getNode(AArch64ISD::ST1_PRED, DL, N->getValueType(0), Ops); | ||||
18212 | } | ||||
18213 | |||||
18214 | static SDValue performSTNT1Combine(SDNode *N, SelectionDAG &DAG) { | ||||
18215 | SDLoc DL(N); | ||||
18216 | |||||
18217 | SDValue Data = N->getOperand(2); | ||||
18218 | EVT DataVT = Data.getValueType(); | ||||
18219 | EVT PtrTy = N->getOperand(4).getValueType(); | ||||
18220 | |||||
18221 | if (DataVT.isFloatingPoint()) | ||||
18222 | Data = DAG.getNode(ISD::BITCAST, DL, DataVT.changeTypeToInteger(), Data); | ||||
18223 | |||||
18224 | auto *MINode = cast<MemIntrinsicSDNode>(N); | ||||
18225 | return DAG.getMaskedStore(MINode->getChain(), DL, Data, MINode->getOperand(4), | ||||
18226 | DAG.getUNDEF(PtrTy), MINode->getOperand(3), | ||||
18227 | MINode->getMemoryVT(), MINode->getMemOperand(), | ||||
18228 | ISD::UNINDEXED, false, false); | ||||
18229 | } | ||||
18230 | |||||
18231 | /// Replace a store of a splat of zeros to a vector with scalar stores of | ||||
18232 | /// WZR/XZR. The load/store optimizer pass will merge them into store-pair | ||||
18233 | /// instructions. This should be better than a movi to create the vector zero | ||||
18234 | /// followed by a vector store if the zero constant is not re-used, since one | ||||
18235 | /// instruction and one register live range will be removed. | ||||
18236 | /// | ||||
18237 | /// For example, the final generated code should be: | ||||
18238 | /// | ||||
18239 | /// stp xzr, xzr, [x0] | ||||
18240 | /// | ||||
18241 | /// instead of: | ||||
18242 | /// | ||||
18243 | /// movi v0.2d, #0 | ||||
18244 | /// str q0, [x0] | ||||
18245 | /// | ||||
18246 | static SDValue replaceZeroVectorStore(SelectionDAG &DAG, StoreSDNode &St) { | ||||
18247 | SDValue StVal = St.getValue(); | ||||
18248 | EVT VT = StVal.getValueType(); | ||||
18249 | |||||
18250 | // Avoid scalarizing zero splat stores for scalable vectors. | ||||
18251 | if (VT.isScalableVector()) | ||||
18252 | return SDValue(); | ||||
18253 | |||||
18254 | // It is beneficial to scalarize a zero splat store for 2 or 3 i64 elements or | ||||
18255 | // 2, 3 or 4 i32 elements. | ||||
18256 | int NumVecElts = VT.getVectorNumElements(); | ||||
18257 | if (!(((NumVecElts == 2 || NumVecElts == 3) && | ||||
18258 | VT.getVectorElementType().getSizeInBits() == 64) || | ||||
18259 | ((NumVecElts == 2 || NumVecElts == 3 || NumVecElts == 4) && | ||||
18260 | VT.getVectorElementType().getSizeInBits() == 32))) | ||||
18261 | return SDValue(); | ||||
18262 | |||||
18263 | if (StVal.getOpcode() != ISD::BUILD_VECTOR) | ||||
18264 | return SDValue(); | ||||
18265 | |||||
18266 | // If the zero constant has more than one use then the vector store could be | ||||
18267 | // better since the constant mov will be amortized and stp q instructions | ||||
18268 | // should be able to be formed. | ||||
18269 | if (!StVal.hasOneUse()) | ||||
18270 | return SDValue(); | ||||
18271 | |||||
18272 | // If the store is truncating then it's going down to i16 or smaller, which | ||||
18273 | // means it can be implemented in a single store anyway. | ||||
18274 | if (St.isTruncatingStore()) | ||||
18275 | return SDValue(); | ||||
18276 | |||||
18277 | // If the immediate offset of the address operand is too large for the stp | ||||
18278 | // instruction, then bail out. | ||||
18279 | if (DAG.isBaseWithConstantOffset(St.getBasePtr())) { | ||||
18280 | int64_t Offset = St.getBasePtr()->getConstantOperandVal(1); | ||||
18281 | if (Offset < -512 || Offset > 504) | ||||
18282 | return SDValue(); | ||||
18283 | } | ||||
18284 | |||||
18285 | for (int I = 0; I < NumVecElts; ++I) { | ||||
18286 | SDValue EltVal = StVal.getOperand(I); | ||||
18287 | if (!isNullConstant(EltVal) && !isNullFPConstant(EltVal)) | ||||
18288 | return SDValue(); | ||||
18289 | } | ||||
18290 | |||||
18291 | // Use a CopyFromReg WZR/XZR here to prevent | ||||
18292 | // DAGCombiner::MergeConsecutiveStores from undoing this transformation. | ||||
18293 | SDLoc DL(&St); | ||||
18294 | unsigned ZeroReg; | ||||
18295 | EVT ZeroVT; | ||||
18296 | if (VT.getVectorElementType().getSizeInBits() == 32) { | ||||
18297 | ZeroReg = AArch64::WZR; | ||||
18298 | ZeroVT = MVT::i32; | ||||
18299 | } else { | ||||
18300 | ZeroReg = AArch64::XZR; | ||||
18301 | ZeroVT = MVT::i64; | ||||
18302 | } | ||||
18303 | SDValue SplatVal = | ||||
18304 | DAG.getCopyFromReg(DAG.getEntryNode(), DL, ZeroReg, ZeroVT); | ||||
18305 | return splitStoreSplat(DAG, St, SplatVal, NumVecElts); | ||||
18306 | } | ||||
18307 | |||||
18308 | /// Replace a store of a splat of a scalar value to a vector with scalar | ||||
18309 | /// stores of that value. The load/store optimizer pass will merge them into | ||||
18310 | /// store-pair stores. This has better performance than a splat of the scalar | ||||
18311 | /// followed by a split vector store. Even if the stores are not merged, it is | ||||
18312 | /// four stores vs. a dup followed by an ext.b and two stores. | ||||
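      | /// For example, a v4i32 splat of w8 stored at [x0] can end up as: | ||||
      | /// | ||||
      | ///   stp w8, w8, [x0] | ||||
      | ///   stp w8, w8, [x0, #8] | ||||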
18313 | static SDValue replaceSplatVectorStore(SelectionDAG &DAG, StoreSDNode &St) { | ||||
18314 | SDValue StVal = St.getValue(); | ||||
18315 | EVT VT = StVal.getValueType(); | ||||
18316 | |||||
18317 | // Don't replace floating point stores, they possibly won't be transformed to | ||||
18318 | // stp because of the store pair suppress pass. | ||||
18319 | if (VT.isFloatingPoint()) | ||||
18320 | return SDValue(); | ||||
18321 | |||||
18322 | // We can express a splat as store pair(s) for 2 or 4 elements. | ||||
18323 | unsigned NumVecElts = VT.getVectorNumElements(); | ||||
18324 | if (NumVecElts != 4 && NumVecElts != 2) | ||||
18325 | return SDValue(); | ||||
18326 | |||||
18327 | // If the store is truncating then it's going down to i16 or smaller, which | ||||
18328 | // means it can be implemented in a single store anyway. | ||||
18329 | if (St.isTruncatingStore()) | ||||
18330 | return SDValue(); | ||||
18331 | |||||
18332 | // Check that this is a splat. | ||||
18333 | // Make sure that each of the relevant vector element locations are inserted | ||||
18334 | // to, i.e. 0 and 1 for v2i64 and 0, 1, 2, 3 for v4i32. | ||||
18335 | std::bitset<4> IndexNotInserted((1 << NumVecElts) - 1); | ||||
18336 | SDValue SplatVal; | ||||
18337 | for (unsigned I = 0; I < NumVecElts; ++I) { | ||||
18338 | // Check for insert vector elements. | ||||
18339 | if (StVal.getOpcode() != ISD::INSERT_VECTOR_ELT) | ||||
18340 | return SDValue(); | ||||
18341 | |||||
18342 | // Check that same value is inserted at each vector element. | ||||
18343 | if (I == 0) | ||||
18344 | SplatVal = StVal.getOperand(1); | ||||
18345 | else if (StVal.getOperand(1) != SplatVal) | ||||
18346 | return SDValue(); | ||||
18347 | |||||
18348 | // Check insert element index. | ||||
18349 | ConstantSDNode *CIndex = dyn_cast<ConstantSDNode>(StVal.getOperand(2)); | ||||
18350 | if (!CIndex) | ||||
18351 | return SDValue(); | ||||
18352 | uint64_t IndexVal = CIndex->getZExtValue(); | ||||
18353 | if (IndexVal >= NumVecElts) | ||||
18354 | return SDValue(); | ||||
18355 | IndexNotInserted.reset(IndexVal); | ||||
18356 | |||||
18357 | StVal = StVal.getOperand(0); | ||||
18358 | } | ||||
18359 | // Check that all vector element locations were inserted to. | ||||
18360 | if (IndexNotInserted.any()) | ||||
18361 | return SDValue(); | ||||
18362 | |||||
18363 | return splitStoreSplat(DAG, St, SplatVal, NumVecElts); | ||||
18364 | } | ||||
18365 | |||||
18366 | static SDValue splitStores(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, | ||||
18367 | SelectionDAG &DAG, | ||||
18368 | const AArch64Subtarget *Subtarget) { | ||||
18369 | |||||
18370 | StoreSDNode *S = cast<StoreSDNode>(N); | ||||
18371 | if (S->isVolatile() || S->isIndexed()) | ||||
18372 | return SDValue(); | ||||
18373 | |||||
18374 | SDValue StVal = S->getValue(); | ||||
18375 | EVT VT = StVal.getValueType(); | ||||
18376 | |||||
18377 | if (!VT.isFixedLengthVector()) | ||||
18378 | return SDValue(); | ||||
18379 | |||||
18380 | // If we get a splat of zeros, convert this vector store to a store of | ||||
18381 | // scalars. They will be merged into store pairs of xzr, thereby removing one | ||||
18382 | // instruction and one register. | ||||
18383 | if (SDValue ReplacedZeroSplat = replaceZeroVectorStore(DAG, *S)) | ||||
18384 | return ReplacedZeroSplat; | ||||
18385 | |||||
18386 | // FIXME: The logic for deciding if an unaligned store should be split should | ||||
18387 | // be included in TLI.allowsMisalignedMemoryAccesses(), and there should be | ||||
18388 | // a call to that function here. | ||||
18389 | |||||
18390 | if (!Subtarget->isMisaligned128StoreSlow()) | ||||
18391 | return SDValue(); | ||||
18392 | |||||
18393 | // Don't split at -Oz. | ||||
18394 | if (DAG.getMachineFunction().getFunction().hasMinSize()) | ||||
18395 | return SDValue(); | ||||
18396 | |||||
18397 | // Don't split v2i64 vectors. Memcpy lowering produces those and splitting | ||||
18398 | // those up regresses performance on micro-benchmarks and olden/bh. | ||||
18399 | if (VT.getVectorNumElements() < 2 || VT == MVT::v2i64) | ||||
18400 | return SDValue(); | ||||
18401 | |||||
18402 | // Split unaligned 16B stores. They are terrible for performance. | ||||
18403 | // Don't split stores with alignment of 1 or 2. Code that uses clang vector | ||||
18404 | // extensions can use this to mark that it does not want splitting to happen | ||||
18405 | // (by underspecifying alignment to be 1 or 2). Furthermore, the chance of | ||||
18406 | // eliminating alignment hazards is only 1 in 8 for alignment of 2. | ||||
18407 | if (VT.getSizeInBits() != 128 || S->getAlign() >= Align(16) || | ||||
18408 | S->getAlign() <= Align(2)) | ||||
18409 | return SDValue(); | ||||
18410 | |||||
18411 | // If we get a splat of a scalar, convert this vector store to a store of | ||||
18412 | // scalars. They will be merged into store pairs thereby removing two | ||||
18413 | // instructions. | ||||
18414 | if (SDValue ReplacedSplat = replaceSplatVectorStore(DAG, *S)) | ||||
18415 | return ReplacedSplat; | ||||
18416 | |||||
18417 | SDLoc DL(S); | ||||
18418 | |||||
18419 | // Split VT into two. | ||||
18420 | EVT HalfVT = VT.getHalfNumVectorElementsVT(*DAG.getContext()); | ||||
18421 | unsigned NumElts = HalfVT.getVectorNumElements(); | ||||
18422 | SDValue SubVector0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, StVal, | ||||
18423 | DAG.getConstant(0, DL, MVT::i64)); | ||||
18424 | SDValue SubVector1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, StVal, | ||||
18425 | DAG.getConstant(NumElts, DL, MVT::i64)); | ||||
18426 | SDValue BasePtr = S->getBasePtr(); | ||||
18427 | SDValue NewST1 = | ||||
18428 | DAG.getStore(S->getChain(), DL, SubVector0, BasePtr, S->getPointerInfo(), | ||||
18429 | S->getAlign(), S->getMemOperand()->getFlags()); | ||||
18430 | SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr, | ||||
18431 | DAG.getConstant(8, DL, MVT::i64)); | ||||
18432 | return DAG.getStore(NewST1.getValue(0), DL, SubVector1, OffsetPtr, | ||||
18433 | S->getPointerInfo(), S->getAlign(), | ||||
18434 | S->getMemOperand()->getFlags()); | ||||
18435 | } | ||||
18436 | |||||
18437 | static SDValue performSpliceCombine(SDNode *N, SelectionDAG &DAG) { | ||||
18438 | assert(N->getOpcode() == AArch64ISD::SPLICE && "Unexpected Opcode!"); | ||||
18439 | |||||
18440 | // splice(pg, op1, undef) -> op1 | ||||
18441 | if (N->getOperand(2).isUndef()) | ||||
18442 | return N->getOperand(1); | ||||
18443 | |||||
18444 | return SDValue(); | ||||
18445 | } | ||||
18446 | |||||
18447 | static SDValue performUnpackCombine(SDNode *N, SelectionDAG &DAG, | ||||
18448 | const AArch64Subtarget *Subtarget) { | ||||
18449 | assert((N->getOpcode() == AArch64ISD::UUNPKHI || | ||||
18450 | N->getOpcode() == AArch64ISD::UUNPKLO) && | ||||
18451 | "Unexpected Opcode!"); | ||||
18452 | |||||
18453 | // uunpklo/hi undef -> undef | ||||
18454 | if (N->getOperand(0).isUndef()) | ||||
18455 | return DAG.getUNDEF(N->getValueType(0)); | ||||
18456 | |||||
18457 | // If this is a masked load followed by an UUNPKLO, fold this into a masked | ||||
18458 | // extending load. We can do this even if this is already a masked | ||||
18459 | // {z,}extload. | ||||
18460 | if (N->getOperand(0).getOpcode() == ISD::MLOAD && | ||||
18461 | N->getOpcode() == AArch64ISD::UUNPKLO) { | ||||
18462 | MaskedLoadSDNode *MLD = cast<MaskedLoadSDNode>(N->getOperand(0)); | ||||
18463 | SDValue Mask = MLD->getMask(); | ||||
18464 | SDLoc DL(N); | ||||
18465 | |||||
18466 | if (MLD->isUnindexed() && MLD->getExtensionType() != ISD::SEXTLOAD && | ||||
18467 | SDValue(MLD, 0).hasOneUse() && Mask->getOpcode() == AArch64ISD::PTRUE && | ||||
18468 | (MLD->getPassThru()->isUndef() || | ||||
18469 | isZerosVector(MLD->getPassThru().getNode()))) { | ||||
18470 | unsigned MinSVESize = Subtarget->getMinSVEVectorSizeInBits(); | ||||
18471 | unsigned PgPattern = Mask->getConstantOperandVal(0); | ||||
18472 | EVT VT = N->getValueType(0); | ||||
18473 | |||||
18474 | // Ensure we can double the size of the predicate pattern | ||||
18475 | unsigned NumElts = getNumElementsFromSVEPredPattern(PgPattern); | ||||
18476 | if (NumElts && | ||||
18477 | NumElts * VT.getVectorElementType().getSizeInBits() <= MinSVESize) { | ||||
18478 | Mask = | ||||
18479 | getPTrue(DAG, DL, VT.changeVectorElementType(MVT::i1), PgPattern); | ||||
18480 | SDValue PassThru = DAG.getConstant(0, DL, VT); | ||||
18481 | SDValue NewLoad = DAG.getMaskedLoad( | ||||
18482 | VT, DL, MLD->getChain(), MLD->getBasePtr(), MLD->getOffset(), Mask, | ||||
18483 | PassThru, MLD->getMemoryVT(), MLD->getMemOperand(), | ||||
18484 | MLD->getAddressingMode(), ISD::ZEXTLOAD); | ||||
18485 | |||||
18486 | DAG.ReplaceAllUsesOfValueWith(SDValue(MLD, 1), NewLoad.getValue(1)); | ||||
18487 | |||||
18488 | return NewLoad; | ||||
18489 | } | ||||
18490 | } | ||||
18491 | } | ||||
18492 | |||||
18493 | return SDValue(); | ||||
18494 | } | ||||
18495 | |||||
18496 | static SDValue performUzpCombine(SDNode *N, SelectionDAG &DAG) { | ||||
18497 | SDLoc DL(N); | ||||
18498 | SDValue Op0 = N->getOperand(0); | ||||
18499 | SDValue Op1 = N->getOperand(1); | ||||
18500 | EVT ResVT = N->getValueType(0); | ||||
18501 | |||||
18502 | // uzp1(x, undef) -> concat(truncate(x), undef) | ||||
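      | // e.g. for a v8i16 result: bitcast x to v4i32, truncate to v4i16, then | ||||
      | // concatenate with an undef v4i16 half. | ||||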
18503 | if (Op1.getOpcode() == ISD::UNDEF) { | ||||
18504 | EVT BCVT = MVT::Other, HalfVT = MVT::Other; | ||||
18505 | switch (ResVT.getSimpleVT().SimpleTy) { | ||||
18506 | default: | ||||
18507 | break; | ||||
18508 | case MVT::v16i8: | ||||
18509 | BCVT = MVT::v8i16; | ||||
18510 | HalfVT = MVT::v8i8; | ||||
18511 | break; | ||||
18512 | case MVT::v8i16: | ||||
18513 | BCVT = MVT::v4i32; | ||||
18514 | HalfVT = MVT::v4i16; | ||||
18515 | break; | ||||
18516 | case MVT::v4i32: | ||||
18517 | BCVT = MVT::v2i64; | ||||
18518 | HalfVT = MVT::v2i32; | ||||
18519 | break; | ||||
18520 | } | ||||
18521 | if (BCVT != MVT::Other) { | ||||
18522 | SDValue BC = DAG.getBitcast(BCVT, Op0); | ||||
18523 | SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, HalfVT, BC); | ||||
18524 | return DAG.getNode(ISD::CONCAT_VECTORS, DL, ResVT, Trunc, | ||||
18525 | DAG.getUNDEF(HalfVT)); | ||||
18526 | } | ||||
18527 | } | ||||
18528 | |||||
18529 | // uzp1(unpklo(uzp1(x, y)), z) => uzp1(x, z) | ||||
18530 | if (Op0.getOpcode() == AArch64ISD::UUNPKLO) { | ||||
18531 | if (Op0.getOperand(0).getOpcode() == AArch64ISD::UZP1) { | ||||
18532 | SDValue X = Op0.getOperand(0).getOperand(0); | ||||
18533 | return DAG.getNode(AArch64ISD::UZP1, DL, ResVT, X, Op1); | ||||
18534 | } | ||||
18535 | } | ||||
18536 | |||||
18537 | // uzp1(x, unpkhi(uzp1(y, z))) => uzp1(x, z) | ||||
18538 | if (Op1.getOpcode() == AArch64ISD::UUNPKHI) { | ||||
18539 | if (Op1.getOperand(0).getOpcode() == AArch64ISD::UZP1) { | ||||
18540 | SDValue Z = Op1.getOperand(0).getOperand(1); | ||||
18541 | return DAG.getNode(AArch64ISD::UZP1, DL, ResVT, Op0, Z); | ||||
18542 | } | ||||
18543 | } | ||||
18544 | |||||
18545 | // uzp1(xtn x, xtn y) -> xtn(uzp1 (x, y)) | ||||
18546 | // Only implemented on little-endian subtargets. | ||||
18547 | bool IsLittleEndian = DAG.getDataLayout().isLittleEndian(); | ||||
18548 | |||||
18550 | if (!IsLittleEndian) | ||||
18551 | return SDValue(); | ||||
18552 | |||||
18553 | if (ResVT != MVT::v2i32 && ResVT != MVT::v4i16 && ResVT != MVT::v8i8) | ||||
18554 | return SDValue(); | ||||
18555 | |||||
18556 | auto getSourceOp = [](SDValue Operand) -> SDValue { | ||||
18557 | const unsigned Opcode = Operand.getOpcode(); | ||||
18558 | if (Opcode == ISD::TRUNCATE) | ||||
18559 | return Operand->getOperand(0); | ||||
18560 | if (Opcode == ISD::BITCAST && | ||||
18561 | Operand->getOperand(0).getOpcode() == ISD::TRUNCATE) | ||||
18562 | return Operand->getOperand(0)->getOperand(0); | ||||
18563 | return SDValue(); | ||||
18564 | }; | ||||
18565 | |||||
18566 | SDValue SourceOp0 = getSourceOp(Op0); | ||||
18567 | SDValue SourceOp1 = getSourceOp(Op1); | ||||
18568 | |||||
18569 | if (!SourceOp0 || !SourceOp1) | ||||
18570 | return SDValue(); | ||||
18571 | |||||
18572 | if (SourceOp0.getValueType() != SourceOp1.getValueType() || | ||||
18573 | !SourceOp0.getValueType().isSimple()) | ||||
18574 | return SDValue(); | ||||
18575 | |||||
18576 | EVT ResultTy; | ||||
18577 | |||||
18578 | switch (SourceOp0.getSimpleValueType().SimpleTy) { | ||||
18579 | case MVT::v2i64: | ||||
18580 | ResultTy = MVT::v4i32; | ||||
18581 | break; | ||||
18582 | case MVT::v4i32: | ||||
18583 | ResultTy = MVT::v8i16; | ||||
18584 | break; | ||||
18585 | case MVT::v8i16: | ||||
18586 | ResultTy = MVT::v16i8; | ||||
18587 | break; | ||||
18588 | default: | ||||
18589 | return SDValue(); | ||||
18590 | } | ||||
18591 | |||||
18592 | SDValue UzpOp0 = DAG.getNode(ISD::BITCAST, DL, ResultTy, SourceOp0); | ||||
18593 | SDValue UzpOp1 = DAG.getNode(ISD::BITCAST, DL, ResultTy, SourceOp1); | ||||
18594 | SDValue UzpResult = | ||||
18595 | DAG.getNode(AArch64ISD::UZP1, DL, UzpOp0.getValueType(), UzpOp0, UzpOp1); | ||||
18596 | |||||
18597 | EVT BitcastResultTy; | ||||
18598 | |||||
18599 | switch (ResVT.getSimpleVT().SimpleTy) { | ||||
18600 | case MVT::v2i32: | ||||
18601 | BitcastResultTy = MVT::v2i64; | ||||
18602 | break; | ||||
18603 | case MVT::v4i16: | ||||
18604 | BitcastResultTy = MVT::v4i32; | ||||
18605 | break; | ||||
18606 | case MVT::v8i8: | ||||
18607 | BitcastResultTy = MVT::v8i16; | ||||
18608 | break; | ||||
18609 | default: | ||||
18610 | llvm_unreachable("Should be one of {v2i32, v4i16, v8i8}")::llvm::llvm_unreachable_internal("Should be one of {v2i32, v4i16, v8i8}" , "llvm/lib/Target/AArch64/AArch64ISelLowering.cpp", 18610); | ||||
18611 | } | ||||
18612 | |||||
18613 | return DAG.getNode(ISD::TRUNCATE, DL, ResVT, | ||||
18614 | DAG.getNode(ISD::BITCAST, DL, BitcastResultTy, UzpResult)); | ||||
18615 | } | ||||
18616 | |||||
18617 | static SDValue performGLD1Combine(SDNode *N, SelectionDAG &DAG) { | ||||
18618 | unsigned Opc = N->getOpcode(); | ||||
18619 | |||||
18620 | assert(((Opc >= AArch64ISD::GLD1_MERGE_ZERO && // unsigned gather loads | ||||
18621 | Opc <= AArch64ISD::GLD1_IMM_MERGE_ZERO) || | ||||
18622 | (Opc >= AArch64ISD::GLD1S_MERGE_ZERO && // signed gather loads | ||||
18623 | Opc <= AArch64ISD::GLD1S_IMM_MERGE_ZERO)) && | ||||
18624 | "Invalid opcode."); | ||||
18625 | |||||
18626 | const bool Scaled = Opc == AArch64ISD::GLD1_SCALED_MERGE_ZERO || | ||||
18627 | Opc == AArch64ISD::GLD1S_SCALED_MERGE_ZERO; | ||||
18628 | const bool Signed = Opc == AArch64ISD::GLD1S_MERGE_ZERO || | ||||
18629 | Opc == AArch64ISD::GLD1S_SCALED_MERGE_ZERO; | ||||
18630 | const bool Extended = Opc == AArch64ISD::GLD1_SXTW_MERGE_ZERO || | ||||
18631 | Opc == AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO || | ||||
18632 | Opc == AArch64ISD::GLD1_UXTW_MERGE_ZERO || | ||||
18633 | Opc == AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO; | ||||
18634 | |||||
18635 | SDLoc DL(N); | ||||
18636 | SDValue Chain = N->getOperand(0); | ||||
18637 | SDValue Pg = N->getOperand(1); | ||||
18638 | SDValue Base = N->getOperand(2); | ||||
18639 | SDValue Offset = N->getOperand(3); | ||||
18640 | SDValue Ty = N->getOperand(4); | ||||
18641 | |||||
18642 | EVT ResVT = N->getValueType(0); | ||||
18643 | |||||
18644 | const auto OffsetOpc = Offset.getOpcode(); | ||||
18645 | const bool OffsetIsZExt = | ||||
18646 | OffsetOpc == AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU; | ||||
18647 | const bool OffsetIsSExt = | ||||
18648 | OffsetOpc == AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU; | ||||
18649 | |||||
18650 | // Fold sign/zero extensions of vector offsets into GLD1 nodes where possible. | ||||
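      | // Schematically: (gld1 pg, base, (sxtw_inreg pg, offsets)) is rewritten | ||||
      | // as the equivalent (gld1_sxtw pg, base, offsets) form. | ||||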
18651 | if (!Extended && (OffsetIsSExt || OffsetIsZExt)) { | ||||
18652 | SDValue ExtPg = Offset.getOperand(0); | ||||
18653 | VTSDNode *ExtFrom = cast<VTSDNode>(Offset.getOperand(2).getNode()); | ||||
18654 | EVT ExtFromEVT = ExtFrom->getVT().getVectorElementType(); | ||||
18655 | |||||
18656 | // If the predicate for the sign- or zero-extended offset is the | ||||
18657 | // same as the predicate used for this load and the sign-/zero-extension | ||||
18658 | // was from a 32-bit value... | ||||
18659 | if (ExtPg == Pg && ExtFromEVT == MVT::i32) { | ||||
18660 | SDValue UnextendedOffset = Offset.getOperand(1); | ||||
18661 | |||||
18662 | unsigned NewOpc = getGatherVecOpcode(Scaled, OffsetIsSExt, true); | ||||
18663 | if (Signed) | ||||
18664 | NewOpc = getSignExtendedGatherOpcode(NewOpc); | ||||
18665 | |||||
18666 | return DAG.getNode(NewOpc, DL, {ResVT, MVT::Other}, | ||||
18667 | {Chain, Pg, Base, UnextendedOffset, Ty}); | ||||
18668 | } | ||||
18669 | } | ||||
18670 | |||||
18671 | return SDValue(); | ||||
18672 | } | ||||
18673 | |||||
18674 | /// Optimize a vector shift instruction and its operand if shifted out | ||||
18675 | /// bits are not used. | ||||
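      | /// For example, in (VLSHR (or x, 0xff), 8) on v8i16 the low 8 bits are | ||||
      | /// shifted out, so SimplifyDemandedBits can remove the OR entirely. | ||||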
18676 | static SDValue performVectorShiftCombine(SDNode *N, | ||||
18677 | const AArch64TargetLowering &TLI, | ||||
18678 | TargetLowering::DAGCombinerInfo &DCI) { | ||||
18679 | assert(N->getOpcode() == AArch64ISD::VASHR || | ||||
18680 | N->getOpcode() == AArch64ISD::VLSHR); | ||||
18681 | |||||
18682 | SDValue Op = N->getOperand(0); | ||||
18683 | unsigned OpScalarSize = Op.getScalarValueSizeInBits(); | ||||
18684 | |||||
18685 | unsigned ShiftImm = N->getConstantOperandVal(1); | ||||
18686 | assert(OpScalarSize > ShiftImm && "Invalid shift imm"); | ||||
18687 | |||||
18688 | APInt ShiftedOutBits = APInt::getLowBitsSet(OpScalarSize, ShiftImm); | ||||
18689 | APInt DemandedMask = ~ShiftedOutBits; | ||||
18690 | |||||
18691 | if (TLI.SimplifyDemandedBits(Op, DemandedMask, DCI)) | ||||
18692 | return SDValue(N, 0); | ||||
18693 | |||||
18694 | return SDValue(); | ||||
18695 | } | ||||
18696 | |||||
18697 | static SDValue performSunpkloCombine(SDNode *N, SelectionDAG &DAG) { | ||||
18698 | // sunpklo(sext(pred)) -> sext(extract_low_half(pred)) | ||||
18699 | // This transform works in partnership with performSetCCPunpkCombine to | ||||
18700 | // remove unnecessary transfer of predicates into standard registers and back | ||||
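| // e.g. (illustrative types) for P : nxv8i1, | ||||
| //   sunpklo(sext P to nxv8i16) : nxv4i32 | ||||
| //     --> sext(extract_subvector(P, 0) : nxv4i1) to nxv4i32 | ||||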
18701 | if (N->getOperand(0).getOpcode() == ISD::SIGN_EXTEND && | ||||
18702 | N->getOperand(0)->getOperand(0)->getValueType(0).getScalarType() == | ||||
18703 | MVT::i1) { | ||||
18704 | SDValue CC = N->getOperand(0)->getOperand(0); | ||||
18705 | auto VT = CC->getValueType(0).getHalfNumVectorElementsVT(*DAG.getContext()); | ||||
18706 | SDValue Unpk = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), VT, CC, | ||||
18707 | DAG.getVectorIdxConstant(0, SDLoc(N))); | ||||
18708 | return DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), N->getValueType(0), Unpk); | ||||
18709 | } | ||||
18710 | |||||
18711 | return SDValue(); | ||||
18712 | } | ||||
18713 | |||||
18714 | /// Target-specific DAG combine function for post-increment LD1 (lane) and | ||||
18715 | /// post-increment LD1R. | ||||
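| /// For example (illustrative), a DUP of a loaded i32 whose address is also | ||||
| /// incremented by 4 can be selected as "ld1r { v0.4s }, [x0], #4". | ||||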
18716 | static SDValue performPostLD1Combine(SDNode *N, | ||||
18717 | TargetLowering::DAGCombinerInfo &DCI, | ||||
18718 | bool IsLaneOp) { | ||||
18719 | if (DCI.isBeforeLegalizeOps()) | ||||
18720 | return SDValue(); | ||||
18721 | |||||
18722 | SelectionDAG &DAG = DCI.DAG; | ||||
18723 | EVT VT = N->getValueType(0); | ||||
18724 | |||||
18725 | if (!VT.is128BitVector() && !VT.is64BitVector()) | ||||
18726 | return SDValue(); | ||||
18727 | |||||
18728 | unsigned LoadIdx = IsLaneOp ? 1 : 0; | ||||
18729 | SDNode *LD = N->getOperand(LoadIdx).getNode(); | ||||
18730 | // If it is not a LOAD, we cannot do this combine. | ||||
18731 | if (LD->getOpcode() != ISD::LOAD) | ||||
18732 | return SDValue(); | ||||
18733 | |||||
18734 | // The vector lane must be a constant in the LD1LANE opcode. | ||||
18735 | SDValue Lane; | ||||
18736 | if (IsLaneOp) { | ||||
18737 | Lane = N->getOperand(2); | ||||
18738 | auto *LaneC = dyn_cast<ConstantSDNode>(Lane); | ||||
18739 | if (!LaneC || LaneC->getZExtValue() >= VT.getVectorNumElements()) | ||||
18740 | return SDValue(); | ||||
18741 | } | ||||
18742 | |||||
18743 | LoadSDNode *LoadSDN = cast<LoadSDNode>(LD); | ||||
18744 | EVT MemVT = LoadSDN->getMemoryVT(); | ||||
18745 | // Check if memory operand is the same type as the vector element. | ||||
18746 | if (MemVT != VT.getVectorElementType()) | ||||
18747 | return SDValue(); | ||||
18748 | |||||
18749 | // Check if there are other uses. If so, do not combine as it will introduce | ||||
18750 | // an extra load. | ||||
18751 | for (SDNode::use_iterator UI = LD->use_begin(), UE = LD->use_end(); UI != UE; | ||||
18752 | ++UI) { | ||||
18753 | if (UI.getUse().getResNo() == 1) // Ignore uses of the chain result. | ||||
18754 | continue; | ||||
18755 | if (*UI != N) | ||||
18756 | return SDValue(); | ||||
18757 | } | ||||
18758 | |||||
18759 | SDValue Addr = LD->getOperand(1); | ||||
18760 | SDValue Vector = N->getOperand(0); | ||||
18761 | // Search for a use of the address operand that is an increment. | ||||
18762 | for (SDNode::use_iterator UI = Addr.getNode()->use_begin(), UE = | ||||
18763 | Addr.getNode()->use_end(); UI != UE; ++UI) { | ||||
18764 | SDNode *User = *UI; | ||||
18765 | if (User->getOpcode() != ISD::ADD | ||||
18766 | || UI.getUse().getResNo() != Addr.getResNo()) | ||||
18767 | continue; | ||||
18768 | |||||
18769 | // If the increment is a constant, it must match the memory ref size. | ||||
18770 | SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0); | ||||
18771 | if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) { | ||||
18772 | uint32_t IncVal = CInc->getZExtValue(); | ||||
18773 | unsigned NumBytes = VT.getScalarSizeInBits() / 8; | ||||
18774 | if (IncVal != NumBytes) | ||||
18775 | continue; | ||||
18776 | Inc = DAG.getRegister(AArch64::XZR, MVT::i64); | ||||
18777 | } | ||||
18778 | |||||
18779 | // To avoid cycle construction make sure that neither the load nor the add | ||||
18780 | // are predecessors to each other or the Vector. | ||||
18781 | SmallPtrSet<const SDNode *, 32> Visited; | ||||
18782 | SmallVector<const SDNode *, 16> Worklist; | ||||
18783 | Visited.insert(Addr.getNode()); | ||||
18784 | Worklist.push_back(User); | ||||
18785 | Worklist.push_back(LD); | ||||
18786 | Worklist.push_back(Vector.getNode()); | ||||
18787 | if (SDNode::hasPredecessorHelper(LD, Visited, Worklist) || | ||||
18788 | SDNode::hasPredecessorHelper(User, Visited, Worklist)) | ||||
18789 | continue; | ||||
18790 | |||||
18791 | SmallVector<SDValue, 8> Ops; | ||||
18792 | Ops.push_back(LD->getOperand(0)); // Chain | ||||
18793 | if (IsLaneOp) { | ||||
18794 | Ops.push_back(Vector); // The vector to be inserted | ||||
18795 | Ops.push_back(Lane); // The lane to be inserted in the vector | ||||
18796 | } | ||||
18797 | Ops.push_back(Addr); | ||||
18798 | Ops.push_back(Inc); | ||||
18799 | |||||
18800 | EVT Tys[3] = { VT, MVT::i64, MVT::Other }; | ||||
18801 | SDVTList SDTys = DAG.getVTList(Tys); | ||||
18802 | unsigned NewOp = IsLaneOp ? AArch64ISD::LD1LANEpost : AArch64ISD::LD1DUPpost; | ||||
18803 | SDValue UpdN = DAG.getMemIntrinsicNode(NewOp, SDLoc(N), SDTys, Ops, | ||||
18804 | MemVT, | ||||
18805 | LoadSDN->getMemOperand()); | ||||
18806 | |||||
18807 | // Update the uses. | ||||
18808 | SDValue NewResults[] = { | ||||
18809 | SDValue(LD, 0), // The result of load | ||||
18810 | SDValue(UpdN.getNode(), 2) // Chain | ||||
18811 | }; | ||||
18812 | DCI.CombineTo(LD, NewResults); | ||||
18813 | DCI.CombineTo(N, SDValue(UpdN.getNode(), 0)); // Dup/Inserted Result | ||||
18814 | DCI.CombineTo(User, SDValue(UpdN.getNode(), 1)); // Write back register | ||||
18815 | |||||
18816 | break; | ||||
18817 | } | ||||
18818 | return SDValue(); | ||||
18819 | } | ||||
18820 | |||||
18821 | /// Simplify ``Addr`` given that the top byte of it is ignored by HW during | ||||
18822 | /// address translation. | ||||
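| /// e.g. (illustrative) an (and X, 0x00ffffffffffffff) feeding a load/store | ||||
| /// address simplifies to X, since only bits 0-55 are demanded here. | ||||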
18823 | static bool performTBISimplification(SDValue Addr, | ||||
18824 | TargetLowering::DAGCombinerInfo &DCI, | ||||
18825 | SelectionDAG &DAG) { | ||||
18826 | APInt DemandedMask = APInt::getLowBitsSet(64, 56); | ||||
18827 | KnownBits Known; | ||||
18828 | TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(), | ||||
18829 | !DCI.isBeforeLegalizeOps()); | ||||
18830 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | ||||
18831 | if (TLI.SimplifyDemandedBits(Addr, DemandedMask, Known, TLO)) { | ||||
18832 | DCI.CommitTargetLoweringOpt(TLO); | ||||
18833 | return true; | ||||
18834 | } | ||||
18835 | return false; | ||||
18836 | } | ||||
18837 | |||||
18838 | static SDValue foldTruncStoreOfExt(SelectionDAG &DAG, SDNode *N) { | ||||
18839 | assert((N->getOpcode() == ISD::STORE || N->getOpcode() == ISD::MSTORE) && | ||||
18840 | "Expected STORE dag node in input!"); | ||||
18841 | |||||
18842 | if (auto Store = dyn_cast<StoreSDNode>(N)) { | ||||
18843 | if (!Store->isTruncatingStore() || Store->isIndexed()) | ||||
18844 | return SDValue(); | ||||
18845 | SDValue Ext = Store->getValue(); | ||||
18846 | auto ExtOpCode = Ext.getOpcode(); | ||||
18847 | if (ExtOpCode != ISD::ZERO_EXTEND && ExtOpCode != ISD::SIGN_EXTEND && | ||||
18848 | ExtOpCode != ISD::ANY_EXTEND) | ||||
18849 | return SDValue(); | ||||
18850 | SDValue Orig = Ext->getOperand(0); | ||||
18851 | if (Store->getMemoryVT() != Orig.getValueType()) | ||||
18852 | return SDValue(); | ||||
18853 | return DAG.getStore(Store->getChain(), SDLoc(Store), Orig, | ||||
18854 | Store->getBasePtr(), Store->getMemOperand()); | ||||
18855 | } | ||||
18856 | |||||
18857 | return SDValue(); | ||||
18858 | } | ||||
18859 | |||||
18860 | // Perform TBI simplification if supported by the target and try to break up | ||||
18861 | // non-temporal loads larger than 256 bits for odd types, so 256-bit LDNP | ||||
18862 | // (Q-form) load instructions can be selected. | ||||
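| // e.g. (illustrative) a non-temporal v12i32 (384-bit) load is broken into a | ||||
| // 256-bit piece, selectable as a Q-register LDNP, plus a 128-bit remainder. | ||||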
18863 | static SDValue performLOADCombine(SDNode *N, | ||||
18864 | TargetLowering::DAGCombinerInfo &DCI, | ||||
18865 | SelectionDAG &DAG, | ||||
18866 | const AArch64Subtarget *Subtarget) { | ||||
18867 | if (Subtarget->supportsAddressTopByteIgnored()) | ||||
18868 | performTBISimplification(N->getOperand(1), DCI, DAG); | ||||
18869 | |||||
18870 | LoadSDNode *LD = cast<LoadSDNode>(N); | ||||
18871 | EVT MemVT = LD->getMemoryVT(); | ||||
18872 | if (LD->isVolatile() || !LD->isNonTemporal() || !Subtarget->isLittleEndian()) | ||||
18873 | return SDValue(N, 0); | ||||
18874 | |||||
18875 | if (MemVT.isScalableVector() || MemVT.getSizeInBits() <= 256 || | ||||
18876 | MemVT.getSizeInBits() % 256 == 0 || | ||||
18877 | 256 % MemVT.getScalarSizeInBits() != 0) | ||||
18878 | return SDValue(N, 0); | ||||
18879 | |||||
18880 | SDLoc DL(LD); | ||||
18881 | SDValue Chain = LD->getChain(); | ||||
18882 | SDValue BasePtr = LD->getBasePtr(); | ||||
18883 | SDNodeFlags Flags = LD->getFlags(); | ||||
18884 | SmallVector<SDValue, 4> LoadOps; | ||||
18885 | SmallVector<SDValue, 4> LoadOpsChain; | ||||
18886 | // Replace any non-temporal load over 256 bits with a series of 256-bit loads | ||||
18887 | // and a final scalar/vector load of fewer than 256 bits. This way we can | ||||
18888 | // utilize 256-bit loads and reduce the number of load instructions generated. | ||||
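| // Illustrative walkthrough for MemVT = v12i32: NewVT = v8i32 and | ||||
| // Num256Loads = 384 / 256 = 1, so one 256-bit load is emitted at offset 0 | ||||
| // and the remaining 128 bits are handled below. | ||||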
18889 | MVT NewVT = | ||||
18890 | MVT::getVectorVT(MemVT.getVectorElementType().getSimpleVT(), | ||||
18891 | 256 / MemVT.getVectorElementType().getSizeInBits()); | ||||
18892 | unsigned Num256Loads = MemVT.getSizeInBits() / 256; | ||||
18893 | // Create all 256-bit loads, starting at offset 0 and going up to byte offset (Num256Loads - 1) * 32. | ||||
18894 | for (unsigned I = 0; I < Num256Loads; I++) { | ||||
18895 | unsigned PtrOffset = I * 32; | ||||
18896 | SDValue NewPtr = DAG.getMemBasePlusOffset( | ||||
18897 | BasePtr, TypeSize::Fixed(PtrOffset), DL, Flags); | ||||
18898 | Align NewAlign = commonAlignment(LD->getAlign(), PtrOffset); | ||||
18899 | SDValue NewLoad = DAG.getLoad( | ||||
18900 | NewVT, DL, Chain, NewPtr, LD->getPointerInfo().getWithOffset(PtrOffset), | ||||
18901 | NewAlign, LD->getMemOperand()->getFlags(), LD->getAAInfo()); | ||||
18902 | LoadOps.push_back(NewLoad); | ||||
18903 | LoadOpsChain.push_back(SDValue(cast<SDNode>(NewLoad), 1)); | ||||
18904 | } | ||||
18905 | |||||
18906 | // Process the remaining bits of the load operation. | ||||
18907 | // This is done by creating an UNDEF vector to match the size of the | ||||
18908 | // 256-bit loads and inserting the remaining load into it. We extract the | ||||
18909 | // original load type at the end using an EXTRACT_SUBVECTOR node. | ||||
18910 | unsigned BitsRemaining = MemVT.getSizeInBits() % 256; | ||||
18911 | unsigned PtrOffset = (MemVT.getSizeInBits() - BitsRemaining) / 8; | ||||
18912 | MVT RemainingVT = MVT::getVectorVT( | ||||
18913 | MemVT.getVectorElementType().getSimpleVT(), | ||||
18914 | BitsRemaining / MemVT.getVectorElementType().getSizeInBits()); | ||||
18915 | SDValue NewPtr = | ||||
18916 | DAG.getMemBasePlusOffset(BasePtr, TypeSize::Fixed(PtrOffset), DL, Flags); | ||||
18917 | Align NewAlign = commonAlignment(LD->getAlign(), PtrOffset); | ||||
18918 | SDValue RemainingLoad = | ||||
18919 | DAG.getLoad(RemainingVT, DL, Chain, NewPtr, | ||||
18920 | LD->getPointerInfo().getWithOffset(PtrOffset), NewAlign, | ||||
18921 | LD->getMemOperand()->getFlags(), LD->getAAInfo()); | ||||
18922 | SDValue UndefVector = DAG.getUNDEF(NewVT); | ||||
18923 | SDValue InsertIdx = DAG.getVectorIdxConstant(0, DL); | ||||
18924 | SDValue ExtendedRemainingLoad = | ||||
18925 | DAG.getNode(ISD::INSERT_SUBVECTOR, DL, NewVT, | ||||
18926 | {UndefVector, RemainingLoad, InsertIdx}); | ||||
18927 | LoadOps.push_back(ExtendedRemainingLoad); | ||||
18928 | LoadOpsChain.push_back(SDValue(cast<SDNode>(RemainingLoad), 1)); | ||||
18929 | EVT ConcatVT = | ||||
18930 | EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(), | ||||
18931 | LoadOps.size() * NewVT.getVectorNumElements()); | ||||
18932 | SDValue ConcatVectors = | ||||
18933 | DAG.getNode(ISD::CONCAT_VECTORS, DL, ConcatVT, LoadOps); | ||||
18934 | // Extract the original vector type size. | ||||
18935 | SDValue ExtractSubVector = | ||||
18936 | DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MemVT, | ||||
18937 | {ConcatVectors, DAG.getVectorIdxConstant(0, DL)}); | ||||
18938 | SDValue TokenFactor = | ||||
18939 | DAG.getNode(ISD::TokenFactor, DL, MVT::Other, LoadOpsChain); | ||||
18940 | return DAG.getMergeValues({ExtractSubVector, TokenFactor}, DL); | ||||
18941 | } | ||||
18942 | |||||
18943 | static SDValue performSTORECombine(SDNode *N, | ||||
18944 | TargetLowering::DAGCombinerInfo &DCI, | ||||
18945 | SelectionDAG &DAG, | ||||
18946 | const AArch64Subtarget *Subtarget) { | ||||
18947 | StoreSDNode *ST = cast<StoreSDNode>(N); | ||||
18948 | SDValue Chain = ST->getChain(); | ||||
18949 | SDValue Value = ST->getValue(); | ||||
18950 | SDValue Ptr = ST->getBasePtr(); | ||||
18951 | |||||
18952 | // If this is an FP_ROUND followed by a store, fold this into a truncating | ||||
18953 | // store. We can do this even if this is already a truncstore. | ||||
18954 | // We purposefully don't care about legality of the nodes here as we know | ||||
18955 | // they can be split down into something legal. | ||||
18956 | if (DCI.isBeforeLegalizeOps() && Value.getOpcode() == ISD::FP_ROUND && | ||||
18957 | Value.getNode()->hasOneUse() && ST->isUnindexed() && | ||||
18958 | Subtarget->useSVEForFixedLengthVectors() && | ||||
18959 | Value.getValueType().isFixedLengthVector() && | ||||
18960 | Value.getValueType().getFixedSizeInBits() >= | ||||
18961 | Subtarget->getMinSVEVectorSizeInBits()) | ||||
18962 | return DAG.getTruncStore(Chain, SDLoc(N), Value.getOperand(0), Ptr, | ||||
18963 | ST->getMemoryVT(), ST->getMemOperand()); | ||||
18964 | |||||
18965 | if (SDValue Split = splitStores(N, DCI, DAG, Subtarget)) | ||||
18966 | return Split; | ||||
18967 | |||||
18968 | if (Subtarget->supportsAddressTopByteIgnored() && | ||||
18969 | performTBISimplification(N->getOperand(2), DCI, DAG)) | ||||
18970 | return SDValue(N, 0); | ||||
18971 | |||||
18972 | if (SDValue Store = foldTruncStoreOfExt(DAG, N)) | ||||
18973 | return Store; | ||||
18974 | |||||
18975 | return SDValue(); | ||||
18976 | } | ||||
18977 | |||||
18978 | static SDValue performMSTORECombine(SDNode *N, | ||||
18979 | TargetLowering::DAGCombinerInfo &DCI, | ||||
18980 | SelectionDAG &DAG, | ||||
18981 | const AArch64Subtarget *Subtarget) { | ||||
18982 | MaskedStoreSDNode *MST = cast<MaskedStoreSDNode>(N); | ||||
18983 | SDValue Value = MST->getValue(); | ||||
18984 | SDValue Mask = MST->getMask(); | ||||
18985 | SDLoc DL(N); | ||||
18986 | |||||
18987 | // If this is a UZP1 followed by a masked store, fold this into a masked | ||||
18988 | // truncating store. We can do this even if this is already a masked | ||||
18989 | // truncstore. | ||||
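| // e.g. (an illustrative sketch, nxv4i32 data stored as nxv4i16): | ||||
| //   masked_store(uzp1(bitcast(X : nxv4i32) to nxv8i16, _), p) | ||||
| //     --> truncating masked_store(X, p') with a suitably widened ptrue p'. | ||||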
18990 | if (Value.getOpcode() == AArch64ISD::UZP1 && Value->hasOneUse() && | ||||
18991 | MST->isUnindexed() && Mask->getOpcode() == AArch64ISD::PTRUE && | ||||
18992 | Value.getValueType().isInteger()) { | ||||
18993 | Value = Value.getOperand(0); | ||||
18994 | if (Value.getOpcode() == ISD::BITCAST) { | ||||
18995 | EVT HalfVT = | ||||
18996 | Value.getValueType().getHalfNumVectorElementsVT(*DAG.getContext()); | ||||
18997 | EVT InVT = Value.getOperand(0).getValueType(); | ||||
18998 | |||||
18999 | if (HalfVT.widenIntegerVectorElementType(*DAG.getContext()) == InVT) { | ||||
19000 | unsigned MinSVESize = Subtarget->getMinSVEVectorSizeInBits(); | ||||
19001 | unsigned PgPattern = Mask->getConstantOperandVal(0); | ||||
19002 | |||||
19003 | // Ensure we can double the size of the predicate pattern | ||||
19004 | unsigned NumElts = getNumElementsFromSVEPredPattern(PgPattern); | ||||
19005 | if (NumElts && NumElts * InVT.getVectorElementType().getSizeInBits() <= | ||||
19006 | MinSVESize) { | ||||
19007 | Mask = getPTrue(DAG, DL, InVT.changeVectorElementType(MVT::i1), | ||||
19008 | PgPattern); | ||||
19009 | return DAG.getMaskedStore(MST->getChain(), DL, Value.getOperand(0), | ||||
19010 | MST->getBasePtr(), MST->getOffset(), Mask, | ||||
19011 | MST->getMemoryVT(), MST->getMemOperand(), | ||||
19012 | MST->getAddressingMode(), | ||||
19013 | /*IsTruncating=*/true); | ||||
19014 | } | ||||
19015 | } | ||||
19016 | } | ||||
19017 | } | ||||
19018 | |||||
19019 | return SDValue(); | ||||
19020 | } | ||||
19021 | |||||
19022 | /// \return true if part of the index was folded into the Base. | ||||
19023 | static bool foldIndexIntoBase(SDValue &BasePtr, SDValue &Index, SDValue Scale, | ||||
19024 | SDLoc DL, SelectionDAG &DAG) { | ||||
19025 | // This function assumes a vector of i64 indices. | ||||
19026 | EVT IndexVT = Index.getValueType(); | ||||
19027 | if (!IndexVT.isVector() || IndexVT.getVectorElementType() != MVT::i64) | ||||
19028 | return false; | ||||
19029 | |||||
19030 | // Simplify: | ||||
19031 | // BasePtr = Ptr | ||||
19032 | // Index = X + splat(Offset) | ||||
19033 | // -> | ||||
19034 | // BasePtr = Ptr + Offset * scale. | ||||
19035 | // Index = X | ||||
19036 | if (Index.getOpcode() == ISD::ADD) { | ||||
19037 | if (auto Offset = DAG.getSplatValue(Index.getOperand(1))) { | ||||
19038 | Offset = DAG.getNode(ISD::MUL, DL, MVT::i64, Offset, Scale); | ||||
19039 | BasePtr = DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr, Offset); | ||||
19040 | Index = Index.getOperand(0); | ||||
19041 | return true; | ||||
19042 | } | ||||
19043 | } | ||||
19044 | |||||
19045 | // Simplify: | ||||
19046 | // BasePtr = Ptr | ||||
19047 | // Index = (X + splat(Offset)) << splat(Shift) | ||||
19048 | // -> | ||||
19049 | // BasePtr = Ptr + (Offset << Shift) * scale) | ||||
19050 | // Index = X << splat(shift) | ||||
19051 | if (Index.getOpcode() == ISD::SHL && | ||||
19052 | Index.getOperand(0).getOpcode() == ISD::ADD) { | ||||
19053 | SDValue Add = Index.getOperand(0); | ||||
19054 | SDValue ShiftOp = Index.getOperand(1); | ||||
19055 | SDValue OffsetOp = Add.getOperand(1); | ||||
19056 | if (auto Shift = DAG.getSplatValue(ShiftOp)) | ||||
19057 | if (auto Offset = DAG.getSplatValue(OffsetOp)) { | ||||
19058 | Offset = DAG.getNode(ISD::SHL, DL, MVT::i64, Offset, Shift); | ||||
19059 | Offset = DAG.getNode(ISD::MUL, DL, MVT::i64, Offset, Scale); | ||||
19060 | BasePtr = DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr, Offset); | ||||
19061 | Index = DAG.getNode(ISD::SHL, DL, Index.getValueType(), | ||||
19062 | Add.getOperand(0), ShiftOp); | ||||
19063 | return true; | ||||
19064 | } | ||||
19065 | } | ||||
19066 | |||||
19067 | return false; | ||||
19068 | } | ||||
19069 | |||||
19070 | // Analyse the specified address returning true if a more optimal addressing | ||||
19071 | // mode is available. When returning true all parameters are updated to reflect | ||||
19072 | // their recommended values. | ||||
19073 | static bool findMoreOptimalIndexType(const MaskedGatherScatterSDNode *N, | ||||
19074 | SDValue &BasePtr, SDValue &Index, | ||||
19075 | SelectionDAG &DAG) { | ||||
19076 | // Try to iteratively fold parts of the index into the base pointer to | ||||
19077 | // simplify the index as much as possible. | ||||
19078 | bool Changed = false; | ||||
19079 | while (foldIndexIntoBase(BasePtr, Index, N->getScale(), SDLoc(N), DAG)) | ||||
19080 | Changed = true; | ||||
19081 | |||||
19082 | // Only consider element types that are pointer sized as smaller types can | ||||
19083 | // be easily promoted. | ||||
19084 | EVT IndexVT = Index.getValueType(); | ||||
19085 | if (IndexVT.getVectorElementType() != MVT::i64 || IndexVT == MVT::nxv2i64) | ||||
19086 | return Changed; | ||||
19087 | |||||
19088 | // Can indices be trivially shrunk? | ||||
19089 | EVT DataVT = N->getOperand(1).getValueType(); | ||||
19090 | // Don't attempt to shrink the index for fixed vectors of 64-bit data, since | ||||
19091 | // it will later be re-extended to 64 bits in legalization. | ||||
19092 | if (DataVT.isFixedLengthVector() && DataVT.getScalarSizeInBits() == 64) | ||||
19093 | return Changed; | ||||
19094 | if (ISD::isVectorShrinkable(Index.getNode(), 32, N->isIndexSigned())) { | ||||
19095 | EVT NewIndexVT = IndexVT.changeVectorElementType(MVT::i32); | ||||
19096 | Index = DAG.getNode(ISD::TRUNCATE, SDLoc(N), NewIndexVT, Index); | ||||
19097 | return true; | ||||
19098 | } | ||||
19099 | |||||
19100 | // Match: | ||||
19101 | // Index = step(const) | ||||
19102 | int64_t Stride = 0; | ||||
19103 | if (Index.getOpcode() == ISD::STEP_VECTOR) { | ||||
19104 | Stride = cast<ConstantSDNode>(Index.getOperand(0))->getSExtValue(); | ||||
19105 | } | ||||
19106 | // Match: | ||||
19107 | // Index = step(const) << shift(const) | ||||
19108 | else if (Index.getOpcode() == ISD::SHL && | ||||
19109 | Index.getOperand(0).getOpcode() == ISD::STEP_VECTOR) { | ||||
19110 | SDValue RHS = Index.getOperand(1); | ||||
19111 | if (auto *Shift = | ||||
19112 | dyn_cast_or_null<ConstantSDNode>(DAG.getSplatValue(RHS))) { | ||||
19113 | int64_t Step = (int64_t)Index.getOperand(0).getConstantOperandVal(1); | ||||
19114 | Stride = Step << Shift->getZExtValue(); | ||||
19115 | } | ||||
19116 | } | ||||
19117 | |||||
19118 | // Return early if no supported pattern was found. | ||||
19119 | if (Stride == 0) | ||||
19120 | return Changed; | ||||
19121 | |||||
19122 | if (Stride < std::numeric_limits<int32_t>::min() || | ||||
19123 | Stride > std::numeric_limits<int32_t>::max()) | ||||
19124 | return Changed; | ||||
19125 | |||||
19126 | const auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>(); | ||||
19127 | unsigned MaxVScale = | ||||
19128 | Subtarget.getMaxSVEVectorSizeInBits() / AArch64::SVEBitsPerBlock; | ||||
19129 | int64_t LastElementOffset = | ||||
19130 | IndexVT.getVectorMinNumElements() * Stride * MaxVScale; | ||||
19131 | |||||
19132 | if (LastElementOffset < std::numeric_limits<int32_t>::min() || | ||||
19133 | LastElementOffset > std::numeric_limits<int32_t>::max()) | ||||
19134 | return Changed; | ||||
19135 | |||||
19136 | EVT NewIndexVT = IndexVT.changeVectorElementType(MVT::i32); | ||||
19137 | // Stride does not scale explicitly by 'Scale', because it happens in | ||||
19138 | // the gather/scatter addressing mode. | ||||
19139 | Index = DAG.getStepVector(SDLoc(N), NewIndexVT, APInt(32, Stride)); | ||||
19140 | return true; | ||||
19141 | } | ||||
19142 | |||||
19143 | static SDValue performMaskedGatherScatterCombine( | ||||
19144 | SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG) { | ||||
19145 | MaskedGatherScatterSDNode *MGS = cast<MaskedGatherScatterSDNode>(N); | ||||
19146 | assert(MGS && "Can only combine gather load or scatter store nodes"); | ||||
19147 | |||||
19148 | if (!DCI.isBeforeLegalize()) | ||||
19149 | return SDValue(); | ||||
19150 | |||||
19151 | SDLoc DL(MGS); | ||||
19152 | SDValue Chain = MGS->getChain(); | ||||
19153 | SDValue Scale = MGS->getScale(); | ||||
19154 | SDValue Index = MGS->getIndex(); | ||||
19155 | SDValue Mask = MGS->getMask(); | ||||
19156 | SDValue BasePtr = MGS->getBasePtr(); | ||||
19157 | ISD::MemIndexType IndexType = MGS->getIndexType(); | ||||
19158 | |||||
19159 | if (!findMoreOptimalIndexType(MGS, BasePtr, Index, DAG)) | ||||
19160 | return SDValue(); | ||||
19161 | |||||
19162 | // Here we catch such cases early and change the gather/scatter's IndexType | ||||
19163 | // to allow the use of an Index that's more legalisation-friendly. | ||||
19164 | if (auto *MGT = dyn_cast<MaskedGatherSDNode>(MGS)) { | ||||
19165 | SDValue PassThru = MGT->getPassThru(); | ||||
19166 | SDValue Ops[] = {Chain, PassThru, Mask, BasePtr, Index, Scale}; | ||||
19167 | return DAG.getMaskedGather( | ||||
19168 | DAG.getVTList(N->getValueType(0), MVT::Other), MGT->getMemoryVT(), DL, | ||||
19169 | Ops, MGT->getMemOperand(), IndexType, MGT->getExtensionType()); | ||||
19170 | } | ||||
19171 | auto *MSC = cast<MaskedScatterSDNode>(MGS); | ||||
19172 | SDValue Data = MSC->getValue(); | ||||
19173 | SDValue Ops[] = {Chain, Data, Mask, BasePtr, Index, Scale}; | ||||
19174 | return DAG.getMaskedScatter(DAG.getVTList(MVT::Other), MSC->getMemoryVT(), DL, | ||||
19175 | Ops, MSC->getMemOperand(), IndexType, | ||||
19176 | MSC->isTruncatingStore()); | ||||
19177 | } | ||||
19178 | |||||
19179 | /// Target-specific DAG combine function for NEON load/store intrinsics | ||||
19180 | /// to merge base address updates. | ||||
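| /// e.g. (illustrative) an "add x0, x0, #32" feeding the address of an | ||||
| /// @llvm.aarch64.neon.ld2 of two 4s vectors can be folded into the load, | ||||
| /// selecting "ld2 { v0.4s, v1.4s }, [x0], #32". | ||||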
19181 | static SDValue performNEONPostLDSTCombine(SDNode *N, | ||||
19182 | TargetLowering::DAGCombinerInfo &DCI, | ||||
19183 | SelectionDAG &DAG) { | ||||
19184 | if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) | ||||
19185 | return SDValue(); | ||||
19186 | |||||
19187 | unsigned AddrOpIdx = N->getNumOperands() - 1; | ||||
19188 | SDValue Addr = N->getOperand(AddrOpIdx); | ||||
19189 | |||||
19190 | // Search for a use of the address operand that is an increment. | ||||
19191 | for (SDNode::use_iterator UI = Addr.getNode()->use_begin(), | ||||
19192 | UE = Addr.getNode()->use_end(); UI != UE; ++UI) { | ||||
19193 | SDNode *User = *UI; | ||||
19194 | if (User->getOpcode() != ISD::ADD || | ||||
19195 | UI.getUse().getResNo() != Addr.getResNo()) | ||||
19196 | continue; | ||||
19197 | |||||
19198 | // Check that the add is independent of the load/store. Otherwise, folding | ||||
19199 | // it would create a cycle. | ||||
19200 | SmallPtrSet<const SDNode *, 32> Visited; | ||||
19201 | SmallVector<const SDNode *, 16> Worklist; | ||||
19202 | Visited.insert(Addr.getNode()); | ||||
19203 | Worklist.push_back(N); | ||||
19204 | Worklist.push_back(User); | ||||
19205 | if (SDNode::hasPredecessorHelper(N, Visited, Worklist) || | ||||
19206 | SDNode::hasPredecessorHelper(User, Visited, Worklist)) | ||||
19207 | continue; | ||||
19208 | |||||
19209 | // Find the new opcode for the updating load/store. | ||||
19210 | bool IsStore = false; | ||||
19211 | bool IsLaneOp = false; | ||||
19212 | bool IsDupOp = false; | ||||
19213 | unsigned NewOpc = 0; | ||||
19214 | unsigned NumVecs = 0; | ||||
19215 | unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); | ||||
19216 | switch (IntNo) { | ||||
19217 | default: llvm_unreachable("unexpected intrinsic for Neon base update"); | ||||
19218 | case Intrinsic::aarch64_neon_ld2: NewOpc = AArch64ISD::LD2post; | ||||
19219 | NumVecs = 2; break; | ||||
19220 | case Intrinsic::aarch64_neon_ld3: NewOpc = AArch64ISD::LD3post; | ||||
19221 | NumVecs = 3; break; | ||||
19222 | case Intrinsic::aarch64_neon_ld4: NewOpc = AArch64ISD::LD4post; | ||||
19223 | NumVecs = 4; break; | ||||
19224 | case Intrinsic::aarch64_neon_st2: NewOpc = AArch64ISD::ST2post; | ||||
19225 | NumVecs = 2; IsStore = true; break; | ||||
19226 | case Intrinsic::aarch64_neon_st3: NewOpc = AArch64ISD::ST3post; | ||||
19227 | NumVecs = 3; IsStore = true; break; | ||||
19228 | case Intrinsic::aarch64_neon_st4: NewOpc = AArch64ISD::ST4post; | ||||
19229 | NumVecs = 4; IsStore = true; break; | ||||
19230 | case Intrinsic::aarch64_neon_ld1x2: NewOpc = AArch64ISD::LD1x2post; | ||||
19231 | NumVecs = 2; break; | ||||
19232 | case Intrinsic::aarch64_neon_ld1x3: NewOpc = AArch64ISD::LD1x3post; | ||||
19233 | NumVecs = 3; break; | ||||
19234 | case Intrinsic::aarch64_neon_ld1x4: NewOpc = AArch64ISD::LD1x4post; | ||||
19235 | NumVecs = 4; break; | ||||
19236 | case Intrinsic::aarch64_neon_st1x2: NewOpc = AArch64ISD::ST1x2post; | ||||
19237 | NumVecs = 2; IsStore = true; break; | ||||
19238 | case Intrinsic::aarch64_neon_st1x3: NewOpc = AArch64ISD::ST1x3post; | ||||
19239 | NumVecs = 3; IsStore = true; break; | ||||
19240 | case Intrinsic::aarch64_neon_st1x4: NewOpc = AArch64ISD::ST1x4post; | ||||
19241 | NumVecs = 4; IsStore = true; break; | ||||
19242 | case Intrinsic::aarch64_neon_ld2r: NewOpc = AArch64ISD::LD2DUPpost; | ||||
19243 | NumVecs = 2; IsDupOp = true; break; | ||||
19244 | case Intrinsic::aarch64_neon_ld3r: NewOpc = AArch64ISD::LD3DUPpost; | ||||
19245 | NumVecs = 3; IsDupOp = true; break; | ||||
19246 | case Intrinsic::aarch64_neon_ld4r: NewOpc = AArch64ISD::LD4DUPpost; | ||||
19247 | NumVecs = 4; IsDupOp = true; break; | ||||
19248 | case Intrinsic::aarch64_neon_ld2lane: NewOpc = AArch64ISD::LD2LANEpost; | ||||
19249 | NumVecs = 2; IsLaneOp = true; break; | ||||
19250 | case Intrinsic::aarch64_neon_ld3lane: NewOpc = AArch64ISD::LD3LANEpost; | ||||
19251 | NumVecs = 3; IsLaneOp = true; break; | ||||
19252 | case Intrinsic::aarch64_neon_ld4lane: NewOpc = AArch64ISD::LD4LANEpost; | ||||
19253 | NumVecs = 4; IsLaneOp = true; break; | ||||
19254 | case Intrinsic::aarch64_neon_st2lane: NewOpc = AArch64ISD::ST2LANEpost; | ||||
19255 | NumVecs = 2; IsStore = true; IsLaneOp = true; break; | ||||
19256 | case Intrinsic::aarch64_neon_st3lane: NewOpc = AArch64ISD::ST3LANEpost; | ||||
19257 | NumVecs = 3; IsStore = true; IsLaneOp = true; break; | ||||
19258 | case Intrinsic::aarch64_neon_st4lane: NewOpc = AArch64ISD::ST4LANEpost; | ||||
19259 | NumVecs = 4; IsStore = true; IsLaneOp = true; break; | ||||
19260 | } | ||||
19261 | |||||
19262 | EVT VecTy; | ||||
19263 | if (IsStore) | ||||
19264 | VecTy = N->getOperand(2).getValueType(); | ||||
19265 | else | ||||
19266 | VecTy = N->getValueType(0); | ||||
19267 | |||||
19268 | // If the increment is a constant, it must match the memory ref size. | ||||
19269 | SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0); | ||||
19270 | if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) { | ||||
19271 | uint32_t IncVal = CInc->getZExtValue(); | ||||
19272 | unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8; | ||||
19273 | if (IsLaneOp || IsDupOp) | ||||
19274 | NumBytes /= VecTy.getVectorNumElements(); | ||||
19275 | if (IncVal != NumBytes) | ||||
19276 | continue; | ||||
19277 | Inc = DAG.getRegister(AArch64::XZR, MVT::i64); | ||||
19278 | } | ||||
19279 | SmallVector<SDValue, 8> Ops; | ||||
19280 | Ops.push_back(N->getOperand(0)); // Incoming chain | ||||
19281 | // Load lane and store have vector list as input. | ||||
19282 | if (IsLaneOp || IsStore) | ||||
19283 | for (unsigned i = 2; i < AddrOpIdx; ++i) | ||||
19284 | Ops.push_back(N->getOperand(i)); | ||||
19285 | Ops.push_back(Addr); // Base register | ||||
19286 | Ops.push_back(Inc); | ||||
19287 | |||||
19288 | // Return Types. | ||||
19289 | EVT Tys[6]; | ||||
19290 | unsigned NumResultVecs = (IsStore ? 0 : NumVecs); | ||||
19291 | unsigned n; | ||||
19292 | for (n = 0; n < NumResultVecs; ++n) | ||||
19293 | Tys[n] = VecTy; | ||||
19294 | Tys[n++] = MVT::i64; // Type of write back register | ||||
19295 | Tys[n] = MVT::Other; // Type of the chain | ||||
19296 | SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumResultVecs + 2)); | ||||
19297 | |||||
19298 | MemIntrinsicSDNode *MemInt = cast<MemIntrinsicSDNode>(N); | ||||
19299 | SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, SDLoc(N), SDTys, Ops, | ||||
19300 | MemInt->getMemoryVT(), | ||||
19301 | MemInt->getMemOperand()); | ||||
19302 | |||||
19303 | // Update the uses. | ||||
19304 | std::vector<SDValue> NewResults; | ||||
19305 | for (unsigned i = 0; i < NumResultVecs; ++i) { | ||||
19306 | NewResults.push_back(SDValue(UpdN.getNode(), i)); | ||||
19307 | } | ||||
19308 | NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs + 1)); | ||||
19309 | DCI.CombineTo(N, NewResults); | ||||
19310 | DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs)); | ||||
19311 | |||||
19312 | break; | ||||
19313 | } | ||||
19314 | return SDValue(); | ||||
19315 | } | ||||
19316 | |||||
19317 | // Checks to see if the value is the prescribed width and returns information | ||||
19318 | // about its extension mode. | ||||
19319 | static | ||||
19320 | bool checkValueWidth(SDValue V, unsigned width, ISD::LoadExtType &ExtType) { | ||||
19321 | ExtType = ISD::NON_EXTLOAD; | ||||
19322 | switch(V.getNode()->getOpcode()) { | ||||
19323 | default: | ||||
19324 | return false; | ||||
19325 | case ISD::LOAD: { | ||||
19326 | LoadSDNode *LoadNode = cast<LoadSDNode>(V.getNode()); | ||||
19327 | if ((LoadNode->getMemoryVT() == MVT::i8 && width == 8) | ||||
19328 | || (LoadNode->getMemoryVT() == MVT::i16 && width == 16)) { | ||||
19329 | ExtType = LoadNode->getExtensionType(); | ||||
19330 | return true; | ||||
19331 | } | ||||
19332 | return false; | ||||
19333 | } | ||||
19334 | case ISD::AssertSext: { | ||||
19335 | VTSDNode *TypeNode = cast<VTSDNode>(V.getNode()->getOperand(1)); | ||||
19336 | if ((TypeNode->getVT() == MVT::i8 && width == 8) | ||||
19337 | || (TypeNode->getVT() == MVT::i16 && width == 16)) { | ||||
19338 | ExtType = ISD::SEXTLOAD; | ||||
19339 | return true; | ||||
19340 | } | ||||
19341 | return false; | ||||
19342 | } | ||||
19343 | case ISD::AssertZext: { | ||||
19344 | VTSDNode *TypeNode = cast<VTSDNode>(V.getNode()->getOperand(1)); | ||||
19345 | if ((TypeNode->getVT() == MVT::i8 && width == 8) | ||||
19346 | || (TypeNode->getVT() == MVT::i16 && width == 16)) { | ||||
19347 | ExtType = ISD::ZEXTLOAD; | ||||
19348 | return true; | ||||
19349 | } | ||||
19350 | return false; | ||||
19351 | } | ||||
19352 | case ISD::Constant: | ||||
19353 | case ISD::TargetConstant: { | ||||
19354 | return std::abs(cast<ConstantSDNode>(V.getNode())->getSExtValue()) < | ||||
19355 | 1LL << (width - 1); | ||||
19356 | } | ||||
19357 | } | ||||
19358 | |||||
19359 | return true; | ||||
19360 | } | ||||
19361 | |||||
19362 | // This function does a whole lot of voodoo to determine if the tests are | ||||
19363 | // equivalent without and with a mask. Essentially what happens is that given a | ||||
19364 | // DAG resembling: | ||||
19365 | // | ||||
19366 | // +-------------+ +-------------+ +-------------+ +-------------+ | ||||
19367 | // | Input | | AddConstant | | CompConstant| | CC | | ||||
19368 | // +-------------+ +-------------+ +-------------+ +-------------+ | ||||
19369 | // | | | | | ||||
19370 | // V V | +----------+ | ||||
19371 | // +-------------+ +----+ | | | ||||
19372 | // | ADD | |0xff| | | | ||||
19373 | // +-------------+ +----+ | | | ||||
19374 | // | | | | | ||||
19375 | // V V | | | ||||
19376 | // +-------------+ | | | ||||
19377 | // | AND | | | | ||||
19378 | // +-------------+ | | | ||||
19379 | // | | | | ||||
19380 | // +-----+ | | | ||||
19381 | // | | | | ||||
19382 | // V V V | ||||
19383 | // +-------------+ | ||||
19384 | // | CMP | | ||||
19385 | // +-------------+ | ||||
19386 | // | ||||
19387 | // The AND node may be safely removed for some combinations of inputs. In | ||||
19388 | // particular we need to take into account the extension type of the Input, | ||||
19389 | // the exact values of AddConstant, CompConstant, and CC, along with the nominal | ||||
19390 | // width of the input (this can work for any width of input; the above graph | ||||
19391 | // is specific to 8 bits). | ||||
19392 | // | ||||
19393 | // The specific equations were worked out by generating output tables for each | ||||
19394 | // AArch64CC value in terms of AddConstant (w1) and CompConstant (w2). The | ||||
19395 | // problem was simplified by working with 4 bit inputs, which means we only | ||||
19396 | // needed to reason about 24 distinct bit patterns: 8 patterns unique to zero | ||||
19397 | // extension (8,15), 8 patterns unique to sign extensions (-8,-1), and 8 | ||||
19398 | // patterns present in both extensions (0,7). For every distinct set of | ||||
19399 | // AddConstant and CompConstant bit patterns we can consider the masked and | ||||
19400 | // unmasked versions to be equivalent if the result of this function is true | ||||
19401 | // for all 16 distinct bit patterns for the current extension type of Input (w0). | ||||
19402 | // | ||||
19403 | // sub w8, w0, w1 | ||||
19404 | // and w10, w8, #0x0f | ||||
19405 | // cmp w8, w2 | ||||
19406 | // cset w9, AArch64CC | ||||
19407 | // cmp w10, w2 | ||||
19408 | // cset w11, AArch64CC | ||||
19409 | // cmp w9, w11 | ||||
19410 | // cset w0, eq | ||||
19411 | // ret | ||||
19412 | // | ||||
19413 | // Since the above function shows when the outputs are equivalent it defines | ||||
19414 | // when it is safe to remove the AND. Unfortunately it only runs on AArch64 and | ||||
19415 | // would be expensive to run during compiles. The equations below were written | ||||
19416 | // in a test harness that confirmed they gave outputs equivalent to the above | ||||
19417 | // function for all inputs, so they can be used to determine if the removal is | ||||
19418 | // legal instead. | ||||
19419 | // | ||||
19420 | // isEquivalentMaskless() is the code for testing if the AND can be removed; | ||||
19421 | // it is factored out of the DAG recognition because the DAG can take several forms. | ||||
19422 | |||||
19423 | static bool isEquivalentMaskless(unsigned CC, unsigned width, | ||||
19424 | ISD::LoadExtType ExtType, int AddConstant, | ||||
19425 | int CompConstant) { | ||||
19426 | // By being careful about our equations and only writing them in terms of | ||||
19427 | // symbolic values and well-known constants (0, 1, -1, MaxUInt) we can | ||||
19428 | // make them generally applicable to all bit widths. | ||||
19429 | int MaxUInt = (1 << width); | ||||
19430 | |||||
19431 | // For the purposes of these comparisons sign extending the type is | ||||
19432 | // equivalent to zero extending the add and displacing it by half the integer | ||||
19433 | // width. Provided we are careful and make sure our equations are valid over | ||||
19434 | // the whole range we can just adjust the input and avoid writing equations | ||||
19435 | // for sign extended inputs. | ||||
19436 | if (ExtType == ISD::SEXTLOAD) | ||||
19437 | AddConstant -= (1 << (width-1)); | ||||
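| // e.g. for width == 8 the displacement is 128: a sign-extended input behaves | ||||
| // like a zero-extended one once AddConstant is shifted down by 128 (a sketch | ||||
| // of the argument, not a proof). | ||||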
19438 | |||||
19439 | switch(CC) { | ||||
19440 | case AArch64CC::LE: | ||||
19441 | case AArch64CC::GT: | ||||
19442 | if ((AddConstant == 0) || | ||||
19443 | (CompConstant == MaxUInt - 1 && AddConstant < 0) || | ||||
19444 | (AddConstant >= 0 && CompConstant < 0) || | ||||
19445 | (AddConstant <= 0 && CompConstant <= 0 && CompConstant < AddConstant)) | ||||
19446 | return true; | ||||
19447 | break; | ||||
19448 | case AArch64CC::LT: | ||||
19449 | case AArch64CC::GE: | ||||
19450 | if ((AddConstant == 0) || | ||||
19451 | (AddConstant >= 0 && CompConstant <= 0) || | ||||
19452 | (AddConstant <= 0 && CompConstant <= 0 && CompConstant <= AddConstant)) | ||||
19453 | return true; | ||||
19454 | break; | ||||
19455 | case AArch64CC::HI: | ||||
19456 | case AArch64CC::LS: | ||||
19457 | if ((AddConstant >= 0 && CompConstant < 0) || | ||||
19458 | (AddConstant <= 0 && CompConstant >= -1 && | ||||
19459 | CompConstant < AddConstant + MaxUInt)) | ||||
19460 | return true; | ||||
19461 | break; | ||||
19462 | case AArch64CC::PL: | ||||
19463 | case AArch64CC::MI: | ||||
19464 | if ((AddConstant == 0) || | ||||
19465 | (AddConstant > 0 && CompConstant <= 0) || | ||||
19466 | (AddConstant < 0 && CompConstant <= AddConstant)) | ||||
19467 | return true; | ||||
19468 | break; | ||||
19469 | case AArch64CC::LO: | ||||
19470 | case AArch64CC::HS: | ||||
19471 | if ((AddConstant >= 0 && CompConstant <= 0) || | ||||
19472 | (AddConstant <= 0 && CompConstant >= 0 && | ||||
19473 | CompConstant <= AddConstant + MaxUInt)) | ||||
19474 | return true; | ||||
19475 | break; | ||||
19476 | case AArch64CC::EQ: | ||||
19477 | case AArch64CC::NE: | ||||
19478 | if ((AddConstant > 0 && CompConstant < 0) || | ||||
19479 | (AddConstant < 0 && CompConstant >= 0 && | ||||
19480 | CompConstant < AddConstant + MaxUInt) || | ||||
19481 | (AddConstant >= 0 && CompConstant >= 0 && | ||||
19482 | CompConstant >= AddConstant) || | ||||
19483 | (AddConstant <= 0 && CompConstant < 0 && CompConstant < AddConstant)) | ||||
19484 | return true; | ||||
19485 | break; | ||||
19486 | case AArch64CC::VS: | ||||
19487 | case AArch64CC::VC: | ||||
19488 | case AArch64CC::AL: | ||||
19489 | case AArch64CC::NV: | ||||
19490 | return true; | ||||
19491 | case AArch64CC::Invalid: | ||||
19492 | break; | ||||
19493 | } | ||||
19494 | |||||
19495 | return false; | ||||
19496 | } | ||||
19497 | |||||
19498 | static | ||||
19499 | SDValue performCONDCombine(SDNode *N, | ||||
19500 | TargetLowering::DAGCombinerInfo &DCI, | ||||
19501 | SelectionDAG &DAG, unsigned CCIndex, | ||||
19502 | unsigned CmpIndex) { | ||||
19503 | unsigned CC = cast<ConstantSDNode>(N->getOperand(CCIndex))->getSExtValue(); | ||||
19504 | SDNode *SubsNode = N->getOperand(CmpIndex).getNode(); | ||||
19505 | unsigned CondOpcode = SubsNode->getOpcode(); | ||||
19506 | |||||
19507 | if (CondOpcode != AArch64ISD::SUBS || SubsNode->hasAnyUseOfValue(0)) | ||||
19508 | return SDValue(); | ||||
19509 | |||||
19510 | // There is a SUBS feeding this condition. Is it fed by a mask we can | ||||
19511 | // use? | ||||
19512 | |||||
19513 | SDNode *AndNode = SubsNode->getOperand(0).getNode(); | ||||
19514 | unsigned MaskBits = 0; | ||||
19515 | |||||
19516 | if (AndNode->getOpcode() != ISD::AND) | ||||
19517 | return SDValue(); | ||||
19518 | |||||
19519 | if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(AndNode->getOperand(1))) { | ||||
19520 | uint32_t CNV = CN->getZExtValue(); | ||||
19521 | if (CNV == 255) | ||||
19522 | MaskBits = 8; | ||||
19523 | else if (CNV == 65535) | ||||
19524 | MaskBits = 16; | ||||
19525 | } | ||||
19526 | |||||
19527 | if (!MaskBits) | ||||
19528 | return SDValue(); | ||||
19529 | |||||
19530 | SDValue AddValue = AndNode->getOperand(0); | ||||
19531 | |||||
19532 | if (AddValue.getOpcode() != ISD::ADD) | ||||
19533 | return SDValue(); | ||||
19534 | |||||
19535 | // The basic dag structure is correct, grab the inputs and validate them. | ||||
19536 | |||||
19537 | SDValue AddInputValue1 = AddValue.getNode()->getOperand(0); | ||||
19538 | SDValue AddInputValue2 = AddValue.getNode()->getOperand(1); | ||||
19539 | SDValue SubsInputValue = SubsNode->getOperand(1); | ||||
19540 | |||||
19541 | // The mask is present and the provenance of all the values is a smaller type; | ||||
19542 | // let's see if the mask is superfluous. | ||||
19543 | |||||
19544 | if (!isa<ConstantSDNode>(AddInputValue2.getNode()) || | ||||
19545 | !isa<ConstantSDNode>(SubsInputValue.getNode())) | ||||
19546 | return SDValue(); | ||||
19547 | |||||
19548 | ISD::LoadExtType ExtType; | ||||
19549 | |||||
19550 | if (!checkValueWidth(SubsInputValue, MaskBits, ExtType) || | ||||
19551 | !checkValueWidth(AddInputValue2, MaskBits, ExtType) || | ||||
19552 | !checkValueWidth(AddInputValue1, MaskBits, ExtType) ) | ||||
19553 | return SDValue(); | ||||
19554 | |||||
19555 | if(!isEquivalentMaskless(CC, MaskBits, ExtType, | ||||
19556 | cast<ConstantSDNode>(AddInputValue2.getNode())->getSExtValue(), | ||||
19557 | cast<ConstantSDNode>(SubsInputValue.getNode())->getSExtValue())) | ||||
19558 | return SDValue(); | ||||
19559 | |||||
19560 | // The AND is not necessary, remove it. | ||||
19561 | |||||
19562 | SDVTList VTs = DAG.getVTList(SubsNode->getValueType(0), | ||||
19563 | SubsNode->getValueType(1)); | ||||
19564 | SDValue Ops[] = { AddValue, SubsNode->getOperand(1) }; | ||||
19565 | |||||
19566 | SDValue NewValue = DAG.getNode(CondOpcode, SDLoc(SubsNode), VTs, Ops); | ||||
19567 | DAG.ReplaceAllUsesWith(SubsNode, NewValue.getNode()); | ||||
19568 | |||||
19569 | return SDValue(N, 0); | ||||
19570 | } | ||||
19571 | |||||
19572 | // Optimize compare with zero and branch. | ||||
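| // e.g. (brcond EQ, (SUBS x, 0), dest) folds to (CBZ x, dest), and the NE | ||||
| // case folds to CBNZ (illustrative of the pattern matched below). | ||||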
19573 | static SDValue performBRCONDCombine(SDNode *N, | ||||
19574 | TargetLowering::DAGCombinerInfo &DCI, | ||||
19575 | SelectionDAG &DAG) { | ||||
19576 | MachineFunction &MF = DAG.getMachineFunction(); | ||||
19577 | // Speculation tracking/SLH assumes that optimized TB(N)Z/CB(N)Z instructions | ||||
19578 | // will not be produced, as they are conditional branch instructions that do | ||||
19579 | // not set flags. | ||||
19580 | if (MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening)) | ||||
19581 | return SDValue(); | ||||
19582 | |||||
19583 | if (SDValue NV = performCONDCombine(N, DCI, DAG, 2, 3)) | ||||
19584 | N = NV.getNode(); | ||||
19585 | SDValue Chain = N->getOperand(0); | ||||
19586 | SDValue Dest = N->getOperand(1); | ||||
19587 | SDValue CCVal = N->getOperand(2); | ||||
19588 | SDValue Cmp = N->getOperand(3); | ||||
19589 | |||||
19590 | assert(isa<ConstantSDNode>(CCVal) && "Expected a ConstantSDNode here!"); | ||||
19591 | unsigned CC = cast<ConstantSDNode>(CCVal)->getZExtValue(); | ||||
19592 | if (CC != AArch64CC::EQ && CC != AArch64CC::NE) | ||||
19593 | return SDValue(); | ||||
19594 | |||||
19595 | unsigned CmpOpc = Cmp.getOpcode(); | ||||
19596 | if (CmpOpc != AArch64ISD::ADDS && CmpOpc != AArch64ISD::SUBS) | ||||
19597 | return SDValue(); | ||||
19598 | |||||
19599 | // Only attempt folding if there is only one use of the flag and no use of the | ||||
19600 | // value. | ||||
19601 | if (!Cmp->hasNUsesOfValue(0, 0) || !Cmp->hasNUsesOfValue(1, 1)) | ||||
19602 | return SDValue(); | ||||
19603 | |||||
19604 | SDValue LHS = Cmp.getOperand(0); | ||||
19605 | SDValue RHS = Cmp.getOperand(1); | ||||
19606 | |||||
19607 | assert(LHS.getValueType() == RHS.getValueType() && | ||||
19608 | "Expected the value type to be the same for both operands!"); | ||||
19609 | if (LHS.getValueType() != MVT::i32 && LHS.getValueType() != MVT::i64) | ||||
19610 | return SDValue(); | ||||
19611 | |||||
19612 | if (isNullConstant(LHS)) | ||||
19613 | std::swap(LHS, RHS); | ||||
19614 | |||||
19615 | if (!isNullConstant(RHS)) | ||||
19616 | return SDValue(); | ||||
19617 | |||||
19618 | if (LHS.getOpcode() == ISD::SHL || LHS.getOpcode() == ISD::SRA || | ||||
19619 | LHS.getOpcode() == ISD::SRL) | ||||
19620 | return SDValue(); | ||||
19621 | |||||
19622 | // Fold the compare into the branch instruction. | ||||
19623 | SDValue BR; | ||||
19624 | if (CC == AArch64CC::EQ) | ||||
19625 | BR = DAG.getNode(AArch64ISD::CBZ, SDLoc(N), MVT::Other, Chain, LHS, Dest); | ||||
19626 | else | ||||
19627 | BR = DAG.getNode(AArch64ISD::CBNZ, SDLoc(N), MVT::Other, Chain, LHS, Dest); | ||||
19628 | |||||
19629 | // Do not add new nodes to DAG combiner worklist. | ||||
19630 | DCI.CombineTo(N, BR, false); | ||||
19631 | |||||
19632 | return SDValue(); | ||||
19633 | } | ||||
19634 | |||||
19635 | static SDValue foldCSELofCTTZ(SDNode *N, SelectionDAG &DAG) { | ||||
19636 | unsigned CC = N->getConstantOperandVal(2); | ||||
19637 | SDValue SUBS = N->getOperand(3); | ||||
19638 | SDValue Zero, CTTZ; | ||||
19639 | |||||
19640 | if (CC == AArch64CC::EQ && SUBS.getOpcode() == AArch64ISD::SUBS) { | ||||
19641 | Zero = N->getOperand(0); | ||||
19642 | CTTZ = N->getOperand(1); | ||||
19643 | } else if (CC == AArch64CC::NE && SUBS.getOpcode() == AArch64ISD::SUBS) { | ||||
19644 | Zero = N->getOperand(1); | ||||
19645 | CTTZ = N->getOperand(0); | ||||
19646 | } else | ||||
19647 | return SDValue(); | ||||
19648 | |||||
19649 | if ((CTTZ.getOpcode() != ISD::CTTZ && CTTZ.getOpcode() != ISD::TRUNCATE) || | ||||
19650 | (CTTZ.getOpcode() == ISD::TRUNCATE && | ||||
19651 | CTTZ.getOperand(0).getOpcode() != ISD::CTTZ)) | ||||
19652 | return SDValue(); | ||||
19653 | |||||
19654 | assert((CTTZ.getValueType() == MVT::i32 || CTTZ.getValueType() == MVT::i64) && | ||||
19655 | "Illegal type in CTTZ folding"); | ||||
19656 | |||||
19657 | if (!isNullConstant(Zero) || !isNullConstant(SUBS.getOperand(1))) | ||||
19658 | return SDValue(); | ||||
19659 | |||||
19660 | SDValue X = CTTZ.getOpcode() == ISD::TRUNCATE | ||||
19661 | ? CTTZ.getOperand(0).getOperand(0) | ||||
19662 | : CTTZ.getOperand(0); | ||||
19663 | |||||
19664 | if (X != SUBS.getOperand(0)) | ||||
19665 | return SDValue(); | ||||
19666 | |||||
19667 | unsigned BitWidth = CTTZ.getOpcode() == ISD::TRUNCATE | ||||
19668 | ? CTTZ.getOperand(0).getValueSizeInBits() | ||||
19669 | : CTTZ.getValueSizeInBits(); | ||||
19670 | SDValue BitWidthMinusOne = | ||||
19671 | DAG.getConstant(BitWidth - 1, SDLoc(N), CTTZ.getValueType()); | ||||
19672 | return DAG.getNode(ISD::AND, SDLoc(N), CTTZ.getValueType(), CTTZ, | ||||
19673 | BitWidthMinusOne); | ||||
19674 | } | ||||
19675 | |||||
19676 | // (CSEL l r EQ (CMP (CSEL x y cc2 cond) x)) => (CSEL l r cc2 cond) | ||||
19677 | // (CSEL l r EQ (CMP (CSEL x y cc2 cond) y)) => (CSEL l r !cc2 cond) | ||||
19678 | // Where x and y are constants | ||||
19679 | |||||
19680 | // (CSEL l r NE (CMP (CSEL x y cc2 cond) x)) => (CSEL l r !cc2 cond) | ||||
19681 | // (CSEL l r NE (CMP (CSEL x y cc2 cond) y)) => (CSEL l r cc2 cond) | ||||
19682 | // Where x and y are constants | ||||
19683 | static SDValue foldCSELOfCSEL(SDNode *Op, SelectionDAG &DAG) { | ||||
19684 | SDValue L = Op->getOperand(0); | ||||
19685 | SDValue R = Op->getOperand(1); | ||||
19686 | AArch64CC::CondCode OpCC = | ||||
19687 | static_cast<AArch64CC::CondCode>(Op->getConstantOperandVal(2)); | ||||
19688 | |||||
19689 | SDValue OpCmp = Op->getOperand(3); | ||||
19690 | if (!isCMP(OpCmp)) | ||||
19691 | return SDValue(); | ||||
19692 | |||||
19693 | SDValue CmpLHS = OpCmp.getOperand(0); | ||||
19694 | SDValue CmpRHS = OpCmp.getOperand(1); | ||||
19695 | |||||
19696 | if (CmpRHS.getOpcode() == AArch64ISD::CSEL) | ||||
19697 | std::swap(CmpLHS, CmpRHS); | ||||
19698 | else if (CmpLHS.getOpcode() != AArch64ISD::CSEL) | ||||
19699 | return SDValue(); | ||||
19700 | |||||
19701 | SDValue X = CmpLHS->getOperand(0); | ||||
19702 | SDValue Y = CmpLHS->getOperand(1); | ||||
19703 | if (!isa<ConstantSDNode>(X) || !isa<ConstantSDNode>(Y)) { | ||||
19704 | return SDValue(); | ||||
19705 | } | ||||
19706 | |||||
19707 | AArch64CC::CondCode CC = | ||||
19708 | static_cast<AArch64CC::CondCode>(CmpLHS->getConstantOperandVal(2)); | ||||
19709 | SDValue Cond = CmpLHS->getOperand(3); | ||||
19710 | |||||
19711 | if (CmpRHS == Y) | ||||
19712 | CC = AArch64CC::getInvertedCondCode(CC); | ||||
19713 | else if (CmpRHS != X) | ||||
19714 | return SDValue(); | ||||
19715 | |||||
19716 | if (OpCC == AArch64CC::NE) | ||||
19717 | CC = AArch64CC::getInvertedCondCode(CC); | ||||
19718 | else if (OpCC != AArch64CC::EQ) | ||||
19719 | return SDValue(); | ||||
19720 | |||||
19721 | SDLoc DL(Op); | ||||
19722 | EVT VT = Op->getValueType(0); | ||||
19723 | |||||
19724 | SDValue CCValue = DAG.getConstant(CC, DL, MVT::i32); | ||||
19725 | return DAG.getNode(AArch64ISD::CSEL, DL, VT, L, R, CCValue, Cond); | ||||
19726 | } | ||||
19727 | |||||
19728 | // Optimize CSEL instructions | ||||
19729 | static SDValue performCSELCombine(SDNode *N, | ||||
19730 | TargetLowering::DAGCombinerInfo &DCI, | ||||
19731 | SelectionDAG &DAG) { | ||||
19732 | // CSEL x, x, cc -> x | ||||
19733 | if (N->getOperand(0) == N->getOperand(1)) | ||||
19734 | return N->getOperand(0); | ||||
19735 | |||||
19736 | if (SDValue R = foldCSELOfCSEL(N, DAG)) | ||||
19737 | return R; | ||||
19738 | |||||
19739 | // CSEL 0, cttz(X), eq(X, 0) -> AND cttz bitwidth-1 | ||||
19740 | // CSEL cttz(X), 0, ne(X, 0) -> AND cttz bitwidth-1 | ||||
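| // This works because cttz(0) == bitwidth and bitwidth & (bitwidth - 1) == 0, | ||||
| // so masking the cttz result with bitwidth-1 reproduces the selected zero. | ||||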
19741 | if (SDValue Folded = foldCSELofCTTZ(N, DAG)) | ||||
19742 | return Folded; | ||||
19743 | |||||
19744 | return performCONDCombine(N, DCI, DAG, 2, 3); | ||||
19745 | } | ||||
19746 | |||||
19747 | // Try to re-use an already extended operand of a vector SetCC feeding an | ||||
19748 | // extended select. Doing so avoids requiring another full extension of the | ||||
19749 | // SET_CC result when lowering the select. | ||||
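| // e.g. (illustrative): if (sext X) to v4i32 already exists in the DAG, a | ||||
| // v4i32 vselect fed by (setcc X, splat(C)) can compare the extended operands | ||||
| // instead, so the SetCC result needs no second full extension. | ||||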
19750 | static SDValue tryToWidenSetCCOperands(SDNode *Op, SelectionDAG &DAG) { | ||||
19751 | EVT Op0MVT = Op->getOperand(0).getValueType(); | ||||
19752 | if (!Op0MVT.isVector() || Op->use_empty()) | ||||
19753 | return SDValue(); | ||||
19754 | |||||
19755 | // Make sure that all uses of Op are VSELECTs with result matching types where | ||||
19756 | // the result type has a larger element type than the SetCC operand. | ||||
19757 | SDNode *FirstUse = *Op->use_begin(); | ||||
19758 | if (FirstUse->getOpcode() != ISD::VSELECT) | ||||
19759 | return SDValue(); | ||||
19760 | EVT UseMVT = FirstUse->getValueType(0); | ||||
19761 | if (UseMVT.getScalarSizeInBits() <= Op0MVT.getScalarSizeInBits()) | ||||
19762 | return SDValue(); | ||||
19763 | if (any_of(Op->uses(), [&UseMVT](const SDNode *N) { | ||||
19764 | return N->getOpcode() != ISD::VSELECT || N->getValueType(0) != UseMVT; | ||||
19765 | })) | ||||
19766 | return SDValue(); | ||||
19767 | |||||
19768 | APInt V; | ||||
19769 | if (!ISD::isConstantSplatVector(Op->getOperand(1).getNode(), V)) | ||||
19770 | return SDValue(); | ||||
19771 | |||||
19772 | SDLoc DL(Op); | ||||
19773 | SDValue Op0ExtV; | ||||
19774 | SDValue Op1ExtV; | ||||
19775 | ISD::CondCode CC = cast<CondCodeSDNode>(Op->getOperand(2))->get(); | ||||
19776 | // Check if the first operand of the SET_CC is already extended. If it is, | ||||
19777 | // split the SET_CC and re-use the extended version of the operand. | ||||
19778 | SDNode *Op0SExt = DAG.getNodeIfExists(ISD::SIGN_EXTEND, DAG.getVTList(UseMVT), | ||||
19779 | Op->getOperand(0)); | ||||
19780 | SDNode *Op0ZExt = DAG.getNodeIfExists(ISD::ZERO_EXTEND, DAG.getVTList(UseMVT), | ||||
19781 | Op->getOperand(0)); | ||||
19782 | if (Op0SExt && (isSignedIntSetCC(CC) || isIntEqualitySetCC(CC))) { | ||||
19783 | Op0ExtV = SDValue(Op0SExt, 0); | ||||
19784 | Op1ExtV = DAG.getNode(ISD::SIGN_EXTEND, DL, UseMVT, Op->getOperand(1)); | ||||
19785 | } else if (Op0ZExt && (isUnsignedIntSetCC(CC) || isIntEqualitySetCC(CC))) { | ||||
19786 | Op0ExtV = SDValue(Op0ZExt, 0); | ||||
19787 | Op1ExtV = DAG.getNode(ISD::ZERO_EXTEND, DL, UseMVT, Op->getOperand(1)); | ||||
19788 | } else | ||||
19789 | return SDValue(); | ||||
19790 | |||||
19791 | return DAG.getNode(ISD::SETCC, DL, UseMVT.changeVectorElementType(MVT::i1), | ||||
19792 | Op0ExtV, Op1ExtV, Op->getOperand(2)); | ||||
19793 | } | ||||
19794 | |||||
19795 | static SDValue performSETCCCombine(SDNode *N, | ||||
19796 | TargetLowering::DAGCombinerInfo &DCI, | ||||
19797 | SelectionDAG &DAG) { | ||||
19798 | assert(N->getOpcode() == ISD::SETCC && "Unexpected opcode!"); | ||||
19799 | SDValue LHS = N->getOperand(0); | ||||
19800 | SDValue RHS = N->getOperand(1); | ||||
19801 | ISD::CondCode Cond = cast<CondCodeSDNode>(N->getOperand(2))->get(); | ||||
19802 | SDLoc DL(N); | ||||
19803 | EVT VT = N->getValueType(0); | ||||
19804 | |||||
19805 | if (SDValue V = tryToWidenSetCCOperands(N, DAG)) | ||||
19806 | return V; | ||||
19807 | |||||
19808 | // setcc (csel 0, 1, cond, X), 1, ne ==> csel 0, 1, !cond, X | ||||
19809 | if (Cond == ISD::SETNE && isOneConstant(RHS) && | ||||
19810 | LHS->getOpcode() == AArch64ISD::CSEL && | ||||
19811 | isNullConstant(LHS->getOperand(0)) && isOneConstant(LHS->getOperand(1)) && | ||||
19812 | LHS->hasOneUse()) { | ||||
19813 | // Invert CSEL's condition. | ||||
19814 | auto *OpCC = cast<ConstantSDNode>(LHS.getOperand(2)); | ||||
19815 | auto OldCond = static_cast<AArch64CC::CondCode>(OpCC->getZExtValue()); | ||||
19816 | auto NewCond = getInvertedCondCode(OldCond); | ||||
19817 | |||||
19818 | // csel 0, 1, !cond, X | ||||
19819 | SDValue CSEL = | ||||
19820 | DAG.getNode(AArch64ISD::CSEL, DL, LHS.getValueType(), LHS.getOperand(0), | ||||
19821 | LHS.getOperand(1), DAG.getConstant(NewCond, DL, MVT::i32), | ||||
19822 | LHS.getOperand(3)); | ||||
19823 | return DAG.getZExtOrTrunc(CSEL, DL, VT); | ||||
19824 | } | ||||
19825 | |||||
19826 | // setcc (srl x, imm), 0, ne ==> setcc (and x, (-1 << imm)), 0, ne | ||||
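// E.g. with imm == 4 on an i64 value this becomes
//   setcc (and x, 0xFFFFFFFFFFFFFFF0), 0, ne
// which emitComparison can select as a single TST with a logical immediate.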
19827 | if (Cond == ISD::SETNE && isNullConstant(RHS) && | ||||
19828 | LHS->getOpcode() == ISD::SRL && isa<ConstantSDNode>(LHS->getOperand(1)) && | ||||
19829 | LHS->hasOneUse()) { | ||||
19830 | EVT TstVT = LHS->getValueType(0); | ||||
19831 | if (TstVT.isScalarInteger() && TstVT.getFixedSizeInBits() <= 64) { | ||||
// emitComparison produces better code for this pattern.
19833 | uint64_t TstImm = -1ULL << LHS->getConstantOperandVal(1); | ||||
19834 | SDValue TST = DAG.getNode(ISD::AND, DL, TstVT, LHS->getOperand(0), | ||||
19835 | DAG.getConstant(TstImm, DL, TstVT)); | ||||
19836 | return DAG.getNode(ISD::SETCC, DL, VT, TST, RHS, N->getOperand(2)); | ||||
19837 | } | ||||
19838 | } | ||||
19839 | |||||
19840 | // setcc (iN (bitcast (vNi1 X))), 0, (eq|ne) | ||||
19841 | // ==> setcc (iN (zext (i1 (vecreduce_or (vNi1 X))))), 0, (eq|ne) | ||||
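// E.g. (i16 (bitcast (v16i1 X))) != 0 asks whether any lane of X is set,
// which is exactly what (zext (i1 (vecreduce_or X))) != 0 computes.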
19842 | if (DCI.isBeforeLegalize() && VT.isScalarInteger() && | ||||
19843 | (Cond == ISD::SETEQ || Cond == ISD::SETNE) && isNullConstant(RHS) && | ||||
19844 | LHS->getOpcode() == ISD::BITCAST) { | ||||
19845 | EVT ToVT = LHS->getValueType(0); | ||||
19846 | EVT FromVT = LHS->getOperand(0).getValueType(); | ||||
19847 | if (FromVT.isFixedLengthVector() && | ||||
19848 | FromVT.getVectorElementType() == MVT::i1) { | ||||
19849 | LHS = DAG.getNode(ISD::VECREDUCE_OR, DL, MVT::i1, LHS->getOperand(0)); | ||||
19850 | LHS = DAG.getNode(ISD::ZERO_EXTEND, DL, ToVT, LHS); | ||||
19851 | return DAG.getSetCC(DL, VT, LHS, RHS, Cond); | ||||
19852 | } | ||||
19853 | } | ||||
19854 | |||||
19855 | // Try to perform the memcmp when the result is tested for [in]equality with 0 | ||||
19856 | if (SDValue V = performOrXorChainCombine(N, DAG)) | ||||
19857 | return V; | ||||
19858 | |||||
19859 | return SDValue(); | ||||
19860 | } | ||||
19861 | |||||
19862 | // Replace a flag-setting operator (eg ANDS) with the generic version | ||||
19863 | // (eg AND) if the flag is unused. | ||||
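// For example, an ANDS whose NZCV result is unused becomes a plain AND, and
// an existing AND with the same operands is folded into the surviving
// flag-setting node so the computation is only done once.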
19864 | static SDValue performFlagSettingCombine(SDNode *N, | ||||
19865 | TargetLowering::DAGCombinerInfo &DCI, | ||||
19866 | unsigned GenericOpcode) { | ||||
19867 | SDLoc DL(N); | ||||
19868 | SDValue LHS = N->getOperand(0); | ||||
19869 | SDValue RHS = N->getOperand(1); | ||||
19870 | EVT VT = N->getValueType(0); | ||||
19871 | |||||
19872 | // If the flag result isn't used, convert back to a generic opcode. | ||||
19873 | if (!N->hasAnyUseOfValue(1)) { | ||||
19874 | SDValue Res = DCI.DAG.getNode(GenericOpcode, DL, VT, N->ops()); | ||||
19875 | return DCI.DAG.getMergeValues({Res, DCI.DAG.getConstant(0, DL, MVT::i32)}, | ||||
19876 | DL); | ||||
19877 | } | ||||
19878 | |||||
19879 | // Combine identical generic nodes into this node, re-using the result. | ||||
19880 | if (SDNode *Generic = DCI.DAG.getNodeIfExists( | ||||
19881 | GenericOpcode, DCI.DAG.getVTList(VT), {LHS, RHS})) | ||||
19882 | DCI.CombineTo(Generic, SDValue(N, 0)); | ||||
19883 | |||||
19884 | return SDValue(); | ||||
19885 | } | ||||
19886 | |||||
19887 | static SDValue performSetCCPunpkCombine(SDNode *N, SelectionDAG &DAG) { | ||||
19888 | // setcc_merge_zero pred | ||||
19889 | // (sign_extend (extract_subvector (setcc_merge_zero ... pred ...))), 0, ne | ||||
19890 | // => extract_subvector (inner setcc_merge_zero) | ||||
19891 | SDValue Pred = N->getOperand(0); | ||||
19892 | SDValue LHS = N->getOperand(1); | ||||
19893 | SDValue RHS = N->getOperand(2); | ||||
19894 | ISD::CondCode Cond = cast<CondCodeSDNode>(N->getOperand(3))->get(); | ||||
19895 | |||||
19896 | if (Cond != ISD::SETNE || !isZerosVector(RHS.getNode()) || | ||||
19897 | LHS->getOpcode() != ISD::SIGN_EXTEND) | ||||
19898 | return SDValue(); | ||||
19899 | |||||
19900 | SDValue Extract = LHS->getOperand(0); | ||||
19901 | if (Extract->getOpcode() != ISD::EXTRACT_SUBVECTOR || | ||||
19902 | Extract->getValueType(0) != N->getValueType(0) || | ||||
19903 | Extract->getConstantOperandVal(1) != 0) | ||||
19904 | return SDValue(); | ||||
19905 | |||||
19906 | SDValue InnerSetCC = Extract->getOperand(0); | ||||
19907 | if (InnerSetCC->getOpcode() != AArch64ISD::SETCC_MERGE_ZERO) | ||||
19908 | return SDValue(); | ||||
19909 | |||||
19910 | // By this point we've effectively got | ||||
19911 | // zero_inactive_lanes_and_trunc_i1(sext_i1(A)). If we can prove A's inactive | ||||
19912 | // lanes are already zero then the trunc(sext()) sequence is redundant and we | ||||
19913 | // can operate on A directly. | ||||
19914 | SDValue InnerPred = InnerSetCC.getOperand(0); | ||||
19915 | if (Pred.getOpcode() == AArch64ISD::PTRUE && | ||||
19916 | InnerPred.getOpcode() == AArch64ISD::PTRUE && | ||||
19917 | Pred.getConstantOperandVal(0) == InnerPred.getConstantOperandVal(0) && | ||||
19918 | Pred->getConstantOperandVal(0) >= AArch64SVEPredPattern::vl1 && | ||||
19919 | Pred->getConstantOperandVal(0) <= AArch64SVEPredPattern::vl256) | ||||
19920 | return Extract; | ||||
19921 | |||||
19922 | return SDValue(); | ||||
19923 | } | ||||
19924 | |||||
19925 | static SDValue | ||||
19926 | performSetccMergeZeroCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { | ||||
assert(N->getOpcode() == AArch64ISD::SETCC_MERGE_ZERO &&
       "Unexpected opcode!");
19929 | |||||
19930 | SelectionDAG &DAG = DCI.DAG; | ||||
19931 | SDValue Pred = N->getOperand(0); | ||||
19932 | SDValue LHS = N->getOperand(1); | ||||
19933 | SDValue RHS = N->getOperand(2); | ||||
19934 | ISD::CondCode Cond = cast<CondCodeSDNode>(N->getOperand(3))->get(); | ||||
19935 | |||||
19936 | if (SDValue V = performSetCCPunpkCombine(N, DAG)) | ||||
19937 | return V; | ||||
19938 | |||||
19939 | if (Cond == ISD::SETNE && isZerosVector(RHS.getNode()) && | ||||
19940 | LHS->getOpcode() == ISD::SIGN_EXTEND && | ||||
19941 | LHS->getOperand(0)->getValueType(0) == N->getValueType(0)) { | ||||
19942 | // setcc_merge_zero( | ||||
19943 | // pred, extend(setcc_merge_zero(pred, ...)), != splat(0)) | ||||
19944 | // => setcc_merge_zero(pred, ...) | ||||
19945 | if (LHS->getOperand(0)->getOpcode() == AArch64ISD::SETCC_MERGE_ZERO && | ||||
19946 | LHS->getOperand(0)->getOperand(0) == Pred) | ||||
19947 | return LHS->getOperand(0); | ||||
19948 | |||||
19949 | // setcc_merge_zero( | ||||
19950 | // all_active, extend(nxvNi1 ...), != splat(0)) | ||||
19951 | // -> nxvNi1 ... | ||||
19952 | if (isAllActivePredicate(DAG, Pred)) | ||||
19953 | return LHS->getOperand(0); | ||||
19954 | |||||
19955 | // setcc_merge_zero( | ||||
19956 | // pred, extend(nxvNi1 ...), != splat(0)) | ||||
19957 | // -> nxvNi1 and(pred, ...) | ||||
19958 | if (DCI.isAfterLegalizeDAG()) | ||||
19959 | // Do this after legalization to allow more folds on setcc_merge_zero | ||||
19960 | // to be recognized. | ||||
19961 | return DAG.getNode(ISD::AND, SDLoc(N), N->getValueType(0), | ||||
19962 | LHS->getOperand(0), Pred); | ||||
19963 | } | ||||
19964 | |||||
19965 | return SDValue(); | ||||
19966 | } | ||||
19967 | |||||
19968 | // Optimize some simple tbz/tbnz cases. Returns the new operand and bit to test | ||||
19969 | // as well as whether the test should be inverted. This code is required to | ||||
19970 | // catch these cases (as opposed to standard dag combines) because | ||||
19971 | // AArch64ISD::TBZ is matched during legalization. | ||||
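// For example, (tbz (and (srl x, 3), 1), 0) is folded here to (tbz x, 3),
// and (tbz (xor x, -1), b) flips Invert so the caller emits TBNZ instead.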
19972 | static SDValue getTestBitOperand(SDValue Op, unsigned &Bit, bool &Invert, | ||||
19973 | SelectionDAG &DAG) { | ||||
19974 | |||||
19975 | if (!Op->hasOneUse()) | ||||
19976 | return Op; | ||||
19977 | |||||
19978 | // We don't handle undef/constant-fold cases below, as they should have | ||||
19979 | // already been taken care of (e.g. and of 0, test of undefined shifted bits, | ||||
19980 | // etc.) | ||||
19981 | |||||
19982 | // (tbz (trunc x), b) -> (tbz x, b) | ||||
19983 | // This case is just here to enable more of the below cases to be caught. | ||||
19984 | if (Op->getOpcode() == ISD::TRUNCATE && | ||||
19985 | Bit < Op->getValueType(0).getSizeInBits()) { | ||||
19986 | return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG); | ||||
19987 | } | ||||
19988 | |||||
19989 | // (tbz (any_ext x), b) -> (tbz x, b) if we don't use the extended bits. | ||||
19990 | if (Op->getOpcode() == ISD::ANY_EXTEND && | ||||
19991 | Bit < Op->getOperand(0).getValueSizeInBits()) { | ||||
19992 | return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG); | ||||
19993 | } | ||||
19994 | |||||
19995 | if (Op->getNumOperands() != 2) | ||||
19996 | return Op; | ||||
19997 | |||||
19998 | auto *C = dyn_cast<ConstantSDNode>(Op->getOperand(1)); | ||||
19999 | if (!C) | ||||
20000 | return Op; | ||||
20001 | |||||
20002 | switch (Op->getOpcode()) { | ||||
20003 | default: | ||||
20004 | return Op; | ||||
20005 | |||||
20006 | // (tbz (and x, m), b) -> (tbz x, b) | ||||
20007 | case ISD::AND: | ||||
20008 | if ((C->getZExtValue() >> Bit) & 1) | ||||
20009 | return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG); | ||||
20010 | return Op; | ||||
20011 | |||||
20012 | // (tbz (shl x, c), b) -> (tbz x, b-c) | ||||
20013 | case ISD::SHL: | ||||
20014 | if (C->getZExtValue() <= Bit && | ||||
20015 | (Bit - C->getZExtValue()) < Op->getValueType(0).getSizeInBits()) { | ||||
20016 | Bit = Bit - C->getZExtValue(); | ||||
20017 | return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG); | ||||
20018 | } | ||||
20019 | return Op; | ||||
20020 | |||||
// (tbz (sra x, c), b) -> (tbz x, b+c), or (tbz x, msb) if b+c >= # bits in x
20022 | case ISD::SRA: | ||||
20023 | Bit = Bit + C->getZExtValue(); | ||||
20024 | if (Bit >= Op->getValueType(0).getSizeInBits()) | ||||
20025 | Bit = Op->getValueType(0).getSizeInBits() - 1; | ||||
20026 | return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG); | ||||
20027 | |||||
20028 | // (tbz (srl x, c), b) -> (tbz x, b+c) | ||||
20029 | case ISD::SRL: | ||||
20030 | if ((Bit + C->getZExtValue()) < Op->getValueType(0).getSizeInBits()) { | ||||
20031 | Bit = Bit + C->getZExtValue(); | ||||
20032 | return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG); | ||||
20033 | } | ||||
20034 | return Op; | ||||
20035 | |||||
20036 | // (tbz (xor x, -1), b) -> (tbnz x, b) | ||||
20037 | case ISD::XOR: | ||||
20038 | if ((C->getZExtValue() >> Bit) & 1) | ||||
20039 | Invert = !Invert; | ||||
20040 | return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG); | ||||
20041 | } | ||||
20042 | } | ||||
20043 | |||||
20044 | // Optimize test single bit zero/non-zero and branch. | ||||
20045 | static SDValue performTBZCombine(SDNode *N, | ||||
20046 | TargetLowering::DAGCombinerInfo &DCI, | ||||
20047 | SelectionDAG &DAG) { | ||||
20048 | unsigned Bit = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue(); | ||||
20049 | bool Invert = false; | ||||
20050 | SDValue TestSrc = N->getOperand(1); | ||||
20051 | SDValue NewTestSrc = getTestBitOperand(TestSrc, Bit, Invert, DAG); | ||||
20052 | |||||
20053 | if (TestSrc == NewTestSrc) | ||||
20054 | return SDValue(); | ||||
20055 | |||||
20056 | unsigned NewOpc = N->getOpcode(); | ||||
20057 | if (Invert) { | ||||
20058 | if (NewOpc == AArch64ISD::TBZ) | ||||
20059 | NewOpc = AArch64ISD::TBNZ; | ||||
20060 | else { | ||||
assert(NewOpc == AArch64ISD::TBNZ);
20062 | NewOpc = AArch64ISD::TBZ; | ||||
20063 | } | ||||
20064 | } | ||||
20065 | |||||
20066 | SDLoc DL(N); | ||||
20067 | return DAG.getNode(NewOpc, DL, MVT::Other, N->getOperand(0), NewTestSrc, | ||||
20068 | DAG.getConstant(Bit, DL, MVT::i64), N->getOperand(3)); | ||||
20069 | } | ||||
20070 | |||||
// Swap vselect operands where doing so may allow a predicated operation to
// implement the `sel`:
//
// (vselect (setcc ( condcode) (_) (_)) (a) (op (a) (b)))
// => (vselect (setcc (!condcode) (_) (_)) (op (a) (b)) (a))
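//
// For example, with an integer predicate:
//   (vselect (setcc slt p, q) a (fadd a b))
// => (vselect (setcc sge p, q) (fadd a b) a)
// which maps naturally onto a predicated FADD that merges into a.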
20076 | static SDValue trySwapVSelectOperands(SDNode *N, SelectionDAG &DAG) { | ||||
20077 | auto SelectA = N->getOperand(1); | ||||
20078 | auto SelectB = N->getOperand(2); | ||||
20079 | auto NTy = N->getValueType(0); | ||||
20080 | |||||
20081 | if (!NTy.isScalableVector()) | ||||
20082 | return SDValue(); | ||||
20083 | SDValue SetCC = N->getOperand(0); | ||||
20084 | if (SetCC.getOpcode() != ISD::SETCC || !SetCC.hasOneUse()) | ||||
20085 | return SDValue(); | ||||
20086 | |||||
20087 | switch (SelectB.getOpcode()) { | ||||
20088 | default: | ||||
20089 | return SDValue(); | ||||
20090 | case ISD::FMUL: | ||||
20091 | case ISD::FSUB: | ||||
20092 | case ISD::FADD: | ||||
20093 | break; | ||||
20094 | } | ||||
20095 | if (SelectA != SelectB.getOperand(0)) | ||||
20096 | return SDValue(); | ||||
20097 | |||||
20098 | ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get(); | ||||
20099 | ISD::CondCode InverseCC = | ||||
20100 | ISD::getSetCCInverse(CC, SetCC.getOperand(0).getValueType()); | ||||
20101 | auto InverseSetCC = | ||||
20102 | DAG.getSetCC(SDLoc(SetCC), SetCC.getValueType(), SetCC.getOperand(0), | ||||
20103 | SetCC.getOperand(1), InverseCC); | ||||
20104 | |||||
20105 | return DAG.getNode(ISD::VSELECT, SDLoc(N), NTy, | ||||
20106 | {InverseSetCC, SelectB, SelectA}); | ||||
20107 | } | ||||
20108 | |||||
20109 | // vselect (v1i1 setcc) -> | ||||
20110 | // vselect (v1iXX setcc) (XX is the size of the compared operand type) | ||||
20111 | // FIXME: Currently the type legalizer can't handle VSELECT having v1i1 as | ||||
20112 | // condition. If it can legalize "VSELECT v1i1" correctly, no need to combine | ||||
20113 | // such VSELECT. | ||||
20114 | static SDValue performVSelectCombine(SDNode *N, SelectionDAG &DAG) { | ||||
20115 | if (auto SwapResult = trySwapVSelectOperands(N, DAG)) | ||||
20116 | return SwapResult; | ||||
20117 | |||||
20118 | SDValue N0 = N->getOperand(0); | ||||
20119 | EVT CCVT = N0.getValueType(); | ||||
20120 | |||||
20121 | if (isAllActivePredicate(DAG, N0)) | ||||
20122 | return N->getOperand(1); | ||||
20123 | |||||
20124 | if (isAllInactivePredicate(N0)) | ||||
20125 | return N->getOperand(2); | ||||
20126 | |||||
// Check for sign pattern (VSELECT setgt, iN lhs, -1, 1, -1) and transform
// into (OR (ASR lhs, N-1), 1), which requires fewer instructions for the
// supported types.
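// E.g. for v4i32, (vselect (setgt x, splat(-1)), splat(1), splat(-1))
// becomes (or (sra x, splat(31)), splat(1)): the arithmetic shift yields
// 0 or -1 per lane, and OR-ing with 1 maps those to 1 and -1 respectively.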
20130 | SDValue SetCC = N->getOperand(0); | ||||
20131 | if (SetCC.getOpcode() == ISD::SETCC && | ||||
20132 | SetCC.getOperand(2) == DAG.getCondCode(ISD::SETGT)) { | ||||
20133 | SDValue CmpLHS = SetCC.getOperand(0); | ||||
20134 | EVT VT = CmpLHS.getValueType(); | ||||
20135 | SDNode *CmpRHS = SetCC.getOperand(1).getNode(); | ||||
20136 | SDNode *SplatLHS = N->getOperand(1).getNode(); | ||||
20137 | SDNode *SplatRHS = N->getOperand(2).getNode(); | ||||
20138 | APInt SplatLHSVal; | ||||
20139 | if (CmpLHS.getValueType() == N->getOperand(1).getValueType() && | ||||
20140 | VT.isSimple() && | ||||
20141 | is_contained( | ||||
20142 | makeArrayRef({MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16, | ||||
20143 | MVT::v2i32, MVT::v4i32, MVT::v2i64}), | ||||
20144 | VT.getSimpleVT().SimpleTy) && | ||||
20145 | ISD::isConstantSplatVector(SplatLHS, SplatLHSVal) && | ||||
20146 | SplatLHSVal.isOne() && ISD::isConstantSplatVectorAllOnes(CmpRHS) && | ||||
20147 | ISD::isConstantSplatVectorAllOnes(SplatRHS)) { | ||||
20148 | unsigned NumElts = VT.getVectorNumElements(); | ||||
20149 | SmallVector<SDValue, 8> Ops( | ||||
20150 | NumElts, DAG.getConstant(VT.getScalarSizeInBits() - 1, SDLoc(N), | ||||
20151 | VT.getScalarType())); | ||||
20152 | SDValue Val = DAG.getBuildVector(VT, SDLoc(N), Ops); | ||||
20153 | |||||
20154 | auto Shift = DAG.getNode(ISD::SRA, SDLoc(N), VT, CmpLHS, Val); | ||||
20155 | auto Or = DAG.getNode(ISD::OR, SDLoc(N), VT, Shift, N->getOperand(1)); | ||||
20156 | return Or; | ||||
20157 | } | ||||
20158 | } | ||||
20159 | |||||
20160 | if (N0.getOpcode() != ISD::SETCC || | ||||
20161 | CCVT.getVectorElementCount() != ElementCount::getFixed(1) || | ||||
20162 | CCVT.getVectorElementType() != MVT::i1) | ||||
20163 | return SDValue(); | ||||
20164 | |||||
20165 | EVT ResVT = N->getValueType(0); | ||||
20166 | EVT CmpVT = N0.getOperand(0).getValueType(); | ||||
20167 | // Only combine when the result type is of the same size as the compared | ||||
20168 | // operands. | ||||
20169 | if (ResVT.getSizeInBits() != CmpVT.getSizeInBits()) | ||||
20170 | return SDValue(); | ||||
20171 | |||||
20172 | SDValue IfTrue = N->getOperand(1); | ||||
20173 | SDValue IfFalse = N->getOperand(2); | ||||
20174 | SetCC = DAG.getSetCC(SDLoc(N), CmpVT.changeVectorElementTypeToInteger(), | ||||
20175 | N0.getOperand(0), N0.getOperand(1), | ||||
20176 | cast<CondCodeSDNode>(N0.getOperand(2))->get()); | ||||
20177 | return DAG.getNode(ISD::VSELECT, SDLoc(N), ResVT, SetCC, | ||||
20178 | IfTrue, IfFalse); | ||||
20179 | } | ||||
20180 | |||||
20181 | /// A vector select: "(select vL, vR, (setcc LHS, RHS))" is best performed with | ||||
20182 | /// the compare-mask instructions rather than going via NZCV, even if LHS and | ||||
20183 | /// RHS are really scalar. This replaces any scalar setcc in the above pattern | ||||
20184 | /// with a vector one followed by a DUP shuffle on the result. | ||||
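/// For example, (select (setcc olt, f64 a, f64 b), v2f64 x, v2f64 y) is
/// rewritten so a and b are compared in lane 0 of a v2f64 compare, the lane-0
/// mask is duplicated across the vector, and a VSELECT on that mask picks
/// between x and y.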
20185 | static SDValue performSelectCombine(SDNode *N, | ||||
20186 | TargetLowering::DAGCombinerInfo &DCI) { | ||||
20187 | SelectionDAG &DAG = DCI.DAG; | ||||
20188 | SDValue N0 = N->getOperand(0); | ||||
20189 | EVT ResVT = N->getValueType(0); | ||||
20190 | |||||
20191 | if (N0.getOpcode() != ISD::SETCC) | ||||
20192 | return SDValue(); | ||||
20193 | |||||
20194 | if (ResVT.isScalableVector()) | ||||
20195 | return SDValue(); | ||||
20196 | |||||
20197 | // Make sure the SETCC result is either i1 (initial DAG), or i32, the lowered | ||||
20198 | // scalar SetCCResultType. We also don't expect vectors, because we assume | ||||
20199 | // that selects fed by vector SETCCs are canonicalized to VSELECT. | ||||
assert((N0.getValueType() == MVT::i1 || N0.getValueType() == MVT::i32) &&
       "Scalar-SETCC feeding SELECT has unexpected result type!");
20202 | |||||
// If NumMaskElts == 0, the comparison is larger than the select result. The
// largest real NEON comparison is 64 bits per lane, which means the result
// is at most 32 bits and an illegal vector. Just bail out for now.
20206 | EVT SrcVT = N0.getOperand(0).getValueType(); | ||||
20207 | |||||
20208 | // Don't try to do this optimization when the setcc itself has i1 operands. | ||||
20209 | // There are no legal vectors of i1, so this would be pointless. | ||||
20210 | if (SrcVT == MVT::i1) | ||||
20211 | return SDValue(); | ||||
20212 | |||||
20213 | int NumMaskElts = ResVT.getSizeInBits() / SrcVT.getSizeInBits(); | ||||
20214 | if (!ResVT.isVector() || NumMaskElts == 0) | ||||
20215 | return SDValue(); | ||||
20216 | |||||
20217 | SrcVT = EVT::getVectorVT(*DAG.getContext(), SrcVT, NumMaskElts); | ||||
20218 | EVT CCVT = SrcVT.changeVectorElementTypeToInteger(); | ||||
20219 | |||||
20220 | // Also bail out if the vector CCVT isn't the same size as ResVT. | ||||
20221 | // This can happen if the SETCC operand size doesn't divide the ResVT size | ||||
20222 | // (e.g., f64 vs v3f32). | ||||
20223 | if (CCVT.getSizeInBits() != ResVT.getSizeInBits()) | ||||
20224 | return SDValue(); | ||||
20225 | |||||
20226 | // Make sure we didn't create illegal types, if we're not supposed to. | ||||
assert(DCI.isBeforeLegalize() ||
       DAG.getTargetLoweringInfo().isTypeLegal(SrcVT));
20229 | |||||
20230 | // First perform a vector comparison, where lane 0 is the one we're interested | ||||
20231 | // in. | ||||
20232 | SDLoc DL(N0); | ||||
20233 | SDValue LHS = | ||||
20234 | DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, SrcVT, N0.getOperand(0)); | ||||
20235 | SDValue RHS = | ||||
20236 | DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, SrcVT, N0.getOperand(1)); | ||||
20237 | SDValue SetCC = DAG.getNode(ISD::SETCC, DL, CCVT, LHS, RHS, N0.getOperand(2)); | ||||
20238 | |||||
20239 | // Now duplicate the comparison mask we want across all other lanes. | ||||
20240 | SmallVector<int, 8> DUPMask(CCVT.getVectorNumElements(), 0); | ||||
20241 | SDValue Mask = DAG.getVectorShuffle(CCVT, DL, SetCC, SetCC, DUPMask); | ||||
20242 | Mask = DAG.getNode(ISD::BITCAST, DL, | ||||
20243 | ResVT.changeVectorElementTypeToInteger(), Mask); | ||||
20244 | |||||
20245 | return DAG.getSelect(DL, ResVT, Mask, N->getOperand(1), N->getOperand(2)); | ||||
20246 | } | ||||
20247 | |||||
20248 | static SDValue performDUPCombine(SDNode *N, | ||||
20249 | TargetLowering::DAGCombinerInfo &DCI) { | ||||
20250 | EVT VT = N->getValueType(0); | ||||
// If "v2i32 DUP(x)" and "v4i32 DUP(x)" both exist, use an extract from the
// 128-bit vector version.
20253 | if (VT.is64BitVector() && DCI.isAfterLegalizeDAG()) { | ||||
20254 | EVT LVT = VT.getDoubleNumVectorElementsVT(*DCI.DAG.getContext()); | ||||
20255 | if (SDNode *LN = DCI.DAG.getNodeIfExists( | ||||
20256 | N->getOpcode(), DCI.DAG.getVTList(LVT), {N->getOperand(0)})) { | ||||
20257 | SDLoc DL(N); | ||||
20258 | return DCI.DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, SDValue(LN, 0), | ||||
20259 | DCI.DAG.getConstant(0, DL, MVT::i64)); | ||||
20260 | } | ||||
20261 | } | ||||
20262 | |||||
20263 | return performPostLD1Combine(N, DCI, false); | ||||
20264 | } | ||||
20265 | |||||
20266 | /// Get rid of unnecessary NVCASTs (that don't change the type). | ||||
20267 | static SDValue performNVCASTCombine(SDNode *N) { | ||||
20268 | if (N->getValueType(0) == N->getOperand(0).getValueType()) | ||||
20269 | return N->getOperand(0); | ||||
20270 | |||||
20271 | return SDValue(); | ||||
20272 | } | ||||
20273 | |||||
20274 | // If all users of the globaladdr are of the form (globaladdr + constant), find | ||||
20275 | // the smallest constant, fold it into the globaladdr's offset and rewrite the | ||||
20276 | // globaladdr as (globaladdr + constant) - constant. | ||||
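// For example, if the only uses are (globaladdr + 4) and (globaladdr + 8),
// MinOffset is 4 and the node becomes ((globaladdr + 4) - 4); the first use
// then folds to plain (globaladdr + 4) and the second to
// ((globaladdr + 4) + 4).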
20277 | static SDValue performGlobalAddressCombine(SDNode *N, SelectionDAG &DAG, | ||||
20278 | const AArch64Subtarget *Subtarget, | ||||
20279 | const TargetMachine &TM) { | ||||
20280 | auto *GN = cast<GlobalAddressSDNode>(N); | ||||
20281 | if (Subtarget->ClassifyGlobalReference(GN->getGlobal(), TM) != | ||||
20282 | AArch64II::MO_NO_FLAG) | ||||
20283 | return SDValue(); | ||||
20284 | |||||
20285 | uint64_t MinOffset = -1ull; | ||||
20286 | for (SDNode *N : GN->uses()) { | ||||
20287 | if (N->getOpcode() != ISD::ADD) | ||||
20288 | return SDValue(); | ||||
20289 | auto *C = dyn_cast<ConstantSDNode>(N->getOperand(0)); | ||||
20290 | if (!C) | ||||
20291 | C = dyn_cast<ConstantSDNode>(N->getOperand(1)); | ||||
20292 | if (!C) | ||||
20293 | return SDValue(); | ||||
20294 | MinOffset = std::min(MinOffset, C->getZExtValue()); | ||||
20295 | } | ||||
20296 | uint64_t Offset = MinOffset + GN->getOffset(); | ||||
20297 | |||||
20298 | // Require that the new offset is larger than the existing one. Otherwise, we | ||||
20299 | // can end up oscillating between two possible DAGs, for example, | ||||
20300 | // (add (add globaladdr + 10, -1), 1) and (add globaladdr + 9, 1). | ||||
20301 | if (Offset <= uint64_t(GN->getOffset())) | ||||
20302 | return SDValue(); | ||||
20303 | |||||
20304 | // Check whether folding this offset is legal. It must not go out of bounds of | ||||
20305 | // the referenced object to avoid violating the code model, and must be | ||||
20306 | // smaller than 2^20 because this is the largest offset expressible in all | ||||
20307 | // object formats. (The IMAGE_REL_ARM64_PAGEBASE_REL21 relocation in COFF | ||||
20308 | // stores an immediate signed 21 bit offset.) | ||||
20309 | // | ||||
20310 | // This check also prevents us from folding negative offsets, which will end | ||||
20311 | // up being treated in the same way as large positive ones. They could also | ||||
20312 | // cause code model violations, and aren't really common enough to matter. | ||||
20313 | if (Offset >= (1 << 20)) | ||||
20314 | return SDValue(); | ||||
20315 | |||||
20316 | const GlobalValue *GV = GN->getGlobal(); | ||||
20317 | Type *T = GV->getValueType(); | ||||
20318 | if (!T->isSized() || | ||||
20319 | Offset > GV->getParent()->getDataLayout().getTypeAllocSize(T)) | ||||
20320 | return SDValue(); | ||||
20321 | |||||
20322 | SDLoc DL(GN); | ||||
20323 | SDValue Result = DAG.getGlobalAddress(GV, DL, MVT::i64, Offset); | ||||
20324 | return DAG.getNode(ISD::SUB, DL, MVT::i64, Result, | ||||
20325 | DAG.getConstant(MinOffset, DL, MVT::i64)); | ||||
20326 | } | ||||
20327 | |||||
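// With CSSC, (ctlz (bitreverse x)) counts leading zeros of the reversed
// bits, which equals the number of trailing zeros of x, so the pair folds to
// a single (cttz x).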
20328 | static SDValue performCTLZCombine(SDNode *N, SelectionDAG &DAG, | ||||
20329 | const AArch64Subtarget *Subtarget) { | ||||
20330 | SDValue BR = N->getOperand(0); | ||||
20331 | if (!Subtarget->hasCSSC() || BR.getOpcode() != ISD::BITREVERSE || | ||||
20332 | !BR.getValueType().isScalarInteger()) | ||||
20333 | return SDValue(); | ||||
20334 | |||||
20335 | SDLoc DL(N); | ||||
20336 | return DAG.getNode(ISD::CTTZ, DL, BR.getValueType(), BR.getOperand(0)); | ||||
20337 | } | ||||
20338 | |||||
// Turns the vector of indices into a vector of byte offsets by scaling Offset
// by (BitWidth / 8).
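// E.g. for 32-bit elements the shift amount is Log2(32 / 8) == 2, i.e. each
// index is multiplied by 4 to give a byte offset.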
20341 | static SDValue getScaledOffsetForBitWidth(SelectionDAG &DAG, SDValue Offset, | ||||
20342 | SDLoc DL, unsigned BitWidth) { | ||||
assert(Offset.getValueType().isScalableVector() &&
       "This method is only for scalable vectors of offsets");
20345 | |||||
20346 | SDValue Shift = DAG.getConstant(Log2_32(BitWidth / 8), DL, MVT::i64); | ||||
20347 | SDValue SplatShift = DAG.getNode(ISD::SPLAT_VECTOR, DL, MVT::nxv2i64, Shift); | ||||
20348 | |||||
20349 | return DAG.getNode(ISD::SHL, DL, MVT::nxv2i64, Offset, SplatShift); | ||||
20350 | } | ||||
20351 | |||||
20352 | /// Check if the value of \p OffsetInBytes can be used as an immediate for | ||||
20353 | /// the gather load/prefetch and scatter store instructions with vector base and | ||||
20354 | /// immediate offset addressing mode: | ||||
20355 | /// | ||||
20356 | /// [<Zn>.[S|D]{, #<imm>}] | ||||
20357 | /// | ||||
20358 | /// where <imm> = sizeof(<T>) * k, for k = 0, 1, ..., 31. | ||||
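/// For example, with 4-byte elements the valid immediates are 0, 4, 8, ...,
/// 124; an offset of 2 (not a multiple of 4) or of 128 (k == 32) is rejected.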
20359 | inline static bool isValidImmForSVEVecImmAddrMode(unsigned OffsetInBytes, | ||||
20360 | unsigned ScalarSizeInBytes) { | ||||
20361 | // The immediate is not a multiple of the scalar size. | ||||
20362 | if (OffsetInBytes % ScalarSizeInBytes) | ||||
20363 | return false; | ||||
20364 | |||||
20365 | // The immediate is out of range. | ||||
20366 | if (OffsetInBytes / ScalarSizeInBytes > 31) | ||||
20367 | return false; | ||||
20368 | |||||
20369 | return true; | ||||
20370 | } | ||||
20371 | |||||
/// Check if the value of \p Offset represents a valid immediate for the SVE
/// gather load/prefetch and scatter store instructions with vector base and
/// immediate offset addressing mode:
///
///    [<Zn>.[S|D]{, #<imm>}]
///
/// where <imm> = sizeof(<T>) * k, for k = 0, 1, ..., 31.
20379 | static bool isValidImmForSVEVecImmAddrMode(SDValue Offset, | ||||
20380 | unsigned ScalarSizeInBytes) { | ||||
20381 | ConstantSDNode *OffsetConst = dyn_cast<ConstantSDNode>(Offset.getNode()); | ||||
20382 | return OffsetConst && isValidImmForSVEVecImmAddrMode( | ||||
20383 | OffsetConst->getZExtValue(), ScalarSizeInBytes); | ||||
20384 | } | ||||
20385 | |||||
20386 | static SDValue performScatterStoreCombine(SDNode *N, SelectionDAG &DAG, | ||||
20387 | unsigned Opcode, | ||||
20388 | bool OnlyPackedOffsets = true) { | ||||
20389 | const SDValue Src = N->getOperand(2); | ||||
20390 | const EVT SrcVT = Src->getValueType(0); | ||||
assert(SrcVT.isScalableVector() &&
       "Scatter stores are only possible for SVE vectors");
20393 | |||||
20394 | SDLoc DL(N); | ||||
20395 | MVT SrcElVT = SrcVT.getVectorElementType().getSimpleVT(); | ||||
20396 | |||||
20397 | // Make sure that source data will fit into an SVE register | ||||
20398 | if (SrcVT.getSizeInBits().getKnownMinSize() > AArch64::SVEBitsPerBlock) | ||||
20399 | return SDValue(); | ||||
20400 | |||||
20401 | // For FPs, ACLE only supports _packed_ single and double precision types. | ||||
20402 | if (SrcElVT.isFloatingPoint()) | ||||
20403 | if ((SrcVT != MVT::nxv4f32) && (SrcVT != MVT::nxv2f64)) | ||||
20404 | return SDValue(); | ||||
20405 | |||||
20406 | // Depending on the addressing mode, this is either a pointer or a vector of | ||||
20407 | // pointers (that fits into one register) | ||||
20408 | SDValue Base = N->getOperand(4); | ||||
20409 | // Depending on the addressing mode, this is either a single offset or a | ||||
20410 | // vector of offsets (that fits into one register) | ||||
20411 | SDValue Offset = N->getOperand(5); | ||||
20412 | |||||
// For "scalar + vector of indices", just scale the indices. This only
// applies to non-temporal scatters because there's no instruction that takes
// indices.
20416 | if (Opcode == AArch64ISD::SSTNT1_INDEX_PRED) { | ||||
20417 | Offset = | ||||
20418 | getScaledOffsetForBitWidth(DAG, Offset, DL, SrcElVT.getSizeInBits()); | ||||
20419 | Opcode = AArch64ISD::SSTNT1_PRED; | ||||
20420 | } | ||||
20421 | |||||
// In the case of non-temporal scatter stores there's only one SVE instruction
// per data-size: "scalar + vector", i.e.
// * stnt1{b|h|w|d} { z0.s }, p0/z, [z0.s, x0]
// Since we do have intrinsics that allow the arguments to be in a different
// order, we may need to swap them to match the spec.
20427 | if (Opcode == AArch64ISD::SSTNT1_PRED && Offset.getValueType().isVector()) | ||||
20428 | std::swap(Base, Offset); | ||||
20429 | |||||
20430 | // SST1_IMM requires that the offset is an immediate that is: | ||||
20431 | // * a multiple of #SizeInBytes, | ||||
20432 | // * in the range [0, 31 x #SizeInBytes], | ||||
20433 | // where #SizeInBytes is the size in bytes of the stored items. For | ||||
20434 | // immediates outside that range and non-immediate scalar offsets use SST1 or | ||||
20435 | // SST1_UXTW instead. | ||||
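// E.g. a doubleword scatter accepts immediates 0, 8, ..., 248; an offset of
// 256 instead falls back to the register-offset form, with Base and Offset
// swapped accordingly.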
20436 | if (Opcode == AArch64ISD::SST1_IMM_PRED) { | ||||
20437 | if (!isValidImmForSVEVecImmAddrMode(Offset, | ||||
20438 | SrcVT.getScalarSizeInBits() / 8)) { | ||||
20439 | if (MVT::nxv4i32 == Base.getValueType().getSimpleVT().SimpleTy) | ||||
20440 | Opcode = AArch64ISD::SST1_UXTW_PRED; | ||||
20441 | else | ||||
20442 | Opcode = AArch64ISD::SST1_PRED; | ||||
20443 | |||||
20444 | std::swap(Base, Offset); | ||||
20445 | } | ||||
20446 | } | ||||
20447 | |||||
20448 | auto &TLI = DAG.getTargetLoweringInfo(); | ||||
20449 | if (!TLI.isTypeLegal(Base.getValueType())) | ||||
20450 | return SDValue(); | ||||
20451 | |||||
// Some scatter store variants allow unpacked offsets, but only as nxv2i32
// vectors. These are implicitly sign (sxtw) or zero (uxtw) extended to
// nxv2i64. Legalize accordingly.
20455 | if (!OnlyPackedOffsets && | ||||
20456 | Offset.getValueType().getSimpleVT().SimpleTy == MVT::nxv2i32) | ||||
20457 | Offset = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::nxv2i64, Offset).getValue(0); | ||||
20458 | |||||
20459 | if (!TLI.isTypeLegal(Offset.getValueType())) | ||||
20460 | return SDValue(); | ||||
20461 | |||||
20462 | // Source value type that is representable in hardware | ||||
20463 | EVT HwSrcVt = getSVEContainerType(SrcVT); | ||||
20464 | |||||
20465 | // Keep the original type of the input data to store - this is needed to be | ||||
20466 | // able to select the correct instruction, e.g. ST1B, ST1H, ST1W and ST1D. For | ||||
20467 | // FP values we want the integer equivalent, so just use HwSrcVt. | ||||
20468 | SDValue InputVT = DAG.getValueType(SrcVT); | ||||
20469 | if (SrcVT.isFloatingPoint()) | ||||
20470 | InputVT = DAG.getValueType(HwSrcVt); | ||||
20471 | |||||
20472 | SDVTList VTs = DAG.getVTList(MVT::Other); | ||||
20473 | SDValue SrcNew; | ||||
20474 | |||||
20475 | if (Src.getValueType().isFloatingPoint()) | ||||
20476 | SrcNew = DAG.getNode(ISD::BITCAST, DL, HwSrcVt, Src); | ||||
20477 | else | ||||
20478 | SrcNew = DAG.getNode(ISD::ANY_EXTEND, DL, HwSrcVt, Src); | ||||
20479 | |||||
20480 | SDValue Ops[] = {N->getOperand(0), // Chain | ||||
20481 | SrcNew, | ||||
20482 | N->getOperand(3), // Pg | ||||
20483 | Base, | ||||
20484 | Offset, | ||||
20485 | InputVT}; | ||||
20486 | |||||
20487 | return DAG.getNode(Opcode, DL, VTs, Ops); | ||||
20488 | } | ||||
20489 | |||||
20490 | static SDValue performGatherLoadCombine(SDNode *N, SelectionDAG &DAG, | ||||
20491 | unsigned Opcode, | ||||
20492 | bool OnlyPackedOffsets = true) { | ||||
20493 | const EVT RetVT = N->getValueType(0); | ||||
assert(RetVT.isScalableVector() &&
       "Gather loads are only possible for SVE vectors");
20496 | |||||
20497 | SDLoc DL(N); | ||||
20498 | |||||
20499 | // Make sure that the loaded data will fit into an SVE register | ||||
20500 | if (RetVT.getSizeInBits().getKnownMinSize() > AArch64::SVEBitsPerBlock) | ||||
20501 | return SDValue(); | ||||
20502 | |||||
20503 | // Depending on the addressing mode, this is either a pointer or a vector of | ||||
20504 | // pointers (that fits into one register) | ||||
20505 | SDValue Base = N->getOperand(3); | ||||
20506 | // Depending on the addressing mode, this is either a single offset or a | ||||
20507 | // vector of offsets (that fits into one register) | ||||
20508 | SDValue Offset = N->getOperand(4); | ||||
20509 | |||||
// For "scalar + vector of indices", just scale the indices. This only
// applies to non-temporal gathers because there's no instruction that takes
// indices.
20513 | if (Opcode == AArch64ISD::GLDNT1_INDEX_MERGE_ZERO) { | ||||
20514 | Offset = getScaledOffsetForBitWidth(DAG, Offset, DL, | ||||
20515 | RetVT.getScalarSizeInBits()); | ||||
20516 | Opcode = AArch64ISD::GLDNT1_MERGE_ZERO; | ||||
20517 | } | ||||
20518 | |||||
20519 | // In the case of non-temporal gather loads there's only one SVE instruction | ||||
20520 | // per data-size: "scalar + vector", i.e. | ||||
20521 | // * ldnt1{b|h|w|d} { z0.s }, p0/z, [z0.s, x0] | ||||
20522 | // Since we do have intrinsics that allow the arguments to be in a different | ||||
20523 | // order, we may need to swap them to match the spec. | ||||
20524 | if (Opcode == AArch64ISD::GLDNT1_MERGE_ZERO && | ||||
20525 | Offset.getValueType().isVector()) | ||||
20526 | std::swap(Base, Offset); | ||||
20527 | |||||
20528 | // GLD{FF}1_IMM requires that the offset is an immediate that is: | ||||
20529 | // * a multiple of #SizeInBytes, | ||||
20530 | // * in the range [0, 31 x #SizeInBytes], | ||||
20531 | // where #SizeInBytes is the size in bytes of the loaded items. For | ||||
20532 | // immediates outside that range and non-immediate scalar offsets use | ||||
20533 | // GLD1_MERGE_ZERO or GLD1_UXTW_MERGE_ZERO instead. | ||||
20534 | if (Opcode == AArch64ISD::GLD1_IMM_MERGE_ZERO || | ||||
20535 | Opcode == AArch64ISD::GLDFF1_IMM_MERGE_ZERO) { | ||||
20536 | if (!isValidImmForSVEVecImmAddrMode(Offset, | ||||
20537 | RetVT.getScalarSizeInBits() / 8)) { | ||||
20538 | if (MVT::nxv4i32 == Base.getValueType().getSimpleVT().SimpleTy) | ||||
20539 | Opcode = (Opcode == AArch64ISD::GLD1_IMM_MERGE_ZERO) | ||||
20540 | ? AArch64ISD::GLD1_UXTW_MERGE_ZERO | ||||
20541 | : AArch64ISD::GLDFF1_UXTW_MERGE_ZERO; | ||||
20542 | else | ||||
20543 | Opcode = (Opcode == AArch64ISD::GLD1_IMM_MERGE_ZERO) | ||||
20544 | ? AArch64ISD::GLD1_MERGE_ZERO | ||||
20545 | : AArch64ISD::GLDFF1_MERGE_ZERO; | ||||
20546 | |||||
20547 | std::swap(Base, Offset); | ||||
20548 | } | ||||
20549 | } | ||||
20550 | |||||
20551 | auto &TLI = DAG.getTargetLoweringInfo(); | ||||
20552 | if (!TLI.isTypeLegal(Base.getValueType())) | ||||
20553 | return SDValue(); | ||||
20554 | |||||
// Some gather load variants allow unpacked offsets, but only as nxv2i32
// vectors. These are implicitly sign (sxtw) or zero (uxtw) extended to
// nxv2i64. Legalize accordingly.
20558 | if (!OnlyPackedOffsets && | ||||
20559 | Offset.getValueType().getSimpleVT().SimpleTy == MVT::nxv2i32) | ||||
20560 | Offset = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::nxv2i64, Offset).getValue(0); | ||||
20561 | |||||
20562 | // Return value type that is representable in hardware | ||||
20563 | EVT HwRetVt = getSVEContainerType(RetVT); | ||||
20564 | |||||
// Keep the original output value type around - this is needed to be able to
// select the correct instruction, e.g. LD1B, LD1H, LD1W and LD1D. For FP
// values we want the integer equivalent, so just use HwRetVt.
20568 | SDValue OutVT = DAG.getValueType(RetVT); | ||||
20569 | if (RetVT.isFloatingPoint()) | ||||
20570 | OutVT = DAG.getValueType(HwRetVt); | ||||
20571 | |||||
20572 | SDVTList VTs = DAG.getVTList(HwRetVt, MVT::Other); | ||||
20573 | SDValue Ops[] = {N->getOperand(0), // Chain | ||||
20574 | N->getOperand(2), // Pg | ||||
20575 | Base, Offset, OutVT}; | ||||
20576 | |||||
20577 | SDValue Load = DAG.getNode(Opcode, DL, VTs, Ops); | ||||
20578 | SDValue LoadChain = SDValue(Load.getNode(), 1); | ||||
20579 | |||||
20580 | if (RetVT.isInteger() && (RetVT != HwRetVt)) | ||||
20581 | Load = DAG.getNode(ISD::TRUNCATE, DL, RetVT, Load.getValue(0)); | ||||
20582 | |||||
20583 | // If the original return value was FP, bitcast accordingly. Doing it here | ||||
20584 | // means that we can avoid adding TableGen patterns for FPs. | ||||
20585 | if (RetVT.isFloatingPoint()) | ||||
20586 | Load = DAG.getNode(ISD::BITCAST, DL, RetVT, Load.getValue(0)); | ||||
20587 | |||||
20588 | return DAG.getMergeValues({Load, LoadChain}, DL); | ||||
20589 | } | ||||
20590 | |||||
20591 | static SDValue | ||||
20592 | performSignExtendInRegCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, | ||||
20593 | SelectionDAG &DAG) { | ||||
20594 | SDLoc DL(N); | ||||
20595 | SDValue Src = N->getOperand(0); | ||||
20596 | unsigned Opc = Src->getOpcode(); | ||||
20597 | |||||
20598 | // Sign extend of an unsigned unpack -> signed unpack | ||||
20599 | if (Opc == AArch64ISD::UUNPKHI || Opc == AArch64ISD::UUNPKLO) { | ||||
20600 | |||||
20601 | unsigned SOpc = Opc == AArch64ISD::UUNPKHI ? AArch64ISD::SUNPKHI | ||||
20602 | : AArch64ISD::SUNPKLO; | ||||
20603 | |||||
20604 | // Push the sign extend to the operand of the unpack | ||||
20605 | // This is necessary where, for example, the operand of the unpack | ||||
20606 | // is another unpack: | ||||
20607 | // 4i32 sign_extend_inreg (4i32 uunpklo(8i16 uunpklo (16i8 opnd)), from 4i8) | ||||
20608 | // -> | ||||
20609 | // 4i32 sunpklo (8i16 sign_extend_inreg(8i16 uunpklo (16i8 opnd), from 8i8) | ||||
20610 | // -> | ||||
20611 | // 4i32 sunpklo(8i16 sunpklo(16i8 opnd)) | ||||
20612 | SDValue ExtOp = Src->getOperand(0); | ||||
20613 | auto VT = cast<VTSDNode>(N->getOperand(1))->getVT(); | ||||
20614 | EVT EltTy = VT.getVectorElementType(); | ||||
20615 | (void)EltTy; | ||||
20616 | |||||
assert((EltTy == MVT::i8 || EltTy == MVT::i16 || EltTy == MVT::i32) &&
       "Sign extending from an invalid type");
20619 | |||||
20620 | EVT ExtVT = VT.getDoubleNumVectorElementsVT(*DAG.getContext()); | ||||
20621 | |||||
20622 | SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ExtOp.getValueType(), | ||||
20623 | ExtOp, DAG.getValueType(ExtVT)); | ||||
20624 | |||||
20625 | return DAG.getNode(SOpc, DL, N->getValueType(0), Ext); | ||||
20626 | } | ||||
20627 | |||||
20628 | if (DCI.isBeforeLegalizeOps()) | ||||
20629 | return SDValue(); | ||||
20630 | |||||
20631 | if (!EnableCombineMGatherIntrinsics) | ||||
20632 | return SDValue(); | ||||
20633 | |||||
20634 | // SVE load nodes (e.g. AArch64ISD::GLD1) are straightforward candidates | ||||
20635 | // for DAG Combine with SIGN_EXTEND_INREG. Bail out for all other nodes. | ||||
20636 | unsigned NewOpc; | ||||
20637 | unsigned MemVTOpNum = 4; | ||||
20638 | switch (Opc) { | ||||
20639 | case AArch64ISD::LD1_MERGE_ZERO: | ||||
20640 | NewOpc = AArch64ISD::LD1S_MERGE_ZERO; | ||||
20641 | MemVTOpNum = 3; | ||||
20642 | break; | ||||
20643 | case AArch64ISD::LDNF1_MERGE_ZERO: | ||||
20644 | NewOpc = AArch64ISD::LDNF1S_MERGE_ZERO; | ||||
20645 | MemVTOpNum = 3; | ||||
20646 | break; | ||||
20647 | case AArch64ISD::LDFF1_MERGE_ZERO: | ||||
20648 | NewOpc = AArch64ISD::LDFF1S_MERGE_ZERO; | ||||
20649 | MemVTOpNum = 3; | ||||
20650 | break; | ||||
20651 | case AArch64ISD::GLD1_MERGE_ZERO: | ||||
20652 | NewOpc = AArch64ISD::GLD1S_MERGE_ZERO; | ||||
20653 | break; | ||||
20654 | case AArch64ISD::GLD1_SCALED_MERGE_ZERO: | ||||
20655 | NewOpc = AArch64ISD::GLD1S_SCALED_MERGE_ZERO; | ||||
20656 | break; | ||||
20657 | case AArch64ISD::GLD1_SXTW_MERGE_ZERO: | ||||
20658 | NewOpc = AArch64ISD::GLD1S_SXTW_MERGE_ZERO; | ||||
20659 | break; | ||||
20660 | case AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO: | ||||
20661 | NewOpc = AArch64ISD::GLD1S_SXTW_SCALED_MERGE_ZERO; | ||||
20662 | break; | ||||
20663 | case AArch64ISD::GLD1_UXTW_MERGE_ZERO: | ||||
20664 | NewOpc = AArch64ISD::GLD1S_UXTW_MERGE_ZERO; | ||||
20665 | break; | ||||
20666 | case AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO: | ||||
20667 | NewOpc = AArch64ISD::GLD1S_UXTW_SCALED_MERGE_ZERO; | ||||
20668 | break; | ||||
20669 | case AArch64ISD::GLD1_IMM_MERGE_ZERO: | ||||
20670 | NewOpc = AArch64ISD::GLD1S_IMM_MERGE_ZERO; | ||||
20671 | break; | ||||
20672 | case AArch64ISD::GLDFF1_MERGE_ZERO: | ||||
20673 | NewOpc = AArch64ISD::GLDFF1S_MERGE_ZERO; | ||||
20674 | break; | ||||
20675 | case AArch64ISD::GLDFF1_SCALED_MERGE_ZERO: | ||||
20676 | NewOpc = AArch64ISD::GLDFF1S_SCALED_MERGE_ZERO; | ||||
20677 | break; | ||||
20678 | case AArch64ISD::GLDFF1_SXTW_MERGE_ZERO: | ||||
20679 | NewOpc = AArch64ISD::GLDFF1S_SXTW_MERGE_ZERO; | ||||
20680 | break; | ||||
20681 | case AArch64ISD::GLDFF1_SXTW_SCALED_MERGE_ZERO: | ||||
20682 | NewOpc = AArch64ISD::GLDFF1S_SXTW_SCALED_MERGE_ZERO; | ||||
20683 | break; | ||||
20684 | case AArch64ISD::GLDFF1_UXTW_MERGE_ZERO: | ||||
20685 | NewOpc = AArch64ISD::GLDFF1S_UXTW_MERGE_ZERO; | ||||
20686 | break; | ||||
20687 | case AArch64ISD::GLDFF1_UXTW_SCALED_MERGE_ZERO: | ||||
20688 | NewOpc = AArch64ISD::GLDFF1S_UXTW_SCALED_MERGE_ZERO; | ||||
20689 | break; | ||||
20690 | case AArch64ISD::GLDFF1_IMM_MERGE_ZERO: | ||||
20691 | NewOpc = AArch64ISD::GLDFF1S_IMM_MERGE_ZERO; | ||||
20692 | break; | ||||
20693 | case AArch64ISD::GLDNT1_MERGE_ZERO: | ||||
20694 | NewOpc = AArch64ISD::GLDNT1S_MERGE_ZERO; | ||||
20695 | break; | ||||
20696 | default: | ||||
20697 | return SDValue(); | ||||
20698 | } | ||||
20699 | |||||
20700 | EVT SignExtSrcVT = cast<VTSDNode>(N->getOperand(1))->getVT(); | ||||
20701 | EVT SrcMemVT = cast<VTSDNode>(Src->getOperand(MemVTOpNum))->getVT(); | ||||
20702 | |||||
20703 | if ((SignExtSrcVT != SrcMemVT) || !Src.hasOneUse()) | ||||
20704 | return SDValue(); | ||||
20705 | |||||
20706 | EVT DstVT = N->getValueType(0); | ||||
20707 | SDVTList VTs = DAG.getVTList(DstVT, MVT::Other); | ||||
20708 | |||||
20709 | SmallVector<SDValue, 5> Ops; | ||||
20710 | for (unsigned I = 0; I < Src->getNumOperands(); ++I) | ||||
20711 | Ops.push_back(Src->getOperand(I)); | ||||
20712 | |||||
20713 | SDValue ExtLoad = DAG.getNode(NewOpc, SDLoc(N), VTs, Ops); | ||||
20714 | DCI.CombineTo(N, ExtLoad); | ||||
20715 | DCI.CombineTo(Src.getNode(), ExtLoad, ExtLoad.getValue(1)); | ||||
20716 | |||||
20717 | // Return N so it doesn't get rechecked | ||||
20718 | return SDValue(N, 0); | ||||
20719 | } | ||||
20720 | |||||
20721 | /// Legalize the gather prefetch (scalar + vector addressing mode) when the | ||||
20722 | /// offset vector is an unpacked 32-bit scalable vector. The other cases (Offset | ||||
20723 | /// != nxv2i32) do not need legalization. | ||||
20724 | static SDValue legalizeSVEGatherPrefetchOffsVec(SDNode *N, SelectionDAG &DAG) { | ||||
20725 | const unsigned OffsetPos = 4; | ||||
20726 | SDValue Offset = N->getOperand(OffsetPos); | ||||
20727 | |||||
20728 | // Not an unpacked vector, bail out. | ||||
20729 | if (Offset.getValueType().getSimpleVT().SimpleTy != MVT::nxv2i32) | ||||
20730 | return SDValue(); | ||||
20731 | |||||
20732 | // Extend the unpacked offset vector to 64-bit lanes. | ||||
20733 | SDLoc DL(N); | ||||
20734 | Offset = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::nxv2i64, Offset); | ||||
20735 | SmallVector<SDValue, 5> Ops(N->op_begin(), N->op_end()); | ||||
20736 | // Replace the offset operand with the 64-bit one. | ||||
20737 | Ops[OffsetPos] = Offset; | ||||
20738 | |||||
20739 | return DAG.getNode(N->getOpcode(), DL, DAG.getVTList(MVT::Other), Ops); | ||||
20740 | } | ||||
20741 | |||||
/// Combines a node carrying the intrinsic
/// `aarch64_sve_prf<T>_gather_scalar_offset` into a node that uses
/// `aarch64_sve_prfb_gather_uxtw_index` when the scalar offset passed to
/// `aarch64_sve_prf<T>_gather_scalar_offset` is not a valid immediate for the
/// SVE gather prefetch instruction with vector plus immediate addressing mode.
20747 | static SDValue combineSVEPrefetchVecBaseImmOff(SDNode *N, SelectionDAG &DAG, | ||||
20748 | unsigned ScalarSizeInBytes) { | ||||
20749 | const unsigned ImmPos = 4, OffsetPos = 3; | ||||
20750 | // No need to combine the node if the immediate is valid... | ||||
20751 | if (isValidImmForSVEVecImmAddrMode(N->getOperand(ImmPos), ScalarSizeInBytes)) | ||||
20752 | return SDValue(); | ||||
20753 | |||||
20754 | // ...otherwise swap the offset base with the offset... | ||||
20755 | SmallVector<SDValue, 5> Ops(N->op_begin(), N->op_end()); | ||||
20756 | std::swap(Ops[ImmPos], Ops[OffsetPos]); | ||||
20757 | // ...and remap the intrinsic `aarch64_sve_prf<T>_gather_scalar_offset` to | ||||
20758 | // `aarch64_sve_prfb_gather_uxtw_index`. | ||||
20759 | SDLoc DL(N); | ||||
20760 | Ops[1] = DAG.getConstant(Intrinsic::aarch64_sve_prfb_gather_uxtw_index, DL, | ||||
20761 | MVT::i64); | ||||
20762 | |||||
20763 | return DAG.getNode(N->getOpcode(), DL, DAG.getVTList(MVT::Other), Ops); | ||||
20764 | } | ||||
20765 | |||||
// Return true if the vector operation can guarantee that only the first lane
// of its result contains data, with all bits in other lanes set to zero.
20768 | static bool isLanes1toNKnownZero(SDValue Op) { | ||||
20769 | switch (Op.getOpcode()) { | ||||
20770 | default: | ||||
20771 | return false; | ||||
20772 | case AArch64ISD::ANDV_PRED: | ||||
20773 | case AArch64ISD::EORV_PRED: | ||||
20774 | case AArch64ISD::FADDA_PRED: | ||||
20775 | case AArch64ISD::FADDV_PRED: | ||||
20776 | case AArch64ISD::FMAXNMV_PRED: | ||||
20777 | case AArch64ISD::FMAXV_PRED: | ||||
20778 | case AArch64ISD::FMINNMV_PRED: | ||||
20779 | case AArch64ISD::FMINV_PRED: | ||||
20780 | case AArch64ISD::ORV_PRED: | ||||
20781 | case AArch64ISD::SADDV_PRED: | ||||
20782 | case AArch64ISD::SMAXV_PRED: | ||||
20783 | case AArch64ISD::SMINV_PRED: | ||||
20784 | case AArch64ISD::UADDV_PRED: | ||||
20785 | case AArch64ISD::UMAXV_PRED: | ||||
20786 | case AArch64ISD::UMINV_PRED: | ||||
20787 | return true; | ||||
20788 | } | ||||
20789 | } | ||||
20790 | |||||
20791 | static SDValue removeRedundantInsertVectorElt(SDNode *N) { | ||||
assert(N->getOpcode() == ISD::INSERT_VECTOR_ELT && "Unexpected node!");
20793 | SDValue InsertVec = N->getOperand(0); | ||||
20794 | SDValue InsertElt = N->getOperand(1); | ||||
20795 | SDValue InsertIdx = N->getOperand(2); | ||||
20796 | |||||
20797 | // We only care about inserts into the first element... | ||||
20798 | if (!isNullConstant(InsertIdx)) | ||||
20799 | return SDValue(); | ||||
20800 | // ...of a zero'd vector... | ||||
20801 | if (!ISD::isConstantSplatVectorAllZeros(InsertVec.getNode())) | ||||
20802 | return SDValue(); | ||||
20803 | // ...where the inserted data was previously extracted... | ||||
20804 | if (InsertElt.getOpcode() != ISD::EXTRACT_VECTOR_ELT) | ||||
20805 | return SDValue(); | ||||
20806 | |||||
20807 | SDValue ExtractVec = InsertElt.getOperand(0); | ||||
20808 | SDValue ExtractIdx = InsertElt.getOperand(1); | ||||
20809 | |||||
20810 | // ...from the first element of a vector. | ||||
20811 | if (!isNullConstant(ExtractIdx)) | ||||
20812 | return SDValue(); | ||||
20813 | |||||
20814 | // If we get here we are effectively trying to zero lanes 1-N of a vector. | ||||
20815 | |||||
20816 | // Ensure there's no type conversion going on. | ||||
20817 | if (N->getValueType(0) != ExtractVec.getValueType()) | ||||
20818 | return SDValue(); | ||||
20819 | |||||
20820 | if (!isLanes1toNKnownZero(ExtractVec)) | ||||
20821 | return SDValue(); | ||||
20822 | |||||
20823 | // The explicit zeroing is redundant. | ||||
20824 | return ExtractVec; | ||||
20825 | } | ||||
20826 | |||||
20827 | static SDValue | ||||
20828 | performInsertVectorEltCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { | ||||
20829 | if (SDValue Res = removeRedundantInsertVectorElt(N)) | ||||
20830 | return Res; | ||||
20831 | |||||
20832 | return performPostLD1Combine(N, DCI, true); | ||||
20833 | } | ||||
20834 | |||||
20835 | static SDValue performSVESpliceCombine(SDNode *N, SelectionDAG &DAG) { | ||||
20836 | EVT Ty = N->getValueType(0); | ||||
20837 | if (Ty.isInteger()) | ||||
20838 | return SDValue(); | ||||
20839 | |||||
20840 | EVT IntTy = Ty.changeVectorElementTypeToInteger(); | ||||
20841 | EVT ExtIntTy = getPackedSVEVectorVT(IntTy.getVectorElementCount()); | ||||
20842 | if (ExtIntTy.getVectorElementType().getScalarSizeInBits() < | ||||
20843 | IntTy.getVectorElementType().getScalarSizeInBits()) | ||||
20844 | return SDValue(); | ||||
20845 | |||||
20846 | SDLoc DL(N); | ||||
20847 | SDValue LHS = DAG.getAnyExtOrTrunc(DAG.getBitcast(IntTy, N->getOperand(0)), | ||||
20848 | DL, ExtIntTy); | ||||
20849 | SDValue RHS = DAG.getAnyExtOrTrunc(DAG.getBitcast(IntTy, N->getOperand(1)), | ||||
20850 | DL, ExtIntTy); | ||||
20851 | SDValue Idx = N->getOperand(2); | ||||
20852 | SDValue Splice = DAG.getNode(ISD::VECTOR_SPLICE, DL, ExtIntTy, LHS, RHS, Idx); | ||||
20853 | SDValue Trunc = DAG.getAnyExtOrTrunc(Splice, DL, IntTy); | ||||
20854 | return DAG.getBitcast(Ty, Trunc); | ||||
20855 | } | ||||
20856 | |||||
20857 | static SDValue performFPExtendCombine(SDNode *N, SelectionDAG &DAG, | ||||
20858 | TargetLowering::DAGCombinerInfo &DCI, | ||||
20859 | const AArch64Subtarget *Subtarget) { | ||||
20860 | SDValue N0 = N->getOperand(0); | ||||
20861 | EVT VT = N->getValueType(0); | ||||
20862 | |||||
20863 | // If this is fp_round(fpextend), don't fold it; allow ourselves to be folded. | ||||
20864 | if (N->hasOneUse() && N->use_begin()->getOpcode() == ISD::FP_ROUND) | ||||
20865 | return SDValue(); | ||||
20866 | |||||
20867 | // fold (fpext (load x)) -> (fpext (fptrunc (extload x))) | ||||
20868 | // We purposefully don't care about legality of the nodes here as we know | ||||
20869 | // they can be split down into something legal. | ||||
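// For example (an informal sketch): a fixed-length v8f32 fpext of a v8f16
// load becomes a single v8f32 EXTLOAD, with any other users of the original
// load rewritten to an FP_ROUND of the extended value.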
20870 | if (DCI.isBeforeLegalizeOps() && ISD::isNormalLoad(N0.getNode()) && | ||||
20871 | N0.hasOneUse() && Subtarget->useSVEForFixedLengthVectors() && | ||||
20872 | VT.isFixedLengthVector() && | ||||
20873 | VT.getFixedSizeInBits() >= Subtarget->getMinSVEVectorSizeInBits()) { | ||||
20874 | LoadSDNode *LN0 = cast<LoadSDNode>(N0); | ||||
20875 | SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, SDLoc(N), VT, | ||||
20876 | LN0->getChain(), LN0->getBasePtr(), | ||||
20877 | N0.getValueType(), LN0->getMemOperand()); | ||||
20878 | DCI.CombineTo(N, ExtLoad); | ||||
20879 | DCI.CombineTo( | ||||
20880 | N0.getNode(), | ||||
20881 | DAG.getNode(ISD::FP_ROUND, SDLoc(N0), N0.getValueType(), ExtLoad, | ||||
20882 | DAG.getIntPtrConstant(1, SDLoc(N0), /*isTarget=*/true)), | ||||
20883 | ExtLoad.getValue(1)); | ||||
20884 | return SDValue(N, 0); // Return N so it doesn't get rechecked! | ||||
20885 | } | ||||
20886 | |||||
20887 | return SDValue(); | ||||
20888 | } | ||||
20889 | |||||
20890 | static SDValue performBSPExpandForSVE(SDNode *N, SelectionDAG &DAG, | ||||
20891 | const AArch64Subtarget *Subtarget) { | ||||
20892 | EVT VT = N->getValueType(0); | ||||
20893 | |||||
20894 | // Don't expand for NEON, SVE2 or SME | ||||
20895 | if (!VT.isScalableVector() || Subtarget->hasSVE2() || Subtarget->hasSME()) | ||||
20896 | return SDValue(); | ||||
20897 | |||||
20898 | SDLoc DL(N); | ||||
20899 | |||||
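// The BSP (bitwise select) node computes (Mask & In1) | (~Mask & In2);
// without SVE2's BSL this is emitted as the three explicit logic ops below.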
20900 | SDValue Mask = N->getOperand(0); | ||||
20901 | SDValue In1 = N->getOperand(1); | ||||
20902 | SDValue In2 = N->getOperand(2); | ||||
20903 | |||||
20904 | SDValue InvMask = DAG.getNOT(DL, Mask, VT); | ||||
20905 | SDValue Sel = DAG.getNode(ISD::AND, DL, VT, Mask, In1); | ||||
20906 | SDValue SelInv = DAG.getNode(ISD::AND, DL, VT, InvMask, In2); | ||||
20907 | return DAG.getNode(ISD::OR, DL, VT, Sel, SelInv); | ||||
20908 | } | ||||
20909 | |||||
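// The combine below recognises a DUPLANE128 of a 128-bit fixed vector that
// was bitcast and inserted into lane 0 of an undef scalable vector, and
// rebuilds it on a container type matching the subvector's element type, so
// the bitcast happens after the lane duplication instead of before it.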
20910 | static SDValue performDupLane128Combine(SDNode *N, SelectionDAG &DAG) { | ||||
20911 | EVT VT = N->getValueType(0); | ||||
20912 | |||||
20913 | SDValue Insert = N->getOperand(0); | ||||
20914 | if (Insert.getOpcode() != ISD::INSERT_SUBVECTOR) | ||||
20915 | return SDValue(); | ||||
20916 | |||||
20917 | if (!Insert.getOperand(0).isUndef()) | ||||
20918 | return SDValue(); | ||||
20919 | |||||
20920 | uint64_t IdxInsert = Insert.getConstantOperandVal(2); | ||||
20921 | uint64_t IdxDupLane = N->getConstantOperandVal(1); | ||||
20922 | if (IdxInsert != 0 || IdxDupLane != 0) | ||||
20923 | return SDValue(); | ||||
20924 | |||||
20925 | SDValue Bitcast = Insert.getOperand(1); | ||||
20926 | if (Bitcast.getOpcode() != ISD::BITCAST) | ||||
20927 | return SDValue(); | ||||
20928 | |||||
20929 | SDValue Subvec = Bitcast.getOperand(0); | ||||
20930 | EVT SubvecVT = Subvec.getValueType(); | ||||
20931 | if (!SubvecVT.is128BitVector()) | ||||
20932 | return SDValue(); | ||||
20933 | EVT NewSubvecVT = | ||||
20934 | getPackedSVEVectorVT(Subvec.getValueType().getVectorElementType()); | ||||
20935 | |||||
20936 | SDLoc DL(N); | ||||
20937 | SDValue NewInsert = | ||||
20938 | DAG.getNode(ISD::INSERT_SUBVECTOR, DL, NewSubvecVT, | ||||
20939 | DAG.getUNDEF(NewSubvecVT), Subvec, Insert->getOperand(2)); | ||||
20940 | SDValue NewDuplane128 = DAG.getNode(AArch64ISD::DUPLANE128, DL, NewSubvecVT, | ||||
20941 | NewInsert, N->getOperand(1)); | ||||
20942 | return DAG.getNode(ISD::BITCAST, DL, VT, NewDuplane128); | ||||
20943 | } | ||||
20944 | |||||
20945 | SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N, | ||||
20946 | DAGCombinerInfo &DCI) const { | ||||
20947 | SelectionDAG &DAG = DCI.DAG; | ||||
20948 | switch (N->getOpcode()) { | ||||
20949 | default: | ||||
20950 | LLVM_DEBUG(dbgs() << "Custom combining: skipping\n"); | ||||
20951 | break; | ||||
20952 | case ISD::ADD: | ||||
20953 | case ISD::SUB: | ||||
20954 | return performAddSubCombine(N, DCI, DAG); | ||||
20955 | case ISD::BUILD_VECTOR: | ||||
20956 | return performBuildVectorCombine(N, DCI, DAG); | ||||
20957 | case AArch64ISD::ANDS: | ||||
20958 | return performFlagSettingCombine(N, DCI, ISD::AND); | ||||
20959 | case AArch64ISD::ADC: | ||||
20960 | if (auto R = foldOverflowCheck(N, DAG, /* IsAdd */ true)) | ||||
20961 | return R; | ||||
20962 | return foldADCToCINC(N, DAG); | ||||
20963 | case AArch64ISD::SBC: | ||||
20964 | return foldOverflowCheck(N, DAG, /* IsAdd */ false); | ||||
20965 | case AArch64ISD::ADCS: | ||||
20966 | if (auto R = foldOverflowCheck(N, DAG, /* IsAdd */ true)) | ||||
20967 | return R; | ||||
20968 | return performFlagSettingCombine(N, DCI, AArch64ISD::ADC); | ||||
20969 | case AArch64ISD::SBCS: | ||||
20970 | if (auto R = foldOverflowCheck(N, DAG, /* IsAdd */ false)) | ||||
20971 | return R; | ||||
20972 | return performFlagSettingCombine(N, DCI, AArch64ISD::SBC); | ||||
20973 | case ISD::XOR: | ||||
20974 | return performXorCombine(N, DAG, DCI, Subtarget); | ||||
20975 | case ISD::MUL: | ||||
20976 | return performMulCombine(N, DAG, DCI, Subtarget); | ||||
20977 | case ISD::SINT_TO_FP: | ||||
20978 | case ISD::UINT_TO_FP: | ||||
20979 | return performIntToFpCombine(N, DAG, Subtarget); | ||||
20980 | case ISD::FP_TO_SINT: | ||||
20981 | case ISD::FP_TO_UINT: | ||||
20982 | case ISD::FP_TO_SINT_SAT: | ||||
20983 | case ISD::FP_TO_UINT_SAT: | ||||
20984 | return performFpToIntCombine(N, DAG, DCI, Subtarget); | ||||
20985 | case ISD::FDIV: | ||||
20986 | return performFDivCombine(N, DAG, DCI, Subtarget); | ||||
20987 | case ISD::OR: | ||||
20988 | return performORCombine(N, DCI, Subtarget, *this); | ||||
20989 | case ISD::AND: | ||||
20990 | return performANDCombine(N, DCI); | ||||
20991 | case ISD::INTRINSIC_WO_CHAIN: | ||||
20992 | return performIntrinsicCombine(N, DCI, Subtarget); | ||||
20993 | case ISD::ANY_EXTEND: | ||||
20994 | case ISD::ZERO_EXTEND: | ||||
20995 | case ISD::SIGN_EXTEND: | ||||
20996 | return performExtendCombine(N, DCI, DAG); | ||||
20997 | case ISD::SIGN_EXTEND_INREG: | ||||
20998 | return performSignExtendInRegCombine(N, DCI, DAG); | ||||
20999 | case ISD::CONCAT_VECTORS: | ||||
21000 | return performConcatVectorsCombine(N, DCI, DAG); | ||||
21001 | case ISD::EXTRACT_SUBVECTOR: | ||||
21002 | return performExtractSubvectorCombine(N, DCI, DAG); | ||||
21003 | case ISD::INSERT_SUBVECTOR: | ||||
21004 | return performInsertSubvectorCombine(N, DCI, DAG); | ||||
21005 | case ISD::SELECT: | ||||
21006 | return performSelectCombine(N, DCI); | ||||
21007 | case ISD::VSELECT: | ||||
21008 | return performVSelectCombine(N, DCI.DAG); | ||||
21009 | case ISD::SETCC: | ||||
21010 | return performSETCCCombine(N, DCI, DAG); | ||||
21011 | case ISD::LOAD: | ||||
21012 | return performLOADCombine(N, DCI, DAG, Subtarget); | ||||
21013 | case ISD::STORE: | ||||
21014 | return performSTORECombine(N, DCI, DAG, Subtarget); | ||||
21015 | case ISD::MSTORE: | ||||
21016 | return performMSTORECombine(N, DCI, DAG, Subtarget); | ||||
21017 | case ISD::MGATHER: | ||||
21018 | case ISD::MSCATTER: | ||||
21019 | return performMaskedGatherScatterCombine(N, DCI, DAG); | ||||
21020 | case ISD::VECTOR_SPLICE: | ||||
21021 | return performSVESpliceCombine(N, DAG); | ||||
21022 | case ISD::FP_EXTEND: | ||||
21023 | return performFPExtendCombine(N, DAG, DCI, Subtarget); | ||||
21024 | case AArch64ISD::BRCOND: | ||||
21025 | return performBRCONDCombine(N, DCI, DAG); | ||||
21026 | case AArch64ISD::TBNZ: | ||||
21027 | case AArch64ISD::TBZ: | ||||
21028 | return performTBZCombine(N, DCI, DAG); | ||||
21029 | case AArch64ISD::CSEL: | ||||
21030 | return performCSELCombine(N, DCI, DAG); | ||||
21031 | case AArch64ISD::DUP: | ||||
21032 | return performDUPCombine(N, DCI); | ||||
21033 | case AArch64ISD::DUPLANE128: | ||||
21034 | return performDupLane128Combine(N, DAG); | ||||
21035 | case AArch64ISD::NVCAST: | ||||
21036 | return performNVCASTCombine(N); | ||||
21037 | case AArch64ISD::SPLICE: | ||||
21038 | return performSpliceCombine(N, DAG); | ||||
21039 | case AArch64ISD::UUNPKLO: | ||||
21040 | case AArch64ISD::UUNPKHI: | ||||
21041 | return performUnpackCombine(N, DAG, Subtarget); | ||||
21042 | case AArch64ISD::UZP1: | ||||
21043 | return performUzpCombine(N, DAG); | ||||
21044 | case AArch64ISD::SETCC_MERGE_ZERO: | ||||
21045 | return performSetccMergeZeroCombine(N, DCI); | ||||
21046 | case AArch64ISD::GLD1_MERGE_ZERO: | ||||
21047 | case AArch64ISD::GLD1_SCALED_MERGE_ZERO: | ||||
21048 | case AArch64ISD::GLD1_UXTW_MERGE_ZERO: | ||||
21049 | case AArch64ISD::GLD1_SXTW_MERGE_ZERO: | ||||
21050 | case AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO: | ||||
21051 | case AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO: | ||||
21052 | case AArch64ISD::GLD1_IMM_MERGE_ZERO: | ||||
21053 | case AArch64ISD::GLD1S_MERGE_ZERO: | ||||
21054 | case AArch64ISD::GLD1S_SCALED_MERGE_ZERO: | ||||
21055 | case AArch64ISD::GLD1S_UXTW_MERGE_ZERO: | ||||
21056 | case AArch64ISD::GLD1S_SXTW_MERGE_ZERO: | ||||
21057 | case AArch64ISD::GLD1S_UXTW_SCALED_MERGE_ZERO: | ||||
21058 | case AArch64ISD::GLD1S_SXTW_SCALED_MERGE_ZERO: | ||||
21059 | case AArch64ISD::GLD1S_IMM_MERGE_ZERO: | ||||
21060 | return performGLD1Combine(N, DAG); | ||||
21061 | case AArch64ISD::VASHR: | ||||
21062 | case AArch64ISD::VLSHR: | ||||
21063 | return performVectorShiftCombine(N, *this, DCI); | ||||
21064 | case AArch64ISD::SUNPKLO: | ||||
21065 | return performSunpkloCombine(N, DAG); | ||||
21066 | case AArch64ISD::BSP: | ||||
21067 | return performBSPExpandForSVE(N, DAG, Subtarget); | ||||
21068 | case ISD::INSERT_VECTOR_ELT: | ||||
21069 | return performInsertVectorEltCombine(N, DCI); | ||||
21070 | case ISD::EXTRACT_VECTOR_ELT: | ||||
21071 | return performExtractVectorEltCombine(N, DCI, Subtarget); | ||||
21072 | case ISD::VECREDUCE_ADD: | ||||
21073 | return performVecReduceAddCombine(N, DCI.DAG, Subtarget); | ||||
21074 | case AArch64ISD::UADDV: | ||||
21075 | return performUADDVCombine(N, DAG); | ||||
21076 | case AArch64ISD::SMULL: | ||||
21077 | case AArch64ISD::UMULL: | ||||
21078 | case AArch64ISD::PMULL: | ||||
21079 | return tryCombineLongOpWithDup(Intrinsic::not_intrinsic, N, DCI, DAG); | ||||
21080 | case ISD::INTRINSIC_VOID: | ||||
21081 | case ISD::INTRINSIC_W_CHAIN: | ||||
21082 | switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { | ||||
21083 | case Intrinsic::aarch64_sve_prfb_gather_scalar_offset: | ||||
21084 | return combineSVEPrefetchVecBaseImmOff(N, DAG, 1 /*=ScalarSizeInBytes*/); | ||||
21085 | case Intrinsic::aarch64_sve_prfh_gather_scalar_offset: | ||||
21086 | return combineSVEPrefetchVecBaseImmOff(N, DAG, 2 /*=ScalarSizeInBytes*/); | ||||
21087 | case Intrinsic::aarch64_sve_prfw_gather_scalar_offset: | ||||
21088 | return combineSVEPrefetchVecBaseImmOff(N, DAG, 4 /*=ScalarSizeInBytes*/); | ||||
21089 | case Intrinsic::aarch64_sve_prfd_gather_scalar_offset: | ||||
21090 | return combineSVEPrefetchVecBaseImmOff(N, DAG, 8 /*=ScalarSizeInBytes*/); | ||||
21091 | case Intrinsic::aarch64_sve_prfb_gather_uxtw_index: | ||||
21092 | case Intrinsic::aarch64_sve_prfb_gather_sxtw_index: | ||||
21093 | case Intrinsic::aarch64_sve_prfh_gather_uxtw_index: | ||||
21094 | case Intrinsic::aarch64_sve_prfh_gather_sxtw_index: | ||||
21095 | case Intrinsic::aarch64_sve_prfw_gather_uxtw_index: | ||||
21096 | case Intrinsic::aarch64_sve_prfw_gather_sxtw_index: | ||||
21097 | case Intrinsic::aarch64_sve_prfd_gather_uxtw_index: | ||||
21098 | case Intrinsic::aarch64_sve_prfd_gather_sxtw_index: | ||||
21099 | return legalizeSVEGatherPrefetchOffsVec(N, DAG); | ||||
21100 | case Intrinsic::aarch64_neon_ld2: | ||||
21101 | case Intrinsic::aarch64_neon_ld3: | ||||
21102 | case Intrinsic::aarch64_neon_ld4: | ||||
21103 | case Intrinsic::aarch64_neon_ld1x2: | ||||
21104 | case Intrinsic::aarch64_neon_ld1x3: | ||||
21105 | case Intrinsic::aarch64_neon_ld1x4: | ||||
21106 | case Intrinsic::aarch64_neon_ld2lane: | ||||
21107 | case Intrinsic::aarch64_neon_ld3lane: | ||||
21108 | case Intrinsic::aarch64_neon_ld4lane: | ||||
21109 | case Intrinsic::aarch64_neon_ld2r: | ||||
21110 | case Intrinsic::aarch64_neon_ld3r: | ||||
21111 | case Intrinsic::aarch64_neon_ld4r: | ||||
21112 | case Intrinsic::aarch64_neon_st2: | ||||
21113 | case Intrinsic::aarch64_neon_st3: | ||||
21114 | case Intrinsic::aarch64_neon_st4: | ||||
21115 | case Intrinsic::aarch64_neon_st1x2: | ||||
21116 | case Intrinsic::aarch64_neon_st1x3: | ||||
21117 | case Intrinsic::aarch64_neon_st1x4: | ||||
21118 | case Intrinsic::aarch64_neon_st2lane: | ||||
21119 | case Intrinsic::aarch64_neon_st3lane: | ||||
21120 | case Intrinsic::aarch64_neon_st4lane: | ||||
21121 | return performNEONPostLDSTCombine(N, DCI, DAG); | ||||
21122 | case Intrinsic::aarch64_sve_ldnt1: | ||||
21123 | return performLDNT1Combine(N, DAG); | ||||
21124 | case Intrinsic::aarch64_sve_ld1rq: | ||||
21125 | return performLD1ReplicateCombine<AArch64ISD::LD1RQ_MERGE_ZERO>(N, DAG); | ||||
21126 | case Intrinsic::aarch64_sve_ld1ro: | ||||
21127 | return performLD1ReplicateCombine<AArch64ISD::LD1RO_MERGE_ZERO>(N, DAG); | ||||
21128 | case Intrinsic::aarch64_sve_ldnt1_gather_scalar_offset: | ||||
21129 | return performGatherLoadCombine(N, DAG, AArch64ISD::GLDNT1_MERGE_ZERO); | ||||
21130 | case Intrinsic::aarch64_sve_ldnt1_gather: | ||||
21131 | return performGatherLoadCombine(N, DAG, AArch64ISD::GLDNT1_MERGE_ZERO); | ||||
21132 | case Intrinsic::aarch64_sve_ldnt1_gather_index: | ||||
21133 | return performGatherLoadCombine(N, DAG, | ||||
21134 | AArch64ISD::GLDNT1_INDEX_MERGE_ZERO); | ||||
21135 | case Intrinsic::aarch64_sve_ldnt1_gather_uxtw: | ||||
21136 | return performGatherLoadCombine(N, DAG, AArch64ISD::GLDNT1_MERGE_ZERO); | ||||
21137 | case Intrinsic::aarch64_sve_ld1: | ||||
21138 | return performLD1Combine(N, DAG, AArch64ISD::LD1_MERGE_ZERO); | ||||
21139 | case Intrinsic::aarch64_sve_ldnf1: | ||||
21140 | return performLD1Combine(N, DAG, AArch64ISD::LDNF1_MERGE_ZERO); | ||||
21141 | case Intrinsic::aarch64_sve_ldff1: | ||||
21142 | return performLD1Combine(N, DAG, AArch64ISD::LDFF1_MERGE_ZERO); | ||||
21143 | case Intrinsic::aarch64_sve_st1: | ||||
21144 | return performST1Combine(N, DAG); | ||||
21145 | case Intrinsic::aarch64_sve_stnt1: | ||||
21146 | return performSTNT1Combine(N, DAG); | ||||
21147 | case Intrinsic::aarch64_sve_stnt1_scatter_scalar_offset: | ||||
21148 | return performScatterStoreCombine(N, DAG, AArch64ISD::SSTNT1_PRED); | ||||
21149 | case Intrinsic::aarch64_sve_stnt1_scatter_uxtw: | ||||
21150 | return performScatterStoreCombine(N, DAG, AArch64ISD::SSTNT1_PRED); | ||||
21151 | case Intrinsic::aarch64_sve_stnt1_scatter: | ||||
21152 | return performScatterStoreCombine(N, DAG, AArch64ISD::SSTNT1_PRED); | ||||
21153 | case Intrinsic::aarch64_sve_stnt1_scatter_index: | ||||
21154 | return performScatterStoreCombine(N, DAG, AArch64ISD::SSTNT1_INDEX_PRED); | ||||
21155 | case Intrinsic::aarch64_sve_ld1_gather: | ||||
21156 | return performGatherLoadCombine(N, DAG, AArch64ISD::GLD1_MERGE_ZERO); | ||||
21157 | case Intrinsic::aarch64_sve_ld1_gather_index: | ||||
21158 | return performGatherLoadCombine(N, DAG, | ||||
21159 | AArch64ISD::GLD1_SCALED_MERGE_ZERO); | ||||
21160 | case Intrinsic::aarch64_sve_ld1_gather_sxtw: | ||||
21161 | return performGatherLoadCombine(N, DAG, AArch64ISD::GLD1_SXTW_MERGE_ZERO, | ||||
21162 | /*OnlyPackedOffsets=*/false); | ||||
21163 | case Intrinsic::aarch64_sve_ld1_gather_uxtw: | ||||
21164 | return performGatherLoadCombine(N, DAG, AArch64ISD::GLD1_UXTW_MERGE_ZERO, | ||||
21165 | /*OnlyPackedOffsets=*/false); | ||||
21166 | case Intrinsic::aarch64_sve_ld1_gather_sxtw_index: | ||||
21167 | return performGatherLoadCombine(N, DAG, | ||||
21168 | AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO, | ||||
21169 | /*OnlyPackedOffsets=*/false); | ||||
21170 | case Intrinsic::aarch64_sve_ld1_gather_uxtw_index: | ||||
21171 | return performGatherLoadCombine(N, DAG, | ||||
21172 | AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO, | ||||
21173 | /*OnlyPackedOffsets=*/false); | ||||
21174 | case Intrinsic::aarch64_sve_ld1_gather_scalar_offset: | ||||
21175 | return performGatherLoadCombine(N, DAG, AArch64ISD::GLD1_IMM_MERGE_ZERO); | ||||
21176 | case Intrinsic::aarch64_sve_ldff1_gather: | ||||
21177 | return performGatherLoadCombine(N, DAG, AArch64ISD::GLDFF1_MERGE_ZERO); | ||||
21178 | case Intrinsic::aarch64_sve_ldff1_gather_index: | ||||
21179 | return performGatherLoadCombine(N, DAG, | ||||
21180 | AArch64ISD::GLDFF1_SCALED_MERGE_ZERO); | ||||
21181 | case Intrinsic::aarch64_sve_ldff1_gather_sxtw: | ||||
21182 | return performGatherLoadCombine(N, DAG, | ||||
21183 | AArch64ISD::GLDFF1_SXTW_MERGE_ZERO, | ||||
21184 | /*OnlyPackedOffsets=*/false); | ||||
21185 | case Intrinsic::aarch64_sve_ldff1_gather_uxtw: | ||||
21186 | return performGatherLoadCombine(N, DAG, | ||||
21187 | AArch64ISD::GLDFF1_UXTW_MERGE_ZERO, | ||||
21188 | /*OnlyPackedOffsets=*/false); | ||||
21189 | case Intrinsic::aarch64_sve_ldff1_gather_sxtw_index: | ||||
21190 | return performGatherLoadCombine(N, DAG, | ||||
21191 | AArch64ISD::GLDFF1_SXTW_SCALED_MERGE_ZERO, | ||||
21192 | /*OnlyPackedOffsets=*/false); | ||||
21193 | case Intrinsic::aarch64_sve_ldff1_gather_uxtw_index: | ||||
21194 | return performGatherLoadCombine(N, DAG, | ||||
21195 | AArch64ISD::GLDFF1_UXTW_SCALED_MERGE_ZERO, | ||||
21196 | /*OnlyPackedOffsets=*/false); | ||||
21197 | case Intrinsic::aarch64_sve_ldff1_gather_scalar_offset: | ||||
21198 | return performGatherLoadCombine(N, DAG, | ||||
21199 | AArch64ISD::GLDFF1_IMM_MERGE_ZERO); | ||||
21200 | case Intrinsic::aarch64_sve_st1_scatter: | ||||
21201 | return performScatterStoreCombine(N, DAG, AArch64ISD::SST1_PRED); | ||||
21202 | case Intrinsic::aarch64_sve_st1_scatter_index: | ||||
21203 | return performScatterStoreCombine(N, DAG, AArch64ISD::SST1_SCALED_PRED); | ||||
21204 | case Intrinsic::aarch64_sve_st1_scatter_sxtw: | ||||
21205 | return performScatterStoreCombine(N, DAG, AArch64ISD::SST1_SXTW_PRED, | ||||
21206 | /*OnlyPackedOffsets=*/false); | ||||
21207 | case Intrinsic::aarch64_sve_st1_scatter_uxtw: | ||||
21208 | return performScatterStoreCombine(N, DAG, AArch64ISD::SST1_UXTW_PRED, | ||||
21209 | /*OnlyPackedOffsets=*/false); | ||||
21210 | case Intrinsic::aarch64_sve_st1_scatter_sxtw_index: | ||||
21211 | return performScatterStoreCombine(N, DAG, | ||||
21212 | AArch64ISD::SST1_SXTW_SCALED_PRED, | ||||
21213 | /*OnlyPackedOffsets=*/false); | ||||
21214 | case Intrinsic::aarch64_sve_st1_scatter_uxtw_index: | ||||
21215 | return performScatterStoreCombine(N, DAG, | ||||
21216 | AArch64ISD::SST1_UXTW_SCALED_PRED, | ||||
21217 | /*OnlyPackedOffsets=*/false); | ||||
21218 | case Intrinsic::aarch64_sve_st1_scatter_scalar_offset: | ||||
21219 | return performScatterStoreCombine(N, DAG, AArch64ISD::SST1_IMM_PRED); | ||||
21220 | case Intrinsic::aarch64_rndr: | ||||
21221 | case Intrinsic::aarch64_rndrrs: { | ||||
21222 | unsigned IntrinsicID = | ||||
21223 | cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); | ||||
21224 | auto Register = | ||||
21225 | (IntrinsicID == Intrinsic::aarch64_rndr ? AArch64SysReg::RNDR | ||||
21226 | : AArch64SysReg::RNDRRS); | ||||
21227 | SDLoc DL(N); | ||||
21228 | SDValue A = DAG.getNode( | ||||
21229 | AArch64ISD::MRS, DL, DAG.getVTList(MVT::i64, MVT::Glue, MVT::Other), | ||||
21230 | N->getOperand(0), DAG.getConstant(Register, DL, MVT::i64)); | ||||
21231 | SDValue B = DAG.getNode( | ||||
21232 | AArch64ISD::CSINC, DL, MVT::i32, DAG.getConstant(0, DL, MVT::i32), | ||||
21233 | DAG.getConstant(0, DL, MVT::i32), | ||||
21234 | DAG.getConstant(AArch64CC::NE, DL, MVT::i32), A.getValue(1)); | ||||
21235 | return DAG.getMergeValues( | ||||
21236 | {A, DAG.getZExtOrTrunc(B, DL, MVT::i1), A.getValue(2)}, DL); | ||||
21237 | } | ||||
21238 | default: | ||||
21239 | break; | ||||
21240 | } | ||||
21241 | break; | ||||
21242 | case ISD::GlobalAddress: | ||||
21243 | return performGlobalAddressCombine(N, DAG, Subtarget, getTargetMachine()); | ||||
21244 | case ISD::CTLZ: | ||||
21245 | return performCTLZCombine(N, DAG, Subtarget); | ||||
21246 | } | ||||
21247 | return SDValue(); | ||||
21248 | } | ||||
21249 | |||||
21250 | // Check if the return value is used as only a return value, as otherwise | ||||
21251 | // we can't perform a tail-call. In particular, we need to check for | ||||
21252 | // target ISD nodes that are returns and any other "odd" constructs | ||||
21253 | // that the generic analysis code won't necessarily catch. | ||||
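// Concretely, the chain must look like a CopyToReg (or FP_EXTEND) node whose
// only users are AArch64ISD::RET_FLAG returns.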
21254 | bool AArch64TargetLowering::isUsedByReturnOnly(SDNode *N, | ||||
21255 | SDValue &Chain) const { | ||||
21256 | if (N->getNumValues() != 1) | ||||
21257 | return false; | ||||
21258 | if (!N->hasNUsesOfValue(1, 0)) | ||||
21259 | return false; | ||||
21260 | |||||
21261 | SDValue TCChain = Chain; | ||||
21262 | SDNode *Copy = *N->use_begin(); | ||||
21263 | if (Copy->getOpcode() == ISD::CopyToReg) { | ||||
21264 | // If the copy has a glue operand, we conservatively assume it isn't safe to | ||||
21265 | // perform a tail call. | ||||
21266 | if (Copy->getOperand(Copy->getNumOperands() - 1).getValueType() == | ||||
21267 | MVT::Glue) | ||||
21268 | return false; | ||||
21269 | TCChain = Copy->getOperand(0); | ||||
21270 | } else if (Copy->getOpcode() != ISD::FP_EXTEND) | ||||
21271 | return false; | ||||
21272 | |||||
21273 | bool HasRet = false; | ||||
21274 | for (SDNode *Node : Copy->uses()) { | ||||
21275 | if (Node->getOpcode() != AArch64ISD::RET_FLAG) | ||||
21276 | return false; | ||||
21277 | HasRet = true; | ||||
21278 | } | ||||
21279 | |||||
21280 | if (!HasRet) | ||||
21281 | return false; | ||||
21282 | |||||
21283 | Chain = TCChain; | ||||
21284 | return true; | ||||
21285 | } | ||||
21286 | |||||
21287 | // Return whether an instruction can potentially be optimized to a tail | ||||
21288 | // call. This will cause the optimizers to attempt to move, or duplicate, | ||||
21289 | // return instructions to help enable tail call optimizations for this | ||||
21290 | // instruction. | ||||
21291 | bool AArch64TargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { | ||||
21292 | return CI->isTailCall(); | ||||
21293 | } | ||||
21294 | |||||
21295 | bool AArch64TargetLowering::getIndexedAddressParts(SDNode *Op, SDValue &Base, | ||||
21296 | SDValue &Offset, | ||||
21297 | ISD::MemIndexedMode &AM, | ||||
21298 | bool &IsInc, | ||||
21299 | SelectionDAG &DAG) const { | ||||
21300 | if (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB) | ||||
21301 | return false; | ||||
21302 | |||||
21303 | Base = Op->getOperand(0); | ||||
21304 | // All of the indexed addressing mode instructions take a signed | ||||
21305 | // 9-bit immediate offset. | ||||
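// That is, the immediate must lie in [-256, 255]; e.g. the pre/post-indexed
// forms "ldr x0, [x1, #16]!" and "ldr x0, [x1], #16" qualify, while an
// offset of 256 does not.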
21306 | if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1))) { | ||||
21307 | int64_t RHSC = RHS->getSExtValue(); | ||||
21308 | if (Op->getOpcode() == ISD::SUB) | ||||
21309 | RHSC = -(uint64_t)RHSC; | ||||
21310 | if (!isInt<9>(RHSC)) | ||||
21311 | return false; | ||||
21312 | IsInc = (Op->getOpcode() == ISD::ADD); | ||||
21313 | Offset = Op->getOperand(1); | ||||
21314 | return true; | ||||
21315 | } | ||||
21316 | return false; | ||||
21317 | } | ||||
21318 | |||||
21319 | bool AArch64TargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, | ||||
21320 | SDValue &Offset, | ||||
21321 | ISD::MemIndexedMode &AM, | ||||
21322 | SelectionDAG &DAG) const { | ||||
21323 | EVT VT; | ||||
21324 | SDValue Ptr; | ||||
21325 | if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { | ||||
21326 | VT = LD->getMemoryVT(); | ||||
21327 | Ptr = LD->getBasePtr(); | ||||
21328 | } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { | ||||
21329 | VT = ST->getMemoryVT(); | ||||
21330 | Ptr = ST->getBasePtr(); | ||||
21331 | } else | ||||
21332 | return false; | ||||
21333 | |||||
21334 | bool IsInc; | ||||
21335 | if (!getIndexedAddressParts(Ptr.getNode(), Base, Offset, AM, IsInc, DAG)) | ||||
21336 | return false; | ||||
21337 | AM = IsInc ? ISD::PRE_INC : ISD::PRE_DEC; | ||||
21338 | return true; | ||||
21339 | } | ||||
21340 | |||||
21341 | bool AArch64TargetLowering::getPostIndexedAddressParts( | ||||
21342 | SDNode *N, SDNode *Op, SDValue &Base, SDValue &Offset, | ||||
21343 | ISD::MemIndexedMode &AM, SelectionDAG &DAG) const { | ||||
21344 | EVT VT; | ||||
21345 | SDValue Ptr; | ||||
21346 | if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { | ||||
21347 | VT = LD->getMemoryVT(); | ||||
21348 | Ptr = LD->getBasePtr(); | ||||
21349 | } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { | ||||
21350 | VT = ST->getMemoryVT(); | ||||
21351 | Ptr = ST->getBasePtr(); | ||||
21352 | } else | ||||
21353 | return false; | ||||
21354 | |||||
21355 | bool IsInc; | ||||
21356 | if (!getIndexedAddressParts(Op, Base, Offset, AM, IsInc, DAG)) | ||||
21357 | return false; | ||||
21358 | // Post-indexing updates the base, so it's not a valid transform | ||||
21359 | // if that's not the same as the load's pointer. | ||||
21360 | if (Ptr != Base) | ||||
21361 | return false; | ||||
21362 | AM = IsInc ? ISD::POST_INC : ISD::POST_DEC; | ||||
21363 | return true; | ||||
21364 | } | ||||
21365 | |||||
21366 | void AArch64TargetLowering::ReplaceBITCASTResults( | ||||
21367 | SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const { | ||||
21368 | SDLoc DL(N); | ||||
21369 | SDValue Op = N->getOperand(0); | ||||
21370 | EVT VT = N->getValueType(0); | ||||
21371 | EVT SrcVT = Op.getValueType(); | ||||
21372 | |||||
21373 | if (VT.isScalableVector() && !isTypeLegal(VT) && isTypeLegal(SrcVT)) { | ||||
21374 | assert(!VT.isFloatingPoint() && SrcVT.isFloatingPoint() && | ||||
21375 | "Expected fp->int bitcast!"); | ||||
21376 | |||||
21377 | // Bitcasting between unpacked vector types of different element counts is | ||||
21378 | // not a NOP because the live elements are laid out differently. | ||||
21379 | // 01234567 | ||||
21380 | // e.g. nxv2i32 = XX??XX?? | ||||
21381 | // nxv4f16 = X?X?X?X? | ||||
21382 | if (VT.getVectorElementCount() != SrcVT.getVectorElementCount()) | ||||
21383 | return; | ||||
21384 | |||||
21385 | SDValue CastResult = getSVESafeBitCast(getSVEContainerType(VT), Op, DAG); | ||||
21386 | Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, CastResult)); | ||||
21387 | return; | ||||
21388 | } | ||||
21389 | |||||
21390 | if (VT != MVT::i16 || (SrcVT != MVT::f16 && SrcVT != MVT::bf16)) | ||||
21391 | return; | ||||
21392 | |||||
21393 | Op = SDValue( | ||||
21394 | DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, DL, MVT::f32, | ||||
21395 | DAG.getUNDEF(MVT::i32), Op, | ||||
21396 | DAG.getTargetConstant(AArch64::hsub, DL, MVT::i32)), | ||||
21397 | 0); | ||||
21398 | Op = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op); | ||||
21399 | Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Op)); | ||||
21400 | } | ||||
21401 | |||||
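// Recognise a 256-bit add (or reassociable fadd) of a vector with its
// pairwise-swapped self, i.e. add(X, shuffle(X, <1,0,3,2,...>)), and replace
// it with an ADDP of the two halves of X; each pairwise sum is then shuffled
// back into both of its original lanes.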
21402 | static void ReplaceAddWithADDP(SDNode *N, SmallVectorImpl<SDValue> &Results, | ||||
21403 | SelectionDAG &DAG, | ||||
21404 | const AArch64Subtarget *Subtarget) { | ||||
21405 | EVT VT = N->getValueType(0); | ||||
21406 | if (!VT.is256BitVector() || | ||||
21407 | (VT.getScalarType().isFloatingPoint() && | ||||
21408 | !N->getFlags().hasAllowReassociation()) || | ||||
21409 | (VT.getScalarType() == MVT::f16 && !Subtarget->hasFullFP16())) | ||||
21410 | return; | ||||
21411 | |||||
21412 | SDValue X = N->getOperand(0); | ||||
21413 | auto *Shuf = dyn_cast<ShuffleVectorSDNode>(N->getOperand(1)); | ||||
21414 | if (!Shuf) { | ||||
21415 | Shuf = dyn_cast<ShuffleVectorSDNode>(N->getOperand(0)); | ||||
21416 | X = N->getOperand(1); | ||||
21417 | if (!Shuf) | ||||
21418 | return; | ||||
21419 | } | ||||
21420 | |||||
21421 | if (Shuf->getOperand(0) != X || !Shuf->getOperand(1)->isUndef()) | ||||
21422 | return; | ||||
21423 | |||||
21424 | // Check the mask is 1,0,3,2,5,4,... | ||||
21425 | ArrayRef<int> Mask = Shuf->getMask(); | ||||
21426 | for (int I = 0, E = Mask.size(); I < E; I++) | ||||
21427 | if (Mask[I] != (I % 2 == 0 ? I + 1 : I - 1)) | ||||
21428 | return; | ||||
21429 | |||||
21430 | SDLoc DL(N); | ||||
21431 | auto LoHi = DAG.SplitVector(X, DL); | ||||
21432 | assert(LoHi.first.getValueType() == LoHi.second.getValueType()); | ||||
21433 | SDValue Addp = DAG.getNode(AArch64ISD::ADDP, N, LoHi.first.getValueType(), | ||||
21434 | LoHi.first, LoHi.second); | ||||
21435 | |||||
21436 | // Shuffle the elements back into order. | ||||
21437 | SmallVector<int> NMask; | ||||
21438 | for (unsigned I = 0, E = VT.getVectorNumElements() / 2; I < E; I++) { | ||||
21439 | NMask.push_back(I); | ||||
21440 | NMask.push_back(I); | ||||
21441 | } | ||||
21442 | Results.push_back( | ||||
21443 | DAG.getVectorShuffle(VT, DL, | ||||
21444 | DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Addp, | ||||
21445 | DAG.getUNDEF(LoHi.first.getValueType())), | ||||
21446 | DAG.getUNDEF(VT), NMask)); | ||||
21447 | } | ||||
21448 | |||||
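// Legalise an across-vector reduction over an illegally wide vector by
// splitting the operand in two, combining the halves elementwise with
// InterOp (e.g. ISD::ADD), and reducing the result with AcrossOp
// (e.g. AArch64ISD::UADDV).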
21449 | static void ReplaceReductionResults(SDNode *N, | ||||
21450 | SmallVectorImpl<SDValue> &Results, | ||||
21451 | SelectionDAG &DAG, unsigned InterOp, | ||||
21452 | unsigned AcrossOp) { | ||||
21453 | EVT LoVT, HiVT; | ||||
21454 | SDValue Lo, Hi; | ||||
21455 | SDLoc dl(N); | ||||
21456 | std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0)); | ||||
21457 | std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0); | ||||
21458 | SDValue InterVal = DAG.getNode(InterOp, dl, LoVT, Lo, Hi); | ||||
21459 | SDValue SplitVal = DAG.getNode(AcrossOp, dl, LoVT, InterVal); | ||||
21460 | Results.push_back(SplitVal); | ||||
21461 | } | ||||
21462 | |||||
21463 | static std::pair<SDValue, SDValue> splitInt128(SDValue N, SelectionDAG &DAG) { | ||||
21464 | SDLoc DL(N); | ||||
21465 | SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i64, N); | ||||
21466 | SDValue Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i64, | ||||
21467 | DAG.getNode(ISD::SRL, DL, MVT::i128, N, | ||||
21468 | DAG.getConstant(64, DL, MVT::i64))); | ||||
21469 | return std::make_pair(Lo, Hi); | ||||
21470 | } | ||||
21471 | |||||
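// For scalable integer vectors, a halving extract_subvector is lowered via
// the unpack instructions; e.g. (informally)
//   nxv8i8 = extract_subvector nxv16i8:V, 0
//     -> trunc(nxv8i16 = UUNPKLO V)
// with UUNPKHI used when the extract takes the high half.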
21472 | void AArch64TargetLowering::ReplaceExtractSubVectorResults( | ||||
21473 | SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const { | ||||
21474 | SDValue In = N->getOperand(0); | ||||
21475 | EVT InVT = In.getValueType(); | ||||
21476 | |||||
21477 | // Common code will handle these just fine. | ||||
21478 | if (!InVT.isScalableVector() || !InVT.isInteger()) | ||||
21479 | return; | ||||
21480 | |||||
21481 | SDLoc DL(N); | ||||
21482 | EVT VT = N->getValueType(0); | ||||
21483 | |||||
21484 | // The following checks bail if this is not a halving operation. | ||||
21485 | |||||
21486 | ElementCount ResEC = VT.getVectorElementCount(); | ||||
21487 | |||||
21488 | if (InVT.getVectorElementCount() != (ResEC * 2)) | ||||
21489 | return; | ||||
21490 | |||||
21491 | auto *CIndex = dyn_cast<ConstantSDNode>(N->getOperand(1)); | ||||
21492 | if (!CIndex) | ||||
21493 | return; | ||||
21494 | |||||
21495 | unsigned Index = CIndex->getZExtValue(); | ||||
21496 | if ((Index != 0) && (Index != ResEC.getKnownMinValue())) | ||||
21497 | return; | ||||
21498 | |||||
21499 | unsigned Opcode = (Index == 0) ? AArch64ISD::UUNPKLO : AArch64ISD::UUNPKHI; | ||||
21500 | EVT ExtendedHalfVT = VT.widenIntegerVectorElementType(*DAG.getContext()); | ||||
21501 | |||||
21502 | SDValue Half = DAG.getNode(Opcode, DL, ExtendedHalfVT, N->getOperand(0)); | ||||
21503 | Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Half)); | ||||
21504 | } | ||||
21505 | |||||
21506 | // Create an even/odd pair of X registers holding integer value V. | ||||
21507 | static SDValue createGPRPairNode(SelectionDAG &DAG, SDValue V) { | ||||
21508 | SDLoc dl(V.getNode()); | ||||
21509 | SDValue VLo = DAG.getAnyExtOrTrunc(V, dl, MVT::i64); | ||||
21510 | SDValue VHi = DAG.getAnyExtOrTrunc( | ||||
21511 | DAG.getNode(ISD::SRL, dl, MVT::i128, V, DAG.getConstant(64, dl, MVT::i64)), | ||||
21512 | dl, MVT::i64); | ||||
21513 | if (DAG.getDataLayout().isBigEndian()) | ||||
21514 | std::swap(VLo, VHi); | ||||
21515 | SDValue RegClass = | ||||
21516 | DAG.getTargetConstant(AArch64::XSeqPairsClassRegClassID, dl, MVT::i32); | ||||
21517 | SDValue SubReg0 = DAG.getTargetConstant(AArch64::sube64, dl, MVT::i32); | ||||
21518 | SDValue SubReg1 = DAG.getTargetConstant(AArch64::subo64, dl, MVT::i32); | ||||
21519 | const SDValue Ops[] = { RegClass, VLo, SubReg0, VHi, SubReg1 }; | ||||
21520 | return SDValue( | ||||
21521 | DAG.getMachineNode(TargetOpcode::REG_SEQUENCE, dl, MVT::Untyped, Ops), 0); | ||||
21522 | } | ||||
21523 | |||||
21524 | static void ReplaceCMP_SWAP_128Results(SDNode *N, | ||||
21525 | SmallVectorImpl<SDValue> &Results, | ||||
21526 | SelectionDAG &DAG, | ||||
21527 | const AArch64Subtarget *Subtarget) { | ||||
21528 | assert(N->getValueType(0) == MVT::i128 && | ||||
21529 | "AtomicCmpSwap on types less than 128 should be legal"); | ||||
21530 | |||||
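// Two strategies follow: with LSE (or outlined atomics) the CASP family of
// instructions is selected directly, wrapped in REG_SEQUENCE/EXTRACT_SUBREG
// to model the even/odd register pair; otherwise a CMP_SWAP_128 pseudo is
// emitted for later expansion into an LL/SC loop.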
21531 | MachineMemOperand *MemOp = cast<MemSDNode>(N)->getMemOperand(); | ||||
21532 | if (Subtarget->hasLSE() || Subtarget->outlineAtomics()) { | ||||
21533 | // LSE has a 128-bit compare and swap (CASP), but i128 is not a legal type, | ||||
21534 | // so lower it here, wrapped in REG_SEQUENCE and EXTRACT_SUBREG. | ||||
21535 | SDValue Ops[] = { | ||||
21536 | createGPRPairNode(DAG, N->getOperand(2)), // Compare value | ||||
21537 | createGPRPairNode(DAG, N->getOperand(3)), // Store value | ||||
21538 | N->getOperand(1), // Ptr | ||||
21539 | N->getOperand(0), // Chain in | ||||
21540 | }; | ||||
21541 | |||||
21542 | unsigned Opcode; | ||||
21543 | switch (MemOp->getMergedOrdering()) { | ||||
21544 | case AtomicOrdering::Monotonic: | ||||
21545 | Opcode = AArch64::CASPX; | ||||
21546 | break; | ||||
21547 | case AtomicOrdering::Acquire: | ||||
21548 | Opcode = AArch64::CASPAX; | ||||
21549 | break; | ||||
21550 | case AtomicOrdering::Release: | ||||
21551 | Opcode = AArch64::CASPLX; | ||||
21552 | break; | ||||
21553 | case AtomicOrdering::AcquireRelease: | ||||
21554 | case AtomicOrdering::SequentiallyConsistent: | ||||
21555 | Opcode = AArch64::CASPALX; | ||||
21556 | break; | ||||
21557 | default: | ||||
21558 | llvm_unreachable("Unexpected ordering!"); | ||||
21559 | } | ||||
21560 | |||||
21561 | MachineSDNode *CmpSwap = DAG.getMachineNode( | ||||
21562 | Opcode, SDLoc(N), DAG.getVTList(MVT::Untyped, MVT::Other), Ops); | ||||
21563 | DAG.setNodeMemRefs(CmpSwap, {MemOp}); | ||||
21564 | |||||
21565 | unsigned SubReg1 = AArch64::sube64, SubReg2 = AArch64::subo64; | ||||
21566 | if (DAG.getDataLayout().isBigEndian()) | ||||
21567 | std::swap(SubReg1, SubReg2); | ||||
21568 | SDValue Lo = DAG.getTargetExtractSubreg(SubReg1, SDLoc(N), MVT::i64, | ||||
21569 | SDValue(CmpSwap, 0)); | ||||
21570 | SDValue Hi = DAG.getTargetExtractSubreg(SubReg2, SDLoc(N), MVT::i64, | ||||
21571 | SDValue(CmpSwap, 0)); | ||||
21572 | Results.push_back( | ||||
21573 | DAG.getNode(ISD::BUILD_PAIR, SDLoc(N), MVT::i128, Lo, Hi)); | ||||
21574 | Results.push_back(SDValue(CmpSwap, 1)); // Chain out | ||||
21575 | return; | ||||
21576 | } | ||||
21577 | |||||
21578 | unsigned Opcode; | ||||
21579 | switch (MemOp->getMergedOrdering()) { | ||||
21580 | case AtomicOrdering::Monotonic: | ||||
21581 | Opcode = AArch64::CMP_SWAP_128_MONOTONIC; | ||||
21582 | break; | ||||
21583 | case AtomicOrdering::Acquire: | ||||
21584 | Opcode = AArch64::CMP_SWAP_128_ACQUIRE; | ||||
21585 | break; | ||||
21586 | case AtomicOrdering::Release: | ||||
21587 | Opcode = AArch64::CMP_SWAP_128_RELEASE; | ||||
21588 | break; | ||||
21589 | case AtomicOrdering::AcquireRelease: | ||||
21590 | case AtomicOrdering::SequentiallyConsistent: | ||||
21591 | Opcode = AArch64::CMP_SWAP_128; | ||||
21592 | break; | ||||
21593 | default: | ||||
21594 | llvm_unreachable("Unexpected ordering!"); | ||||
21595 | } | ||||
21596 | |||||
21597 | auto Desired = splitInt128(N->getOperand(2), DAG); | ||||
21598 | auto New = splitInt128(N->getOperand(3), DAG); | ||||
21599 | SDValue Ops[] = {N->getOperand(1), Desired.first, Desired.second, | ||||
21600 | New.first, New.second, N->getOperand(0)}; | ||||
21601 | SDNode *CmpSwap = DAG.getMachineNode( | ||||
21602 | Opcode, SDLoc(N), DAG.getVTList(MVT::i64, MVT::i64, MVT::i32, MVT::Other), | ||||
21603 | Ops); | ||||
21604 | DAG.setNodeMemRefs(cast<MachineSDNode>(CmpSwap), {MemOp}); | ||||
21605 | |||||
21606 | Results.push_back(DAG.getNode(ISD::BUILD_PAIR, SDLoc(N), MVT::i128, | ||||
21607 | SDValue(CmpSwap, 0), SDValue(CmpSwap, 1))); | ||||
21608 | Results.push_back(SDValue(CmpSwap, 3)); | ||||
21609 | } | ||||
21610 | |||||
21611 | void AArch64TargetLowering::ReplaceNodeResults( | ||||
21612 | SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const { | ||||
21613 | switch (N->getOpcode()) { | ||||
21614 | default: | ||||
21615 | llvm_unreachable("Don't know how to custom expand this"); | ||||
21616 | case ISD::BITCAST: | ||||
21617 | ReplaceBITCASTResults(N, Results, DAG); | ||||
21618 | return; | ||||
21619 | case ISD::VECREDUCE_ADD: | ||||
21620 | case ISD::VECREDUCE_SMAX: | ||||
21621 | case ISD::VECREDUCE_SMIN: | ||||
21622 | case ISD::VECREDUCE_UMAX: | ||||
21623 | case ISD::VECREDUCE_UMIN: | ||||
21624 | Results.push_back(LowerVECREDUCE(SDValue(N, 0), DAG)); | ||||
21625 | return; | ||||
21626 | case ISD::ADD: | ||||
21627 | case ISD::FADD: | ||||
21628 | ReplaceAddWithADDP(N, Results, DAG, Subtarget); | ||||
21629 | return; | ||||
21630 | |||||
21631 | case ISD::CTPOP: | ||||
21632 | case ISD::PARITY: | ||||
21633 | if (SDValue Result = LowerCTPOP_PARITY(SDValue(N, 0), DAG)) | ||||
21634 | Results.push_back(Result); | ||||
21635 | return; | ||||
21636 | case AArch64ISD::SADDV: | ||||
21637 | ReplaceReductionResults(N, Results, DAG, ISD::ADD, AArch64ISD::SADDV); | ||||
21638 | return; | ||||
21639 | case AArch64ISD::UADDV: | ||||
21640 | ReplaceReductionResults(N, Results, DAG, ISD::ADD, AArch64ISD::UADDV); | ||||
21641 | return; | ||||
21642 | case AArch64ISD::SMINV: | ||||
21643 | ReplaceReductionResults(N, Results, DAG, ISD::SMIN, AArch64ISD::SMINV); | ||||
21644 | return; | ||||
21645 | case AArch64ISD::UMINV: | ||||
21646 | ReplaceReductionResults(N, Results, DAG, ISD::UMIN, AArch64ISD::UMINV); | ||||
21647 | return; | ||||
21648 | case AArch64ISD::SMAXV: | ||||
21649 | ReplaceReductionResults(N, Results, DAG, ISD::SMAX, AArch64ISD::SMAXV); | ||||
21650 | return; | ||||
21651 | case AArch64ISD::UMAXV: | ||||
21652 | ReplaceReductionResults(N, Results, DAG, ISD::UMAX, AArch64ISD::UMAXV); | ||||
21653 | return; | ||||
21654 | case ISD::FP_TO_UINT: | ||||
21655 | case ISD::FP_TO_SINT: | ||||
21656 | case ISD::STRICT_FP_TO_SINT: | ||||
21657 | case ISD::STRICT_FP_TO_UINT: | ||||
21658 | assert(N->getValueType(0) == MVT::i128 && "unexpected illegal conversion"); | ||||
21659 | // Let normal code take care of it by not adding anything to Results. | ||||
21660 | return; | ||||
21661 | case ISD::ATOMIC_CMP_SWAP: | ||||
21662 | ReplaceCMP_SWAP_128Results(N, Results, DAG, Subtarget); | ||||
21663 | return; | ||||
21664 | case ISD::ATOMIC_LOAD: | ||||
21665 | case ISD::LOAD: { | ||||
21666 | MemSDNode *LoadNode = cast<MemSDNode>(N); | ||||
21667 | EVT MemVT = LoadNode->getMemoryVT(); | ||||
21668 | // Handle lowering 256-bit non-temporal loads into LDNP for little-endian | ||||
21669 | // targets. | ||||
21670 | if (LoadNode->isNonTemporal() && Subtarget->isLittleEndian() && | ||||
21671 | MemVT.getSizeInBits() == 256u && | ||||
21672 | (MemVT.getScalarSizeInBits() == 8u || | ||||
21673 | MemVT.getScalarSizeInBits() == 16u || | ||||
21674 | MemVT.getScalarSizeInBits() == 32u || | ||||
21675 | MemVT.getScalarSizeInBits() == 64u)) { | ||||
21676 | |||||
21677 | SDValue Result = DAG.getMemIntrinsicNode( | ||||
21678 | AArch64ISD::LDNP, SDLoc(N), | ||||
21679 | DAG.getVTList({MemVT.getHalfNumVectorElementsVT(*DAG.getContext()), | ||||
21680 | MemVT.getHalfNumVectorElementsVT(*DAG.getContext()), | ||||
21681 | MVT::Other}), | ||||
21682 | {LoadNode->getChain(), LoadNode->getBasePtr()}, | ||||
21683 | LoadNode->getMemoryVT(), LoadNode->getMemOperand()); | ||||
21684 | |||||
21685 | SDValue Pair = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), MemVT, | ||||
21686 | Result.getValue(0), Result.getValue(1)); | ||||
21687 | Results.append({Pair, Result.getValue(2) /* Chain */}); | ||||
21688 | return; | ||||
21689 | } | ||||
21690 | |||||
21691 | if ((!LoadNode->isVolatile() && !LoadNode->isAtomic()) || | ||||
21692 | LoadNode->getMemoryVT() != MVT::i128) { | ||||
21693 | // Non-volatile, non-atomic loads are optimized later in AArch64's load/store | ||||
21694 | // optimizer. | ||||
21695 | return; | ||||
21696 | } | ||||
21697 | |||||
21698 | if (SDValue(N, 0).getValueType() == MVT::i128) { | ||||
21699 | SDValue Result = DAG.getMemIntrinsicNode( | ||||
21700 | AArch64ISD::LDP, SDLoc(N), | ||||
21701 | DAG.getVTList({MVT::i64, MVT::i64, MVT::Other}), | ||||
21702 | {LoadNode->getChain(), LoadNode->getBasePtr()}, | ||||
21703 | LoadNode->getMemoryVT(), LoadNode->getMemOperand()); | ||||
21704 | |||||
21705 | SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, SDLoc(N), MVT::i128, | ||||
21706 | Result.getValue(0), Result.getValue(1)); | ||||
21707 | Results.append({Pair, Result.getValue(2) /* Chain */}); | ||||
21708 | } | ||||
21709 | return; | ||||
21710 | } | ||||
21711 | case ISD::EXTRACT_SUBVECTOR: | ||||
21712 | ReplaceExtractSubVectorResults(N, Results, DAG); | ||||
21713 | return; | ||||
21714 | case ISD::INSERT_SUBVECTOR: | ||||
21715 | case ISD::CONCAT_VECTORS: | ||||
21716 | // Custom lowering has been requested for INSERT_SUBVECTOR and | ||||
21717 | // CONCAT_VECTORS -- but delegate to common code for result type | ||||
21718 | // legalisation | ||||
21719 | return; | ||||
21720 | case ISD::INTRINSIC_WO_CHAIN: { | ||||
21721 | EVT VT = N->getValueType(0); | ||||
21722 | assert((VT == MVT::i8 || VT == MVT::i16) && | ||||
21723 | "custom lowering for unexpected type"); | ||||
21724 | |||||
21725 | ConstantSDNode *CN = cast<ConstantSDNode>(N->getOperand(0)); | ||||
21726 | Intrinsic::ID IntID = static_cast<Intrinsic::ID>(CN->getZExtValue()); | ||||
21727 | switch (IntID) { | ||||
21728 | default: | ||||
21729 | return; | ||||
21730 | case Intrinsic::aarch64_sve_clasta_n: { | ||||
21731 | SDLoc DL(N); | ||||
21732 | auto Op2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, N->getOperand(2)); | ||||
21733 | auto V = DAG.getNode(AArch64ISD::CLASTA_N, DL, MVT::i32, | ||||
21734 | N->getOperand(1), Op2, N->getOperand(3)); | ||||
21735 | Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, V)); | ||||
21736 | return; | ||||
21737 | } | ||||
21738 | case Intrinsic::aarch64_sve_clastb_n: { | ||||
21739 | SDLoc DL(N); | ||||
21740 | auto Op2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, N->getOperand(2)); | ||||
21741 | auto V = DAG.getNode(AArch64ISD::CLASTB_N, DL, MVT::i32, | ||||
21742 | N->getOperand(1), Op2, N->getOperand(3)); | ||||
21743 | Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, V)); | ||||
21744 | return; | ||||
21745 | } | ||||
21746 | case Intrinsic::aarch64_sve_lasta: { | ||||
21747 | SDLoc DL(N); | ||||
21748 | auto V = DAG.getNode(AArch64ISD::LASTA, DL, MVT::i32, | ||||
21749 | N->getOperand(1), N->getOperand(2)); | ||||
21750 | Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, V)); | ||||
21751 | return; | ||||
21752 | } | ||||
21753 | case Intrinsic::aarch64_sve_lastb: { | ||||
21754 | SDLoc DL(N); | ||||
21755 | auto V = DAG.getNode(AArch64ISD::LASTB, DL, MVT::i32, | ||||
21756 | N->getOperand(1), N->getOperand(2)); | ||||
21757 | Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, V)); | ||||
21758 | return; | ||||
21759 | } | ||||
21760 | } | ||||
21761 | } | ||||
21762 | case ISD::READ_REGISTER: { | ||||
21763 | SDLoc DL(N); | ||||
21764 | assert(N->getValueType(0) == MVT::i128 && | ||||
21765 | "READ_REGISTER custom lowering is only for 128-bit sysregs"); | ||||
21766 | SDValue Chain = N->getOperand(0); | ||||
21767 | SDValue SysRegName = N->getOperand(1); | ||||
21768 | |||||
21769 | SDValue Result = DAG.getNode( | ||||
21770 | AArch64ISD::MRRS, DL, DAG.getVTList({MVT::i64, MVT::i64, MVT::Other}), | ||||
21771 | Chain, SysRegName); | ||||
21772 | |||||
21773 | // Sysregs are not endian. Result.getValue(0) always contains the lower half | ||||
21774 | // of the 128-bit System Register value. | ||||
21775 | SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i128, | ||||
21776 | Result.getValue(0), Result.getValue(1)); | ||||
21777 | Results.push_back(Pair); | ||||
21778 | Results.push_back(Result.getValue(2)); // Chain | ||||
21779 | return; | ||||
21780 | } | ||||
21781 | } | ||||
21782 | } | ||||
21783 | |||||
21784 | bool AArch64TargetLowering::useLoadStackGuardNode() const { | ||||
21785 | if (Subtarget->isTargetAndroid() || Subtarget->isTargetFuchsia()) | ||||
21786 | return TargetLowering::useLoadStackGuardNode(); | ||||
21787 | return true; | ||||
21788 | } | ||||
21789 | |||||
21790 | unsigned AArch64TargetLowering::combineRepeatedFPDivisors() const { | ||||
21791 | // Combine multiple FDIVs with the same divisor into multiple FMULs by the | ||||
21792 | // reciprocal if there are three or more FDIVs. | ||||
21793 | return 3; | ||||
21794 | } | ||||
21795 | |||||
21796 | TargetLoweringBase::LegalizeTypeAction | ||||
21797 | AArch64TargetLowering::getPreferredVectorAction(MVT VT) const { | ||||
21798 | // During type legalization, we prefer to widen v1i8, v1i16, v1i32 to v8i8, | ||||
21799 | // v4i16, v2i32 instead of promoting them; v1f32 is widened likewise. | ||||
21800 | if (VT == MVT::v1i8 || VT == MVT::v1i16 || VT == MVT::v1i32 || | ||||
21801 | VT == MVT::v1f32) | ||||
21802 | return TypeWidenVector; | ||||
21803 | |||||
21804 | return TargetLoweringBase::getPreferredVectorAction(VT); | ||||
21805 | } | ||||
21806 | |||||
21807 | // In v8.4a, ldp and stp instructions are guaranteed to be single-copy atomic | ||||
21808 | // provided the address is 16-byte aligned. | ||||
21809 | bool AArch64TargetLowering::isOpSuitableForLDPSTP(const Instruction *I) const { | ||||
21810 | if (!Subtarget->hasLSE2()) | ||||
21811 | return false; | ||||
21812 | |||||
21813 | if (auto LI = dyn_cast<LoadInst>(I)) | ||||
21814 | return LI->getType()->getPrimitiveSizeInBits() == 128 && | ||||
21815 | LI->getAlign() >= Align(16); | ||||
21816 | |||||
21817 | if (auto SI = dyn_cast<StoreInst>(I)) | ||||
21818 | return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() == 128 && | ||||
21819 | SI->getAlign() >= Align(16); | ||||
21820 | |||||
21821 | return false; | ||||
21822 | } | ||||
21823 | |||||
21824 | bool AArch64TargetLowering::shouldInsertFencesForAtomic( | ||||
21825 | const Instruction *I) const { | ||||
21826 | return isOpSuitableForLDPSTP(I); | ||||
21827 | } | ||||
21828 | |||||
21829 | // Loads and stores less than 128-bits are already atomic; ones above that | ||||
21830 | // are doomed anyway, so defer to the default libcall and blame the OS when | ||||
21831 | // things go wrong. | ||||
21832 | TargetLoweringBase::AtomicExpansionKind | ||||
21833 | AArch64TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const { | ||||
21834 | unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits(); | ||||
21835 | if (Size != 128 || isOpSuitableForLDPSTP(SI)) | ||||
21836 | return AtomicExpansionKind::None; | ||||
21837 | return AtomicExpansionKind::Expand; | ||||
21838 | } | ||||
21839 | |||||
21840 | // Loads and stores less than 128-bits are already atomic; ones above that | ||||
21841 | // are doomed anyway, so defer to the default libcall and blame the OS when | ||||
21842 | // things go wrong. | ||||
21843 | TargetLowering::AtomicExpansionKind | ||||
21844 | AArch64TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const { | ||||
21845 | unsigned Size = LI->getType()->getPrimitiveSizeInBits(); | ||||
21846 | |||||
21847 | if (Size != 128 || isOpSuitableForLDPSTP(LI)) | ||||
21848 | return AtomicExpansionKind::None; | ||||
21849 | |||||
21850 | // At -O0, fast-regalloc cannot cope with the live vregs necessary to | ||||
21851 | // implement atomicrmw without spilling. If the target address is also on the | ||||
21852 | // stack and close enough to the spill slot, this can lead to a situation | ||||
21853 | // where the monitor always gets cleared and the atomic operation can never | ||||
21854 | // succeed. So at -O0 lower this operation to a CAS loop. | ||||
21855 | if (getTargetMachine().getOptLevel() == CodeGenOpt::None) | ||||
21856 | return AtomicExpansionKind::CmpXChg; | ||||
21857 | |||||
21858 | return AtomicExpansionKind::LLSC; | ||||
21859 | } | ||||
21860 | |||||
21861 | // For the real atomic operations, we have ldxr/stxr up to 128 bits. | ||||
21862 | TargetLowering::AtomicExpansionKind | ||||
21863 | AArch64TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const { | ||||
21864 | if (AI->isFloatingPointOperation()) | ||||
21865 | return AtomicExpansionKind::CmpXChg; | ||||
21866 | |||||
21867 | unsigned Size = AI->getType()->getPrimitiveSizeInBits(); | ||||
21868 | if (Size > 128) return AtomicExpansionKind::None; | ||||
21869 | |||||
21870 | // Nand is not supported in LSE. | ||||
21871 | // Leave 128 bits to LLSC or CmpXChg. | ||||
21872 | if (AI->getOperation() != AtomicRMWInst::Nand && Size < 128) { | ||||
21873 | if (Subtarget->hasLSE()) | ||||
21874 | return AtomicExpansionKind::None; | ||||
21875 | if (Subtarget->outlineAtomics()) { | ||||
21876 | // [U]Min/[U]Max RMW atomics are used in __sync_fetch_ libcalls so far. | ||||
21877 | // Don't outline them unless | ||||
21878 | // (1) high level <atomic> support approved: | ||||
21879 | // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2020/p0493r1.pdf | ||||
21880 | // (2) low level libgcc and compiler-rt support implemented by: | ||||
21881 | // min/max outline atomics helpers | ||||
21882 | if (AI->getOperation() != AtomicRMWInst::Min && | ||||
21883 | AI->getOperation() != AtomicRMWInst::Max && | ||||
21884 | AI->getOperation() != AtomicRMWInst::UMin && | ||||
21885 | AI->getOperation() != AtomicRMWInst::UMax) { | ||||
21886 | return AtomicExpansionKind::None; | ||||
21887 | } | ||||
21888 | } | ||||
21889 | } | ||||
21890 | |||||
21891 | // At -O0, fast-regalloc cannot cope with the live vregs necessary to | ||||
21892 | // implement atomicrmw without spilling. If the target address is also on the | ||||
21893 | // stack and close enough to the spill slot, this can lead to a situation | ||||
21894 | // where the monitor always gets cleared and the atomic operation can never | ||||
21895 | // succeed. So at -O0 lower this operation to a CAS loop. | ||||
21896 | if (getTargetMachine().getOptLevel() == CodeGenOpt::None) | ||||
21897 | return AtomicExpansionKind::CmpXChg; | ||||
21898 | |||||
21899 | return AtomicExpansionKind::LLSC; | ||||
21900 | } | ||||
21901 | |||||
21902 | TargetLowering::AtomicExpansionKind | ||||
21903 | AArch64TargetLowering::shouldExpandAtomicCmpXchgInIR( | ||||
21904 | AtomicCmpXchgInst *AI) const { | ||||
21905 | // If subtarget has LSE, leave cmpxchg intact for codegen. | ||||
21906 | if (Subtarget->hasLSE() || Subtarget->outlineAtomics()) | ||||
21907 | return AtomicExpansionKind::None; | ||||
21908 | // At -O0, fast-regalloc cannot cope with the live vregs necessary to | ||||
21909 | // implement cmpxchg without spilling. If the address being exchanged is also | ||||
21910 | // on the stack and close enough to the spill slot, this can lead to a | ||||
21911 | // situation where the monitor always gets cleared and the atomic operation | ||||
21912 | // can never succeed. So at -O0 we need a late-expanded pseudo-inst instead. | ||||
21913 | if (getTargetMachine().getOptLevel() == CodeGenOpt::None) | ||||
21914 | return AtomicExpansionKind::None; | ||||
21915 | |||||
21916 | // 128-bit atomic cmpxchg is weird; AtomicExpand doesn't know how to expand | ||||
21917 | // it. | ||||
21918 | unsigned Size = AI->getCompareOperand()->getType()->getPrimitiveSizeInBits(); | ||||
21919 | if (Size > 64) | ||||
21920 | return AtomicExpansionKind::None; | ||||
21921 | |||||
21922 | return AtomicExpansionKind::LLSC; | ||||
21923 | } | ||||
21924 | |||||
21925 | Value *AArch64TargetLowering::emitLoadLinked(IRBuilderBase &Builder, | ||||
21926 | Type *ValueTy, Value *Addr, | ||||
21927 | AtomicOrdering Ord) const { | ||||
21928 | Module *M = Builder.GetInsertBlock()->getParent()->getParent(); | ||||
21929 | bool IsAcquire = isAcquireOrStronger(Ord); | ||||
21930 | |||||
21931 | // Since i128 isn't legal and intrinsics don't get type-lowered, the ldrexd | ||||
21932 | // intrinsic must return {i64, i64} and we have to recombine them into a | ||||
21933 | // single i128 here. | ||||
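// An informal sketch of the IR this emits for the acquire i128 case:
//   %lohi = call { i64, i64 } @llvm.aarch64.ldaxp(ptr %addr)
//   %lo   = zext i64 (extractvalue %lohi, 0) to i128
//   %hi   = zext i64 (extractvalue %lohi, 1) to i128
//   %val  = or i128 %lo, (shl i128 %hi, 64)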
21934 | if (ValueTy->getPrimitiveSizeInBits() == 128) { | ||||
21935 | Intrinsic::ID Int = | ||||
21936 | IsAcquire ? Intrinsic::aarch64_ldaxp : Intrinsic::aarch64_ldxp; | ||||
21937 | Function *Ldxr = Intrinsic::getDeclaration(M, Int); | ||||
21938 | |||||
21939 | Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext())); | ||||
21940 | Value *LoHi = Builder.CreateCall(Ldxr, Addr, "lohi"); | ||||
21941 | |||||
21942 | Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo"); | ||||
21943 | Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi"); | ||||
21944 | Lo = Builder.CreateZExt(Lo, ValueTy, "lo64"); | ||||
21945 | Hi = Builder.CreateZExt(Hi, ValueTy, "hi64"); | ||||
21946 | return Builder.CreateOr( | ||||
21947 | Lo, Builder.CreateShl(Hi, ConstantInt::get(ValueTy, 64)), "val64"); | ||||
21948 | } | ||||
21949 | |||||
21950 | Type *Tys[] = { Addr->getType() }; | ||||
21951 | Intrinsic::ID Int = | ||||
21952 | IsAcquire ? Intrinsic::aarch64_ldaxr : Intrinsic::aarch64_ldxr; | ||||
21953 | Function *Ldxr = Intrinsic::getDeclaration(M, Int, Tys); | ||||
21954 | |||||
21955 | const DataLayout &DL = M->getDataLayout(); | ||||
21956 | IntegerType *IntEltTy = Builder.getIntNTy(DL.getTypeSizeInBits(ValueTy)); | ||||
21957 | CallInst *CI = Builder.CreateCall(Ldxr, Addr); | ||||
21958 | CI->addParamAttr( | ||||
21959 | 0, Attribute::get(Builder.getContext(), Attribute::ElementType, ValueTy)); | ||||
21960 | Value *Trunc = Builder.CreateTrunc(CI, IntEltTy); | ||||
21961 | |||||
21962 | return Builder.CreateBitCast(Trunc, ValueTy); | ||||
21963 | } | ||||
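
// A rough sketch of the IR built above for the 128-bit acquire case (value
// names are illustrative): the {i64, i64} pair returned by ldaxp is
// reassembled into a single i128:
//
//   %lohi = call { i64, i64 } @llvm.aarch64.ldaxp(i8* %addr)
//   %lo = extractvalue { i64, i64 } %lohi, 0
//   %hi = extractvalue { i64, i64 } %lohi, 1
//   %lo64 = zext i64 %lo to i128
//   %hi64 = zext i64 %hi to i128
//   %shl = shl i128 %hi64, 64
//   %val64 = or i128 %lo64, %shl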

void AArch64TargetLowering::emitAtomicCmpXchgNoStoreLLBalance(
    IRBuilderBase &Builder) const {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Builder.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::aarch64_clrex));
}

Value *AArch64TargetLowering::emitStoreConditional(IRBuilderBase &Builder,
                                                   Value *Val, Value *Addr,
                                                   AtomicOrdering Ord) const {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  bool IsRelease = isReleaseOrStronger(Ord);

  // Since the intrinsics must have legal type, the i128 intrinsics take two
  // parameters: "i64, i64". We must marshal Val into the appropriate form
  // before the call.
  if (Val->getType()->getPrimitiveSizeInBits() == 128) {
    Intrinsic::ID Int =
        IsRelease ? Intrinsic::aarch64_stlxp : Intrinsic::aarch64_stxp;
    Function *Stxr = Intrinsic::getDeclaration(M, Int);
    Type *Int64Ty = Type::getInt64Ty(M->getContext());

    Value *Lo = Builder.CreateTrunc(Val, Int64Ty, "lo");
    Value *Hi = Builder.CreateTrunc(Builder.CreateLShr(Val, 64), Int64Ty, "hi");
    Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
    return Builder.CreateCall(Stxr, {Lo, Hi, Addr});
  }

  Intrinsic::ID Int =
      IsRelease ? Intrinsic::aarch64_stlxr : Intrinsic::aarch64_stxr;
  Type *Tys[] = { Addr->getType() };
  Function *Stxr = Intrinsic::getDeclaration(M, Int, Tys);

  const DataLayout &DL = M->getDataLayout();
  IntegerType *IntValTy =
      Builder.getIntNTy(DL.getTypeSizeInBits(Val->getType()));
  Val = Builder.CreateBitCast(Val, IntValTy);

  CallInst *CI = Builder.CreateCall(
      Stxr, {Builder.CreateZExtOrBitCast(
                 Val, Stxr->getFunctionType()->getParamType(0)),
             Addr});
  CI->addParamAttr(1, Attribute::get(Builder.getContext(),
                                     Attribute::ElementType, Val->getType()));
  return CI;
}
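
// Sketch of the 128-bit release path above (names illustrative): Val is
// split into two i64 halves and handed to stlxp, whose i32 result is the
// usual store-exclusive status (0 on success, non-zero on failure):
//
//   %lo = trunc i128 %val to i64
//   %shr = lshr i128 %val, 64
//   %hi = trunc i128 %shr to i64
//   %status = call i32 @llvm.aarch64.stlxp(i64 %lo, i64 %hi, i8* %addr)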

bool AArch64TargetLowering::functionArgumentNeedsConsecutiveRegisters(
    Type *Ty, CallingConv::ID CallConv, bool isVarArg,
    const DataLayout &DL) const {
  if (!Ty->isArrayTy()) {
    const TypeSize &TySize = Ty->getPrimitiveSizeInBits();
    return TySize.isScalable() && TySize.getKnownMinSize() > 128;
  }

  // All non-aggregate members of the type must have the same type.
  SmallVector<EVT> ValueVTs;
  ComputeValueVTs(*this, DL, Ty, ValueVTs);
  return all_equal(ValueVTs);
}

bool AArch64TargetLowering::shouldNormalizeToSelectSequence(LLVMContext &,
                                                            EVT) const {
  return false;
}

static Value *UseTlsOffset(IRBuilderBase &IRB, unsigned Offset) {
  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
  Function *ThreadPointerFunc =
      Intrinsic::getDeclaration(M, Intrinsic::thread_pointer);
  return IRB.CreatePointerCast(
      IRB.CreateConstGEP1_32(IRB.getInt8Ty(), IRB.CreateCall(ThreadPointerFunc),
                             Offset),
      IRB.getInt8PtrTy()->getPointerTo(0));
}
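
// UseTlsOffset effectively computes "(i8 **)(thread_pointer + Offset)". On
// AArch64 the llvm.thread_pointer intrinsic is normally selected to a read
// of TPIDR_EL0, so the Android stack-guard slot below is loaded along the
// lines of (assembly for illustration only):
//
//   mrs x8, TPIDR_EL0
//   ldr x8, [x8, #0x28]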

Value *AArch64TargetLowering::getIRStackGuard(IRBuilderBase &IRB) const {
  // Android provides a fixed TLS slot for the stack cookie. See the definition
  // of TLS_SLOT_STACK_GUARD in
  // https://android.googlesource.com/platform/bionic/+/master/libc/private/bionic_tls.h
  if (Subtarget->isTargetAndroid())
    return UseTlsOffset(IRB, 0x28);

  // Fuchsia is similar.
  // <zircon/tls.h> defines ZX_TLS_STACK_GUARD_OFFSET with this value.
  if (Subtarget->isTargetFuchsia())
    return UseTlsOffset(IRB, -0x10);

  return TargetLowering::getIRStackGuard(IRB);
}

void AArch64TargetLowering::insertSSPDeclarations(Module &M) const {
  // MSVC CRT provides functionality for stack protection.
  if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment()) {
    // MSVC CRT has a global variable holding the security cookie.
    M.getOrInsertGlobal("__security_cookie",
                        Type::getInt8PtrTy(M.getContext()));

    // MSVC CRT has a function to validate the security cookie.
    FunctionCallee SecurityCheckCookie = M.getOrInsertFunction(
        Subtarget->getSecurityCheckCookieName(),
        Type::getVoidTy(M.getContext()), Type::getInt8PtrTy(M.getContext()));
    if (Function *F = dyn_cast<Function>(SecurityCheckCookie.getCallee())) {
      F->setCallingConv(CallingConv::Win64);
      F->addParamAttr(0, Attribute::AttrKind::InReg);
    }
    return;
  }
  TargetLowering::insertSSPDeclarations(M);
}

Value *AArch64TargetLowering::getSDagStackGuard(const Module &M) const {
  // MSVC CRT has a global variable holding the security cookie.
  if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment())
    return M.getGlobalVariable("__security_cookie");
  return TargetLowering::getSDagStackGuard(M);
}

Function *AArch64TargetLowering::getSSPStackGuardCheck(const Module &M) const {
  // MSVC CRT has a function to validate the security cookie.
  if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment())
    return M.getFunction(Subtarget->getSecurityCheckCookieName());
  return TargetLowering::getSSPStackGuardCheck(M);
}

Value *
AArch64TargetLowering::getSafeStackPointerLocation(IRBuilderBase &IRB) const {
  // Android provides a fixed TLS slot for the SafeStack pointer. See the
  // definition of TLS_SLOT_SAFESTACK in
  // https://android.googlesource.com/platform/bionic/+/master/libc/private/bionic_tls.h
  if (Subtarget->isTargetAndroid())
    return UseTlsOffset(IRB, 0x48);

  // Fuchsia is similar.
  // <zircon/tls.h> defines ZX_TLS_UNSAFE_SP_OFFSET with this value.
  if (Subtarget->isTargetFuchsia())
    return UseTlsOffset(IRB, -0x8);

  return TargetLowering::getSafeStackPointerLocation(IRB);
}

bool AArch64TargetLowering::isMaskAndCmp0FoldingBeneficial(
    const Instruction &AndI) const {
  // Only sink the 'and' mask to the cmp's use block if it is masking a single
  // bit, since this is likely to fold the and/cmp/br into a single tbz
  // instruction. It may be beneficial to sink in other cases, but we would
  // have to check that the cmp would not get folded into the br to form a cbz
  // for these to be beneficial.
  ConstantInt *Mask = dyn_cast<ConstantInt>(AndI.getOperand(1));
  if (!Mask)
    return false;
  return Mask->getValue().isPowerOf2();
}
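
// To make the heuristic concrete: for "if ((x & 8) == 0)" the and/cmp/br
// triple can become a single "tbz w0, #3, <label>", so sinking the mask next
// to the compare is profitable. For a multi-bit mask the AND must survive
// anyway and the branch is more likely to fold into cbz/cbnz, so we decline.
// (Example is illustrative; the folding itself happens later in selection.)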

bool AArch64TargetLowering::
    shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
        SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
        unsigned OldShiftOpcode, unsigned NewShiftOpcode,
        SelectionDAG &DAG) const {
  // Does the baseline recommend not to perform the fold by default?
  if (!TargetLowering::shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
          X, XC, CC, Y, OldShiftOpcode, NewShiftOpcode, DAG))
    return false;
  // Else, if this is a vector shift, prefer 'shl'.
  return X.getValueType().isScalarInteger() || NewShiftOpcode == ISD::SHL;
}

bool AArch64TargetLowering::shouldExpandShift(SelectionDAG &DAG,
                                              SDNode *N) const {
  if (DAG.getMachineFunction().getFunction().hasMinSize() &&
      !Subtarget->isTargetWindows() && !Subtarget->isTargetDarwin())
    return false;
  return true;
}

void AArch64TargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
  // Update IsSplitCSR in AArch64FunctionInfo.
  AArch64FunctionInfo *AFI = Entry->getParent()->getInfo<AArch64FunctionInfo>();
  AFI->setIsSplitCSR(true);
}

void AArch64TargetLowering::insertCopiesSplitCSR(
    MachineBasicBlock *Entry,
    const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
  const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();
  const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
  if (!IStart)
    return;

  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
  MachineBasicBlock::iterator MBBI = Entry->begin();
  for (const MCPhysReg *I = IStart; *I; ++I) {
    const TargetRegisterClass *RC = nullptr;
    if (AArch64::GPR64RegClass.contains(*I))
      RC = &AArch64::GPR64RegClass;
    else if (AArch64::FPR64RegClass.contains(*I))
      RC = &AArch64::FPR64RegClass;
    else
      llvm_unreachable("Unexpected register class in CSRsViaCopy!");

    Register NewVR = MRI->createVirtualRegister(RC);
    // Create copy from CSR to a virtual register.
    // FIXME: this currently does not emit CFI pseudo-instructions, it works
    // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
    // nounwind. If we want to generalize this later, we may need to emit
    // CFI pseudo-instructions.
    assert(Entry->getParent()->getFunction().hasFnAttribute(
               Attribute::NoUnwind) &&
           "Function should be nounwind in insertCopiesSplitCSR!");
    Entry->addLiveIn(*I);
    BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
        .addReg(*I);

    // Insert the copy-back instructions right before the terminator.
    for (auto *Exit : Exits)
      BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
              TII->get(TargetOpcode::COPY), *I)
          .addReg(NewVR);
  }
}

bool AArch64TargetLowering::isIntDivCheap(EVT VT, AttributeList Attr) const {
  // Integer division on AArch64 is expensive. However, when aggressively
  // optimizing for code size, we prefer to use a div instruction, as it is
  // usually smaller than the alternative sequence.
  // The exception to this is vector division. Since AArch64 doesn't have
  // vector integer division, leaving the division as-is is a loss even in
  // terms of size, because it will have to be scalarized, while the
  // alternative code sequence can be performed in vector form.
  bool OptSize = Attr.hasFnAttr(Attribute::MinSize);
  return OptSize && !VT.isVector();
}

bool AArch64TargetLowering::preferIncOfAddToSubOfNot(EVT VT) const {
  // We want inc-of-add for scalars and sub-of-not for vectors.
  return VT.isScalarInteger();
}

bool AArch64TargetLowering::shouldConvertFpToSat(unsigned Op, EVT FPVT,
                                                 EVT VT) const {
  // v8f16 without fp16 needs to be extended to v8f32, which is more difficult
  // to legalize.
  if (FPVT == MVT::v8f16 && !Subtarget->hasFullFP16())
    return false;
  return TargetLowering::shouldConvertFpToSat(Op, FPVT, VT);
}

bool AArch64TargetLowering::enableAggressiveFMAFusion(EVT VT) const {
  return Subtarget->hasAggressiveFMA() && VT.isFloatingPoint();
}

unsigned
AArch64TargetLowering::getVaListSizeInBits(const DataLayout &DL) const {
  if (Subtarget->isTargetDarwin() || Subtarget->isTargetWindows())
    return getPointerTy(DL).getSizeInBits();

  return 3 * getPointerTy(DL).getSizeInBits() + 2 * 32;
}
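
// For reference, the AAPCS64 va_list whose size is computed above is
// (per the AAPCS64 "Variable argument lists" section):
//
//   struct va_list {
//     void *__stack;   // next stacked argument
//     void *__gr_top;  // end of the general register save area
//     void *__vr_top;  // end of the FP/SIMD register save area
//     int __gr_offs;   // offset from __gr_top to the next GP register arg
//     int __vr_offs;   // offset from __vr_top to the next FP/SIMD arg
//   };
//
// i.e. 3 pointers + 2 ints, matching the expression above. Darwin and
// Windows instead use a single pointer ("char *") va_list.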

void AArch64TargetLowering::finalizeLowering(MachineFunction &MF) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  // If we have any vulnerable SVE stack objects then the stack protector
  // needs to be placed at the top of the SVE stack area, as the SVE locals
  // are placed above the other locals, so we allocate it as if it were a
  // scalable vector.
  // FIXME: It may be worthwhile having a specific interface for this rather
  // than doing it here in finalizeLowering.
  if (MFI.hasStackProtectorIndex()) {
    for (unsigned int i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
      if (MFI.getStackID(i) == TargetStackID::ScalableVector &&
          MFI.getObjectSSPLayout(i) != MachineFrameInfo::SSPLK_None) {
        MFI.setStackID(MFI.getStackProtectorIndex(),
                       TargetStackID::ScalableVector);
        MFI.setObjectAlignment(MFI.getStackProtectorIndex(), Align(16));
        break;
      }
    }
  }
  MFI.computeMaxCallFrameSize(MF);
  TargetLoweringBase::finalizeLowering(MF);
}

// Unlike X86, we let frame lowering assign offsets to all catch objects.
bool AArch64TargetLowering::needsFixedCatchObjects() const {
  return false;
}

bool AArch64TargetLowering::shouldLocalize(
    const MachineInstr &MI, const TargetTransformInfo *TTI) const {
  auto &MF = *MI.getMF();
  auto &MRI = MF.getRegInfo();
  auto maxUses = [](unsigned RematCost) {
    // A cost of 1 means remats are basically free.
    if (RematCost == 1)
      return std::numeric_limits<unsigned>::max();
    if (RematCost == 2)
      return 2U;

    // Remat is too expensive, only sink if there's one user.
    if (RematCost > 2)
      return 1U;
    llvm_unreachable("Unexpected remat cost");
  };

  switch (MI.getOpcode()) {
  case TargetOpcode::G_GLOBAL_VALUE: {
    // On Darwin, TLS global vars get selected into function calls, which
    // we don't want localized, as they can get moved into the middle of
    // another call sequence.
    const GlobalValue &GV = *MI.getOperand(1).getGlobal();
    if (GV.isThreadLocal() && Subtarget->isTargetMachO())
      return false;
    break;
  }
  case TargetOpcode::G_CONSTANT: {
    auto *CI = MI.getOperand(1).getCImm();
    APInt Imm = CI->getValue();
    InstructionCost Cost = TTI->getIntImmCost(
        Imm, CI->getType(), TargetTransformInfo::TCK_CodeSize);
    assert(Cost.isValid() && "Expected a valid imm cost");

    unsigned RematCost = *Cost.getValue();
    Register Reg = MI.getOperand(0).getReg();
    unsigned MaxUses = maxUses(RematCost);
    // Don't pass the UINT_MAX sentinel value to hasAtMostUserInstrs().
    if (MaxUses == std::numeric_limits<unsigned>::max())
      --MaxUses;
    return MRI.hasAtMostUserInstrs(Reg, MaxUses);
  }
  // If we legalized G_GLOBAL_VALUE into ADRP + G_ADD_LOW, mark both as being
  // localizable.
  case AArch64::ADRP:
  case AArch64::G_ADD_LOW:
    return true;
  default:
    break;
  }
  return TargetLoweringBase::shouldLocalize(MI, TTI);
}
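
// To make maxUses() concrete: a constant rematerializable with a single
// instruction (cost 1) may be localized into any number of using blocks, a
// two-instruction immediate is duplicated for at most two users, and
// anything more expensive is sunk only when there is a single user. The
// costs come from TTI::getIntImmCost, so the exact threshold a given
// constant hits is target- and value-dependent.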

bool AArch64TargetLowering::fallBackToDAGISel(const Instruction &Inst) const {
  if (isa<ScalableVectorType>(Inst.getType()))
    return true;

  for (unsigned i = 0; i < Inst.getNumOperands(); ++i)
    if (isa<ScalableVectorType>(Inst.getOperand(i)->getType()))
      return true;

  if (const AllocaInst *AI = dyn_cast<AllocaInst>(&Inst)) {
    if (isa<ScalableVectorType>(AI->getAllocatedType()))
      return true;
  }

  // Checks to allow the use of SME instructions.
  if (auto *Base = dyn_cast<CallBase>(&Inst)) {
    auto CallerAttrs = SMEAttrs(*Inst.getFunction());
    auto CalleeAttrs = SMEAttrs(*Base);
    if (CallerAttrs.requiresSMChange(CalleeAttrs,
                                     /*BodyOverridesInterface=*/false) ||
        CallerAttrs.requiresLazySave(CalleeAttrs))
      return true;
  }
  return false;
}

// Return the largest legal scalable vector type that matches VT's element
// type.
static EVT getContainerForFixedLengthVector(SelectionDAG &DAG, EVT VT) {
  assert(VT.isFixedLengthVector() &&
         DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
         "Expected legal fixed length vector!");
  switch (VT.getVectorElementType().getSimpleVT().SimpleTy) {
  default:
    llvm_unreachable("unexpected element type for SVE container");
  case MVT::i8:
    return EVT(MVT::nxv16i8);
  case MVT::i16:
    return EVT(MVT::nxv8i16);
  case MVT::i32:
    return EVT(MVT::nxv4i32);
  case MVT::i64:
    return EVT(MVT::nxv2i64);
  case MVT::f16:
    return EVT(MVT::nxv8f16);
  case MVT::f32:
    return EVT(MVT::nxv4f32);
  case MVT::f64:
    return EVT(MVT::nxv2f64);
  }
}
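
// For example, a legal fixed-length v4i32 maps to the nxv4i32 container:
// the fixed-length value lives in the low 128 bits of the scalable register
// and the remaining lanes are left undefined or masked off by the callers.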

// Return a PTRUE with active lanes corresponding to the extent of VT.
static SDValue getPredicateForFixedLengthVector(SelectionDAG &DAG, SDLoc &DL,
                                                EVT VT) {
  assert(VT.isFixedLengthVector() &&
         DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
         "Expected legal fixed length vector!");

  std::optional<unsigned> PgPattern =
      getSVEPredPatternFromNumElements(VT.getVectorNumElements());
  assert(PgPattern && "Unexpected element count for SVE predicate");

  // For vectors that are exactly getMaxSVEVectorSizeInBits big, we can use
  // AArch64SVEPredPattern::all, which can enable the use of unpredicated
  // variants of instructions when available.
  const auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
  unsigned MinSVESize = Subtarget.getMinSVEVectorSizeInBits();
  unsigned MaxSVESize = Subtarget.getMaxSVEVectorSizeInBits();
  if (MaxSVESize && MinSVESize == MaxSVESize &&
      MaxSVESize == VT.getSizeInBits())
    PgPattern = AArch64SVEPredPattern::all;

  MVT MaskVT;
  switch (VT.getVectorElementType().getSimpleVT().SimpleTy) {
  default:
    llvm_unreachable("unexpected element type for SVE predicate");
  case MVT::i8:
    MaskVT = MVT::nxv16i1;
    break;
  case MVT::i16:
  case MVT::f16:
    MaskVT = MVT::nxv8i1;
    break;
  case MVT::i32:
  case MVT::f32:
    MaskVT = MVT::nxv4i1;
    break;
  case MVT::i64:
  case MVT::f64:
    MaskVT = MVT::nxv2i1;
    break;
  }

  return getPTrue(DAG, DL, MaskVT, *PgPattern);
}
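
// E.g. for a fixed-length v4i32 this produces a PTRUE with the VL4 pattern
// (roughly "ptrue p0.s, vl4"), activating exactly the four lanes that back
// the fixed-length vector; assembly shown for illustration only.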

static SDValue getPredicateForScalableVector(SelectionDAG &DAG, SDLoc &DL,
                                             EVT VT) {
  assert(VT.isScalableVector() && DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
         "Expected legal scalable vector!");
  auto PredTy = VT.changeVectorElementType(MVT::i1);
  return getPTrue(DAG, DL, PredTy, AArch64SVEPredPattern::all);
}

static SDValue getPredicateForVector(SelectionDAG &DAG, SDLoc &DL, EVT VT) {
  if (VT.isFixedLengthVector())
    return getPredicateForFixedLengthVector(DAG, DL, VT);

  return getPredicateForScalableVector(DAG, DL, VT);
}

// Grow V to consume an entire SVE register.
static SDValue convertToScalableVector(SelectionDAG &DAG, EVT VT, SDValue V) {
  assert(VT.isScalableVector() &&
         "Expected to convert into a scalable vector!");
  assert(V.getValueType().isFixedLengthVector() &&
         "Expected a fixed length vector operand!");
  SDLoc DL(V);
  SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
  return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
}

// Shrink V so it's just big enough to maintain a VT's worth of data.
static SDValue convertFromScalableVector(SelectionDAG &DAG, EVT VT, SDValue V) {
  assert(VT.isFixedLengthVector() &&
         "Expected to convert into a fixed length vector!");
  assert(V.getValueType().isScalableVector() &&
         "Expected a scalable vector operand!");
  SDLoc DL(V);
  SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);
}
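
// These helpers are deliberately symmetric: convertToScalableVector inserts
// V at index 0 of an undef scalable vector, and convertFromScalableVector
// extracts the low fixed-length slice again, so a round trip is expected to
// fold away to nothing during instruction selection.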

// Convert all fixed length vector loads larger than NEON to masked_loads.
SDValue AArch64TargetLowering::LowerFixedLengthVectorLoadToSVE(
    SDValue Op, SelectionDAG &DAG) const {
  auto Load = cast<LoadSDNode>(Op);

  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
  EVT LoadVT = ContainerVT;
  EVT MemVT = Load->getMemoryVT();

  auto Pg = getPredicateForFixedLengthVector(DAG, DL, VT);

  if (VT.isFloatingPoint()) {
    LoadVT = ContainerVT.changeTypeToInteger();
    MemVT = MemVT.changeTypeToInteger();
  }

  SDValue NewLoad = DAG.getMaskedLoad(
      LoadVT, DL, Load->getChain(), Load->getBasePtr(), Load->getOffset(), Pg,
      DAG.getUNDEF(LoadVT), MemVT, Load->getMemOperand(),
      Load->getAddressingMode(), Load->getExtensionType());

  SDValue Result = NewLoad;
  if (VT.isFloatingPoint() && Load->getExtensionType() == ISD::EXTLOAD) {
    EVT ExtendVT = ContainerVT.changeVectorElementType(
        Load->getMemoryVT().getVectorElementType());

    Result = getSVESafeBitCast(ExtendVT, Result, DAG);
    Result = DAG.getNode(AArch64ISD::FP_EXTEND_MERGE_PASSTHRU, DL, ContainerVT,
                         Pg, Result, DAG.getUNDEF(ContainerVT));
  } else if (VT.isFloatingPoint()) {
    Result = DAG.getNode(ISD::BITCAST, DL, ContainerVT, Result);
  }

  Result = convertFromScalableVector(DAG, VT, Result);
  SDValue MergedValues[2] = {Result, NewLoad.getValue(1)};
  return DAG.getMergeValues(MergedValues, DL);
}

static SDValue convertFixedMaskToScalableVector(SDValue Mask,
                                                SelectionDAG &DAG) {
  SDLoc DL(Mask);
  EVT InVT = Mask.getValueType();
  EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT);

  auto Pg = getPredicateForFixedLengthVector(DAG, DL, InVT);

  if (ISD::isBuildVectorAllOnes(Mask.getNode()))
    return Pg;

  auto Op1 = convertToScalableVector(DAG, ContainerVT, Mask);
  auto Op2 = DAG.getConstant(0, DL, ContainerVT);

  return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, DL, Pg.getValueType(),
                     {Pg, Op1, Op2, DAG.getCondCode(ISD::SETNE)});
}
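
// In the general case above, the fixed-length i1 mask is materialized as
// vector data and compared against zero under the fixed-length predicate,
// conceptually (DAG-level sketch, names illustrative):
//
//   Scalable = insert_subvector(undef, Mask, 0)
//   Pred     = setcc_merge_zero(Pg, Scalable, splat(0), setne)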

// Convert all fixed length vector masked loads larger than NEON to SVE
// masked loads.
SDValue AArch64TargetLowering::LowerFixedLengthVectorMLoadToSVE(
    SDValue Op, SelectionDAG &DAG) const {
  auto Load = cast<MaskedLoadSDNode>(Op);

  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);

  SDValue Mask = convertFixedMaskToScalableVector(Load->getMask(), DAG);

  SDValue PassThru;
  bool IsPassThruZeroOrUndef = false;

  if (Load->getPassThru()->isUndef()) {
    PassThru = DAG.getUNDEF(ContainerVT);
    IsPassThruZeroOrUndef = true;
  } else {
    if (ContainerVT.isInteger())
      PassThru = DAG.getConstant(0, DL, ContainerVT);
    else
      PassThru = DAG.getConstantFP(0, DL, ContainerVT);
    if (isZerosVector(Load->getPassThru().getNode()))
      IsPassThruZeroOrUndef = true;
  }

  SDValue NewLoad = DAG.getMaskedLoad(
      ContainerVT, DL, Load->getChain(), Load->getBasePtr(), Load->getOffset(),
      Mask, PassThru, Load->getMemoryVT(), Load->getMemOperand(),
      Load->getAddressingMode(), Load->getExtensionType());

  SDValue Result = NewLoad;
  if (!IsPassThruZeroOrUndef) {
    SDValue OldPassThru =
        convertToScalableVector(DAG, ContainerVT, Load->getPassThru());
    Result = DAG.getSelect(DL, ContainerVT, Mask, Result, OldPassThru);
  }

  Result = convertFromScalableVector(DAG, VT, Result);
  SDValue MergedValues[2] = {Result, NewLoad.getValue(1)};
  return DAG.getMergeValues(MergedValues, DL);
}

// Convert all fixed length vector stores larger than NEON to masked_stores.
SDValue AArch64TargetLowering::LowerFixedLengthVectorStoreToSVE(
    SDValue Op, SelectionDAG &DAG) const {
  auto Store = cast<StoreSDNode>(Op);

  SDLoc DL(Op);
  EVT VT = Store->getValue().getValueType();
  EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
  EVT MemVT = Store->getMemoryVT();

  auto Pg = getPredicateForFixedLengthVector(DAG, DL, VT);
  auto NewValue = convertToScalableVector(DAG, ContainerVT, Store->getValue());

  if (VT.isFloatingPoint() && Store->isTruncatingStore()) {
    EVT TruncVT = ContainerVT.changeVectorElementType(
        Store->getMemoryVT().getVectorElementType());
    MemVT = MemVT.changeTypeToInteger();
    NewValue = DAG.getNode(AArch64ISD::FP_ROUND_MERGE_PASSTHRU, DL, TruncVT, Pg,
                           NewValue, DAG.getTargetConstant(0, DL, MVT::i64),
                           DAG.getUNDEF(TruncVT));
    NewValue =
        getSVESafeBitCast(ContainerVT.changeTypeToInteger(), NewValue, DAG);
  } else if (VT.isFloatingPoint()) {
    MemVT = MemVT.changeTypeToInteger();
    NewValue =
        getSVESafeBitCast(ContainerVT.changeTypeToInteger(), NewValue, DAG);
  }

  return DAG.getMaskedStore(Store->getChain(), DL, NewValue,
                            Store->getBasePtr(), Store->getOffset(), Pg, MemVT,
                            Store->getMemOperand(), Store->getAddressingMode(),
                            Store->isTruncatingStore());
}

SDValue AArch64TargetLowering::LowerFixedLengthVectorMStoreToSVE(
    SDValue Op, SelectionDAG &DAG) const {
  auto *Store = cast<MaskedStoreSDNode>(Op);

  SDLoc DL(Op);
  EVT VT = Store->getValue().getValueType();
  EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);

  auto NewValue = convertToScalableVector(DAG, ContainerVT, Store->getValue());
  SDValue Mask = convertFixedMaskToScalableVector(Store->getMask(), DAG);

  return DAG.getMaskedStore(
      Store->getChain(), DL, NewValue, Store->getBasePtr(), Store->getOffset(),
      Mask, Store->getMemoryVT(), Store->getMemOperand(),
      Store->getAddressingMode(), Store->isTruncatingStore());
}

SDValue AArch64TargetLowering::LowerFixedLengthVectorIntDivideToSVE(
    SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  EVT EltVT = VT.getVectorElementType();

  bool Signed = Op.getOpcode() == ISD::SDIV;
  unsigned PredOpcode = Signed ? AArch64ISD::SDIV_PRED : AArch64ISD::UDIV_PRED;

  bool Negated;
  uint64_t SplatVal;
  if (Signed && isPow2Splat(Op.getOperand(1), SplatVal, Negated)) {
    EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
    SDValue Op1 = convertToScalableVector(DAG, ContainerVT, Op.getOperand(0));
    SDValue Op2 = DAG.getTargetConstant(Log2_64(SplatVal), dl, MVT::i32);

    SDValue Pg = getPredicateForFixedLengthVector(DAG, dl, VT);
    SDValue Res =
        DAG.getNode(AArch64ISD::SRAD_MERGE_OP1, dl, ContainerVT, Pg, Op1, Op2);
    if (Negated)
      Res = DAG.getNode(ISD::SUB, dl, ContainerVT,
                        DAG.getConstant(0, dl, ContainerVT), Res);

    return convertFromScalableVector(DAG, VT, Res);
  }

  // Scalable vector i32/i64 DIV is supported.
  if (EltVT == MVT::i32 || EltVT == MVT::i64)
    return LowerToPredicatedOp(Op, DAG, PredOpcode);

  // Scalable vector i8/i16 DIV is not supported. Promote it to i32.
  EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
  EVT HalfVT = VT.getHalfNumVectorElementsVT(*DAG.getContext());
  EVT FixedWidenedVT = HalfVT.widenIntegerVectorElementType(*DAG.getContext());
  EVT ScalableWidenedVT = getContainerForFixedLengthVector(DAG, FixedWidenedVT);

  // If this is not a full vector, the widened type is still legal: just
  // extend, divide, and truncate.
  EVT WidenedVT = VT.widenIntegerVectorElementType(*DAG.getContext());
  if (DAG.getTargetLoweringInfo().isTypeLegal(WidenedVT)) {
    unsigned ExtendOpcode = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
    SDValue Op0 = DAG.getNode(ExtendOpcode, dl, WidenedVT, Op.getOperand(0));
    SDValue Op1 = DAG.getNode(ExtendOpcode, dl, WidenedVT, Op.getOperand(1));
    SDValue Div = DAG.getNode(Op.getOpcode(), dl, WidenedVT, Op0, Op1);
    return DAG.getNode(ISD::TRUNCATE, dl, VT, Div);
  }

  // Convert the operands to scalable vectors.
  SDValue Op0 = convertToScalableVector(DAG, ContainerVT, Op.getOperand(0));
  SDValue Op1 = convertToScalableVector(DAG, ContainerVT, Op.getOperand(1));

  // Extend the scalable operands.
  unsigned UnpkLo = Signed ? AArch64ISD::SUNPKLO : AArch64ISD::UUNPKLO;
  unsigned UnpkHi = Signed ? AArch64ISD::SUNPKHI : AArch64ISD::UUNPKHI;
  SDValue Op0Lo = DAG.getNode(UnpkLo, dl, ScalableWidenedVT, Op0);
  SDValue Op1Lo = DAG.getNode(UnpkLo, dl, ScalableWidenedVT, Op1);
  SDValue Op0Hi = DAG.getNode(UnpkHi, dl, ScalableWidenedVT, Op0);
  SDValue Op1Hi = DAG.getNode(UnpkHi, dl, ScalableWidenedVT, Op1);

  // Convert back to fixed vectors so the DIV can be further lowered.
  Op0Lo = convertFromScalableVector(DAG, FixedWidenedVT, Op0Lo);
  Op1Lo = convertFromScalableVector(DAG, FixedWidenedVT, Op1Lo);
  Op0Hi = convertFromScalableVector(DAG, FixedWidenedVT, Op0Hi);
  Op1Hi = convertFromScalableVector(DAG, FixedWidenedVT, Op1Hi);
  SDValue ResultLo = DAG.getNode(Op.getOpcode(), dl, FixedWidenedVT,
                                 Op0Lo, Op1Lo);
  SDValue ResultHi = DAG.getNode(Op.getOpcode(), dl, FixedWidenedVT,
                                 Op0Hi, Op1Hi);

  // Convert again to scalable vectors to truncate.
  ResultLo = convertToScalableVector(DAG, ScalableWidenedVT, ResultLo);
  ResultHi = convertToScalableVector(DAG, ScalableWidenedVT, ResultHi);
  SDValue ScalableResult = DAG.getNode(AArch64ISD::UZP1, dl, ContainerVT,
                                       ResultLo, ResultHi);

  return convertFromScalableVector(DAG, VT, ScalableResult);
}
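
// Worked example of the final path, assuming 128-bit vectors and a v16i8
// sdiv: each operand is sign-unpacked into low/high nxv8i16 halves, those
// halves are moved back to fixed v8i16 so the two v8i16 divides re-enter
// this lowering (eventually reaching the natively supported i32 element
// size), and UZP1 concatenates the narrowed halves back into the v16i8
// container.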

SDValue AArch64TargetLowering::LowerFixedLengthVectorIntExtendToSVE(
    SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");

  SDLoc DL(Op);
  SDValue Val = Op.getOperand(0);
  EVT ContainerVT = getContainerForFixedLengthVector(DAG, Val.getValueType());
  Val = convertToScalableVector(DAG, ContainerVT, Val);

  bool Signed = Op.getOpcode() == ISD::SIGN_EXTEND;
  unsigned ExtendOpc = Signed ? AArch64ISD::SUNPKLO : AArch64ISD::UUNPKLO;

  // Repeatedly unpack Val until the result is of the desired element type.
  switch (ContainerVT.getSimpleVT().SimpleTy) {
  default:
    llvm_unreachable("unimplemented container type");
  case MVT::nxv16i8:
    Val = DAG.getNode(ExtendOpc, DL, MVT::nxv8i16, Val);
    if (VT.getVectorElementType() == MVT::i16)
      break;
    [[fallthrough]];
  case MVT::nxv8i16:
    Val = DAG.getNode(ExtendOpc, DL, MVT::nxv4i32, Val);
    if (VT.getVectorElementType() == MVT::i32)
      break;
    [[fallthrough]];
  case MVT::nxv4i32:
    Val = DAG.getNode(ExtendOpc, DL, MVT::nxv2i64, Val);
    assert(VT.getVectorElementType() == MVT::i64 && "Unexpected element type!");
    break;
  }

  return convertFromScalableVector(DAG, VT, Val);
}

SDValue AArch64TargetLowering::LowerFixedLengthVectorTruncateToSVE(
    SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");

  SDLoc DL(Op);
  SDValue Val = Op.getOperand(0);
  EVT ContainerVT = getContainerForFixedLengthVector(DAG, Val.getValueType());
  Val = convertToScalableVector(DAG, ContainerVT, Val);

  // Repeatedly truncate Val until the result is of the desired element type.
  switch (ContainerVT.getSimpleVT().SimpleTy) {
  default:
    llvm_unreachable("unimplemented container type");
  case MVT::nxv2i64:
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::nxv4i32, Val);
    Val = DAG.getNode(AArch64ISD::UZP1, DL, MVT::nxv4i32, Val, Val);
    if (VT.getVectorElementType() == MVT::i32)
      break;
    [[fallthrough]];
  case MVT::nxv4i32:
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::nxv8i16, Val);
    Val = DAG.getNode(AArch64ISD::UZP1, DL, MVT::nxv8i16, Val, Val);
    if (VT.getVectorElementType() == MVT::i16)
      break;
    [[fallthrough]];
  case MVT::nxv8i16:
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::nxv16i8, Val);
    Val = DAG.getNode(AArch64ISD::UZP1, DL, MVT::nxv16i8, Val, Val);
    assert(VT.getVectorElementType() == MVT::i8 && "Unexpected element type!");
    break;
  }

  return convertFromScalableVector(DAG, VT, Val);
}
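
// E.g. truncating from an nxv2i64 container down to i8 elements walks all
// three steps of the switch: each step bitcasts to the next narrower
// integer container and uses UZP1 with Val as both operands to keep the
// low half of every element, halving the element size each time.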
22734 | |||||
22735 | SDValue AArch64TargetLowering::LowerFixedLengthExtractVectorElt( | ||||
22736 | SDValue Op, SelectionDAG &DAG) const { | ||||
22737 | EVT VT = Op.getValueType(); | ||||
22738 | EVT InVT = Op.getOperand(0).getValueType(); | ||||
22739 | assert(InVT.isFixedLengthVector() && "Expected fixed length vector type!")(static_cast <bool> (InVT.isFixedLengthVector() && "Expected fixed length vector type!") ? void (0) : __assert_fail ("InVT.isFixedLengthVector() && \"Expected fixed length vector type!\"" , "llvm/lib/Target/AArch64/AArch64ISelLowering.cpp", 22739, __extension__ __PRETTY_FUNCTION__)); | ||||
22740 | |||||
22741 | SDLoc DL(Op); | ||||
22742 | EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT); | ||||
22743 | SDValue Op0 = convertToScalableVector(DAG, ContainerVT, Op->getOperand(0)); | ||||
22744 | |||||
22745 | return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Op0, Op.getOperand(1)); | ||||
22746 | } | ||||
22747 | |||||
22748 | SDValue AArch64TargetLowering::LowerFixedLengthInsertVectorElt( | ||||
22749 | SDValue Op, SelectionDAG &DAG) const { | ||||
22750 | EVT VT = Op.getValueType(); | ||||
22751 | assert(VT.isFixedLengthVector() && "Expected fixed length vector type!")(static_cast <bool> (VT.isFixedLengthVector() && "Expected fixed length vector type!") ? void (0) : __assert_fail ("VT.isFixedLengthVector() && \"Expected fixed length vector type!\"" , "llvm/lib/Target/AArch64/AArch64ISelLowering.cpp", 22751, __extension__ __PRETTY_FUNCTION__)); | ||||
22752 | |||||
22753 | SDLoc DL(Op); | ||||
22754 | EVT InVT = Op.getOperand(0).getValueType(); | ||||
22755 | EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT); | ||||
22756 | SDValue Op0 = convertToScalableVector(DAG, ContainerVT, Op->getOperand(0)); | ||||
22757 | |||||
22758 | auto ScalableRes = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ContainerVT, Op0, | ||||
22759 | Op.getOperand(1), Op.getOperand(2)); | ||||
22760 | |||||
22761 | return convertFromScalableVector(DAG, VT, ScalableRes); | ||||
22762 | } | ||||
22763 | |||||
22764 | // Convert vector operation 'Op' to an equivalent predicated operation whereby | ||||
22765 | // the original operation's type is used to construct a suitable predicate. | ||||
22766 | // NOTE: The results for inactive lanes are undefined. | ||||
22767 | SDValue AArch64TargetLowering::LowerToPredicatedOp(SDValue Op, | ||||
22768 | SelectionDAG &DAG, | ||||
22769 | unsigned NewOp) const { | ||||
22770 | EVT VT = Op.getValueType(); | ||||
22771 | SDLoc DL(Op); | ||||
22772 | auto Pg = getPredicateForVector(DAG, DL, VT); | ||||
22773 | |||||
22774 | if (VT.isFixedLengthVector()) { | ||||
22775 | assert(isTypeLegal(VT) && "Expected only legal fixed-width types")(static_cast <bool> (isTypeLegal(VT) && "Expected only legal fixed-width types" ) ? void (0) : __assert_fail ("isTypeLegal(VT) && \"Expected only legal fixed-width types\"" , "llvm/lib/Target/AArch64/AArch64ISelLowering.cpp", 22775, __extension__ __PRETTY_FUNCTION__)); | ||||
22776 | EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT); | ||||
22777 | |||||
22778 | // Create list of operands by converting existing ones to scalable types. | ||||
22779 | SmallVector<SDValue, 4> Operands = {Pg}; | ||||
22780 | for (const SDValue &V : Op->op_values()) { | ||||
22781 | if (isa<CondCodeSDNode>(V)) { | ||||
22782 | Operands.push_back(V); | ||||
22783 | continue; | ||||
22784 | } | ||||
22785 | |||||
22786 | if (const VTSDNode *VTNode = dyn_cast<VTSDNode>(V)) { | ||||
22787 | EVT VTArg = VTNode->getVT().getVectorElementType(); | ||||
22788 | EVT NewVTArg = ContainerVT.changeVectorElementType(VTArg); | ||||
22789 | Operands.push_back(DAG.getValueType(NewVTArg)); | ||||
22790 | continue; | ||||
22791 | } | ||||
22792 | |||||
22793 | assert(isTypeLegal(V.getValueType()) &&(static_cast <bool> (isTypeLegal(V.getValueType()) && "Expected only legal fixed-width types") ? void (0) : __assert_fail ("isTypeLegal(V.getValueType()) && \"Expected only legal fixed-width types\"" , "llvm/lib/Target/AArch64/AArch64ISelLowering.cpp", 22794, __extension__ __PRETTY_FUNCTION__)) | ||||
22794 | "Expected only legal fixed-width types")(static_cast <bool> (isTypeLegal(V.getValueType()) && "Expected only legal fixed-width types") ? void (0) : __assert_fail ("isTypeLegal(V.getValueType()) && \"Expected only legal fixed-width types\"" , "llvm/lib/Target/AArch64/AArch64ISelLowering.cpp", 22794, __extension__ __PRETTY_FUNCTION__)); | ||||
22795 | Operands.push_back(convertToScalableVector(DAG, ContainerVT, V)); | ||||
22796 | } | ||||
22797 | |||||
22798 | if (isMergePassthruOpcode(NewOp)) | ||||
22799 | Operands.push_back(DAG.getUNDEF(ContainerVT)); | ||||
22800 | |||||
22801 | auto ScalableRes = DAG.getNode(NewOp, DL, ContainerVT, Operands); | ||||
22802 | return convertFromScalableVector(DAG, VT, ScalableRes); | ||||
22803 | } | ||||
22804 | |||||
22805 | assert(VT.isScalableVector() && "Only expect to lower scalable vector op!")(static_cast <bool> (VT.isScalableVector() && "Only expect to lower scalable vector op!" ) ? void (0) : __assert_fail ("VT.isScalableVector() && \"Only expect to lower scalable vector op!\"" , "llvm/lib/Target/AArch64/AArch64ISelLowering.cpp", 22805, __extension__ __PRETTY_FUNCTION__)); | ||||
22806 | |||||
22807 | SmallVector<SDValue, 4> Operands = {Pg}; | ||||
22808 | for (const SDValue &V : Op->op_values()) { | ||||
22809 | assert((!V.getValueType().isVector() ||(static_cast <bool> ((!V.getValueType().isVector() || V .getValueType().isScalableVector()) && "Only scalable vectors are supported!" ) ? void (0) : __assert_fail ("(!V.getValueType().isVector() || V.getValueType().isScalableVector()) && \"Only scalable vectors are supported!\"" , "llvm/lib/Target/AArch64/AArch64ISelLowering.cpp", 22811, __extension__ __PRETTY_FUNCTION__)) | ||||
22810 | V.getValueType().isScalableVector()) &&(static_cast <bool> ((!V.getValueType().isVector() || V .getValueType().isScalableVector()) && "Only scalable vectors are supported!" ) ? void (0) : __assert_fail ("(!V.getValueType().isVector() || V.getValueType().isScalableVector()) && \"Only scalable vectors are supported!\"" , "llvm/lib/Target/AArch64/AArch64ISelLowering.cpp", 22811, __extension__ __PRETTY_FUNCTION__)) | ||||
22811 | "Only scalable vectors are supported!")(static_cast <bool> ((!V.getValueType().isVector() || V .getValueType().isScalableVector()) && "Only scalable vectors are supported!" ) ? void (0) : __assert_fail ("(!V.getValueType().isVector() || V.getValueType().isScalableVector()) && \"Only scalable vectors are supported!\"" , "llvm/lib/Target/AArch64/AArch64ISelLowering.cpp", 22811, __extension__ __PRETTY_FUNCTION__)); | ||||
22812 | Operands.push_back(V); | ||||
22813 | } | ||||
22814 | |||||
22815 | if (isMergePassthruOpcode(NewOp)) | ||||
22816 | Operands.push_back(DAG.getUNDEF(VT)); | ||||
22817 | |||||
22818 | return DAG.getNode(NewOp, DL, VT, Operands, Op->getFlags()); | ||||
22819 | } | ||||
22820 | |||||
22821 | // If a fixed length vector operation has no side effects when applied to | ||||
22822 | // undefined elements, we can safely use scalable vectors to perform the same | ||||
22823 | // operation without needing to worry about predication. | ||||
SDValue AArch64TargetLowering::LowerToScalableOp(SDValue Op,
                                                 SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  assert(VT.isFixedLengthVector() && isTypeLegal(VT) &&
         "Only expected to lower fixed length vector operation!");
  EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);

  // Create list of operands by converting existing ones to scalable types.
  SmallVector<SDValue, 4> Ops;
  for (const SDValue &V : Op->op_values()) {
    assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");

    // Pass through non-vector operands.
    if (!V.getValueType().isVector()) {
      Ops.push_back(V);
      continue;
    }

    // "cast" fixed length vector to a scalable vector.
    assert(V.getValueType().isFixedLengthVector() &&
           isTypeLegal(V.getValueType()) &&
           "Only fixed length vectors are supported!");
    Ops.push_back(convertToScalableVector(DAG, ContainerVT, V));
  }

  auto ScalableRes = DAG.getNode(Op.getOpcode(), SDLoc(Op), ContainerVT, Ops);
  return convertFromScalableVector(DAG, VT, ScalableRes);
}

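// Lower VECREDUCE_SEQ_FADD, a strictly-ordered floating-point add reduction
// with an explicit start value, to the SVE FADDA instruction: the scalar
// accumulator is inserted into lane 0 of an otherwise undef vector, FADDA
// folds the vector elements into it in lane order, and the result is read
// back out of lane 0.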
SDValue AArch64TargetLowering::LowerVECREDUCE_SEQ_FADD(SDValue ScalarOp,
                                                       SelectionDAG &DAG) const {
  SDLoc DL(ScalarOp);
  SDValue AccOp = ScalarOp.getOperand(0);
  SDValue VecOp = ScalarOp.getOperand(1);
  EVT SrcVT = VecOp.getValueType();
  EVT ResVT = SrcVT.getVectorElementType();

  EVT ContainerVT = SrcVT;
  if (SrcVT.isFixedLengthVector()) {
    ContainerVT = getContainerForFixedLengthVector(DAG, SrcVT);
    VecOp = convertToScalableVector(DAG, ContainerVT, VecOp);
  }

  SDValue Pg = getPredicateForVector(DAG, DL, SrcVT);
  SDValue Zero = DAG.getConstant(0, DL, MVT::i64);

  // Convert operands to Scalable.
  AccOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ContainerVT,
                      DAG.getUNDEF(ContainerVT), AccOp, Zero);

  // Perform reduction.
  SDValue Rdx = DAG.getNode(AArch64ISD::FADDA_PRED, DL, ContainerVT,
                            Pg, AccOp, VecOp);

  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Rdx, Zero);
}

SDValue AArch64TargetLowering::LowerPredReductionToSVE(SDValue ReduceOp,
                                                       SelectionDAG &DAG) const {
  SDLoc DL(ReduceOp);
  SDValue Op = ReduceOp.getOperand(0);
  EVT OpVT = Op.getValueType();
  EVT VT = ReduceOp.getValueType();

  if (!OpVT.isScalableVector() || OpVT.getVectorElementType() != MVT::i1)
    return SDValue();

  SDValue Pg = getPredicateForVector(DAG, DL, OpVT);

  switch (ReduceOp.getOpcode()) {
  default:
    return SDValue();
  case ISD::VECREDUCE_OR:
    if (isAllActivePredicate(DAG, Pg) && OpVT == MVT::nxv16i1)
      // The predicate can be 'Op' because
      // vecreduce_or(Op & <all true>) <=> vecreduce_or(Op).
      return getPTest(DAG, VT, Op, Op, AArch64CC::ANY_ACTIVE);
    else
      return getPTest(DAG, VT, Pg, Op, AArch64CC::ANY_ACTIVE);
  case ISD::VECREDUCE_AND: {
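    // AND-reduce is true only when every lane of Op is set, i.e. when no
    // lane of (Op ^ Pg) is active under the governing predicate, so this can
    // be lowered as a NONE_ACTIVE ptest of the inverted operand.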
    Op = DAG.getNode(ISD::XOR, DL, OpVT, Op, Pg);
    return getPTest(DAG, VT, Pg, Op, AArch64CC::NONE_ACTIVE);
  }
  case ISD::VECREDUCE_XOR: {
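    // XOR-reduce is the parity of the number of active lanes, so count them
    // with CNTP and keep only the low bit of the count via the truncation
    // below.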
    SDValue ID =
        DAG.getTargetConstant(Intrinsic::aarch64_sve_cntp, DL, MVT::i64);
    if (OpVT == MVT::nxv1i1) {
      // Emulate a CNTP on .Q using .D and a different governing predicate.
      Pg = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, MVT::nxv2i1, Pg);
      Op = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, MVT::nxv2i1, Op);
    }
    SDValue Cntp =
        DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::i64, ID, Pg, Op);
    return DAG.getAnyExtOrTrunc(Cntp, DL, VT);
  }
  }

  return SDValue();
}

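// Lower an integer or floating-point VECREDUCE_* node to a predicated SVE
// reduction. The predicated reduction leaves its answer in element 0 of the
// result vector, so the scalar is obtained by extracting lane 0 and then
// applying whatever extension or truncation the original node's result type
// requires.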
SDValue AArch64TargetLowering::LowerReductionToSVE(unsigned Opcode,
                                                   SDValue ScalarOp,
                                                   SelectionDAG &DAG) const {
  SDLoc DL(ScalarOp);
  SDValue VecOp = ScalarOp.getOperand(0);
  EVT SrcVT = VecOp.getValueType();

  if (useSVEForFixedLengthVectorVT(
          SrcVT,
          /*OverrideNEON=*/Subtarget->useSVEForFixedLengthVectors())) {
    EVT ContainerVT = getContainerForFixedLengthVector(DAG, SrcVT);
    VecOp = convertToScalableVector(DAG, ContainerVT, VecOp);
  }

  // UADDV always returns an i64 result.
  EVT ResVT = (Opcode == AArch64ISD::UADDV_PRED) ? MVT::i64 :
                                                   SrcVT.getVectorElementType();
  EVT RdxVT = SrcVT;
  if (SrcVT.isFixedLengthVector() || Opcode == AArch64ISD::UADDV_PRED)
    RdxVT = getPackedSVEVectorVT(ResVT);

  SDValue Pg = getPredicateForVector(DAG, DL, SrcVT);
  SDValue Rdx = DAG.getNode(Opcode, DL, RdxVT, Pg, VecOp);
  SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT,
                            Rdx, DAG.getConstant(0, DL, MVT::i64));

  // The VEC_REDUCE nodes expect an element-sized result.
  if (ResVT != ScalarOp.getValueType())
    Res = DAG.getAnyExtOrTrunc(Res, DL, ScalarOp.getValueType());

  return Res;
}

SDValue
AArch64TargetLowering::LowerFixedLengthVectorSelectToSVE(SDValue Op,
                                                         SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);

  EVT InVT = Op.getOperand(1).getValueType();
  EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT);
  SDValue Op1 = convertToScalableVector(DAG, ContainerVT, Op->getOperand(1));
  SDValue Op2 = convertToScalableVector(DAG, ContainerVT, Op->getOperand(2));

  // Convert the mask to a predicate (NOTE: We don't need to worry about
  // inactive lanes since VSELECT is safe when given undefined elements).
  EVT MaskVT = Op.getOperand(0).getValueType();
  EVT MaskContainerVT = getContainerForFixedLengthVector(DAG, MaskVT);
  auto Mask = convertToScalableVector(DAG, MaskContainerVT, Op.getOperand(0));
  Mask = DAG.getNode(ISD::TRUNCATE, DL,
                     MaskContainerVT.changeVectorElementType(MVT::i1), Mask);

  auto ScalableRes = DAG.getNode(ISD::VSELECT, DL, ContainerVT,
                                 Mask, Op1, Op2);

  return convertFromScalableVector(DAG, VT, ScalableRes);
}

SDValue AArch64TargetLowering::LowerFixedLengthVectorSetccToSVE(
    SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT InVT = Op.getOperand(0).getValueType();
  EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT);

  assert(InVT.isFixedLengthVector() && isTypeLegal(InVT) &&
         "Only expected to lower fixed length vector operation!");
  assert(Op.getValueType() == InVT.changeTypeToInteger() &&
         "Expected integer result of the same bit length as the inputs!");

  auto Op1 = convertToScalableVector(DAG, ContainerVT, Op.getOperand(0));
  auto Op2 = convertToScalableVector(DAG, ContainerVT, Op.getOperand(1));
  auto Pg = getPredicateForFixedLengthVector(DAG, DL, InVT);

  EVT CmpVT = Pg.getValueType();
  auto Cmp = DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, DL, CmpVT,
                         {Pg, Op1, Op2, Op.getOperand(2)});

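  // The compare yields an SVE predicate; promote it to an integer vector of
  // the container type so the result matches the fixed-length integer type
  // the SETCC is expected to produce.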
  EVT PromoteVT = ContainerVT.changeTypeToInteger();
  auto Promote = DAG.getBoolExtOrTrunc(Cmp, DL, PromoteVT, InVT);
  return convertFromScalableVector(DAG, Op.getValueType(), Promote);
}

SDValue
AArch64TargetLowering::LowerFixedLengthBitcastToSVE(SDValue Op,
                                                    SelectionDAG &DAG) const {
  SDLoc DL(Op);
  auto SrcOp = Op.getOperand(0);
  EVT VT = Op.getValueType();
  EVT ContainerDstVT = getContainerForFixedLengthVector(DAG, VT);
  EVT ContainerSrcVT =
      getContainerForFixedLengthVector(DAG, SrcOp.getValueType());

  SrcOp = convertToScalableVector(DAG, ContainerSrcVT, SrcOp);
  Op = DAG.getNode(ISD::BITCAST, DL, ContainerDstVT, SrcOp);
  return convertFromScalableVector(DAG, VT, Op);
}

SDValue AArch64TargetLowering::LowerFixedLengthConcatVectorsToSVE(
    SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  unsigned NumOperands = Op->getNumOperands();

  assert(NumOperands > 1 && isPowerOf2_32(NumOperands) &&
         "Unexpected number of operands in CONCAT_VECTORS");

  auto SrcOp1 = Op.getOperand(0);
  auto SrcOp2 = Op.getOperand(1);
  EVT VT = Op.getValueType();
  EVT SrcVT = SrcOp1.getValueType();

  if (NumOperands > 2) {
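    // Pairwise concatenation halves the operand count; the resulting
    // CONCAT_VECTORS is lowered again until the two-operand case below is
    // reached.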
    SmallVector<SDValue, 4> Ops;
    EVT PairVT = SrcVT.getDoubleNumVectorElementsVT(*DAG.getContext());
    for (unsigned I = 0; I < NumOperands; I += 2)
      Ops.push_back(DAG.getNode(ISD::CONCAT_VECTORS, DL, PairVT,
                                Op->getOperand(I), Op->getOperand(I + 1)));

    return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Ops);
  }

  EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);

  SDValue Pg = getPredicateForFixedLengthVector(DAG, DL, SrcVT);
  SrcOp1 = convertToScalableVector(DAG, ContainerVT, SrcOp1);
  SrcOp2 = convertToScalableVector(DAG, ContainerVT, SrcOp2);

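  // SPLICE takes the lanes of SrcOp1 that are active in Pg (here the low
  // SrcVT-many lanes) and appends the leading lanes of SrcOp2 after them,
  // which is exactly a fixed-length concatenation within the container.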
  Op = DAG.getNode(AArch64ISD::SPLICE, DL, ContainerVT, Pg, SrcOp1, SrcOp2);

  return convertFromScalableVector(DAG, VT, Op);
}

SDValue
AArch64TargetLowering::LowerFixedLengthFPExtendToSVE(SDValue Op,
                                                     SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");

  SDLoc DL(Op);
  SDValue Val = Op.getOperand(0);
  SDValue Pg = getPredicateForVector(DAG, DL, VT);
  EVT SrcVT = Val.getValueType();
  EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
  EVT ExtendVT = ContainerVT.changeVectorElementType(
      SrcVT.getVectorElementType());

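  // Widen the integer view of the data first so that each source element
  // already sits in the low half of its destination-sized lane; the
  // predicated FP extension below then operates in place on the unpacked
  // data.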
  Val = DAG.getNode(ISD::BITCAST, DL, SrcVT.changeTypeToInteger(), Val);
  Val = DAG.getNode(ISD::ANY_EXTEND, DL, VT.changeTypeToInteger(), Val);

  Val = convertToScalableVector(DAG, ContainerVT.changeTypeToInteger(), Val);
  Val = getSVESafeBitCast(ExtendVT, Val, DAG);
  Val = DAG.getNode(AArch64ISD::FP_EXTEND_MERGE_PASSTHRU, DL, ContainerVT,
                    Pg, Val, DAG.getUNDEF(ContainerVT));

  return convertFromScalableVector(DAG, VT, Val);
}

SDValue
AArch64TargetLowering::LowerFixedLengthFPRoundToSVE(SDValue Op,
                                                    SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");

  SDLoc DL(Op);
  SDValue Val = Op.getOperand(0);
  EVT SrcVT = Val.getValueType();
  EVT ContainerSrcVT = getContainerForFixedLengthVector(DAG, SrcVT);
  EVT RoundVT = ContainerSrcVT.changeVectorElementType(
      VT.getVectorElementType());
  SDValue Pg = getPredicateForVector(DAG, DL, RoundVT);

  Val = convertToScalableVector(DAG, ContainerSrcVT, Val);
  Val = DAG.getNode(AArch64ISD::FP_ROUND_MERGE_PASSTHRU, DL, RoundVT, Pg, Val,
                    Op.getOperand(1), DAG.getUNDEF(RoundVT));
  Val = getSVESafeBitCast(ContainerSrcVT.changeTypeToInteger(), Val, DAG);
  Val = convertFromScalableVector(DAG, SrcVT.changeTypeToInteger(), Val);

  Val = DAG.getNode(ISD::TRUNCATE, DL, VT.changeTypeToInteger(), Val);
  return DAG.getNode(ISD::BITCAST, DL, VT, Val);
}

SDValue
AArch64TargetLowering::LowerFixedLengthIntToFPToSVE(SDValue Op,
                                                    SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");

  bool IsSigned = Op.getOpcode() == ISD::SINT_TO_FP;
  unsigned Opcode = IsSigned ? AArch64ISD::SINT_TO_FP_MERGE_PASSTHRU
                             : AArch64ISD::UINT_TO_FP_MERGE_PASSTHRU;

  SDLoc DL(Op);
  SDValue Val = Op.getOperand(0);
  EVT SrcVT = Val.getValueType();
  EVT ContainerDstVT = getContainerForFixedLengthVector(DAG, VT);
  EVT ContainerSrcVT = getContainerForFixedLengthVector(DAG, SrcVT);

  if (VT.bitsGE(SrcVT)) {
    SDValue Pg = getPredicateForFixedLengthVector(DAG, DL, VT);

    Val = DAG.getNode(IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, DL,
                      VT.changeTypeToInteger(), Val);

    Val = convertToScalableVector(DAG, ContainerSrcVT, Val);
    Val = getSVESafeBitCast(ContainerDstVT.changeTypeToInteger(), Val, DAG);
    // Safe to use a larger than specified operand since we just unpacked the
    // data, hence the upper bits are zero.
    Val = DAG.getNode(Opcode, DL, ContainerDstVT, Pg, Val,
                      DAG.getUNDEF(ContainerDstVT));
    return convertFromScalableVector(DAG, VT, Val);
  } else {
    EVT CvtVT = ContainerSrcVT.changeVectorElementType(
        ContainerDstVT.getVectorElementType());
    SDValue Pg = getPredicateForFixedLengthVector(DAG, DL, SrcVT);

    Val = convertToScalableVector(DAG, ContainerSrcVT, Val);
    Val = DAG.getNode(Opcode, DL, CvtVT, Pg, Val, DAG.getUNDEF(CvtVT));
    Val = getSVESafeBitCast(ContainerSrcVT, Val, DAG);
    Val = convertFromScalableVector(DAG, SrcVT, Val);

    Val = DAG.getNode(ISD::TRUNCATE, DL, VT.changeTypeToInteger(), Val);
    return DAG.getNode(ISD::BITCAST, DL, VT, Val);
  }
}

SDValue
AArch64TargetLowering::LowerFixedLengthFPToIntToSVE(SDValue Op,
                                                    SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");

  bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT;
  unsigned Opcode = IsSigned ? AArch64ISD::FCVTZS_MERGE_PASSTHRU
                             : AArch64ISD::FCVTZU_MERGE_PASSTHRU;

  SDLoc DL(Op);
  SDValue Val = Op.getOperand(0);
  EVT SrcVT = Val.getValueType();
  EVT ContainerDstVT = getContainerForFixedLengthVector(DAG, VT);
  EVT ContainerSrcVT = getContainerForFixedLengthVector(DAG, SrcVT);

  if (VT.bitsGT(SrcVT)) {
    EVT CvtVT = ContainerDstVT.changeVectorElementType(
        ContainerSrcVT.getVectorElementType());
    SDValue Pg = getPredicateForFixedLengthVector(DAG, DL, VT);

    Val = DAG.getNode(ISD::BITCAST, DL, SrcVT.changeTypeToInteger(), Val);
    Val = DAG.getNode(ISD::ANY_EXTEND, DL, VT, Val);

    Val = convertToScalableVector(DAG, ContainerSrcVT, Val);
    Val = getSVESafeBitCast(CvtVT, Val, DAG);
    Val = DAG.getNode(Opcode, DL, ContainerDstVT, Pg, Val,
                      DAG.getUNDEF(ContainerDstVT));
    return convertFromScalableVector(DAG, VT, Val);
  } else {
    EVT CvtVT = ContainerSrcVT.changeTypeToInteger();
    SDValue Pg = getPredicateForFixedLengthVector(DAG, DL, SrcVT);

    // Safe to use a larger than specified result since an fp_to_int where the
    // result doesn't fit into the destination is undefined.
    Val = convertToScalableVector(DAG, ContainerSrcVT, Val);
    Val = DAG.getNode(Opcode, DL, CvtVT, Pg, Val, DAG.getUNDEF(CvtVT));
    Val = convertFromScalableVector(DAG, SrcVT.changeTypeToInteger(), Val);

    return DAG.getNode(ISD::TRUNCATE, DL, VT, Val);
  }
}

SDValue AArch64TargetLowering::LowerFixedLengthVECTOR_SHUFFLEToSVE(
    SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");

  auto *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
  auto ShuffleMask = SVN->getMask();

  SDLoc DL(Op);
  SDValue Op1 = Op.getOperand(0);
  SDValue Op2 = Op.getOperand(1);

  EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
  Op1 = convertToScalableVector(DAG, ContainerVT, Op1);
  Op2 = convertToScalableVector(DAG, ContainerVT, Op2);

  auto MinLegalExtractEltScalarTy = [](EVT ScalarTy) -> EVT {
    if (ScalarTy == MVT::i8 || ScalarTy == MVT::i16)
      return MVT::i32;
    return ScalarTy;
  };

  if (SVN->isSplat()) {
    unsigned Lane = std::max(0, SVN->getSplatIndex());
    EVT ScalarTy = MinLegalExtractEltScalarTy(VT.getVectorElementType());
    SDValue SplatEl = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ScalarTy, Op1,
                                  DAG.getConstant(Lane, DL, MVT::i64));
    Op = DAG.getNode(ISD::SPLAT_VECTOR, DL, ContainerVT, SplatEl);
    return convertFromScalableVector(DAG, VT, Op);
  }

  bool ReverseEXT = false;
  unsigned Imm;
  if (isEXTMask(ShuffleMask, VT, ReverseEXT, Imm) &&
      Imm == VT.getVectorNumElements() - 1) {
    if (ReverseEXT)
      std::swap(Op1, Op2);
    EVT ScalarTy = MinLegalExtractEltScalarTy(VT.getVectorElementType());
    SDValue Scalar = DAG.getNode(
        ISD::EXTRACT_VECTOR_ELT, DL, ScalarTy, Op1,
        DAG.getConstant(VT.getVectorNumElements() - 1, DL, MVT::i64));
    Op = DAG.getNode(AArch64ISD::INSR, DL, ContainerVT, Op2, Scalar);
    return convertFromScalableVector(DAG, VT, Op);
  }

  for (unsigned LaneSize : {64U, 32U, 16U}) {
    if (isREVMask(ShuffleMask, VT, LaneSize)) {
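      // Reversing EltSz-bit elements within each LaneSize-bit lane is the
      // same as a predicated byte/halfword/word reverse (REVB/REVH/REVW) on
      // a vector reinterpreted as having LaneSize-bit elements.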
      EVT NewVT =
          getPackedSVEVectorVT(EVT::getIntegerVT(*DAG.getContext(), LaneSize));
      unsigned RevOp;
      unsigned EltSz = VT.getScalarSizeInBits();
      if (EltSz == 8)
        RevOp = AArch64ISD::BSWAP_MERGE_PASSTHRU;
      else if (EltSz == 16)
        RevOp = AArch64ISD::REVH_MERGE_PASSTHRU;
      else
        RevOp = AArch64ISD::REVW_MERGE_PASSTHRU;

      Op = DAG.getNode(ISD::BITCAST, DL, NewVT, Op1);
      Op = LowerToPredicatedOp(Op, DAG, RevOp);
      Op = DAG.getNode(ISD::BITCAST, DL, ContainerVT, Op);
      return convertFromScalableVector(DAG, VT, Op);
    }
  }

  if (Subtarget->hasSVE2p1() && VT.getScalarSizeInBits() == 64 &&
      isREVMask(ShuffleMask, VT, 128)) {
    if (!VT.isFloatingPoint())
      return LowerToPredicatedOp(Op, DAG, AArch64ISD::REVD_MERGE_PASSTHRU);

    EVT NewVT = getPackedSVEVectorVT(EVT::getIntegerVT(*DAG.getContext(), 64));
    Op = DAG.getNode(ISD::BITCAST, DL, NewVT, Op1);
    Op = LowerToPredicatedOp(Op, DAG, AArch64ISD::REVD_MERGE_PASSTHRU);
    Op = DAG.getNode(ISD::BITCAST, DL, ContainerVT, Op);
    return convertFromScalableVector(DAG, VT, Op);
  }

  unsigned WhichResult;
  if (isZIPMask(ShuffleMask, VT, WhichResult) && WhichResult == 0)
    return convertFromScalableVector(
        DAG, VT, DAG.getNode(AArch64ISD::ZIP1, DL, ContainerVT, Op1, Op2));

  if (isTRNMask(ShuffleMask, VT, WhichResult)) {
    unsigned Opc = (WhichResult == 0) ? AArch64ISD::TRN1 : AArch64ISD::TRN2;
    return convertFromScalableVector(
        DAG, VT, DAG.getNode(Opc, DL, ContainerVT, Op1, Op2));
  }

  if (isZIP_v_undef_Mask(ShuffleMask, VT, WhichResult) && WhichResult == 0)
    return convertFromScalableVector(
        DAG, VT, DAG.getNode(AArch64ISD::ZIP1, DL, ContainerVT, Op1, Op1));

  if (isTRN_v_undef_Mask(ShuffleMask, VT, WhichResult)) {
    unsigned Opc = (WhichResult == 0) ? AArch64ISD::TRN1 : AArch64ISD::TRN2;
    return convertFromScalableVector(
        DAG, VT, DAG.getNode(Opc, DL, ContainerVT, Op1, Op1));
  }

  // Functions like isZIPMask return true when an ISD::VECTOR_SHUFFLE's mask
  // represents the same logical operation as performed by a ZIP instruction.
  // In isolation these functions do not mean the ISD::VECTOR_SHUFFLE is
  // exactly equivalent to an AArch64 instruction. There's the extra component
  // of ISD::VECTOR_SHUFFLE's value type to consider. Prior to SVE these
  // functions only operated on 64/128-bit vector types that have a direct
  // mapping to a target register and so an exact mapping is implied.
  // However, when using SVE for fixed length vectors, most legal vector types
  // are actually sub-vectors of a larger SVE register. When mapping
  // ISD::VECTOR_SHUFFLE to an SVE instruction care must be taken to consider
  // how the mask's indices translate. Specifically, when the mapping requires
  // an exact meaning for a specific vector index (e.g. Index X is the last
  // vector element in the register) then such mappings are often only safe
  // when the exact SVE register size is known. The main exception to this is
  // when indices are logically relative to the first element of either
  // ISD::VECTOR_SHUFFLE operand because these relative indices don't change
  // when converting from fixed-length to scalable vector types (i.e. the
  // start of a fixed length vector is always the start of a scalable vector).
  unsigned MinSVESize = Subtarget->getMinSVEVectorSizeInBits();
  unsigned MaxSVESize = Subtarget->getMaxSVEVectorSizeInBits();
  if (MinSVESize == MaxSVESize && MaxSVESize == VT.getSizeInBits()) {
    if (ShuffleVectorInst::isReverseMask(ShuffleMask) && Op2.isUndef()) {
      Op = DAG.getNode(ISD::VECTOR_REVERSE, DL, ContainerVT, Op1);
      return convertFromScalableVector(DAG, VT, Op);
    }

    if (isZIPMask(ShuffleMask, VT, WhichResult) && WhichResult != 0)
      return convertFromScalableVector(
          DAG, VT, DAG.getNode(AArch64ISD::ZIP2, DL, ContainerVT, Op1, Op2));

    if (isUZPMask(ShuffleMask, VT, WhichResult)) {
      unsigned Opc = (WhichResult == 0) ? AArch64ISD::UZP1 : AArch64ISD::UZP2;
      return convertFromScalableVector(
          DAG, VT, DAG.getNode(Opc, DL, ContainerVT, Op1, Op2));
    }

    if (isZIP_v_undef_Mask(ShuffleMask, VT, WhichResult) && WhichResult != 0)
      return convertFromScalableVector(
          DAG, VT, DAG.getNode(AArch64ISD::ZIP2, DL, ContainerVT, Op1, Op1));

    if (isUZP_v_undef_Mask(ShuffleMask, VT, WhichResult)) {
      unsigned Opc = (WhichResult == 0) ? AArch64ISD::UZP1 : AArch64ISD::UZP2;
      return convertFromScalableVector(
          DAG, VT, DAG.getNode(Opc, DL, ContainerVT, Op1, Op1));
    }
  }

  return SDValue();
}

SDValue AArch64TargetLowering::getSVESafeBitCast(EVT VT, SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT InVT = Op.getValueType();

  assert(VT.isScalableVector() && isTypeLegal(VT) &&
         InVT.isScalableVector() && isTypeLegal(InVT) &&
         "Only expect to cast between legal scalable vector types!");
  assert(VT.getVectorElementType() != MVT::i1 &&
         InVT.getVectorElementType() != MVT::i1 &&
         "For predicate bitcasts, use getSVEPredicateBitCast");

  if (InVT == VT)
    return Op;

  EVT PackedVT = getPackedSVEVectorVT(VT.getVectorElementType());
  EVT PackedInVT = getPackedSVEVectorVT(InVT.getVectorElementType());

  // Safe bitcasting between unpacked vector types of different element counts
  // is currently unsupported because the following is missing the necessary
  // work to ensure the result's elements live where they're supposed to within
  // an SVE register.
  //                01234567
  // e.g. nxv2i32 = XX??XX??
  //      nxv4f16 = X?X?X?X?
  assert((VT.getVectorElementCount() == InVT.getVectorElementCount() ||
          VT == PackedVT || InVT == PackedInVT) &&
         "Unexpected bitcast!");

  // Pack input if required.
  if (InVT != PackedInVT)
    Op = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, PackedInVT, Op);

  Op = DAG.getNode(ISD::BITCAST, DL, PackedVT, Op);

  // Unpack result if required.
  if (VT != PackedVT)
    Op = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, VT, Op);

  return Op;
}

bool AArch64TargetLowering::isAllActivePredicate(SelectionDAG &DAG,
                                                 SDValue N) const {
  return ::isAllActivePredicate(DAG, N);
}

EVT AArch64TargetLowering::getPromotedVTForPredicate(EVT VT) const {
  return ::getPromotedVTForPredicate(VT);
}

bool AArch64TargetLowering::SimplifyDemandedBitsForTargetNode(
    SDValue Op, const APInt &OriginalDemandedBits,
    const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
    unsigned Depth) const {

  unsigned Opc = Op.getOpcode();
  switch (Opc) {
  case AArch64ISD::VSHL: {
    // Match (VSHL (VLSHR Val X) X)
    SDValue ShiftL = Op;
    SDValue ShiftR = Op->getOperand(0);
    if (ShiftR->getOpcode() != AArch64ISD::VLSHR)
      return false;

    if (!ShiftL.hasOneUse() || !ShiftR.hasOneUse())
      return false;

    unsigned ShiftLBits = ShiftL->getConstantOperandVal(1);
    unsigned ShiftRBits = ShiftR->getConstantOperandVal(1);

    // Other cases can be handled as well, but this is not
    // implemented.
    if (ShiftRBits != ShiftLBits)
      return false;

    unsigned ScalarSize = Op.getScalarValueSizeInBits();
    assert(ScalarSize > ShiftLBits && "Invalid shift imm");

    APInt ZeroBits = APInt::getLowBitsSet(ScalarSize, ShiftLBits);
    APInt UnusedBits = ~OriginalDemandedBits;

    if ((ZeroBits & UnusedBits) != ZeroBits)
      return false;

    // All bits that are zeroed by (VSHL (VLSHR Val X) X) are not
    // used - simplify to just Val.
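    // e.g. (illustrative) with 32-bit elements and a shift amount of 8, the
    // pair zeroes bits [7:0] of each element; if the caller never demands
    // those bits, Val itself already supplies every demanded bit.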
    return TLO.CombineTo(Op, ShiftR->getOperand(0));
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    if (auto ElementSize = IsSVECntIntrinsic(Op)) {
      unsigned MaxSVEVectorSizeInBits = Subtarget->getMaxSVEVectorSizeInBits();
      if (!MaxSVEVectorSizeInBits)
        MaxSVEVectorSizeInBits = AArch64::SVEMaxBitsPerVector;
      unsigned MaxElements = MaxSVEVectorSizeInBits / *ElementSize;
      // The SVE count intrinsics don't support the multiplier immediate so we
      // don't have to account for that here. The value returned may be
      // slightly over the true required bits, as this is based on the "ALL"
      // pattern. The other patterns are also exposed by these intrinsics, but
      // they all return a value that's strictly less than "ALL".
      unsigned RequiredBits = Log2_32(MaxElements) + 1;
      unsigned BitWidth = Known.Zero.getBitWidth();
      if (RequiredBits < BitWidth)
        Known.Zero.setHighBits(BitWidth - RequiredBits);
      return false;
    }
  }
  }

  return TargetLowering::SimplifyDemandedBitsForTargetNode(
      Op, OriginalDemandedBits, OriginalDemandedElts, Known, TLO, Depth);
}

bool AArch64TargetLowering::isTargetCanonicalConstantNode(SDValue Op) const {
  return Op.getOpcode() == AArch64ISD::DUP ||
         Op.getOpcode() == AArch64ISD::MOVI ||
         (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
          Op.getOperand(0).getOpcode() == AArch64ISD::DUP) ||
         TargetLowering::isTargetCanonicalConstantNode(Op);
}

bool AArch64TargetLowering::isConstantUnsignedBitfieldExtractLegal(
    unsigned Opc, LLT Ty1, LLT Ty2) const {
  return Ty1 == Ty2 && (Ty1 == LLT::scalar(32) || Ty1 == LLT::scalar(64));
}

bool AArch64TargetLowering::isComplexDeinterleavingSupported() const {
  return Subtarget->hasComplxNum();
}

bool AArch64TargetLowering::isComplexDeinterleavingOperationSupported(
    ComplexDeinterleavingOperation Operation, Type *Ty) const {
  auto *VTy = dyn_cast<FixedVectorType>(Ty);
  if (!VTy)
    return false;

  auto *ScalarTy = VTy->getScalarType();
  unsigned NumElements = VTy->getNumElements();

  unsigned VTyWidth = VTy->getScalarSizeInBits() * NumElements;
  if ((VTyWidth < 128 && VTyWidth != 64) || !llvm::isPowerOf2_32(VTyWidth))
    return false;

  return (ScalarTy->isHalfTy() && Subtarget->hasFullFP16()) ||
         ScalarTy->isFloatTy() || ScalarTy->isDoubleTy();
}

Value *AArch64TargetLowering::createComplexDeinterleavingIR(
    Instruction *I, ComplexDeinterleavingOperation OperationType,
    ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB,
    Value *Accumulator) const {
  FixedVectorType *Ty = cast<FixedVectorType>(InputA->getType());

  IRBuilder<> B(I);

  unsigned TyWidth = Ty->getScalarSizeInBits() * Ty->getNumElements();

  assert(((TyWidth >= 128 && llvm::isPowerOf2_32(TyWidth)) || TyWidth == 64) &&
         "Vector type must be either 64 or a power of 2 that is at least 128");

  if (TyWidth > 128) {
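    // The NEON complex intrinsics operate on at most 128-bit vectors, so
    // split wider inputs in half, recurse on each half, and stitch the two
    // results back together with a shuffle.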
    int Stride = Ty->getNumElements() / 2;
    auto SplitSeq = llvm::seq<int>(0, Ty->getNumElements());
    auto SplitSeqVec = llvm::to_vector(SplitSeq);
    ArrayRef<int> LowerSplitMask(&SplitSeqVec[0], Stride);
    ArrayRef<int> UpperSplitMask(&SplitSeqVec[Stride], Stride);

    auto *LowerSplitA = B.CreateShuffleVector(InputA, LowerSplitMask);
    auto *LowerSplitB = B.CreateShuffleVector(InputB, LowerSplitMask);
    auto *UpperSplitA = B.CreateShuffleVector(InputA, UpperSplitMask);
    auto *UpperSplitB = B.CreateShuffleVector(InputB, UpperSplitMask);
    Value *LowerSplitAcc = nullptr;
    Value *UpperSplitAcc = nullptr;

    if (Accumulator) {
      LowerSplitAcc = B.CreateShuffleVector(Accumulator, LowerSplitMask);
      UpperSplitAcc = B.CreateShuffleVector(Accumulator, UpperSplitMask);
    }

    auto *LowerSplitInt = createComplexDeinterleavingIR(
        I, OperationType, Rotation, LowerSplitA, LowerSplitB, LowerSplitAcc);
    auto *UpperSplitInt = createComplexDeinterleavingIR(
        I, OperationType, Rotation, UpperSplitA, UpperSplitB, UpperSplitAcc);

    ArrayRef<int> JoinMask(&SplitSeqVec[0], Ty->getNumElements());
    return B.CreateShuffleVector(LowerSplitInt, UpperSplitInt, JoinMask);
  }

  if (OperationType == ComplexDeinterleavingOperation::CMulPartial) {
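    // ComplexDeinterleavingRotation enumerates the 0/90/180/270 degree
    // rotations in order, so the rotation value indexes straight into IdMap.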
    Intrinsic::ID IdMap[4] = {Intrinsic::aarch64_neon_vcmla_rot0,
                              Intrinsic::aarch64_neon_vcmla_rot90,
                              Intrinsic::aarch64_neon_vcmla_rot180,
                              Intrinsic::aarch64_neon_vcmla_rot270};

    if (Accumulator == nullptr)
      Accumulator = ConstantFP::get(Ty, 0);

    return B.CreateIntrinsic(IdMap[(int)Rotation], Ty,
                             {Accumulator, InputB, InputA});
  }

  if (OperationType == ComplexDeinterleavingOperation::CAdd) {
    Intrinsic::ID IntId = Intrinsic::not_intrinsic;
    if (Rotation == ComplexDeinterleavingRotation::Rotation_90)
      IntId = Intrinsic::aarch64_neon_vcadd_rot90;
    else if (Rotation == ComplexDeinterleavingRotation::Rotation_270)
      IntId = Intrinsic::aarch64_neon_vcadd_rot270;

    if (IntId == Intrinsic::not_intrinsic)
      return nullptr;

    return B.CreateIntrinsic(IntId, Ty, {InputA, InputB});
  }

  return nullptr;
}