File: build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/lib/Target/ARM/ARMISelLowering.cpp
Warning: line 5062, column 7: 1st function call argument is an uninitialized value
//===- ARMISelLowering.cpp - ARM DAG Lowering Implementation --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "ARMISelLowering.h"
#include "ARMBaseInstrInfo.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMPerfectShuffle.h"
#include "ARMRegisterInfo.h"
#include "ARMSelectionDAGInfo.h"
#include "ARMSubtarget.h"
#include "ARMTargetTransformInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "Utils/ARMBaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <limits>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "arm-isel"

// Each STATISTIC below expands to a static llvm::Statistic initialized with
// DEBUG_TYPE, the counter's name and its description, e.g.
//   static llvm::Statistic NumTailCalls = {"arm-isel", "NumTailCalls",
//                                          "Number of tail calls"};
STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");
STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments");
STATISTIC(NumConstpoolPromoted,
          "Number of constants with their storage promoted into constant pools");

static cl::opt<bool>
ARMInterworking("arm-interworking", cl::Hidden,
  cl::desc("Enable / disable ARM interworking (for debugging only)"),
  cl::init(true));

static cl::opt<bool> EnableConstpoolPromotion(
    "arm-promote-constant", cl::Hidden,
    cl::desc("Enable / disable promotion of unnamed_addr constants into "
             "constant pools"),
    cl::init(false)); // FIXME: set to true by default once PR32780 is fixed
static cl::opt<unsigned> ConstpoolPromotionMaxSize(
    "arm-promote-constant-max-size", cl::Hidden,
    cl::desc("Maximum size of constant to promote into a constant pool"),
    cl::init(64));
static cl::opt<unsigned> ConstpoolPromotionMaxTotal(
    "arm-promote-constant-max-total", cl::Hidden,
    cl::desc("Maximum size of ALL constants to promote into a constant pool"),
    cl::init(128));

cl::opt<unsigned>
MVEMaxSupportedInterleaveFactor("mve-max-interleave-factor", cl::Hidden,
  cl::desc("Maximum interleave factor for MVE VLDn to generate."),
  cl::init(2));

// The APCS parameter registers.
static const MCPhysReg GPRArgRegs[] = {
  ARM::R0, ARM::R1, ARM::R2, ARM::R3
};

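// A note on the LegalizeAction values used throughout this file: "Legal"
// means the subtarget executes the operation natively; "Custom" routes the
// node through this target's lowering hooks; "Expand" lets the legalizer
// rewrite the operation in terms of simpler ones (or a library call); and
// "Promote" performs the operation in a different, typically wider, type.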
void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT) {
  if (VT != PromotedLdStVT) {
    setOperationAction(ISD::LOAD, VT, Promote);
    AddPromotedToType (ISD::LOAD, VT, PromotedLdStVT);

    setOperationAction(ISD::STORE, VT, Promote);
    AddPromotedToType (ISD::STORE, VT, PromotedLdStVT);
  }

  MVT ElemTy = VT.getVectorElementType();
  if (ElemTy != MVT::f64)
    setOperationAction(ISD::SETCC, VT, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
  if (ElemTy == MVT::i32) {
    setOperationAction(ISD::SINT_TO_FP, VT, Custom);
    setOperationAction(ISD::UINT_TO_FP, VT, Custom);
    setOperationAction(ISD::FP_TO_SINT, VT, Custom);
    setOperationAction(ISD::FP_TO_UINT, VT, Custom);
  } else {
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
  }
  setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, VT, Legal);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
  setOperationAction(ISD::SELECT, VT, Expand);
  setOperationAction(ISD::SELECT_CC, VT, Expand);
  setOperationAction(ISD::VSELECT, VT, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
  if (VT.isInteger()) {
    setOperationAction(ISD::SHL, VT, Custom);
    setOperationAction(ISD::SRA, VT, Custom);
    setOperationAction(ISD::SRL, VT, Custom);
  }

  // Neon does not support vector divide/remainder operations.
  setOperationAction(ISD::SDIV, VT, Expand);
  setOperationAction(ISD::UDIV, VT, Expand);
  setOperationAction(ISD::FDIV, VT, Expand);
  setOperationAction(ISD::SREM, VT, Expand);
  setOperationAction(ISD::UREM, VT, Expand);
  setOperationAction(ISD::FREM, VT, Expand);
  setOperationAction(ISD::SDIVREM, VT, Expand);
  setOperationAction(ISD::UDIVREM, VT, Expand);

  if (!VT.isFloatingPoint() &&
      VT != MVT::v2i64 && VT != MVT::v1i64)
    for (auto Opcode : {ISD::ABS, ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
      setOperationAction(Opcode, VT, Legal);
  if (!VT.isFloatingPoint())
    for (auto Opcode : {ISD::SADDSAT, ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT})
      setOperationAction(Opcode, VT, Legal);
}

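// NEON D registers hold 64-bit vectors and Q registers hold 128-bit ones.
// The two helpers below add a vector type to the corresponding register
// class (DPR for D, the DPair class for Q) and promote its loads and stores
// to a single canonical type of the same size.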
void ARMTargetLowering::addDRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &ARM::DPRRegClass);
  addTypeForNEON(VT, MVT::f64);
}

void ARMTargetLowering::addQRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &ARM::DPairRegClass);
  addTypeForNEON(VT, MVT::v2f64);
}

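// Mark all operations on VT as Expand, then re-legalize the handful of
// data-movement operations that work on any type.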
void ARMTargetLowering::setAllExpand(MVT VT) {
  for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
    setOperationAction(Opc, VT, Expand);

  // We support these really simple operations even on types where all
  // the actual arithmetic has to be broken down into simpler
  // operations or turned into library calls.
  setOperationAction(ISD::BITCAST, VT, Legal);
  setOperationAction(ISD::LOAD, VT, Legal);
  setOperationAction(ISD::STORE, VT, Legal);
  setOperationAction(ISD::UNDEF, VT, Legal);
}

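// Apply a single LegalizeAction to all three extending-load flavors
// (any-extend, zero-extend and sign-extend) for the given pair of types.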
void ARMTargetLowering::addAllExtLoads(const MVT From, const MVT To,
                                       LegalizeAction Action) {
  setLoadExtAction(ISD::EXTLOAD, From, To, Action);
  setLoadExtAction(ISD::ZEXTLOAD, From, To, Action);
  setLoadExtAction(ISD::SEXTLOAD, From, To, Action);
}

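// Register the MVE (M-profile Vector Extension) vector types in the MQPR
// register class and set up their legalization. HasMVEFP says whether the
// floating-point subset (MVE.fp) is available; without it the FP vector
// types are still registered, but only for bitcast/load/store-level use.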
void ARMTargetLowering::addMVEVectorTypes(bool HasMVEFP) {
  const MVT IntTypes[] = { MVT::v16i8, MVT::v8i16, MVT::v4i32 };

  for (auto VT : IntTypes) {
    addRegisterClass(VT, &ARM::MQPRRegClass);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
    setOperationAction(ISD::SHL, VT, Custom);
    setOperationAction(ISD::SRA, VT, Custom);
    setOperationAction(ISD::SRL, VT, Custom);
    setOperationAction(ISD::SMIN, VT, Legal);
    setOperationAction(ISD::SMAX, VT, Legal);
    setOperationAction(ISD::UMIN, VT, Legal);
    setOperationAction(ISD::UMAX, VT, Legal);
    setOperationAction(ISD::ABS, VT, Legal);
    setOperationAction(ISD::SETCC, VT, Custom);
    setOperationAction(ISD::MLOAD, VT, Custom);
    setOperationAction(ISD::MSTORE, VT, Legal);
    setOperationAction(ISD::CTLZ, VT, Legal);
    setOperationAction(ISD::CTTZ, VT, Custom);
    setOperationAction(ISD::BITREVERSE, VT, Legal);
    setOperationAction(ISD::BSWAP, VT, Legal);
    setOperationAction(ISD::SADDSAT, VT, Legal);
    setOperationAction(ISD::UADDSAT, VT, Legal);
    setOperationAction(ISD::SSUBSAT, VT, Legal);
    setOperationAction(ISD::USUBSAT, VT, Legal);
    setOperationAction(ISD::ABDS, VT, Legal);
    setOperationAction(ISD::ABDU, VT, Legal);
    setOperationAction(ISD::AVGFLOORS, VT, Legal);
    setOperationAction(ISD::AVGFLOORU, VT, Legal);
    setOperationAction(ISD::AVGCEILS, VT, Legal);
    setOperationAction(ISD::AVGCEILU, VT, Legal);

    // No native support for these.
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);

    // Vector reductions
    setOperationAction(ISD::VECREDUCE_ADD, VT, Legal);
    setOperationAction(ISD::VECREDUCE_SMAX, VT, Legal);
    setOperationAction(ISD::VECREDUCE_UMAX, VT, Legal);
    setOperationAction(ISD::VECREDUCE_SMIN, VT, Legal);
    setOperationAction(ISD::VECREDUCE_UMIN, VT, Legal);
    setOperationAction(ISD::VECREDUCE_MUL, VT, Custom);
    setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
    setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
    setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);

    if (!HasMVEFP) {
      setOperationAction(ISD::SINT_TO_FP, VT, Expand);
      setOperationAction(ISD::UINT_TO_FP, VT, Expand);
      setOperationAction(ISD::FP_TO_SINT, VT, Expand);
      setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    } else {
      setOperationAction(ISD::FP_TO_SINT_SAT, VT, Custom);
      setOperationAction(ISD::FP_TO_UINT_SAT, VT, Custom);
    }

    // Pre and Post inc are supported on loads and stores
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, VT, Legal);
      setIndexedStoreAction(im, VT, Legal);
      setIndexedMaskedLoadAction(im, VT, Legal);
      setIndexedMaskedStoreAction(im, VT, Legal);
    }
  }

  const MVT FloatTypes[] = { MVT::v8f16, MVT::v4f32 };
  for (auto VT : FloatTypes) {
    addRegisterClass(VT, &ARM::MQPRRegClass);
    if (!HasMVEFP)
      setAllExpand(VT);

    // These are legal or custom whether we have MVE.fp or not
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT.getVectorElementType(), Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
    setOperationAction(ISD::BUILD_VECTOR, VT.getVectorElementType(), Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Legal);
    setOperationAction(ISD::SETCC, VT, Custom);
    setOperationAction(ISD::MLOAD, VT, Custom);
    setOperationAction(ISD::MSTORE, VT, Legal);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);

    // Pre and Post inc are supported on loads and stores
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, VT, Legal);
      setIndexedStoreAction(im, VT, Legal);
      setIndexedMaskedLoadAction(im, VT, Legal);
      setIndexedMaskedStoreAction(im, VT, Legal);
    }

    if (HasMVEFP) {
      setOperationAction(ISD::FMINNUM, VT, Legal);
      setOperationAction(ISD::FMAXNUM, VT, Legal);
      setOperationAction(ISD::FROUND, VT, Legal);
      setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
      setOperationAction(ISD::VECREDUCE_FMUL, VT, Custom);
      setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
      setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);

      // No native support for these.
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
    }
  }

  // Custom Expand smaller than legal vector reductions to prevent false zero
  // items being added.
  setOperationAction(ISD::VECREDUCE_FADD, MVT::v4f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMUL, MVT::v4f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMIN, MVT::v4f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMAX, MVT::v4f16, Custom);
  setOperationAction(ISD::VECREDUCE_FADD, MVT::v2f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMUL, MVT::v2f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMIN, MVT::v2f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMAX, MVT::v2f16, Custom);

  // We 'support' these types up to bitcast/load/store level, regardless of
  // MVE integer-only / float support. Only doing FP data processing on the FP
  // vector types is inhibited at integer-only level.
  const MVT LongTypes[] = { MVT::v2i64, MVT::v2f64 };
  for (auto VT : LongTypes) {
    addRegisterClass(VT, &ARM::MQPRRegClass);
    setAllExpand(VT);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
    setOperationAction(ISD::VSELECT, VT, Legal);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
  }
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);

  // We can do bitwise operations on v2i64 vectors
  setOperationAction(ISD::AND, MVT::v2i64, Legal);
  setOperationAction(ISD::OR, MVT::v2i64, Legal);
  setOperationAction(ISD::XOR, MVT::v2i64, Legal);

  // It is legal to extload from v4i8 to v4i16 or v4i32.
  addAllExtLoads(MVT::v8i16, MVT::v8i8, Legal);
  addAllExtLoads(MVT::v4i32, MVT::v4i16, Legal);
  addAllExtLoads(MVT::v4i32, MVT::v4i8, Legal);

  // It is legal to sign extend from v4i8/v4i16 to v4i32 or v8i8 to v8i16.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v8i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v8i16, Legal);

  // Some truncating stores are legal too.
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Legal);
  setTruncStoreAction(MVT::v8i16, MVT::v8i8, Legal);

  // Pre and Post inc on these are legal, given the correct extends
  for (unsigned im = (unsigned)ISD::PRE_INC;
       im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
    for (auto VT : {MVT::v8i8, MVT::v4i8, MVT::v4i16}) {
      setIndexedLoadAction(im, VT, Legal);
      setIndexedStoreAction(im, VT, Legal);
      setIndexedMaskedLoadAction(im, VT, Legal);
      setIndexedMaskedStoreAction(im, VT, Legal);
    }
  }

  // Predicate types
  const MVT pTypes[] = {MVT::v16i1, MVT::v8i1, MVT::v4i1, MVT::v2i1};
  for (auto VT : pTypes) {
    addRegisterClass(VT, &ARM::VCCRRegClass);
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::SETCC, VT, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
    setOperationAction(ISD::LOAD, VT, Custom);
    setOperationAction(ISD::STORE, VT, Custom);
    setOperationAction(ISD::TRUNCATE, VT, Custom);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
  }
  setOperationAction(ISD::SETCC, MVT::v2i1, Expand);
  setOperationAction(ISD::TRUNCATE, MVT::v2i1, Expand);
  setOperationAction(ISD::AND, MVT::v2i1, Expand);
  setOperationAction(ISD::OR, MVT::v2i1, Expand);
  setOperationAction(ISD::XOR, MVT::v2i1, Expand);
  setOperationAction(ISD::SINT_TO_FP, MVT::v2i1, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::v2i1, Expand);
  setOperationAction(ISD::FP_TO_SINT, MVT::v2i1, Expand);
  setOperationAction(ISD::FP_TO_UINT, MVT::v2i1, Expand);

  setOperationAction(ISD::SIGN_EXTEND, MVT::v8i32, Custom);
  setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
  setOperationAction(ISD::ZERO_EXTEND, MVT::v8i32, Custom);
  setOperationAction(ISD::ZERO_EXTEND, MVT::v16i16, Custom);
  setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
  setOperationAction(ISD::TRUNCATE, MVT::v8i32, Custom);
  setOperationAction(ISD::TRUNCATE, MVT::v16i16, Custom);
}

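// The constructor tells the generic SelectionDAG framework everything it
// needs to know about this target: which types live in which register
// classes, the per-operation legalize actions, the libcall names and calling
// conventions the selected ABI provides, and which DAG combines to run.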
ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
                                     const ARMSubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  RegInfo = Subtarget->getRegisterInfo();
  Itins = Subtarget->getInstrItineraryData();

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  if (!Subtarget->isTargetDarwin() && !Subtarget->isTargetIOS() &&
      !Subtarget->isTargetWatchOS() && !Subtarget->isTargetDriverKit()) {
    bool IsHFTarget = TM.Options.FloatABIType == FloatABI::Hard;
    for (int LCID = 0; LCID < RTLIB::UNKNOWN_LIBCALL; ++LCID)
      setLibcallCallingConv(static_cast<RTLIB::Libcall>(LCID),
                            IsHFTarget ? CallingConv::ARM_AAPCS_VFP
                                       : CallingConv::ARM_AAPCS);
  }

  if (Subtarget->isTargetMachO()) {
    // Uses VFP for Thumb libfuncs if available.
    if (Subtarget->isThumb() && Subtarget->hasVFP2Base() &&
        Subtarget->hasARMOps() && !Subtarget->useSoftFloat()) {
      static const struct {
        const RTLIB::Libcall Op;
        const char * const Name;
        const ISD::CondCode Cond;
      } LibraryCalls[] = {
        // Single-precision floating-point arithmetic.
        { RTLIB::ADD_F32, "__addsf3vfp", ISD::SETCC_INVALID },
        { RTLIB::SUB_F32, "__subsf3vfp", ISD::SETCC_INVALID },
        { RTLIB::MUL_F32, "__mulsf3vfp", ISD::SETCC_INVALID },
        { RTLIB::DIV_F32, "__divsf3vfp", ISD::SETCC_INVALID },

        // Double-precision floating-point arithmetic.
        { RTLIB::ADD_F64, "__adddf3vfp", ISD::SETCC_INVALID },
        { RTLIB::SUB_F64, "__subdf3vfp", ISD::SETCC_INVALID },
        { RTLIB::MUL_F64, "__muldf3vfp", ISD::SETCC_INVALID },
        { RTLIB::DIV_F64, "__divdf3vfp", ISD::SETCC_INVALID },

        // Single-precision comparisons.
        { RTLIB::OEQ_F32, "__eqsf2vfp", ISD::SETNE },
        { RTLIB::UNE_F32, "__nesf2vfp", ISD::SETNE },
        { RTLIB::OLT_F32, "__ltsf2vfp", ISD::SETNE },
        { RTLIB::OLE_F32, "__lesf2vfp", ISD::SETNE },
        { RTLIB::OGE_F32, "__gesf2vfp", ISD::SETNE },
        { RTLIB::OGT_F32, "__gtsf2vfp", ISD::SETNE },
        { RTLIB::UO_F32,  "__unordsf2vfp", ISD::SETNE },

        // Double-precision comparisons.
        { RTLIB::OEQ_F64, "__eqdf2vfp", ISD::SETNE },
        { RTLIB::UNE_F64, "__nedf2vfp", ISD::SETNE },
        { RTLIB::OLT_F64, "__ltdf2vfp", ISD::SETNE },
        { RTLIB::OLE_F64, "__ledf2vfp", ISD::SETNE },
        { RTLIB::OGE_F64, "__gedf2vfp", ISD::SETNE },
        { RTLIB::OGT_F64, "__gtdf2vfp", ISD::SETNE },
        { RTLIB::UO_F64,  "__unorddf2vfp", ISD::SETNE },

        // Floating-point to integer conversions.
        // i64 conversions are done via library routines even when generating VFP
        // instructions, so use the same ones.
        { RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp", ISD::SETCC_INVALID },
        { RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp", ISD::SETCC_INVALID },
        { RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp", ISD::SETCC_INVALID },
        { RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp", ISD::SETCC_INVALID },

        // Conversions between floating types.
        { RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp", ISD::SETCC_INVALID },
        { RTLIB::FPEXT_F32_F64,   "__extendsfdf2vfp", ISD::SETCC_INVALID },

        // Integer to floating-point conversions.
        // i64 conversions are done via library routines even when generating VFP
        // instructions, so use the same ones.
        // FIXME: There appears to be some naming inconsistency in ARM libgcc:
        // e.g., __floatunsidf vs. __floatunssidfvfp.
        { RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp", ISD::SETCC_INVALID },
        { RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp", ISD::SETCC_INVALID },
        { RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp", ISD::SETCC_INVALID },
        { RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp", ISD::SETCC_INVALID },
      };

      for (const auto &LC : LibraryCalls) {
        setLibcallName(LC.Op, LC.Name);
        if (LC.Cond != ISD::SETCC_INVALID)
          setCmpLibcallCC(LC.Op, LC.Cond);
      }
    }
  }

  // These libcalls are not available in 32-bit.
  setLibcallName(RTLIB::SHL_I128, nullptr);
  setLibcallName(RTLIB::SRL_I128, nullptr);
  setLibcallName(RTLIB::SRA_I128, nullptr);
  setLibcallName(RTLIB::MUL_I128, nullptr);
  setLibcallName(RTLIB::MULO_I64, nullptr);
  setLibcallName(RTLIB::MULO_I128, nullptr);

  // RTLIB
  if (Subtarget->isAAPCS_ABI() &&
      (Subtarget->isTargetAEABI() || Subtarget->isTargetGNUAEABI() ||
       Subtarget->isTargetMuslAEABI() || Subtarget->isTargetAndroid())) {
    static const struct {
      const RTLIB::Libcall Op;
      const char * const Name;
      const CallingConv::ID CC;
      const ISD::CondCode Cond;
    } LibraryCalls[] = {
      // Double-precision floating-point arithmetic helper functions
      // RTABI chapter 4.1.2, Table 2
      { RTLIB::ADD_F64, "__aeabi_dadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::DIV_F64, "__aeabi_ddiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::MUL_F64, "__aeabi_dmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SUB_F64, "__aeabi_dsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Double-precision floating-point comparison helper functions
      // RTABI chapter 4.1.2, Table 3
      { RTLIB::OEQ_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UNE_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
      { RTLIB::OLT_F64, "__aeabi_dcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OLE_F64, "__aeabi_dcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGE_F64, "__aeabi_dcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGT_F64, "__aeabi_dcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UO_F64,  "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },

      // Single-precision floating-point arithmetic helper functions
      // RTABI chapter 4.1.2, Table 4
      { RTLIB::ADD_F32, "__aeabi_fadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::DIV_F32, "__aeabi_fdiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::MUL_F32, "__aeabi_fmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SUB_F32, "__aeabi_fsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Single-precision floating-point comparison helper functions
      // RTABI chapter 4.1.2, Table 5
      { RTLIB::OEQ_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UNE_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
      { RTLIB::OLT_F32, "__aeabi_fcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OLE_F32, "__aeabi_fcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGE_F32, "__aeabi_fcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGT_F32, "__aeabi_fcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UO_F32,  "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },

      // Floating-point to integer conversions.
      // RTABI chapter 4.1.2, Table 6
      { RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Conversions between floating types.
      // RTABI chapter 4.1.2, Table 7
      { RTLIB::FPROUND_F64_F32, "__aeabi_d2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPROUND_F64_F16, "__aeabi_d2h", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPEXT_F32_F64,   "__aeabi_f2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Integer to floating-point conversions.
      // RTABI chapter 4.1.2, Table 8
      { RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Long long helper functions
      // RTABI chapter 4.2, Table 9
      { RTLIB::MUL_I64, "__aeabi_lmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SHL_I64, "__aeabi_llsl", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SRL_I64, "__aeabi_llsr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SRA_I64, "__aeabi_lasr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Integer division functions
      // RTABI chapter 4.3.1
      { RTLIB::SDIV_I8,  "__aeabi_idiv",     CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SDIV_I16, "__aeabi_idiv",     CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SDIV_I32, "__aeabi_idiv",     CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SDIV_I64, "__aeabi_ldivmod",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I8,  "__aeabi_uidiv",    CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I16, "__aeabi_uidiv",    CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I32, "__aeabi_uidiv",    CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I64, "__aeabi_uldivmod", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
    };

    for (const auto &LC : LibraryCalls) {
      setLibcallName(LC.Op, LC.Name);
      setLibcallCallingConv(LC.Op, LC.CC);
      if (LC.Cond != ISD::SETCC_INVALID)
        setCmpLibcallCC(LC.Op, LC.Cond);
    }

    // EABI dependent RTLIB
    if (TM.Options.EABIVersion == EABI::EABI4 ||
        TM.Options.EABIVersion == EABI::EABI5) {
      static const struct {
        const RTLIB::Libcall Op;
        const char *const Name;
        const CallingConv::ID CC;
        const ISD::CondCode Cond;
      } MemOpsLibraryCalls[] = {
        // Memory operations
        // RTABI chapter 4.3.4
        { RTLIB::MEMCPY,  "__aeabi_memcpy",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
        { RTLIB::MEMMOVE, "__aeabi_memmove", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
        { RTLIB::MEMSET,  "__aeabi_memset",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      };

      for (const auto &LC : MemOpsLibraryCalls) {
        setLibcallName(LC.Op, LC.Name);
        setLibcallCallingConv(LC.Op, LC.CC);
        if (LC.Cond != ISD::SETCC_INVALID)
          setCmpLibcallCC(LC.Op, LC.Cond);
      }
    }
  }

  if (Subtarget->isTargetWindows()) {
    static const struct {
      const RTLIB::Libcall Op;
      const char * const Name;
      const CallingConv::ID CC;
    } LibraryCalls[] = {
      { RTLIB::FPTOSINT_F32_I64, "__stoi64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::FPTOSINT_F64_I64, "__dtoi64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::FPTOUINT_F32_I64, "__stou64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::FPTOUINT_F64_I64, "__dtou64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::SINTTOFP_I64_F32, "__i64tos", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::SINTTOFP_I64_F64, "__i64tod", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::UINTTOFP_I64_F32, "__u64tos", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::UINTTOFP_I64_F64, "__u64tod", CallingConv::ARM_AAPCS_VFP },
    };

    for (const auto &LC : LibraryCalls) {
      setLibcallName(LC.Op, LC.Name);
      setLibcallCallingConv(LC.Op, LC.CC);
    }
  }

  // Use divmod compiler-rt calls for iOS 5.0 and later.
  if (Subtarget->isTargetMachO() &&
      !(Subtarget->isTargetIOS() &&
        Subtarget->getTargetTriple().isOSVersionLT(5, 0))) {
    setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4");
    setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4");
  }

  // The half <-> float conversion functions are always soft-float on
  // non-watchos platforms, but are needed for some targets which use a
  // hard-float calling convention by default.
  if (!Subtarget->isTargetWatchABI()) {
    if (Subtarget->isAAPCS_ABI()) {
      setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_AAPCS);
      setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_AAPCS);
      setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_AAPCS);
    } else {
      setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_APCS);
      setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_APCS);
      setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_APCS);
    }
  }

  // In EABI, these functions have an __aeabi_ prefix, but in GNUEABI they have
  // a __gnu_ prefix (which is the default).
  if (Subtarget->isTargetAEABI()) {
    static const struct {
      const RTLIB::Libcall Op;
      const char * const Name;
      const CallingConv::ID CC;
    } LibraryCalls[] = {
      { RTLIB::FPROUND_F32_F16, "__aeabi_f2h", CallingConv::ARM_AAPCS },
      { RTLIB::FPROUND_F64_F16, "__aeabi_d2h", CallingConv::ARM_AAPCS },
      { RTLIB::FPEXT_F16_F32,   "__aeabi_h2f", CallingConv::ARM_AAPCS },
    };

    for (const auto &LC : LibraryCalls) {
      setLibcallName(LC.Op, LC.Name);
      setLibcallCallingConv(LC.Op, LC.CC);
    }
  }

  if (Subtarget->isThumb1Only())
    addRegisterClass(MVT::i32, &ARM::tGPRRegClass);
  else
    addRegisterClass(MVT::i32, &ARM::GPRRegClass);

  if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only() &&
      Subtarget->hasFPRegs()) {
    addRegisterClass(MVT::f32, &ARM::SPRRegClass);
    addRegisterClass(MVT::f64, &ARM::DPRRegClass);

    setOperationAction(ISD::FP_TO_SINT_SAT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT_SAT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT_SAT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT_SAT, MVT::i64, Custom);

    if (!Subtarget->hasVFP2Base())
      setAllExpand(MVT::f32);
    if (!Subtarget->hasFP64())
      setAllExpand(MVT::f64);
  }

  if (Subtarget->hasFullFP16()) {
    addRegisterClass(MVT::f16, &ARM::HPRRegClass);
    setOperationAction(ISD::BITCAST, MVT::i16, Custom);
    setOperationAction(ISD::BITCAST, MVT::f16, Custom);

    setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
  }

  if (Subtarget->hasBF16()) {
    addRegisterClass(MVT::bf16, &ARM::HPRRegClass);
    setAllExpand(MVT::bf16);
    if (!Subtarget->hasFullFP16())
      setOperationAction(ISD::BITCAST, MVT::bf16, Custom);
  }

  for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
    for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
      setTruncStoreAction(VT, InnerVT, Expand);
      addAllExtLoads(VT, InnerVT, Expand);
    }

    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);

    setOperationAction(ISD::BSWAP, VT, Expand);
  }

  setOperationAction(ISD::ConstantFP, MVT::f32, Custom);
  setOperationAction(ISD::ConstantFP, MVT::f64, Custom);

  setOperationAction(ISD::READ_REGISTER, MVT::i64, Custom);
  setOperationAction(ISD::WRITE_REGISTER, MVT::i64, Custom);

  if (Subtarget->hasMVEIntegerOps())
    addMVEVectorTypes(Subtarget->hasMVEFloatOps());

  // Combine low-overhead loop intrinsics so that we can lower i1 types.
  if (Subtarget->hasLOB()) {
    setTargetDAGCombine({ISD::BRCOND, ISD::BR_CC});
  }

  if (Subtarget->hasNEON()) {
    addDRTypeForNEON(MVT::v2f32);
    addDRTypeForNEON(MVT::v8i8);
    addDRTypeForNEON(MVT::v4i16);
    addDRTypeForNEON(MVT::v2i32);
    addDRTypeForNEON(MVT::v1i64);

    addQRTypeForNEON(MVT::v4f32);
    addQRTypeForNEON(MVT::v2f64);
    addQRTypeForNEON(MVT::v16i8);
    addQRTypeForNEON(MVT::v8i16);
    addQRTypeForNEON(MVT::v4i32);
    addQRTypeForNEON(MVT::v2i64);

    if (Subtarget->hasFullFP16()) {
      addQRTypeForNEON(MVT::v8f16);
      addDRTypeForNEON(MVT::v4f16);
    }

    if (Subtarget->hasBF16()) {
      addQRTypeForNEON(MVT::v8bf16);
      addDRTypeForNEON(MVT::v4bf16);
    }
  }

  if (Subtarget->hasMVEIntegerOps() || Subtarget->hasNEON()) {
    // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
    // none of Neon, MVE or VFP supports any arithmetic operations on it.
    setOperationAction(ISD::FADD, MVT::v2f64, Expand);
    setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
    setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
    // FIXME: Code duplication: FDIV and FREM are expanded always, see
    // ARMTargetLowering::addTypeForNEON method for details.
    setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
    setOperationAction(ISD::FREM, MVT::v2f64, Expand);
    // FIXME: Create unittest.
    // In other words, find a way to make "copysign" appear in a DAG with
    // vector operands.
    setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
    // FIXME: Code duplication: SETCC has custom operation action, see
    // ARMTargetLowering::addTypeForNEON method for details.
    setOperationAction(ISD::SETCC, MVT::v2f64, Expand);
    // FIXME: Create unittest for FNEG and for FABS.
    setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
    setOperationAction(ISD::FABS, MVT::v2f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
    // FIXME: Create unittest for FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR.
    setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);
    setOperationAction(ISD::FMA, MVT::v2f64, Expand);
  }

  if (Subtarget->hasNEON()) {
    // The same goes for v4f32, but keep in mind that vadd, vsub and vmul are
    // natively supported for v4f32.
    setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
    setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f32, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Expand);

    // Mark v2f32 intrinsics.
    setOperationAction(ISD::FSQRT, MVT::v2f32, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f32, Expand);
    setOperationAction(ISD::FCEIL, MVT::v2f32, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f32, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f32, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f32, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f32, Expand);

    // Neon does not support some operations on v1i64 and v2i64 types.
    setOperationAction(ISD::MUL, MVT::v1i64, Expand);
    // Custom handling for some quad-vector types to detect VMULL.
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    // Custom handling for some vector types to avoid expensive expansions
    setOperationAction(ISD::SDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::SDIV, MVT::v8i8, Custom);
    setOperationAction(ISD::UDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::UDIV, MVT::v8i8, Custom);
    // Neon does not have single instructions for SINT_TO_FP and UINT_TO_FP
    // with a destination type that is wider than the source, nor does it
    // have an FP_TO_[SU]INT instruction with a narrower destination than
    // source.
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i16, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v8i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Custom);

    setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v2f64, Expand);

    // NEON does not have single instruction CTPOP for vectors with element
    // types wider than 8-bits. However, custom lowering can leverage the
    // v8i8/v16i8 vcnt instruction.
    setOperationAction(ISD::CTPOP, MVT::v2i32, Custom);
    setOperationAction(ISD::CTPOP, MVT::v4i32, Custom);
    setOperationAction(ISD::CTPOP, MVT::v4i16, Custom);
    setOperationAction(ISD::CTPOP, MVT::v8i16, Custom);
    setOperationAction(ISD::CTPOP, MVT::v1i64, Custom);
    setOperationAction(ISD::CTPOP, MVT::v2i64, Custom);

    setOperationAction(ISD::CTLZ, MVT::v1i64, Expand);
    setOperationAction(ISD::CTLZ, MVT::v2i64, Expand);

    // NEON does not have single instruction CTTZ for vectors.
    setOperationAction(ISD::CTTZ, MVT::v8i8, Custom);
    setOperationAction(ISD::CTTZ, MVT::v4i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::v2i32, Custom);
    setOperationAction(ISD::CTTZ, MVT::v1i64, Custom);

    setOperationAction(ISD::CTTZ, MVT::v16i8, Custom);
    setOperationAction(ISD::CTTZ, MVT::v8i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::v4i32, Custom);
    setOperationAction(ISD::CTTZ, MVT::v2i64, Custom);

    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i8, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i16, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i32, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v1i64, Custom);

    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v16i8, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i16, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i32, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i64, Custom);

    for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
    }

    // NEON only has FMA instructions as of VFP4.
    if (!Subtarget->hasVFP4Base()) {
      setOperationAction(ISD::FMA, MVT::v2f32, Expand);
      setOperationAction(ISD::FMA, MVT::v4f32, Expand);
    }

    setTargetDAGCombine({ISD::SHL, ISD::SRL, ISD::SRA, ISD::FP_TO_SINT,
                         ISD::FP_TO_UINT, ISD::FDIV, ISD::LOAD});

    // It is legal to extload from v4i8 to v4i16 or v4i32.
    for (MVT Ty : {MVT::v8i8, MVT::v4i8, MVT::v2i8, MVT::v4i16, MVT::v2i16,
                   MVT::v2i32}) {
      for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
        setLoadExtAction(ISD::EXTLOAD, VT, Ty, Legal);
        setLoadExtAction(ISD::ZEXTLOAD, VT, Ty, Legal);
        setLoadExtAction(ISD::SEXTLOAD, VT, Ty, Legal);
      }
    }
  }

  if (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) {
    setTargetDAGCombine(
        {ISD::BUILD_VECTOR, ISD::VECTOR_SHUFFLE, ISD::INSERT_SUBVECTOR,
         ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
         ISD::SIGN_EXTEND_INREG, ISD::STORE, ISD::SIGN_EXTEND, ISD::ZERO_EXTEND,
         ISD::ANY_EXTEND, ISD::INTRINSIC_WO_CHAIN, ISD::INTRINSIC_W_CHAIN,
         ISD::INTRINSIC_VOID, ISD::VECREDUCE_ADD, ISD::ADD, ISD::BITCAST});
  }
  if (Subtarget->hasMVEIntegerOps()) {
    setTargetDAGCombine({ISD::SMIN, ISD::UMIN, ISD::SMAX, ISD::UMAX,
                         ISD::FP_EXTEND, ISD::SELECT, ISD::SELECT_CC,
                         ISD::SETCC});
  }
  if (Subtarget->hasMVEFloatOps()) {
    setTargetDAGCombine(ISD::FADD);
  }

  if (!Subtarget->hasFP64()) {
    // When targeting a floating-point unit with only single-precision
    // operations, f64 is legal for the few double-precision instructions
    // which are present. However, no double-precision operations other than
    // moves, loads and stores are provided by the hardware.
1025 | setOperationAction(ISD::FADD, MVT::f64, Expand); | |||
1026 | setOperationAction(ISD::FSUB, MVT::f64, Expand); | |||
1027 | setOperationAction(ISD::FMUL, MVT::f64, Expand); | |||
1028 | setOperationAction(ISD::FMA, MVT::f64, Expand); | |||
1029 | setOperationAction(ISD::FDIV, MVT::f64, Expand); | |||
1030 | setOperationAction(ISD::FREM, MVT::f64, Expand); | |||
1031 | setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); | |||
1032 | setOperationAction(ISD::FGETSIGN, MVT::f64, Expand); | |||
1033 | setOperationAction(ISD::FNEG, MVT::f64, Expand); | |||
1034 | setOperationAction(ISD::FABS, MVT::f64, Expand); | |||
1035 | setOperationAction(ISD::FSQRT, MVT::f64, Expand); | |||
1036 | setOperationAction(ISD::FSIN, MVT::f64, Expand); | |||
1037 | setOperationAction(ISD::FCOS, MVT::f64, Expand); | |||
1038 | setOperationAction(ISD::FPOW, MVT::f64, Expand); | |||
1039 | setOperationAction(ISD::FLOG, MVT::f64, Expand); | |||
1040 | setOperationAction(ISD::FLOG2, MVT::f64, Expand); | |||
1041 | setOperationAction(ISD::FLOG10, MVT::f64, Expand); | |||
1042 | setOperationAction(ISD::FEXP, MVT::f64, Expand); | |||
1043 | setOperationAction(ISD::FEXP2, MVT::f64, Expand); | |||
1044 | setOperationAction(ISD::FCEIL, MVT::f64, Expand); | |||
1045 | setOperationAction(ISD::FTRUNC, MVT::f64, Expand); | |||
1046 | setOperationAction(ISD::FRINT, MVT::f64, Expand); | |||
1047 | setOperationAction(ISD::FNEARBYINT, MVT::f64, Expand); | |||
1048 | setOperationAction(ISD::FFLOOR, MVT::f64, Expand); | |||
1049 | setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom); | |||
1050 | setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom); | |||
1051 | setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom); | |||
1052 | setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom); | |||
1053 | setOperationAction(ISD::FP_TO_SINT, MVT::f64, Custom); | |||
1054 | setOperationAction(ISD::FP_TO_UINT, MVT::f64, Custom); | |||
1055 | setOperationAction(ISD::FP_ROUND, MVT::f32, Custom); | |||
1056 | setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom); | |||
1057 | setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom); | |||
1058 | setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::f64, Custom); | |||
1059 | setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::f64, Custom); | |||
1060 | setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Custom); | |||
1061 | } | |||
1062 | ||||
1063 | if (!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) { | |||
1064 | setOperationAction(ISD::FP_EXTEND, MVT::f64, Custom); | |||
1065 | setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Custom); | |||
1066 | if (Subtarget->hasFullFP16()) { | |||
1067 | setOperationAction(ISD::FP_ROUND, MVT::f16, Custom); | |||
1068 | setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Custom); | |||
1069 | } | |||
1070 | } | |||
1071 | ||||
1072 | if (!Subtarget->hasFP16()) { | |||
1073 | setOperationAction(ISD::FP_EXTEND, MVT::f32, Custom); | |||
1074 | setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Custom); | |||
1075 | } | |||
1076 | ||||
1077 | computeRegisterProperties(Subtarget->getRegisterInfo()); | |||
1078 | ||||
1079 | // ARM does not have floating-point extending loads. | |||
1080 | for (MVT VT : MVT::fp_valuetypes()) { | |||
1081 | setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand); | |||
1082 | setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand); | |||
1083 | } | |||
1084 | ||||
1085 | // ... or truncating stores | |||
1086 | setTruncStoreAction(MVT::f64, MVT::f32, Expand); | |||
1087 | setTruncStoreAction(MVT::f32, MVT::f16, Expand); | |||
1088 | setTruncStoreAction(MVT::f64, MVT::f16, Expand); | |||
1089 | ||||
1090 | // ARM does not have an i1 sign-extending load. | |||
1091 | for (MVT VT : MVT::integer_valuetypes()) | |||
1092 | setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote); | |||
1093 | ||||
1094 | // ARM supports all 4 flavors of integer indexed load / store. | |||
1095 | if (!Subtarget->isThumb1Only()) { | |||
1096 | for (unsigned im = (unsigned)ISD::PRE_INC; | |||
1097 | im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) { | |||
1098 | setIndexedLoadAction(im, MVT::i1, Legal); | |||
1099 | setIndexedLoadAction(im, MVT::i8, Legal); | |||
1100 | setIndexedLoadAction(im, MVT::i16, Legal); | |||
1101 | setIndexedLoadAction(im, MVT::i32, Legal); | |||
1102 | setIndexedStoreAction(im, MVT::i1, Legal); | |||
1103 | setIndexedStoreAction(im, MVT::i8, Legal); | |||
1104 | setIndexedStoreAction(im, MVT::i16, Legal); | |||
1105 | setIndexedStoreAction(im, MVT::i32, Legal); | |||
1106 | } | |||
1107 | } else { | |||
1108 | // Thumb-1 has limited post-inc load/store support - LDM r0!, {r1}. | |||
1109 | setIndexedLoadAction(ISD::POST_INC, MVT::i32, Legal); | |||
1110 | setIndexedStoreAction(ISD::POST_INC, MVT::i32, Legal); | |||
1111 | } | |||
1112 | ||||
1113 | setOperationAction(ISD::SADDO, MVT::i32, Custom); | |||
1114 | setOperationAction(ISD::UADDO, MVT::i32, Custom); | |||
1115 | setOperationAction(ISD::SSUBO, MVT::i32, Custom); | |||
1116 | setOperationAction(ISD::USUBO, MVT::i32, Custom); | |||
1117 | ||||
1118 | setOperationAction(ISD::ADDCARRY, MVT::i32, Custom); | |||
1119 | setOperationAction(ISD::SUBCARRY, MVT::i32, Custom); | |||
1120 | if (Subtarget->hasDSP()) { | |||
1121 | setOperationAction(ISD::SADDSAT, MVT::i8, Custom); | |||
1122 | setOperationAction(ISD::SSUBSAT, MVT::i8, Custom); | |||
1123 | setOperationAction(ISD::SADDSAT, MVT::i16, Custom); | |||
1124 | setOperationAction(ISD::SSUBSAT, MVT::i16, Custom); | |||
1125 | setOperationAction(ISD::UADDSAT, MVT::i8, Custom); | |||
1126 | setOperationAction(ISD::USUBSAT, MVT::i8, Custom); | |||
1127 | setOperationAction(ISD::UADDSAT, MVT::i16, Custom); | |||
1128 | setOperationAction(ISD::USUBSAT, MVT::i16, Custom); | |||
1129 | } | |||
1130 | if (Subtarget->hasBaseDSP()) { | |||
1131 | setOperationAction(ISD::SADDSAT, MVT::i32, Legal); | |||
1132 | setOperationAction(ISD::SSUBSAT, MVT::i32, Legal); | |||
1133 | } | |||
1134 | ||||
1135 | // i64 operation support. | |||
1136 | setOperationAction(ISD::MUL, MVT::i64, Expand); | |||
1137 | setOperationAction(ISD::MULHU, MVT::i32, Expand); | |||
1138 | if (Subtarget->isThumb1Only()) { | |||
1139 | setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand); | |||
1140 | setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand); | |||
1141 | } | |||
1142 | if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops() | |||
1143 | || (Subtarget->isThumb2() && !Subtarget->hasDSP())) | |||
1144 | setOperationAction(ISD::MULHS, MVT::i32, Expand); | |||
1145 | ||||
1146 | setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom); | |||
1147 | setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom); | |||
1148 | setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom); | |||
1149 | setOperationAction(ISD::SRL, MVT::i64, Custom); | |||
1150 | setOperationAction(ISD::SRA, MVT::i64, Custom); | |||
1151 | setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom); | |||
1152 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom); | |||
1153 | setOperationAction(ISD::LOAD, MVT::i64, Custom); | |||
1154 | setOperationAction(ISD::STORE, MVT::i64, Custom); | |||
1155 | ||||
1156 | // MVE lowers 64-bit shifts to lsll and lsrl, | |||
1157 | // assuming that ISD::SRL and ISD::SRA of i64 are already marked Custom. | |||
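// For example, a 64-bit 'shl' can then be selected to LSLL on a GPR pair
// rather than going through the generic expansion.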
1158 | if (Subtarget->hasMVEIntegerOps()) | |||
1159 | setOperationAction(ISD::SHL, MVT::i64, Custom); | |||
1160 | ||||
1161 | // Expand to __aeabi_l{lsl,lsr,asr} calls for Thumb1. | |||
1162 | if (Subtarget->isThumb1Only()) { | |||
1163 | setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand); | |||
1164 | setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand); | |||
1165 | setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand); | |||
1166 | } | |||
1167 | ||||
1168 | if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops()) | |||
1169 | setOperationAction(ISD::BITREVERSE, MVT::i32, Legal); | |||
1170 | ||||
1171 | // ARM does not have ROTL. | |||
1172 | setOperationAction(ISD::ROTL, MVT::i32, Expand); | |||
1173 | for (MVT VT : MVT::fixedlen_vector_valuetypes()) { | |||
1174 | setOperationAction(ISD::ROTL, VT, Expand); | |||
1175 | setOperationAction(ISD::ROTR, VT, Expand); | |||
1176 | } | |||
1177 | setOperationAction(ISD::CTTZ, MVT::i32, Custom); | |||
1178 | setOperationAction(ISD::CTPOP, MVT::i32, Expand); | |||
1179 | if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only()) { | |||
1180 | setOperationAction(ISD::CTLZ, MVT::i32, Expand); | |||
1181 | setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, LibCall); | |||
1182 | } | |||
1183 | ||||
1184 | // @llvm.readcyclecounter requires the Performance Monitors extension. | |||
1185 | // Default to the 0 expansion on unsupported platforms. | |||
1186 | // FIXME: Technically there are older ARM CPUs that have | |||
1187 | // implementation-specific ways of obtaining this information. | |||
1188 | if (Subtarget->hasPerfMon()) | |||
1189 | setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom); | |||
1190 | ||||
1191 | // Only ARMv6 has BSWAP. | |||
1192 | if (!Subtarget->hasV6Ops()) | |||
1193 | setOperationAction(ISD::BSWAP, MVT::i32, Expand); | |||
1194 | ||||
1195 | bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode() | |||
1196 | : Subtarget->hasDivideInARMMode(); | |||
1197 | if (!hasDivide) { | |||
1198 | // These are expanded into libcalls if the CPU doesn't have a HW divider. | |||
1199 | setOperationAction(ISD::SDIV, MVT::i32, LibCall); | |||
1200 | setOperationAction(ISD::UDIV, MVT::i32, LibCall); | |||
1201 | } | |||
1202 | ||||
1203 | if (Subtarget->isTargetWindows() && !Subtarget->hasDivideInThumbMode()) { | |||
1204 | setOperationAction(ISD::SDIV, MVT::i32, Custom); | |||
1205 | setOperationAction(ISD::UDIV, MVT::i32, Custom); | |||
1206 | ||||
1207 | setOperationAction(ISD::SDIV, MVT::i64, Custom); | |||
1208 | setOperationAction(ISD::UDIV, MVT::i64, Custom); | |||
1209 | } | |||
1210 | ||||
1211 | setOperationAction(ISD::SREM, MVT::i32, Expand); | |||
1212 | setOperationAction(ISD::UREM, MVT::i32, Expand); | |||
1213 | ||||
1214 | // Register based DivRem for AEABI (RTABI 4.2) | |||
1215 | if (Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() || | |||
1216 | Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI() || | |||
1217 | Subtarget->isTargetWindows()) { | |||
1218 | setOperationAction(ISD::SREM, MVT::i64, Custom); | |||
1219 | setOperationAction(ISD::UREM, MVT::i64, Custom); | |||
1220 | HasStandaloneRem = false; | |||
1221 | ||||
1222 | if (Subtarget->isTargetWindows()) { | |||
1223 | const struct { | |||
1224 | const RTLIB::Libcall Op; | |||
1225 | const char * const Name; | |||
1226 | const CallingConv::ID CC; | |||
1227 | } LibraryCalls[] = { | |||
1228 | { RTLIB::SDIVREM_I8, "__rt_sdiv", CallingConv::ARM_AAPCS }, | |||
1229 | { RTLIB::SDIVREM_I16, "__rt_sdiv", CallingConv::ARM_AAPCS }, | |||
1230 | { RTLIB::SDIVREM_I32, "__rt_sdiv", CallingConv::ARM_AAPCS }, | |||
1231 | { RTLIB::SDIVREM_I64, "__rt_sdiv64", CallingConv::ARM_AAPCS }, | |||
1232 | ||||
1233 | { RTLIB::UDIVREM_I8, "__rt_udiv", CallingConv::ARM_AAPCS }, | |||
1234 | { RTLIB::UDIVREM_I16, "__rt_udiv", CallingConv::ARM_AAPCS }, | |||
1235 | { RTLIB::UDIVREM_I32, "__rt_udiv", CallingConv::ARM_AAPCS }, | |||
1236 | { RTLIB::UDIVREM_I64, "__rt_udiv64", CallingConv::ARM_AAPCS }, | |||
1237 | }; | |||
1238 | ||||
1239 | for (const auto &LC : LibraryCalls) { | |||
1240 | setLibcallName(LC.Op, LC.Name); | |||
1241 | setLibcallCallingConv(LC.Op, LC.CC); | |||
1242 | } | |||
1243 | } else { | |||
1244 | const struct { | |||
1245 | const RTLIB::Libcall Op; | |||
1246 | const char * const Name; | |||
1247 | const CallingConv::ID CC; | |||
1248 | } LibraryCalls[] = { | |||
1249 | { RTLIB::SDIVREM_I8, "__aeabi_idivmod", CallingConv::ARM_AAPCS }, | |||
1250 | { RTLIB::SDIVREM_I16, "__aeabi_idivmod", CallingConv::ARM_AAPCS }, | |||
1251 | { RTLIB::SDIVREM_I32, "__aeabi_idivmod", CallingConv::ARM_AAPCS }, | |||
1252 | { RTLIB::SDIVREM_I64, "__aeabi_ldivmod", CallingConv::ARM_AAPCS }, | |||
1253 | ||||
1254 | { RTLIB::UDIVREM_I8, "__aeabi_uidivmod", CallingConv::ARM_AAPCS }, | |||
1255 | { RTLIB::UDIVREM_I16, "__aeabi_uidivmod", CallingConv::ARM_AAPCS }, | |||
1256 | { RTLIB::UDIVREM_I32, "__aeabi_uidivmod", CallingConv::ARM_AAPCS }, | |||
1257 | { RTLIB::UDIVREM_I64, "__aeabi_uldivmod", CallingConv::ARM_AAPCS }, | |||
1258 | }; | |||
1259 | ||||
1260 | for (const auto &LC : LibraryCalls) { | |||
1261 | setLibcallName(LC.Op, LC.Name); | |||
1262 | setLibcallCallingConv(LC.Op, LC.CC); | |||
1263 | } | |||
1264 | } | |||
1265 | ||||
1266 | setOperationAction(ISD::SDIVREM, MVT::i32, Custom); | |||
1267 | setOperationAction(ISD::UDIVREM, MVT::i32, Custom); | |||
1268 | setOperationAction(ISD::SDIVREM, MVT::i64, Custom); | |||
1269 | setOperationAction(ISD::UDIVREM, MVT::i64, Custom); | |||
1270 | } else { | |||
1271 | setOperationAction(ISD::SDIVREM, MVT::i32, Expand); | |||
1272 | setOperationAction(ISD::UDIVREM, MVT::i32, Expand); | |||
1273 | } | |||
1274 | ||||
1275 | if (Subtarget->getTargetTriple().isOSMSVCRT()) { | |||
1276 | // MSVCRT doesn't have powi; fall back to pow | |||
1277 | setLibcallName(RTLIB::POWI_F32, nullptr); | |||
1278 | setLibcallName(RTLIB::POWI_F64, nullptr); | |||
1279 | } | |||
1280 | ||||
1281 | setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); | |||
1282 | setOperationAction(ISD::ConstantPool, MVT::i32, Custom); | |||
1283 | setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom); | |||
1284 | setOperationAction(ISD::BlockAddress, MVT::i32, Custom); | |||
1285 | ||||
1286 | setOperationAction(ISD::TRAP, MVT::Other, Legal); | |||
1287 | setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal); | |||
1288 | ||||
1289 | // Use the default implementation. | |||
1290 | setOperationAction(ISD::VASTART, MVT::Other, Custom); | |||
1291 | setOperationAction(ISD::VAARG, MVT::Other, Expand); | |||
1292 | setOperationAction(ISD::VACOPY, MVT::Other, Expand); | |||
1293 | setOperationAction(ISD::VAEND, MVT::Other, Expand); | |||
1294 | setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); | |||
1295 | setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); | |||
1296 | ||||
1297 | if (Subtarget->isTargetWindows()) | |||
1298 | setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom); | |||
1299 | else | |||
1300 | setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand); | |||
1301 | ||||
1302 | // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use | |||
1303 | // the default expansion. | |||
1304 | InsertFencesForAtomic = false; | |||
1305 | if (Subtarget->hasAnyDataBarrier() && | |||
1306 | (!Subtarget->isThumb() || Subtarget->hasV8MBaselineOps())) { | |||
1307 | // ATOMIC_FENCE needs custom lowering; the others should have been expanded | |||
1308 | // to ldrex/strex loops already. | |||
1309 | setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom); | |||
1310 | if (!Subtarget->isThumb() || !Subtarget->isMClass()) | |||
1311 | setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom); | |||
1312 | ||||
1313 | // On v8, we have particularly efficient implementations of atomic fences | |||
1314 | // if they can be combined with nearby atomic loads and stores. | |||
1315 | if (!Subtarget->hasAcquireRelease() || | |||
1316 | getTargetMachine().getOptLevel() == 0) { | |||
1317 | // Automatically insert fences (dmb ish) around ATOMIC_SWAP etc. | |||
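// Roughly: a seq_cst atomicrmw then becomes "dmb ish; ldrex/strex loop;
// dmb ish" rather than relying on acquire/release instructions.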
1318 | InsertFencesForAtomic = true; | |||
1319 | } | |||
1320 | } else { | |||
1321 | // If there's anything we can use as a barrier, go through custom lowering | |||
1322 | // for ATOMIC_FENCE. | |||
1323 | // If the target has DMB in Thumb mode, fences can be inserted. | |||
1324 | if (Subtarget->hasDataBarrier()) | |||
1325 | InsertFencesForAtomic = true; | |||
1326 | ||||
1327 | setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, | |||
1328 | Subtarget->hasAnyDataBarrier() ? Custom : Expand); | |||
1329 | ||||
1330 | // Set them all for expansion, which will force libcalls. | |||
1331 | setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Expand); | |||
1332 | setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Expand); | |||
1333 | setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Expand); | |||
1334 | setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Expand); | |||
1335 | setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Expand); | |||
1336 | setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Expand); | |||
1337 | setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Expand); | |||
1338 | setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand); | |||
1339 | setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Expand); | |||
1340 | setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Expand); | |||
1341 | setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand); | |||
1342 | setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand); | |||
1343 | // Mark ATOMIC_LOAD and ATOMIC_STORE custom so we can handle the | |||
1344 | // Unordered/Monotonic case. | |||
1345 | if (!InsertFencesForAtomic) { | |||
1346 | setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom); | |||
1347 | setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom); | |||
1348 | } | |||
1349 | } | |||
1350 | ||||
1351 | // Compute supported atomic widths. | |||
1352 | if (Subtarget->isTargetLinux() || | |||
1353 | (!Subtarget->isMClass() && Subtarget->hasV6Ops())) { | |||
1354 | // For targets where __sync_* routines are reliably available, we use them | |||
1355 | // if necessary. | |||
1356 | // | |||
1357 | // ARM Linux always supports 64-bit atomics through kernel-assisted atomic | |||
1358 | // routines (kernel 3.1 or later). FIXME: Not with compiler-rt? | |||
1359 | // | |||
1360 | // ARMv6 targets have native instructions in ARM mode. For Thumb mode, | |||
1361 | // such targets should provide __sync_* routines, which use the ARM mode | |||
1362 | // instructions. (ARMv6 doesn't have dmb, but it has an equivalent | |||
1363 | // encoding; see ARMISD::MEMBARRIER_MCR.) | |||
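// (Concretely, the v6 barrier is the CP15 operation
// "mcr p15, 0, <Rt>, c7, c10, 5", which v7's dmb replaced.)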
1364 | setMaxAtomicSizeInBitsSupported(64); | |||
1365 | } else if (Subtarget->isMClass() && Subtarget->hasV8MBaselineOps()) { | |||
1366 | // Cortex-M cores (besides Cortex-M0) have 32-bit atomics. | |||
1367 | setMaxAtomicSizeInBitsSupported(32); | |||
1368 | } else { | |||
1369 | // We can't assume anything about other targets; just use libatomic | |||
1370 | // routines. | |||
1371 | setMaxAtomicSizeInBitsSupported(0); | |||
1372 | } | |||
1373 | ||||
1374 | setOperationAction(ISD::PREFETCH, MVT::Other, Custom); | |||
1375 | ||||
1376 | // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes. | |||
1377 | if (!Subtarget->hasV6Ops()) { | |||
1378 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand); | |||
1379 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand); | |||
1380 | } | |||
1381 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); | |||
1382 | ||||
1383 | if (!Subtarget->useSoftFloat() && Subtarget->hasFPRegs() && | |||
1384 | !Subtarget->isThumb1Only()) { | |||
1385 | // Turn f64 -> i64 into VMOVRRD and i64 -> f64 into VMOVDRR, | |||
1386 | // iff the target supports VFP2. | |||
1387 | setOperationAction(ISD::BITCAST, MVT::i64, Custom); | |||
1388 | setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom); | |||
1389 | setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom); | |||
1390 | } | |||
1391 | ||||
1392 | // We want to custom lower some of our intrinsics. | |||
1393 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); | |||
1394 | setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom); | |||
1395 | setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom); | |||
1396 | setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom); | |||
1397 | if (Subtarget->useSjLjEH()) | |||
1398 | setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume"); | |||
1399 | ||||
1400 | setOperationAction(ISD::SETCC, MVT::i32, Expand); | |||
1401 | setOperationAction(ISD::SETCC, MVT::f32, Expand); | |||
1402 | setOperationAction(ISD::SETCC, MVT::f64, Expand); | |||
1403 | setOperationAction(ISD::SELECT, MVT::i32, Custom); | |||
1404 | setOperationAction(ISD::SELECT, MVT::f32, Custom); | |||
1405 | setOperationAction(ISD::SELECT, MVT::f64, Custom); | |||
1406 | setOperationAction(ISD::SELECT_CC, MVT::i32, Custom); | |||
1407 | setOperationAction(ISD::SELECT_CC, MVT::f32, Custom); | |||
1408 | setOperationAction(ISD::SELECT_CC, MVT::f64, Custom); | |||
1409 | if (Subtarget->hasFullFP16()) { | |||
1410 | setOperationAction(ISD::SETCC, MVT::f16, Expand); | |||
1411 | setOperationAction(ISD::SELECT, MVT::f16, Custom); | |||
1412 | setOperationAction(ISD::SELECT_CC, MVT::f16, Custom); | |||
1413 | } | |||
1414 | ||||
1415 | setOperationAction(ISD::SETCCCARRY, MVT::i32, Custom); | |||
1416 | ||||
1417 | setOperationAction(ISD::BRCOND, MVT::Other, Custom); | |||
1418 | setOperationAction(ISD::BR_CC, MVT::i32, Custom); | |||
1419 | if (Subtarget->hasFullFP16()) | |||
1420 | setOperationAction(ISD::BR_CC, MVT::f16, Custom); | |||
1421 | setOperationAction(ISD::BR_CC, MVT::f32, Custom); | |||
1422 | setOperationAction(ISD::BR_CC, MVT::f64, Custom); | |||
1423 | setOperationAction(ISD::BR_JT, MVT::Other, Custom); | |||
1424 | ||||
1425 | // We don't support sin/cos/fmod/copysign/pow | |||
1426 | setOperationAction(ISD::FSIN, MVT::f64, Expand); | |||
1427 | setOperationAction(ISD::FSIN, MVT::f32, Expand); | |||
1428 | setOperationAction(ISD::FCOS, MVT::f32, Expand); | |||
1429 | setOperationAction(ISD::FCOS, MVT::f64, Expand); | |||
1430 | setOperationAction(ISD::FSINCOS, MVT::f64, Expand); | |||
1431 | setOperationAction(ISD::FSINCOS, MVT::f32, Expand); | |||
1432 | setOperationAction(ISD::FREM, MVT::f64, Expand); | |||
1433 | setOperationAction(ISD::FREM, MVT::f32, Expand); | |||
1434 | if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2Base() && | |||
1435 | !Subtarget->isThumb1Only()) { | |||
1436 | setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom); | |||
1437 | setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom); | |||
1438 | } | |||
1439 | setOperationAction(ISD::FPOW, MVT::f64, Expand); | |||
1440 | setOperationAction(ISD::FPOW, MVT::f32, Expand); | |||
1441 | ||||
1442 | if (!Subtarget->hasVFP4Base()) { | |||
1443 | setOperationAction(ISD::FMA, MVT::f64, Expand); | |||
1444 | setOperationAction(ISD::FMA, MVT::f32, Expand); | |||
1445 | } | |||
1446 | ||||
1447 | // Various VFP goodness | |||
1448 | if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only()) { | |||
1449 | // FP-ARMv8 adds f64 <-> f16 conversion. Before that it should be expanded. | |||
1450 | if (!Subtarget->hasFPARMv8Base() || !Subtarget->hasFP64()) { | |||
1451 | setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand); | |||
1452 | setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand); | |||
1453 | } | |||
1454 | ||||
1455 | // fp16 is a special v7 extension that adds f16 <-> f32 conversions. | |||
1456 | if (!Subtarget->hasFP16()) { | |||
1457 | setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand); | |||
1458 | setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand); | |||
1459 | } | |||
1460 | ||||
1461 | // Strict floating-point comparisons need custom lowering. | |||
1462 | setOperationAction(ISD::STRICT_FSETCC, MVT::f16, Custom); | |||
1463 | setOperationAction(ISD::STRICT_FSETCCS, MVT::f16, Custom); | |||
1464 | setOperationAction(ISD::STRICT_FSETCC, MVT::f32, Custom); | |||
1465 | setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Custom); | |||
1466 | setOperationAction(ISD::STRICT_FSETCC, MVT::f64, Custom); | |||
1467 | setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Custom); | |||
1468 | } | |||
1469 | ||||
1470 | // Use __sincos_stret if available. | |||
1471 | if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr && | |||
1472 | getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) { | |||
1473 | setOperationAction(ISD::FSINCOS, MVT::f64, Custom); | |||
1474 | setOperationAction(ISD::FSINCOS, MVT::f32, Custom); | |||
1475 | } | |||
1476 | ||||
1477 | // FP-ARMv8 implements a lot of rounding-like FP operations. | |||
1478 | if (Subtarget->hasFPARMv8Base()) { | |||
1479 | setOperationAction(ISD::FFLOOR, MVT::f32, Legal); | |||
1480 | setOperationAction(ISD::FCEIL, MVT::f32, Legal); | |||
1481 | setOperationAction(ISD::FROUND, MVT::f32, Legal); | |||
1482 | setOperationAction(ISD::FTRUNC, MVT::f32, Legal); | |||
1483 | setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal); | |||
1484 | setOperationAction(ISD::FRINT, MVT::f32, Legal); | |||
1485 | setOperationAction(ISD::FMINNUM, MVT::f32, Legal); | |||
1486 | setOperationAction(ISD::FMAXNUM, MVT::f32, Legal); | |||
1487 | if (Subtarget->hasNEON()) { | |||
1488 | setOperationAction(ISD::FMINNUM, MVT::v2f32, Legal); | |||
1489 | setOperationAction(ISD::FMAXNUM, MVT::v2f32, Legal); | |||
1490 | setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal); | |||
1491 | setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal); | |||
1492 | } | |||
1493 | ||||
1494 | if (Subtarget->hasFP64()) { | |||
1495 | setOperationAction(ISD::FFLOOR, MVT::f64, Legal); | |||
1496 | setOperationAction(ISD::FCEIL, MVT::f64, Legal); | |||
1497 | setOperationAction(ISD::FROUND, MVT::f64, Legal); | |||
1498 | setOperationAction(ISD::FTRUNC, MVT::f64, Legal); | |||
1499 | setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal); | |||
1500 | setOperationAction(ISD::FRINT, MVT::f64, Legal); | |||
1501 | setOperationAction(ISD::FMINNUM, MVT::f64, Legal); | |||
1502 | setOperationAction(ISD::FMAXNUM, MVT::f64, Legal); | |||
1503 | } | |||
1504 | } | |||
1505 | ||||
1506 | // FP16 operations often need to be promoted to f32 to call library functions. | |||
1507 | if (Subtarget->hasFullFP16()) { | |||
1508 | setOperationAction(ISD::FREM, MVT::f16, Promote); | |||
1509 | setOperationAction(ISD::FCOPYSIGN, MVT::f16, Expand); | |||
1510 | setOperationAction(ISD::FSIN, MVT::f16, Promote); | |||
1511 | setOperationAction(ISD::FCOS, MVT::f16, Promote); | |||
1512 | setOperationAction(ISD::FSINCOS, MVT::f16, Promote); | |||
1513 | setOperationAction(ISD::FPOWI, MVT::f16, Promote); | |||
1514 | setOperationAction(ISD::FPOW, MVT::f16, Promote); | |||
1515 | setOperationAction(ISD::FEXP, MVT::f16, Promote); | |||
1516 | setOperationAction(ISD::FEXP2, MVT::f16, Promote); | |||
1517 | setOperationAction(ISD::FLOG, MVT::f16, Promote); | |||
1518 | setOperationAction(ISD::FLOG10, MVT::f16, Promote); | |||
1519 | setOperationAction(ISD::FLOG2, MVT::f16, Promote); | |||
1520 | ||||
1521 | setOperationAction(ISD::FROUND, MVT::f16, Legal); | |||
1522 | } | |||
1523 | ||||
1524 | if (Subtarget->hasNEON()) { | |||
1525 | // vmin and vmax aren't available in a scalar form, so we can use | |||
1526 | // a NEON instruction with an undef lane instead. This has a performance | |||
1527 | // penalty on some cores, so we don't do this unless we have been | |||
1528 | // asked to by the core tuning model. | |||
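// For example, a scalar f32 fminimum can then be selected as a NEON
// VMIN.F32 on a D register whose other lane is undef.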
1529 | if (Subtarget->useNEONForSinglePrecisionFP()) { | |||
1530 | setOperationAction(ISD::FMINIMUM, MVT::f32, Legal); | |||
1531 | setOperationAction(ISD::FMAXIMUM, MVT::f32, Legal); | |||
1532 | setOperationAction(ISD::FMINIMUM, MVT::f16, Legal); | |||
1533 | setOperationAction(ISD::FMAXIMUM, MVT::f16, Legal); | |||
1534 | } | |||
1535 | setOperationAction(ISD::FMINIMUM, MVT::v2f32, Legal); | |||
1536 | setOperationAction(ISD::FMAXIMUM, MVT::v2f32, Legal); | |||
1537 | setOperationAction(ISD::FMINIMUM, MVT::v4f32, Legal); | |||
1538 | setOperationAction(ISD::FMAXIMUM, MVT::v4f32, Legal); | |||
1539 | ||||
1540 | if (Subtarget->hasFullFP16()) { | |||
1541 | setOperationAction(ISD::FMINNUM, MVT::v4f16, Legal); | |||
1542 | setOperationAction(ISD::FMAXNUM, MVT::v4f16, Legal); | |||
1543 | setOperationAction(ISD::FMINNUM, MVT::v8f16, Legal); | |||
1544 | setOperationAction(ISD::FMAXNUM, MVT::v8f16, Legal); | |||
1545 | ||||
1546 | setOperationAction(ISD::FMINIMUM, MVT::v4f16, Legal); | |||
1547 | setOperationAction(ISD::FMAXIMUM, MVT::v4f16, Legal); | |||
1548 | setOperationAction(ISD::FMINIMUM, MVT::v8f16, Legal); | |||
1549 | setOperationAction(ISD::FMAXIMUM, MVT::v8f16, Legal); | |||
1550 | } | |||
1551 | } | |||
1552 | ||||
1553 | // We have target-specific dag combine patterns for the following nodes: | |||
1554 | // ARMISD::VMOVRRD - No need to call setTargetDAGCombine | |||
1555 | setTargetDAGCombine( | |||
1556 | {ISD::ADD, ISD::SUB, ISD::MUL, ISD::AND, ISD::OR, ISD::XOR}); | |||
1557 | ||||
1558 | if (Subtarget->hasMVEIntegerOps()) | |||
1559 | setTargetDAGCombine(ISD::VSELECT); | |||
1560 | ||||
1561 | if (Subtarget->hasV6Ops()) | |||
1562 | setTargetDAGCombine(ISD::SRL); | |||
1563 | if (Subtarget->isThumb1Only()) | |||
1564 | setTargetDAGCombine(ISD::SHL); | |||
1565 | // Attempt to lower smin/smax to ssat/usat | |||
1566 | if ((!Subtarget->isThumb() && Subtarget->hasV6Ops()) || | |||
1567 | Subtarget->isThumb2()) { | |||
1568 | setTargetDAGCombine({ISD::SMIN, ISD::SMAX}); | |||
1569 | } | |||
1570 | ||||
1571 | setStackPointerRegisterToSaveRestore(ARM::SP); | |||
1572 | ||||
1573 | if (Subtarget->useSoftFloat() || Subtarget->isThumb1Only() || | |||
1574 | !Subtarget->hasVFP2Base() || Subtarget->hasMinSize()) | |||
1575 | setSchedulingPreference(Sched::RegPressure); | |||
1576 | else | |||
1577 | setSchedulingPreference(Sched::Hybrid); | |||
1578 | ||||
1579 | //// temporary - rewrite interface to use type | |||
1580 | MaxStoresPerMemset = 8; | |||
1581 | MaxStoresPerMemsetOptSize = 4; | |||
1582 | MaxStoresPerMemcpy = 4; // For @llvm.memcpy -> sequence of stores | |||
1583 | MaxStoresPerMemcpyOptSize = 2; | |||
1584 | MaxStoresPerMemmove = 4; // For @llvm.memmove -> sequence of stores | |||
1585 | MaxStoresPerMemmoveOptSize = 2; | |||
1586 | ||||
1587 | // On ARM arguments smaller than 4 bytes are extended, so all arguments | |||
1588 | // are at least 4 bytes aligned. | |||
1589 | setMinStackArgumentAlignment(Align(4)); | |||
1590 | ||||
1591 | // Prefer likely predicted branches to selects on out-of-order cores. | |||
1592 | PredictableSelectIsExpensive = Subtarget->getSchedModel().isOutOfOrder(); | |||
1593 | ||||
1594 | setPrefLoopAlignment(Align(1ULL << Subtarget->getPrefLoopLogAlignment())); | |||
1595 | ||||
1596 | setMinFunctionAlignment(Subtarget->isThumb() ? Align(2) : Align(4)); | |||
1597 | ||||
1598 | if (Subtarget->isThumb() || Subtarget->isThumb2()) | |||
1599 | setTargetDAGCombine(ISD::ABS); | |||
1600 | } | |||
1601 | ||||
1602 | bool ARMTargetLowering::useSoftFloat() const { | |||
1603 | return Subtarget->useSoftFloat(); | |||
1604 | } | |||
1605 | ||||
1606 | // FIXME: It might make sense to define the representative register class as the | |||
1607 | // nearest super-register that has a non-null superset. For example, DPR_VFP2 is | |||
1608 | // a super-register of SPR, and DPR is a superset of DPR_VFP2. Consequently, | |||
1609 | // SPR's representative would be DPR_VFP2. This should work well if register | |||
1610 | // pressure tracking were modified such that a register use would increment the | |||
1611 | // pressure of the register class's representative and all of its super | |||
1612 | // classes' representatives transitively. We have not implemented this because | |||
1613 | // of the difficulty prior to coalescing of modeling operand register classes | |||
1614 | // due to the common occurrence of cross class copies and subregister insertions | |||
1615 | // and extractions. | |||
1616 | std::pair<const TargetRegisterClass *, uint8_t> | |||
1617 | ARMTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI, | |||
1618 | MVT VT) const { | |||
1619 | const TargetRegisterClass *RRC = nullptr; | |||
1620 | uint8_t Cost = 1; | |||
1621 | switch (VT.SimpleTy) { | |||
1622 | default: | |||
1623 | return TargetLowering::findRepresentativeClass(TRI, VT); | |||
1624 | // Use DPR as representative register class for all floating point | |||
1625 | // and vector types. Since there are 32 SPR registers and 32 DPR registers, | |||
1626 | // the cost is 1 for both f32 and f64. | |||
1627 | case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16: | |||
1628 | case MVT::v2i32: case MVT::v1i64: case MVT::v2f32: | |||
1629 | RRC = &ARM::DPRRegClass; | |||
1630 | // When NEON is used for SP, only half of the register file is available | |||
1631 | // because operations that define both SP and DP results will be constrained | |||
1632 | // to the VFP2 class (D0-D15). We currently model this constraint prior to | |||
1633 | // coalescing by double-counting the SP regs. See the FIXME above. | |||
1634 | if (Subtarget->useNEONForSinglePrecisionFP()) | |||
1635 | Cost = 2; | |||
1636 | break; | |||
1637 | case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64: | |||
1638 | case MVT::v4f32: case MVT::v2f64: | |||
1639 | RRC = &ARM::DPRRegClass; | |||
1640 | Cost = 2; | |||
1641 | break; | |||
1642 | case MVT::v4i64: | |||
1643 | RRC = &ARM::DPRRegClass; | |||
1644 | Cost = 4; | |||
1645 | break; | |||
1646 | case MVT::v8i64: | |||
1647 | RRC = &ARM::DPRRegClass; | |||
1648 | Cost = 8; | |||
1649 | break; | |||
1650 | } | |||
1651 | return std::make_pair(RRC, Cost); | |||
1652 | } | |||
1653 | ||||
1654 | const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const { | |||
1655 | #define MAKE_CASE(V) \ | |||
1656 | case V: \ | |||
1657 | return #V; | |||
1658 | switch ((ARMISD::NodeType)Opcode) { | |||
1659 | case ARMISD::FIRST_NUMBER: | |||
1660 | break; | |||
1661 | MAKE_CASE(ARMISD::Wrapper) | |||
1662 | MAKE_CASE(ARMISD::WrapperPIC) | |||
1663 | MAKE_CASE(ARMISD::WrapperJT) | |||
1664 | MAKE_CASE(ARMISD::COPY_STRUCT_BYVAL) | |||
1665 | MAKE_CASE(ARMISD::CALL) | |||
1666 | MAKE_CASE(ARMISD::CALL_PRED) | |||
1667 | MAKE_CASE(ARMISD::CALL_NOLINK) | |||
1668 | MAKE_CASE(ARMISD::tSECALL) | |||
1669 | MAKE_CASE(ARMISD::t2CALL_BTI) | |||
1670 | MAKE_CASE(ARMISD::BRCOND) | |||
1671 | MAKE_CASE(ARMISD::BR_JT) | |||
1672 | MAKE_CASE(ARMISD::BR2_JT) | |||
1673 | MAKE_CASE(ARMISD::RET_FLAG) | |||
1674 | MAKE_CASE(ARMISD::SERET_FLAG) | |||
1675 | MAKE_CASE(ARMISD::INTRET_FLAG) | |||
1676 | MAKE_CASE(ARMISD::PIC_ADD) | |||
1677 | MAKE_CASE(ARMISD::CMP) | |||
1678 | MAKE_CASE(ARMISD::CMN) | |||
1679 | MAKE_CASE(ARMISD::CMPZ) | |||
1680 | MAKE_CASE(ARMISD::CMPFP) | |||
1681 | MAKE_CASE(ARMISD::CMPFPE) | |||
1682 | MAKE_CASE(ARMISD::CMPFPw0) | |||
1683 | MAKE_CASE(ARMISD::CMPFPEw0) | |||
1684 | MAKE_CASE(ARMISD::BCC_i64) | |||
1685 | MAKE_CASE(ARMISD::FMSTAT) | |||
1686 | MAKE_CASE(ARMISD::CMOV) | |||
1687 | MAKE_CASE(ARMISD::SUBS) | |||
1688 | MAKE_CASE(ARMISD::SSAT) | |||
1689 | MAKE_CASE(ARMISD::USAT) | |||
1690 | MAKE_CASE(ARMISD::ASRL) | |||
1691 | MAKE_CASE(ARMISD::LSRL) | |||
1692 | MAKE_CASE(ARMISD::LSLL) | |||
1693 | MAKE_CASE(ARMISD::SRL_FLAG) | |||
1694 | MAKE_CASE(ARMISD::SRA_FLAG) | |||
1695 | MAKE_CASE(ARMISD::RRX) | |||
1696 | MAKE_CASE(ARMISD::ADDC) | |||
1697 | MAKE_CASE(ARMISD::ADDE) | |||
1698 | MAKE_CASE(ARMISD::SUBC) | |||
1699 | MAKE_CASE(ARMISD::SUBE) | |||
1700 | MAKE_CASE(ARMISD::LSLS) | |||
1701 | MAKE_CASE(ARMISD::VMOVRRD) | |||
1702 | MAKE_CASE(ARMISD::VMOVDRR) | |||
1703 | MAKE_CASE(ARMISD::VMOVhr) | |||
1704 | MAKE_CASE(ARMISD::VMOVrh) | |||
1705 | MAKE_CASE(ARMISD::VMOVSR) | |||
1706 | MAKE_CASE(ARMISD::EH_SJLJ_SETJMP) | |||
1707 | MAKE_CASE(ARMISD::EH_SJLJ_LONGJMP) | |||
1708 | MAKE_CASE(ARMISD::EH_SJLJ_SETUP_DISPATCH) | |||
1709 | MAKE_CASE(ARMISD::TC_RETURN) | |||
1710 | MAKE_CASE(ARMISD::THREAD_POINTER) | |||
1711 | MAKE_CASE(ARMISD::DYN_ALLOC) | |||
1712 | MAKE_CASE(ARMISD::MEMBARRIER_MCR) | |||
1713 | MAKE_CASE(ARMISD::PRELOAD) | |||
1714 | MAKE_CASE(ARMISD::LDRD) | |||
1715 | MAKE_CASE(ARMISD::STRD) | |||
1716 | MAKE_CASE(ARMISD::WIN__CHKSTK) | |||
1717 | MAKE_CASE(ARMISD::WIN__DBZCHK) | |||
1718 | MAKE_CASE(ARMISD::PREDICATE_CAST) | |||
1719 | MAKE_CASE(ARMISD::VECTOR_REG_CAST) | |||
1720 | MAKE_CASE(ARMISD::MVESEXT) | |||
1721 | MAKE_CASE(ARMISD::MVEZEXT) | |||
1722 | MAKE_CASE(ARMISD::MVETRUNC) | |||
1723 | MAKE_CASE(ARMISD::VCMP) | |||
1724 | MAKE_CASE(ARMISD::VCMPZ) | |||
1725 | MAKE_CASE(ARMISD::VTST) | |||
1726 | MAKE_CASE(ARMISD::VSHLs) | |||
1727 | MAKE_CASE(ARMISD::VSHLu) | |||
1728 | MAKE_CASE(ARMISD::VSHLIMM) | |||
1729 | MAKE_CASE(ARMISD::VSHRsIMM) | |||
1730 | MAKE_CASE(ARMISD::VSHRuIMM) | |||
1731 | MAKE_CASE(ARMISD::VRSHRsIMM) | |||
1732 | MAKE_CASE(ARMISD::VRSHRuIMM) | |||
1733 | MAKE_CASE(ARMISD::VRSHRNIMM) | |||
1734 | MAKE_CASE(ARMISD::VQSHLsIMM) | |||
1735 | MAKE_CASE(ARMISD::VQSHLuIMM) | |||
1736 | MAKE_CASE(ARMISD::VQSHLsuIMM) | |||
1737 | MAKE_CASE(ARMISD::VQSHRNsIMM) | |||
1738 | MAKE_CASE(ARMISD::VQSHRNuIMM) | |||
1739 | MAKE_CASE(ARMISD::VQSHRNsuIMM) | |||
1740 | MAKE_CASE(ARMISD::VQRSHRNsIMM) | |||
1741 | MAKE_CASE(ARMISD::VQRSHRNuIMM) | |||
1742 | MAKE_CASE(ARMISD::VQRSHRNsuIMM) | |||
1743 | MAKE_CASE(ARMISD::VSLIIMM) | |||
1744 | MAKE_CASE(ARMISD::VSRIIMM) | |||
1745 | MAKE_CASE(ARMISD::VGETLANEu) | |||
1746 | MAKE_CASE(ARMISD::VGETLANEs) | |||
1747 | MAKE_CASE(ARMISD::VMOVIMM) | |||
1748 | MAKE_CASE(ARMISD::VMVNIMM) | |||
1749 | MAKE_CASE(ARMISD::VMOVFPIMM) | |||
1750 | MAKE_CASE(ARMISD::VDUP) | |||
1751 | MAKE_CASE(ARMISD::VDUPLANE) | |||
1752 | MAKE_CASE(ARMISD::VEXT) | |||
1753 | MAKE_CASE(ARMISD::VREV64) | |||
1754 | MAKE_CASE(ARMISD::VREV32) | |||
1755 | MAKE_CASE(ARMISD::VREV16) | |||
1756 | MAKE_CASE(ARMISD::VZIP) | |||
1757 | MAKE_CASE(ARMISD::VUZP) | |||
1758 | MAKE_CASE(ARMISD::VTRN) | |||
1759 | MAKE_CASE(ARMISD::VTBL1) | |||
1760 | MAKE_CASE(ARMISD::VTBL2) | |||
1761 | MAKE_CASE(ARMISD::VMOVN) | |||
1762 | MAKE_CASE(ARMISD::VQMOVNs) | |||
1763 | MAKE_CASE(ARMISD::VQMOVNu) | |||
1764 | MAKE_CASE(ARMISD::VCVTN) | |||
1765 | MAKE_CASE(ARMISD::VCVTL) | |||
1766 | MAKE_CASE(ARMISD::VIDUP) | |||
1767 | MAKE_CASE(ARMISD::VMULLs) | |||
1768 | MAKE_CASE(ARMISD::VMULLu) | |||
1769 | MAKE_CASE(ARMISD::VQDMULH) | |||
1770 | MAKE_CASE(ARMISD::VADDVs) | |||
1771 | MAKE_CASE(ARMISD::VADDVu) | |||
1772 | MAKE_CASE(ARMISD::VADDVps) | |||
1773 | MAKE_CASE(ARMISD::VADDVpu) | |||
1774 | MAKE_CASE(ARMISD::VADDLVs) | |||
1775 | MAKE_CASE(ARMISD::VADDLVu) | |||
1776 | MAKE_CASE(ARMISD::VADDLVAs) | |||
1777 | MAKE_CASE(ARMISD::VADDLVAu) | |||
1778 | MAKE_CASE(ARMISD::VADDLVps) | |||
1779 | MAKE_CASE(ARMISD::VADDLVpu) | |||
1780 | MAKE_CASE(ARMISD::VADDLVAps) | |||
1781 | MAKE_CASE(ARMISD::VADDLVApu) | |||
1782 | MAKE_CASE(ARMISD::VMLAVs) | |||
1783 | MAKE_CASE(ARMISD::VMLAVu) | |||
1784 | MAKE_CASE(ARMISD::VMLAVps) | |||
1785 | MAKE_CASE(ARMISD::VMLAVpu) | |||
1786 | MAKE_CASE(ARMISD::VMLALVs) | |||
1787 | MAKE_CASE(ARMISD::VMLALVu) | |||
1788 | MAKE_CASE(ARMISD::VMLALVps) | |||
1789 | MAKE_CASE(ARMISD::VMLALVpu) | |||
1790 | MAKE_CASE(ARMISD::VMLALVAs) | |||
1791 | MAKE_CASE(ARMISD::VMLALVAu) | |||
1792 | MAKE_CASE(ARMISD::VMLALVAps) | |||
1793 | MAKE_CASE(ARMISD::VMLALVApu) | |||
1794 | MAKE_CASE(ARMISD::VMINVu) | |||
1795 | MAKE_CASE(ARMISD::VMINVs) | |||
1796 | MAKE_CASE(ARMISD::VMAXVu) | |||
1797 | MAKE_CASE(ARMISD::VMAXVs) | |||
1798 | MAKE_CASE(ARMISD::UMAAL) | |||
1799 | MAKE_CASE(ARMISD::UMLAL) | |||
1800 | MAKE_CASE(ARMISD::SMLAL) | |||
1801 | MAKE_CASE(ARMISD::SMLALBB) | |||
1802 | MAKE_CASE(ARMISD::SMLALBT) | |||
1803 | MAKE_CASE(ARMISD::SMLALTB) | |||
1804 | MAKE_CASE(ARMISD::SMLALTT) | |||
1805 | MAKE_CASE(ARMISD::SMULWB) | |||
1806 | MAKE_CASE(ARMISD::SMULWT) | |||
1807 | MAKE_CASE(ARMISD::SMLALD) | |||
1808 | MAKE_CASE(ARMISD::SMLALDX) | |||
1809 | MAKE_CASE(ARMISD::SMLSLD) | |||
1810 | MAKE_CASE(ARMISD::SMLSLDX) | |||
1811 | MAKE_CASE(ARMISD::SMMLAR) | |||
1812 | MAKE_CASE(ARMISD::SMMLSR) | |||
1813 | MAKE_CASE(ARMISD::QADD16b) | |||
1814 | MAKE_CASE(ARMISD::QSUB16b) | |||
1815 | MAKE_CASE(ARMISD::QADD8b) | |||
1816 | MAKE_CASE(ARMISD::QSUB8b) | |||
1817 | MAKE_CASE(ARMISD::UQADD16b) | |||
1818 | MAKE_CASE(ARMISD::UQSUB16b) | |||
1819 | MAKE_CASE(ARMISD::UQADD8b) | |||
1820 | MAKE_CASE(ARMISD::UQSUB8b) | |||
1821 | MAKE_CASE(ARMISD::BUILD_VECTOR) | |||
1822 | MAKE_CASE(ARMISD::BFI) | |||
1823 | MAKE_CASE(ARMISD::VORRIMM) | |||
1824 | MAKE_CASE(ARMISD::VBICIMM) | |||
1825 | MAKE_CASE(ARMISD::VBSP) | |||
1826 | MAKE_CASE(ARMISD::MEMCPY) | |||
1827 | MAKE_CASE(ARMISD::VLD1DUP) | |||
1828 | MAKE_CASE(ARMISD::VLD2DUP) | |||
1829 | MAKE_CASE(ARMISD::VLD3DUP) | |||
1830 | MAKE_CASE(ARMISD::VLD4DUP) | |||
1831 | MAKE_CASE(ARMISD::VLD1_UPD) | |||
1832 | MAKE_CASE(ARMISD::VLD2_UPD) | |||
1833 | MAKE_CASE(ARMISD::VLD3_UPD) | |||
1834 | MAKE_CASE(ARMISD::VLD4_UPD) | |||
1835 | MAKE_CASE(ARMISD::VLD1x2_UPD) | |||
1836 | MAKE_CASE(ARMISD::VLD1x3_UPD) | |||
1837 | MAKE_CASE(ARMISD::VLD1x4_UPD) | |||
1838 | MAKE_CASE(ARMISD::VLD2LN_UPD) | |||
1839 | MAKE_CASE(ARMISD::VLD3LN_UPD) | |||
1840 | MAKE_CASE(ARMISD::VLD4LN_UPD) | |||
1841 | MAKE_CASE(ARMISD::VLD1DUP_UPD) | |||
1842 | MAKE_CASE(ARMISD::VLD2DUP_UPD) | |||
1843 | MAKE_CASE(ARMISD::VLD3DUP_UPD) | |||
1844 | MAKE_CASE(ARMISD::VLD4DUP_UPD) | |||
1845 | MAKE_CASE(ARMISD::VST1_UPD) | |||
1846 | MAKE_CASE(ARMISD::VST2_UPD) | |||
1847 | MAKE_CASE(ARMISD::VST3_UPD) | |||
1848 | MAKE_CASE(ARMISD::VST4_UPD) | |||
1849 | MAKE_CASE(ARMISD::VST1x2_UPD) | |||
1850 | MAKE_CASE(ARMISD::VST1x3_UPD) | |||
1851 | MAKE_CASE(ARMISD::VST1x4_UPD) | |||
1852 | MAKE_CASE(ARMISD::VST2LN_UPD) | |||
1853 | MAKE_CASE(ARMISD::VST3LN_UPD) | |||
1854 | MAKE_CASE(ARMISD::VST4LN_UPD) | |||
1855 | MAKE_CASE(ARMISD::WLS) | |||
1856 | MAKE_CASE(ARMISD::WLSSETUP) | |||
1857 | MAKE_CASE(ARMISD::LE) | |||
1858 | MAKE_CASE(ARMISD::LOOP_DEC) | |||
1859 | MAKE_CASE(ARMISD::CSINV) | |||
1860 | MAKE_CASE(ARMISD::CSNEG) | |||
1861 | MAKE_CASE(ARMISD::CSINC) | |||
1862 | MAKE_CASE(ARMISD::MEMCPYLOOP) | |||
1863 | MAKE_CASE(ARMISD::MEMSETLOOP) | |||
1864 | #undef MAKE_CASE | |||
1865 | } | |||
1866 | return nullptr; | |||
1867 | } | |||
1868 | ||||
1869 | EVT ARMTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &, | |||
1870 | EVT VT) const { | |||
1871 | if (!VT.isVector()) | |||
1872 | return getPointerTy(DL); | |||
1873 | ||||
1874 | // MVE has a predicate register. | |||
1875 | if ((Subtarget->hasMVEIntegerOps() && | |||
1876 | (VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 || | |||
1877 | VT == MVT::v16i8)) || | |||
1878 | (Subtarget->hasMVEFloatOps() && | |||
1879 | (VT == MVT::v2f64 || VT == MVT::v4f32 || VT == MVT::v8f16))) | |||
1880 | return MVT::getVectorVT(MVT::i1, VT.getVectorElementCount()); | |||
1881 | return VT.changeVectorElementTypeToInteger(); | |||
1882 | } | |||
1883 | ||||
1884 | /// getRegClassFor - Return the register class that should be used for the | |||
1885 | /// specified value type. | |||
1886 | const TargetRegisterClass * | |||
1887 | ARMTargetLowering::getRegClassFor(MVT VT, bool isDivergent) const { | |||
1888 | (void)isDivergent; | |||
1889 | // Map v4i64 to QQ registers but do not make the type legal. Similarly map | |||
1890 | // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to | |||
1891 | // load / store 4 to 8 consecutive NEON D registers, or 2 to 4 consecutive | |||
1892 | // MVE Q registers. | |||
1893 | if (Subtarget->hasNEON()) { | |||
1894 | if (VT == MVT::v4i64) | |||
1895 | return &ARM::QQPRRegClass; | |||
1896 | if (VT == MVT::v8i64) | |||
1897 | return &ARM::QQQQPRRegClass; | |||
1898 | } | |||
1899 | if (Subtarget->hasMVEIntegerOps()) { | |||
1900 | if (VT == MVT::v4i64) | |||
1901 | return &ARM::MQQPRRegClass; | |||
1902 | if (VT == MVT::v8i64) | |||
1903 | return &ARM::MQQQQPRRegClass; | |||
1904 | } | |||
1905 | return TargetLowering::getRegClassFor(VT); | |||
1906 | } | |||
1907 | ||||
1908 | // memcpy and other memory intrinsics typically try to use LDM/STM if the | |||
1909 | // source/dest is aligned and the copy size is large enough. We therefore want | |||
1910 | // to align such objects passed to memory intrinsics. | |||
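// For example, raising a memcpy source/dest from 4- to 8-byte alignment
// can let the copy use the faster 8-byte-aligned LDM/STM sequences.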
1911 | bool ARMTargetLowering::shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize, | |||
1912 | unsigned &PrefAlign) const { | |||
1913 | if (!isa<MemIntrinsic>(CI)) | |||
1914 | return false; | |||
1915 | MinSize = 8; | |||
1916 | // On ARM11 onwards (excluding M class) 8-byte aligned LDM is typically 1 | |||
1917 | // cycle faster than 4-byte aligned LDM. | |||
1918 | PrefAlign = (Subtarget->hasV6Ops() && !Subtarget->isMClass() ? 8 : 4); | |||
1919 | return true; | |||
1920 | } | |||
1921 | ||||
1922 | // Create a fast isel object. | |||
1923 | FastISel * | |||
1924 | ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo, | |||
1925 | const TargetLibraryInfo *libInfo) const { | |||
1926 | return ARM::createFastISel(funcInfo, libInfo); | |||
1927 | } | |||
1928 | ||||
1929 | Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const { | |||
1930 | unsigned NumVals = N->getNumValues(); | |||
1931 | if (!NumVals) | |||
1932 | return Sched::RegPressure; | |||
1933 | ||||
1934 | for (unsigned i = 0; i != NumVals; ++i) { | |||
1935 | EVT VT = N->getValueType(i); | |||
1936 | if (VT == MVT::Glue || VT == MVT::Other) | |||
1937 | continue; | |||
1938 | if (VT.isFloatingPoint() || VT.isVector()) | |||
1939 | return Sched::ILP; | |||
1940 | } | |||
1941 | ||||
1942 | if (!N->isMachineOpcode()) | |||
1943 | return Sched::RegPressure; | |||
1944 | ||||
1945 | // Loads are scheduled for latency even if the instruction itinerary | |||
1946 | // is not available. | |||
1947 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); | |||
1948 | const MCInstrDesc &MCID = TII->get(N->getMachineOpcode()); | |||
1949 | ||||
1950 | if (MCID.getNumDefs() == 0) | |||
1951 | return Sched::RegPressure; | |||
1952 | if (!Itins->isEmpty() && | |||
1953 | Itins->getOperandCycle(MCID.getSchedClass(), 0) > 2) | |||
1954 | return Sched::ILP; | |||
1955 | ||||
1956 | return Sched::RegPressure; | |||
1957 | } | |||
1958 | ||||
1959 | //===----------------------------------------------------------------------===// | |||
1960 | // Lowering Code | |||
1961 | //===----------------------------------------------------------------------===// | |||
1962 | ||||
1963 | static bool isSRL16(const SDValue &Op) { | |||
1964 | if (Op.getOpcode() != ISD::SRL) | |||
1965 | return false; | |||
1966 | if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1))) | |||
1967 | return Const->getZExtValue() == 16; | |||
1968 | return false; | |||
1969 | } | |||
1970 | ||||
1971 | static bool isSRA16(const SDValue &Op) { | |||
1972 | if (Op.getOpcode() != ISD::SRA) | |||
1973 | return false; | |||
1974 | if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1))) | |||
1975 | return Const->getZExtValue() == 16; | |||
1976 | return false; | |||
1977 | } | |||
1978 | ||||
1979 | static bool isSHL16(const SDValue &Op) { | |||
1980 | if (Op.getOpcode() != ISD::SHL) | |||
1981 | return false; | |||
1982 | if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1))) | |||
1983 | return Const->getZExtValue() == 16; | |||
1984 | return false; | |||
1985 | } | |||
1986 | ||||
1987 | // Check for a signed 16-bit value. We special-case SRA because it is simpler | |||
1988 | // when also looking for SRAs that aren't sign-extending a smaller value. | |||
1989 | // Without the check, we'd need to take extra care with checking order for | |||
1990 | // some operations. | |||
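// (ComputeNumSignBits(Op) == 17 on an i32 means the top 17 bits are all
// copies of bit 15, i.e. the value is a sign-extended 16-bit quantity.)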
1991 | static bool isS16(const SDValue &Op, SelectionDAG &DAG) { | |||
1992 | if (isSRA16(Op)) | |||
1993 | return isSHL16(Op.getOperand(0)); | |||
1994 | return DAG.ComputeNumSignBits(Op) == 17; | |||
1995 | } | |||
1996 | ||||
1997 | /// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC | |||
1998 | static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) { | |||
1999 | switch (CC) { | |||
2000 | default: llvm_unreachable("Unknown condition code!"); | |||
2001 | case ISD::SETNE: return ARMCC::NE; | |||
2002 | case ISD::SETEQ: return ARMCC::EQ; | |||
2003 | case ISD::SETGT: return ARMCC::GT; | |||
2004 | case ISD::SETGE: return ARMCC::GE; | |||
2005 | case ISD::SETLT: return ARMCC::LT; | |||
2006 | case ISD::SETLE: return ARMCC::LE; | |||
2007 | case ISD::SETUGT: return ARMCC::HI; | |||
2008 | case ISD::SETUGE: return ARMCC::HS; | |||
2009 | case ISD::SETULT: return ARMCC::LO; | |||
2010 | case ISD::SETULE: return ARMCC::LS; | |||
2011 | } | |||
2012 | } | |||
2013 | ||||
2014 | /// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC. | |||
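/// Some FP conditions need two ARM checks, returned as a disjunction via
/// CondCode2 (e.g. SETONE maps to "MI or GT"); CondCode2 is left as AL
/// when a single condition suffices.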
2015 | static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode, | |||
2016 | ARMCC::CondCodes &CondCode2) { | |||
2017 | CondCode2 = ARMCC::AL; | |||
2018 | switch (CC) { | |||
2019 | default: llvm_unreachable("Unknown FP condition!"); | |||
2020 | case ISD::SETEQ: | |||
2021 | case ISD::SETOEQ: CondCode = ARMCC::EQ; break; | |||
2022 | case ISD::SETGT: | |||
2023 | case ISD::SETOGT: CondCode = ARMCC::GT; break; | |||
2024 | case ISD::SETGE: | |||
2025 | case ISD::SETOGE: CondCode = ARMCC::GE; break; | |||
2026 | case ISD::SETOLT: CondCode = ARMCC::MI; break; | |||
2027 | case ISD::SETOLE: CondCode = ARMCC::LS; break; | |||
2028 | case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break; | |||
2029 | case ISD::SETO: CondCode = ARMCC::VC; break; | |||
2030 | case ISD::SETUO: CondCode = ARMCC::VS; break; | |||
2031 | case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break; | |||
2032 | case ISD::SETUGT: CondCode = ARMCC::HI; break; | |||
2033 | case ISD::SETUGE: CondCode = ARMCC::PL; break; | |||
2034 | case ISD::SETLT: | |||
2035 | case ISD::SETULT: CondCode = ARMCC::LT; break; | |||
2036 | case ISD::SETLE: | |||
2037 | case ISD::SETULE: CondCode = ARMCC::LE; break; | |||
2038 | case ISD::SETNE: | |||
2039 | case ISD::SETUNE: CondCode = ARMCC::NE; break; | |||
2040 | } | |||
2041 | } | |||
2042 | ||||
2043 | //===----------------------------------------------------------------------===// | |||
2044 | // Calling Convention Implementation | |||
2045 | //===----------------------------------------------------------------------===// | |||
2046 | ||||
2047 | /// getEffectiveCallingConv - Get the effective calling convention, taking into | |||
2048 | /// account presence of floating point hardware and calling convention | |||
2049 | /// limitations, such as support for variadic functions. | |||
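/// For example, a plain C call on an AAPCS target with a VFP2 base FPU and
/// a hard float ABI is treated as ARM_AAPCS_VFP, but falls back to
/// ARM_AAPCS when the callee is variadic (see the CallingConv::C case).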
2050 | CallingConv::ID | |||
2051 | ARMTargetLowering::getEffectiveCallingConv(CallingConv::ID CC, | |||
2052 | bool isVarArg) const { | |||
2053 | switch (CC) { | |||
2054 | default: | |||
2055 | report_fatal_error("Unsupported calling convention"); | |||
2056 | case CallingConv::ARM_AAPCS: | |||
2057 | case CallingConv::ARM_APCS: | |||
2058 | case CallingConv::GHC: | |||
2059 | case CallingConv::CFGuard_Check: | |||
2060 | return CC; | |||
2061 | case CallingConv::PreserveMost: | |||
2062 | return CallingConv::PreserveMost; | |||
2063 | case CallingConv::ARM_AAPCS_VFP: | |||
2064 | case CallingConv::Swift: | |||
2065 | case CallingConv::SwiftTail: | |||
2066 | return isVarArg ? CallingConv::ARM_AAPCS : CallingConv::ARM_AAPCS_VFP; | |||
2067 | case CallingConv::C: | |||
2068 | case CallingConv::Tail: | |||
2069 | if (!Subtarget->isAAPCS_ABI()) | |||
2070 | return CallingConv::ARM_APCS; | |||
2071 | else if (Subtarget->hasVFP2Base() && !Subtarget->isThumb1Only() && | |||
2072 | getTargetMachine().Options.FloatABIType == FloatABI::Hard && | |||
2073 | !isVarArg) | |||
2074 | return CallingConv::ARM_AAPCS_VFP; | |||
2075 | else | |||
2076 | return CallingConv::ARM_AAPCS; | |||
2077 | case CallingConv::Fast: | |||
2078 | case CallingConv::CXX_FAST_TLS: | |||
2079 | if (!Subtarget->isAAPCS_ABI()) { | |||
2080 | if (Subtarget->hasVFP2Base() && !Subtarget->isThumb1Only() && !isVarArg) | |||
2081 | return CallingConv::Fast; | |||
2082 | return CallingConv::ARM_APCS; | |||
2083 | } else if (Subtarget->hasVFP2Base() && | |||
2084 | !Subtarget->isThumb1Only() && !isVarArg) | |||
2085 | return CallingConv::ARM_AAPCS_VFP; | |||
2086 | else | |||
2087 | return CallingConv::ARM_AAPCS; | |||
2088 | } | |||
2089 | } | |||
2090 | ||||
2091 | CCAssignFn *ARMTargetLowering::CCAssignFnForCall(CallingConv::ID CC, | |||
2092 | bool isVarArg) const { | |||
2093 | return CCAssignFnForNode(CC, false, isVarArg); | |||
2094 | } | |||
2095 | ||||
2096 | CCAssignFn *ARMTargetLowering::CCAssignFnForReturn(CallingConv::ID CC, | |||
2097 | bool isVarArg) const { | |||
2098 | return CCAssignFnForNode(CC, true, isVarArg); | |||
2099 | } | |||
2100 | ||||
2101 | /// CCAssignFnForNode - Selects the correct CCAssignFn for the given | |||
2102 | /// CallingConvention. | |||
2103 | CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC, | |||
2104 | bool Return, | |||
2105 | bool isVarArg) const { | |||
2106 | switch (getEffectiveCallingConv(CC, isVarArg)) { | |||
2107 | default: | |||
2108 | report_fatal_error("Unsupported calling convention"); | |||
2109 | case CallingConv::ARM_APCS: | |||
2110 | return (Return ? RetCC_ARM_APCS : CC_ARM_APCS); | |||
2111 | case CallingConv::ARM_AAPCS: | |||
2112 | return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS); | |||
2113 | case CallingConv::ARM_AAPCS_VFP: | |||
2114 | return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP); | |||
2115 | case CallingConv::Fast: | |||
2116 | return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS); | |||
2117 | case CallingConv::GHC: | |||
2118 | return (Return ? RetCC_ARM_APCS : CC_ARM_APCS_GHC); | |||
2119 | case CallingConv::PreserveMost: | |||
2120 | return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS); | |||
2121 | case CallingConv::CFGuard_Check: | |||
2122 | return (Return ? RetCC_ARM_AAPCS : CC_ARM_Win32_CFGuard_Check); | |||
2123 | } | |||
2124 | } | |||
2125 | ||||
2126 | SDValue ARMTargetLowering::MoveToHPR(const SDLoc &dl, SelectionDAG &DAG, | |||
2127 | MVT LocVT, MVT ValVT, SDValue Val) const { | |||
2128 | Val = DAG.getNode(ISD::BITCAST, dl, MVT::getIntegerVT(LocVT.getSizeInBits()), | |||
2129 | Val); | |||
2130 | if (Subtarget->hasFullFP16()) { | |||
2131 | Val = DAG.getNode(ARMISD::VMOVhr, dl, ValVT, Val); | |||
2132 | } else { | |||
2133 | Val = DAG.getNode(ISD::TRUNCATE, dl, | |||
2134 | MVT::getIntegerVT(ValVT.getSizeInBits()), Val); | |||
2135 | Val = DAG.getNode(ISD::BITCAST, dl, ValVT, Val); | |||
2136 | } | |||
2137 | return Val; | |||
2138 | } | |||
2139 | ||||
2140 | SDValue ARMTargetLowering::MoveFromHPR(const SDLoc &dl, SelectionDAG &DAG, | |||
2141 | MVT LocVT, MVT ValVT, | |||
2142 | SDValue Val) const { | |||
2143 | if (Subtarget->hasFullFP16()) { | |||
2144 | Val = DAG.getNode(ARMISD::VMOVrh, dl, | |||
2145 | MVT::getIntegerVT(LocVT.getSizeInBits()), Val); | |||
2146 | } else { | |||
2147 | Val = DAG.getNode(ISD::BITCAST, dl, | |||
2148 | MVT::getIntegerVT(ValVT.getSizeInBits()), Val); | |||
2149 | Val = DAG.getNode(ISD::ZERO_EXTEND, dl, | |||
2150 | MVT::getIntegerVT(LocVT.getSizeInBits()), Val); | |||
2151 | } | |||
2152 | return DAG.getNode(ISD::BITCAST, dl, LocVT, Val); | |||
2153 | } | |||
2154 | ||||
2155 | /// LowerCallResult - Lower the result values of a call into the | |||
2156 | /// appropriate copies out of appropriate physical registers. | |||
2157 | SDValue ARMTargetLowering::LowerCallResult( | |||
2158 | SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, | |||
2159 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, | |||
2160 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn, | |||
2161 | SDValue ThisVal) const { | |||
2162 | // Assign locations to each value returned by this call. | |||
2163 | SmallVector<CCValAssign, 16> RVLocs; | |||
2164 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, | |||
2165 | *DAG.getContext()); | |||
2166 | CCInfo.AnalyzeCallResult(Ins, CCAssignFnForReturn(CallConv, isVarArg)); | |||
2167 | ||||
2168 | // Copy all of the result registers out of their specified physreg. | |||
2169 | for (unsigned i = 0; i != RVLocs.size(); ++i) { | |||
2170 | CCValAssign VA = RVLocs[i]; | |||
2171 | ||||
2172 | // Pass 'this' value directly from the argument to return value, to avoid | |||
2173 | // reg unit interference | |||
2174 | if (i == 0 && isThisReturn) { | |||
2175 | assert(!VA.needsCustom() && VA.getLocVT() == MVT::i32 && | |||
2176 | "unexpected return calling convention register assignment"); | |||
2177 | InVals.push_back(ThisVal); | |||
2178 | continue; | |||
2179 | } | |||
2180 | ||||
2181 | SDValue Val; | |||
2182 | if (VA.needsCustom() && | |||
2183 | (VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2f64)) { | |||
2184 | // Handle f64 or half of a v2f64. | |||
2185 | SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, | |||
2186 | InFlag); | |||
2187 | Chain = Lo.getValue(1); | |||
2188 | InFlag = Lo.getValue(2); | |||
2189 | VA = RVLocs[++i]; // skip ahead to next loc | |||
2190 | SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, | |||
2191 | InFlag); | |||
2192 | Chain = Hi.getValue(1); | |||
2193 | InFlag = Hi.getValue(2); | |||
2194 | if (!Subtarget->isLittle()) | |||
2195 | std::swap (Lo, Hi); | |||
2196 | Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi); | |||
2197 | ||||
2198 | if (VA.getLocVT() == MVT::v2f64) { | |||
2199 | SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64); | |||
2200 | Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val, | |||
2201 | DAG.getConstant(0, dl, MVT::i32)); | |||
2202 | ||||
2203 | VA = RVLocs[++i]; // skip ahead to next loc | |||
2204 | Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag); | |||
2205 | Chain = Lo.getValue(1); | |||
2206 | InFlag = Lo.getValue(2); | |||
2207 | VA = RVLocs[++i]; // skip ahead to next loc | |||
2208 | Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag); | |||
2209 | Chain = Hi.getValue(1); | |||
2210 | InFlag = Hi.getValue(2); | |||
2211 | if (!Subtarget->isLittle()) | |||
2212 | std::swap (Lo, Hi); | |||
2213 | Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi); | |||
2214 | Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val, | |||
2215 | DAG.getConstant(1, dl, MVT::i32)); | |||
2216 | } | |||
2217 | } else { | |||
2218 | Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(), | |||
2219 | InFlag); | |||
2220 | Chain = Val.getValue(1); | |||
2221 | InFlag = Val.getValue(2); | |||
2222 | } | |||
2223 | ||||
2224 | switch (VA.getLocInfo()) { | |||
2225 | default: llvm_unreachable("Unknown loc info!"); | |||
2226 | case CCValAssign::Full: break; | |||
2227 | case CCValAssign::BCvt: | |||
2228 | Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val); | |||
2229 | break; | |||
2230 | } | |||
2231 | ||||
2232 | // f16 arguments have their size extended to 4 bytes and passed as if they | |||
2233 | // had been copied to the LSBs of a 32-bit register. | |||
2234 | // For that, it's passed extended to i32 (soft ABI) or to f32 (hard ABI) | |||
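// E.g. (illustrative) a soft-ABI f16 result arrives in the low 16 bits of
// r0, and MoveToHPR recovers the half-precision value from those bits.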
2235 | if (VA.needsCustom() && | |||
2236 | (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16)) | |||
2237 | Val = MoveToHPR(dl, DAG, VA.getLocVT(), VA.getValVT(), Val); | |||
2238 | ||||
2239 | InVals.push_back(Val); | |||
2240 | } | |||
2241 | ||||
2242 | return Chain; | |||
2243 | } | |||
2244 | ||||
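/// computeAddrForCallArg - Compute the address and pointer info of an
/// outgoing stack argument slot. For tail calls the slot lives in the
/// caller's own incoming-argument area (a fixed FrameIndex, offset by
/// SPDiff); for normal calls it is an SP-relative offset into the
/// outgoing-argument area.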
2245 | std::pair<SDValue, MachinePointerInfo> ARMTargetLowering::computeAddrForCallArg( | |||
2246 | const SDLoc &dl, SelectionDAG &DAG, const CCValAssign &VA, SDValue StackPtr, | |||
2247 | bool IsTailCall, int SPDiff) const { | |||
2248 | SDValue DstAddr; | |||
2249 | MachinePointerInfo DstInfo; | |||
2250 | int32_t Offset = VA.getLocMemOffset(); | |||
2251 | MachineFunction &MF = DAG.getMachineFunction(); | |||
2252 | ||||
2253 | if (IsTailCall) { | |||
2254 | Offset += SPDiff; | |||
2255 | auto PtrVT = getPointerTy(DAG.getDataLayout()); | |||
2256 | int Size = VA.getLocVT().getFixedSizeInBits() / 8; | |||
2257 | int FI = MF.getFrameInfo().CreateFixedObject(Size, Offset, true); | |||
2258 | DstAddr = DAG.getFrameIndex(FI, PtrVT); | |||
2259 | DstInfo = | |||
2260 | MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI); | |||
2261 | } else { | |||
2262 | SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl); | |||
2263 | DstAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()), | |||
2264 | StackPtr, PtrOff); | |||
2265 | DstInfo = | |||
2266 | MachinePointerInfo::getStack(DAG.getMachineFunction(), Offset); | |||
2267 | } | |||
2268 | ||||
2269 | return std::make_pair(DstAddr, DstInfo); | |||
2270 | } | |||
2271 | ||||
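/// PassF64ArgInRegs - Split an f64 value (or one half of a v2f64) into a
/// pair of i32 halves with VMOVRRD. The half order follows endianness; the
/// second half goes into the next register if one is available, otherwise
/// into the stack slot computed by computeAddrForCallArg.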
2272 | void ARMTargetLowering::PassF64ArgInRegs(const SDLoc &dl, SelectionDAG &DAG, | |||
2273 | SDValue Chain, SDValue &Arg, | |||
2274 | RegsToPassVector &RegsToPass, | |||
2275 | CCValAssign &VA, CCValAssign &NextVA, | |||
2276 | SDValue &StackPtr, | |||
2277 | SmallVectorImpl<SDValue> &MemOpChains, | |||
2278 | bool IsTailCall, | |||
2279 | int SPDiff) const { | |||
2280 | SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl, | |||
2281 | DAG.getVTList(MVT::i32, MVT::i32), Arg); | |||
2282 | unsigned id = Subtarget->isLittle() ? 0 : 1; | |||
2283 | RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd.getValue(id))); | |||
2284 | ||||
2285 | if (NextVA.isRegLoc()) | |||
2286 | RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1 - id))); | |||
2287 | else { | |||
2288 | assert(NextVA.isMemLoc()); | |||
2289 | if (!StackPtr.getNode()) | |||
2290 | StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, | |||
2291 | getPointerTy(DAG.getDataLayout())); | |||
2292 | ||||
2293 | SDValue DstAddr; | |||
2294 | MachinePointerInfo DstInfo; | |||
2295 | std::tie(DstAddr, DstInfo) = | |||
2296 | computeAddrForCallArg(dl, DAG, NextVA, StackPtr, IsTailCall, SPDiff); | |||
2297 | MemOpChains.push_back( | |||
2298 | DAG.getStore(Chain, dl, fmrrd.getValue(1 - id), DstAddr, DstInfo)); | |||
2299 | } | |||
2300 | } | |||
2301 | ||||
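// A tail call is guaranteed for fastcc only when GuaranteedTailCallOpt is
// in effect; the tailcc and swifttailcc conventions always guarantee it.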
2302 | static bool canGuaranteeTCO(CallingConv::ID CC, bool GuaranteeTailCalls) { | |||
2303 | return (CC == CallingConv::Fast && GuaranteeTailCalls) || | |||
2304 | CC == CallingConv::Tail || CC == CallingConv::SwiftTail; | |||
2305 | } | |||
2306 | ||||
2307 | /// LowerCall - Lowering a call into a callseq_start <- | |||
2308 | /// ARMISD:CALL <- callseq_end chain. Also add input and output parameter | |||
2309 | /// nodes. | |||
2310 | SDValue | |||
2311 | ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, | |||
2312 | SmallVectorImpl<SDValue> &InVals) const { | |||
2313 | SelectionDAG &DAG = CLI.DAG; | |||
2314 | SDLoc &dl = CLI.DL; | |||
2315 | SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; | |||
2316 | SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; | |||
2317 | SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; | |||
2318 | SDValue Chain = CLI.Chain; | |||
2319 | SDValue Callee = CLI.Callee; | |||
2320 | bool &isTailCall = CLI.IsTailCall; | |||
2321 | CallingConv::ID CallConv = CLI.CallConv; | |||
2322 | bool doesNotRet = CLI.DoesNotReturn; | |||
2323 | bool isVarArg = CLI.IsVarArg; | |||
2324 | ||||
2325 | MachineFunction &MF = DAG.getMachineFunction(); | |||
2326 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); | |||
2327 | MachineFunction::CallSiteInfo CSInfo; | |||
2328 | bool isStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet(); | |||
2329 | bool isThisReturn = false; | |||
2330 | bool isCmseNSCall = false; | |||
2331 | bool isSibCall = false; | |||
2332 | bool PreferIndirect = false; | |||
2333 | bool GuardWithBTI = false; | |||
2334 | ||||
2335 | // Lower 'returns_twice' calls to a pseudo-instruction. | |||
2336 | if (CLI.CB && CLI.CB->getAttributes().hasFnAttr(Attribute::ReturnsTwice) && | |||
2337 | !Subtarget->noBTIAtReturnTwice()) | |||
2338 | GuardWithBTI = AFI->branchTargetEnforcement(); | |||
2339 | ||||
2340 | // Determine whether this is a non-secure function call. | |||
2341 | if (CLI.CB && CLI.CB->getAttributes().hasFnAttr("cmse_nonsecure_call")) | |||
2342 | isCmseNSCall = true; | |||
2343 | ||||
2344 | // Disable tail calls if they're not supported. | |||
2345 | if (!Subtarget->supportsTailCall()) | |||
2346 | isTailCall = false; | |||
2347 | ||||
2348 | // For both non-secure calls and returns from a CMSE entry function, the | |||
2349 | // function needs to do some extra work after the call, or before the | |||
2350 | // return, respectively; thus it cannot end with a tail call. | |||
2351 | if (isCmseNSCall || AFI->isCmseNSEntryFunction()) | |||
2352 | isTailCall = false; | |||
2353 | ||||
2354 | if (isa<GlobalAddressSDNode>(Callee)) { | |||
2355 | // If we're optimizing for minimum size and the function is called three or | |||
2356 | // more times in this block, we can improve codesize by calling indirectly | |||
2357 | // as BLXr has a 16-bit encoding. | |||
2358 | auto *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal(); | |||
2359 | if (CLI.CB) { | |||
2360 | auto *BB = CLI.CB->getParent(); | |||
2361 | PreferIndirect = Subtarget->isThumb() && Subtarget->hasMinSize() && | |||
2362 | count_if(GV->users(), [&BB](const User *U) { | |||
2363 | return isa<Instruction>(U) && | |||
2364 | cast<Instruction>(U)->getParent() == BB; | |||
2365 | }) > 2; | |||
2366 | } | |||
2367 | } | |||
2368 | if (isTailCall) { | |||
2369 | // Check if it's really possible to do a tail call. | |||
2370 | isTailCall = IsEligibleForTailCallOptimization( | |||
2371 | Callee, CallConv, isVarArg, isStructRet, | |||
2372 | MF.getFunction().hasStructRetAttr(), Outs, OutVals, Ins, DAG, | |||
2373 | PreferIndirect); | |||
2374 | ||||
2375 | if (isTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt && | |||
2376 | CallConv != CallingConv::Tail && CallConv != CallingConv::SwiftTail) | |||
2377 | isSibCall = true; | |||
2378 | ||||
2379 | // We don't support GuaranteedTailCallOpt for ARM, only automatically | |||
2380 | // detected sibcalls. | |||
2381 | if (isTailCall) | |||
2382 | ++NumTailCalls; | |||
2383 | } | |||
2384 | ||||
2385 | if (!isTailCall && CLI.CB && CLI.CB->isMustTailCall()) | |||
2386 | report_fatal_error("failed to perform tail call elimination on a call " | |||
2387 | "site marked musttail"); | |||
2388 | // Analyze operands of the call, assigning locations to each operand. | |||
2389 | SmallVector<CCValAssign, 16> ArgLocs; | |||
2390 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, | |||
2391 | *DAG.getContext()); | |||
2392 | CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CallConv, isVarArg)); | |||
2393 | ||||
2394 | // Get a count of how many bytes are to be pushed on the stack. | |||
2395 | unsigned NumBytes = CCInfo.getNextStackOffset(); | |||
2396 | ||||
2397 | // SPDiff is the byte offset of the call's argument area from the callee's. | |||
2398 | // Stores to callee stack arguments will be placed in FixedStackSlots offset | |||
2399 | // by this amount for a tail call. In a sibling call it must be 0 because the | |||
2400 | // caller will deallocate the entire stack and the callee still expects its | |||
2401 | // arguments to begin at SP+0. Completely unused for non-tail calls. | |||
2402 | int SPDiff = 0; | |||
2403 | ||||
2404 | if (isTailCall && !isSibCall) { | |||
2405 | auto FuncInfo = MF.getInfo<ARMFunctionInfo>(); | |||
2406 | unsigned NumReusableBytes = FuncInfo->getArgumentStackSize(); | |||
2407 | ||||
2408 | // Since the callee will pop the argument stack as part of the tail call, | |||
2409 | // we must keep the popped size 16-byte aligned. | |||
2410 | Align StackAlign = DAG.getDataLayout().getStackAlignment(); | |||
2411 | NumBytes = alignTo(NumBytes, StackAlign); | |||
2412 | ||||
2413 | // SPDiff will be negative if this tail call requires more space than we | |||
2414 | // would automatically have in our incoming argument space. Positive if we | |||
2415 | // can actually shrink the stack. | |||
2416 | SPDiff = NumReusableBytes - NumBytes; | |||
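// E.g. (illustrative) with 8 reusable bytes and 24 bytes of outgoing
// arguments, SPDiff is -16 and 16 extra bytes are reserved below via
// setArgRegsSaveSize.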
2417 | ||||
2418 | // If this call requires more stack than we have available from | |||
2419 | // LowerFormalArguments, tell FrameLowering to reserve space for it. | |||
2420 | if (SPDiff < 0 && AFI->getArgRegsSaveSize() < (unsigned)-SPDiff) | |||
2421 | AFI->setArgRegsSaveSize(-SPDiff); | |||
2422 | } | |||
2423 | ||||
2424 | if (isSibCall) { | |||
2425 | // For sibling tail calls, memory operands are available in our caller's stack. | |||
2426 | NumBytes = 0; | |||
2427 | } else { | |||
2428 | // Adjust the stack pointer for the new arguments... | |||
2429 | // These operations are automatically eliminated by the prolog/epilog pass | |||
2430 | Chain = DAG.getCALLSEQ_START(Chain, isTailCall ? 0 : NumBytes, 0, dl); | |||
2431 | } | |||
2432 | ||||
2433 | SDValue StackPtr = | |||
2434 | DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy(DAG.getDataLayout())); | |||
2435 | ||||
2436 | RegsToPassVector RegsToPass; | |||
2437 | SmallVector<SDValue, 8> MemOpChains; | |||
2438 | ||||
2439 | // During a tail call, stores to the argument area must happen after all of | |||
2440 | // the function's incoming arguments have been loaded because they may alias. | |||
2441 | // This is done by folding in a TokenFactor from LowerFormalArguments, but | |||
2442 | // there's no point in doing so repeatedly so this tracks whether that's | |||
2443 | // happened yet. | |||
2444 | bool AfterFormalArgLoads = false; | |||
2445 | ||||
2446 | // Walk the register/memloc assignments, inserting copies/loads. In the case | |||
2447 | // of tail call optimization, arguments are handled later. | |||
2448 | for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); | |||
2449 | i != e; | |||
2450 | ++i, ++realArgIdx) { | |||
2451 | CCValAssign &VA = ArgLocs[i]; | |||
2452 | SDValue Arg = OutVals[realArgIdx]; | |||
2453 | ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; | |||
2454 | bool isByVal = Flags.isByVal(); | |||
2455 | ||||
2456 | // Promote the value if needed. | |||
2457 | switch (VA.getLocInfo()) { | |||
2458 | default: llvm_unreachable("Unknown loc info!"); | |||
2459 | case CCValAssign::Full: break; | |||
2460 | case CCValAssign::SExt: | |||
2461 | Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); | |||
2462 | break; | |||
2463 | case CCValAssign::ZExt: | |||
2464 | Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg); | |||
2465 | break; | |||
2466 | case CCValAssign::AExt: | |||
2467 | Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); | |||
2468 | break; | |||
2469 | case CCValAssign::BCvt: | |||
2470 | Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); | |||
2471 | break; | |||
2472 | } | |||
2473 | ||||
2474 | if (isTailCall && VA.isMemLoc() && !AfterFormalArgLoads) { | |||
2475 | Chain = DAG.getStackArgumentTokenFactor(Chain); | |||
2476 | AfterFormalArgLoads = true; | |||
2477 | } | |||
2478 | ||||
2479 | // f16 arguments have their size extended to 4 bytes and passed as if they | |||
2480 | // had been copied to the LSBs of a 32-bit register. | |||
2481 | // For that, it's passed extended to i32 (soft ABI) or to f32 (hard ABI) | |||
2482 | if (VA.needsCustom() && | |||
2483 | (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16)) { | |||
2484 | Arg = MoveFromHPR(dl, DAG, VA.getLocVT(), VA.getValVT(), Arg); | |||
2485 | } else { | |||
2486 | // f16 arguments could have been extended prior to argument lowering. Mask | |||
2487 | // these arguments on a CMSE nonsecure call so stale upper bits do not leak. | |||
2488 | auto ArgVT = Outs[realArgIdx].ArgVT; | |||
2489 | if (isCmseNSCall && (ArgVT == MVT::f16)) { | |||
2490 | auto LocBits = VA.getLocVT().getSizeInBits(); | |||
2491 | auto MaskValue = APInt::getLowBitsSet(LocBits, ArgVT.getSizeInBits()); | |||
2492 | SDValue Mask = | |||
2493 | DAG.getConstant(MaskValue, dl, MVT::getIntegerVT(LocBits)); | |||
2494 | Arg = DAG.getNode(ISD::BITCAST, dl, MVT::getIntegerVT(LocBits), Arg); | |||
2495 | Arg = DAG.getNode(ISD::AND, dl, MVT::getIntegerVT(LocBits), Arg, Mask); | |||
2496 | Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); | |||
2497 | } | |||
2498 | } | |||
2499 | ||||
2500 | // f64 and v2f64 might be passed in i32 pairs and must be split into pieces | |||
2501 | if (VA.needsCustom() && VA.getLocVT() == MVT::v2f64) { | |||
2502 | SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, | |||
2503 | DAG.getConstant(0, dl, MVT::i32)); | |||
2504 | SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, | |||
2505 | DAG.getConstant(1, dl, MVT::i32)); | |||
2506 | ||||
2507 | PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass, VA, ArgLocs[++i], | |||
2508 | StackPtr, MemOpChains, isTailCall, SPDiff); | |||
2509 | ||||
2510 | VA = ArgLocs[++i]; // skip ahead to next loc | |||
2511 | if (VA.isRegLoc()) { | |||
2512 | PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass, VA, ArgLocs[++i], | |||
2513 | StackPtr, MemOpChains, isTailCall, SPDiff); | |||
2514 | } else { | |||
2515 | assert(VA.isMemLoc()); | |||
2516 | SDValue DstAddr; | |||
2517 | MachinePointerInfo DstInfo; | |||
2518 | std::tie(DstAddr, DstInfo) = | |||
2519 | computeAddrForCallArg(dl, DAG, VA, StackPtr, isTailCall, SPDiff); | |||
2520 | MemOpChains.push_back(DAG.getStore(Chain, dl, Op1, DstAddr, DstInfo)); | |||
2521 | } | |||
2522 | } else if (VA.needsCustom() && VA.getLocVT() == MVT::f64) { | |||
2523 | PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i], | |||
2524 | StackPtr, MemOpChains, isTailCall, SPDiff); | |||
2525 | } else if (VA.isRegLoc()) { | |||
2526 | if (realArgIdx == 0 && Flags.isReturned() && !Flags.isSwiftSelf() && | |||
2527 | Outs[0].VT == MVT::i32) { | |||
2528 | assert(VA.getLocVT() == MVT::i32 && | |||
2529 | "unexpected calling convention register assignment"); | |||
2530 | assert(!Ins.empty() && Ins[0].VT == MVT::i32 && | |||
2531 | "unexpected use of 'returned'"); | |||
2532 | isThisReturn = true; | |||
2533 | } | |||
2534 | const TargetOptions &Options = DAG.getTarget().Options; | |||
2535 | if (Options.EmitCallSiteInfo) | |||
2536 | CSInfo.emplace_back(VA.getLocReg(), i); | |||
2537 | RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); | |||
2538 | } else if (isByVal) { | |||
2539 | assert(VA.isMemLoc()); | |||
2540 | unsigned offset = 0; | |||
2541 | ||||
2542 | // True if this byval aggregate will be split between registers | |||
2543 | // and memory. | |||
2544 | unsigned ByValArgsCount = CCInfo.getInRegsParamsCount(); | |||
2545 | unsigned CurByValIdx = CCInfo.getInRegsParamsProcessed(); | |||
2546 | ||||
2547 | if (CurByValIdx < ByValArgsCount) { | |||
2548 | ||||
2549 | unsigned RegBegin, RegEnd; | |||
2550 | CCInfo.getInRegsParamInfo(CurByValIdx, RegBegin, RegEnd); | |||
2551 | ||||
2552 | EVT PtrVT = | |||
2553 | DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); | |||
2554 | unsigned int i, j; | |||
2555 | for (i = 0, j = RegBegin; j < RegEnd; i++, j++) { | |||
2556 | SDValue Const = DAG.getConstant(4*i, dl, MVT::i32); | |||
2557 | SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); | |||
2558 | SDValue Load = | |||
2559 | DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo(), | |||
2560 | DAG.InferPtrAlign(AddArg)); | |||
2561 | MemOpChains.push_back(Load.getValue(1)); | |||
2562 | RegsToPass.push_back(std::make_pair(j, Load)); | |||
2563 | } | |||
2564 | ||||
2565 | // If the parameter size exceeds the register area, the "offset" value | |||
2566 | // helps us compute the stack slot for the remaining part properly. | |||
2567 | offset = RegEnd - RegBegin; | |||
2568 | ||||
2569 | CCInfo.nextInRegsParam(); | |||
2570 | } | |||
2571 | ||||
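// Whatever part of the byval aggregate did not fit in registers is copied
// to the stack below with a COPY_STRUCT_BYVAL node, which is later expanded
// into an inline copy sequence.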
2572 | if (Flags.getByValSize() > 4*offset) { | |||
2573 | auto PtrVT = getPointerTy(DAG.getDataLayout()); | |||
2574 | SDValue Dst; | |||
2575 | MachinePointerInfo DstInfo; | |||
2576 | std::tie(Dst, DstInfo) = | |||
2577 | computeAddrForCallArg(dl, DAG, VA, StackPtr, isTailCall, SPDiff); | |||
2578 | SDValue SrcOffset = DAG.getIntPtrConstant(4*offset, dl); | |||
2579 | SDValue Src = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, SrcOffset); | |||
2580 | SDValue SizeNode = DAG.getConstant(Flags.getByValSize() - 4*offset, dl, | |||
2581 | MVT::i32); | |||
2582 | SDValue AlignNode = | |||
2583 | DAG.getConstant(Flags.getNonZeroByValAlign().value(), dl, MVT::i32); | |||
2584 | ||||
2585 | SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue); | |||
2586 | SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode}; | |||
2587 | MemOpChains.push_back(DAG.getNode(ARMISD::COPY_STRUCT_BYVAL, dl, VTs, | |||
2588 | Ops)); | |||
2589 | } | |||
2590 | } else { | |||
2591 | assert(VA.isMemLoc()); | |||
2592 | SDValue DstAddr; | |||
2593 | MachinePointerInfo DstInfo; | |||
2594 | std::tie(DstAddr, DstInfo) = | |||
2595 | computeAddrForCallArg(dl, DAG, VA, StackPtr, isTailCall, SPDiff); | |||
2596 | ||||
2597 | SDValue Store = DAG.getStore(Chain, dl, Arg, DstAddr, DstInfo); | |||
2598 | MemOpChains.push_back(Store); | |||
2599 | } | |||
2600 | } | |||
2601 | ||||
2602 | if (!MemOpChains.empty()) | |||
2603 | Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); | |||
2604 | ||||
2605 | // Build a sequence of copy-to-reg nodes chained together with token chain | |||
2606 | // and flag operands which copy the outgoing args into the appropriate regs. | |||
2607 | SDValue InFlag; | |||
2608 | for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { | |||
2609 | Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, | |||
2610 | RegsToPass[i].second, InFlag); | |||
2611 | InFlag = Chain.getValue(1); | |||
2612 | } | |||
2613 | ||||
2614 | // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every | |||
2615 | // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol | |||
2616 | // node so that legalize doesn't hack it. | |||
2617 | bool isDirect = false; | |||
2618 | ||||
2619 | const TargetMachine &TM = getTargetMachine(); | |||
2620 | const Module *Mod = MF.getFunction().getParent(); | |||
2621 | const GlobalValue *GV = nullptr; | |||
2622 | if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) | |||
2623 | GV = G->getGlobal(); | |||
2624 | bool isStub = | |||
2625 | !TM.shouldAssumeDSOLocal(*Mod, GV) && Subtarget->isTargetMachO(); | |||
2626 | ||||
2627 | bool isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass()); | |||
2628 | bool isLocalARMFunc = false; | |||
2629 | auto PtrVt = getPointerTy(DAG.getDataLayout()); | |||
2630 | ||||
2631 | if (Subtarget->genLongCalls()) { | |||
2632 | assert((!isPositionIndependent() || Subtarget->isTargetWindows()) && | |||
2633 | "long-calls codegen is not position independent!"); | |||
2634 | // Handle a global address or an external symbol. If it's not one of | |||
2635 | // those, the target's already in a register, so we don't need to do | |||
2636 | // anything extra. | |||
2637 | if (isa<GlobalAddressSDNode>(Callee)) { | |||
2638 | // Create a constant pool entry for the callee address | |||
2639 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); | |||
2640 | ARMConstantPoolValue *CPV = | |||
2641 | ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 0); | |||
2642 | ||||
2643 | // Get the address of the callee into a register | |||
2644 | SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, Align(4)); | |||
2645 | CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); | |||
2646 | Callee = DAG.getLoad( | |||
2647 | PtrVt, dl, DAG.getEntryNode(), CPAddr, | |||
2648 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); | |||
2649 | } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { | |||
2650 | const char *Sym = S->getSymbol(); | |||
2651 | ||||
2652 | // Create a constant pool entry for the callee address | |||
2653 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); | |||
2654 | ARMConstantPoolValue *CPV = | |||
2655 | ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym, | |||
2656 | ARMPCLabelIndex, 0); | |||
2657 | // Get the address of the callee into a register | |||
2658 | SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, Align(4)); | |||
2659 | CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); | |||
2660 | Callee = DAG.getLoad( | |||
2661 | PtrVt, dl, DAG.getEntryNode(), CPAddr, | |||
2662 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); | |||
2663 | } | |||
2664 | } else if (isa<GlobalAddressSDNode>(Callee)) { | |||
2665 | if (!PreferIndirect) { | |||
2666 | isDirect = true; | |||
2667 | bool isDef = GV->isStrongDefinitionForLinker(); | |||
2668 | ||||
2669 | // ARM call to a local ARM function is predicable. | |||
2670 | isLocalARMFunc = !Subtarget->isThumb() && (isDef || !ARMInterworking); | |||
2671 | // tBX takes a register source operand. | |||
2672 | if (isStub && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { | |||
2673 | assert(Subtarget->isTargetMachO() && "WrapperPIC use on non-MachO?"); | |||
2674 | Callee = DAG.getNode( | |||
2675 | ARMISD::WrapperPIC, dl, PtrVt, | |||
2676 | DAG.getTargetGlobalAddress(GV, dl, PtrVt, 0, ARMII::MO_NONLAZY)); | |||
2677 | Callee = DAG.getLoad( | |||
2678 | PtrVt, dl, DAG.getEntryNode(), Callee, | |||
2679 | MachinePointerInfo::getGOT(DAG.getMachineFunction()), MaybeAlign(), | |||
2680 | MachineMemOperand::MODereferenceable | | |||
2681 | MachineMemOperand::MOInvariant); | |||
2682 | } else if (Subtarget->isTargetCOFF()) { | |||
2683 | assert(Subtarget->isTargetWindows() && | |||
2684 | "Windows is the only supported COFF target"); | |||
2685 | unsigned TargetFlags = ARMII::MO_NO_FLAG; | |||
2686 | if (GV->hasDLLImportStorageClass()) | |||
2687 | TargetFlags = ARMII::MO_DLLIMPORT; | |||
2688 | else if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV)) | |||
2689 | TargetFlags = ARMII::MO_COFFSTUB; | |||
2690 | Callee = DAG.getTargetGlobalAddress(GV, dl, PtrVt, /*offset=*/0, | |||
2691 | TargetFlags); | |||
2692 | if (TargetFlags & (ARMII::MO_DLLIMPORT | ARMII::MO_COFFSTUB)) | |||
2693 | Callee = | |||
2694 | DAG.getLoad(PtrVt, dl, DAG.getEntryNode(), | |||
2695 | DAG.getNode(ARMISD::Wrapper, dl, PtrVt, Callee), | |||
2696 | MachinePointerInfo::getGOT(DAG.getMachineFunction())); | |||
2697 | } else { | |||
2698 | Callee = DAG.getTargetGlobalAddress(GV, dl, PtrVt, 0, 0); | |||
2699 | } | |||
2700 | } | |||
2701 | } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { | |||
2702 | isDirect = true; | |||
2703 | // tBX takes a register source operand. | |||
2704 | const char *Sym = S->getSymbol(); | |||
2705 | if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { | |||
2706 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); | |||
2707 | ARMConstantPoolValue *CPV = | |||
2708 | ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym, | |||
2709 | ARMPCLabelIndex, 4); | |||
2710 | SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, Align(4)); | |||
2711 | CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); | |||
2712 | Callee = DAG.getLoad( | |||
2713 | PtrVt, dl, DAG.getEntryNode(), CPAddr, | |||
2714 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); | |||
2715 | SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32); | |||
2716 | Callee = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVt, Callee, PICLabel); | |||
2717 | } else { | |||
2718 | Callee = DAG.getTargetExternalSymbol(Sym, PtrVt, 0); | |||
2719 | } | |||
2720 | } | |||
2721 | ||||
2722 | if (isCmseNSCall) { | |||
2723 | assert(!isARMFunc && !isDirect && | |||
2724 | "Cannot handle call to ARM function or direct call"); | |||
2725 | if (NumBytes > 0) { | |||
2726 | DiagnosticInfoUnsupported Diag(DAG.getMachineFunction().getFunction(), | |||
2727 | "call to non-secure function would " | |||
2728 | "require passing arguments on stack", | |||
2729 | dl.getDebugLoc()); | |||
2730 | DAG.getContext()->diagnose(Diag); | |||
2731 | } | |||
2732 | if (isStructRet) { | |||
2733 | DiagnosticInfoUnsupported Diag( | |||
2734 | DAG.getMachineFunction().getFunction(), | |||
2735 | "call to non-secure function would return value through pointer", | |||
2736 | dl.getDebugLoc()); | |||
2737 | DAG.getContext()->diagnose(Diag); | |||
2738 | } | |||
2739 | } | |||
2740 | ||||
2741 | // FIXME: handle tail calls differently. | |||
2742 | unsigned CallOpc; | |||
2743 | if (Subtarget->isThumb()) { | |||
2744 | if (GuardWithBTI) | |||
2745 | CallOpc = ARMISD::t2CALL_BTI; | |||
2746 | else if (isCmseNSCall) | |||
2747 | CallOpc = ARMISD::tSECALL; | |||
2748 | else if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps()) | |||
2749 | CallOpc = ARMISD::CALL_NOLINK; | |||
2750 | else | |||
2751 | CallOpc = ARMISD::CALL; | |||
2752 | } else { | |||
2753 | if (!isDirect && !Subtarget->hasV5TOps()) | |||
2754 | CallOpc = ARMISD::CALL_NOLINK; | |||
2755 | else if (doesNotRet && isDirect && Subtarget->hasRetAddrStack() && | |||
2756 | // Emit regular call when code size is the priority | |||
2757 | !Subtarget->hasMinSize()) | |||
2758 | // "mov lr, pc; b _foo" to avoid confusing the RSP | |||
2759 | CallOpc = ARMISD::CALL_NOLINK; | |||
2760 | else | |||
2761 | CallOpc = isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL; | |||
2762 | } | |||
2763 | ||||
2764 | // We don't usually want to end the call-sequence here because we would tidy | |||
2765 | // the frame up *after* the call, however in the ABI-changing tail-call case | |||
2766 | // we've carefully laid out the parameters so that when sp is reset they'll be | |||
2767 | // in the correct location. | |||
2768 | if (isTailCall && !isSibCall) { | |||
2769 | Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, dl, true), | |||
2770 | DAG.getIntPtrConstant(0, dl, true), InFlag, dl); | |||
2771 | InFlag = Chain.getValue(1); | |||
2772 | } | |||
2773 | ||||
2774 | std::vector<SDValue> Ops; | |||
2775 | Ops.push_back(Chain); | |||
2776 | Ops.push_back(Callee); | |||
2777 | ||||
2778 | if (isTailCall) { | |||
2779 | Ops.push_back(DAG.getTargetConstant(SPDiff, dl, MVT::i32)); | |||
2780 | } | |||
2781 | ||||
2782 | // Add argument registers to the end of the list so that they are known live | |||
2783 | // into the call. | |||
2784 | for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) | |||
2785 | Ops.push_back(DAG.getRegister(RegsToPass[i].first, | |||
2786 | RegsToPass[i].second.getValueType())); | |||
2787 | ||||
2788 | // Add a register mask operand representing the call-preserved registers. | |||
2789 | if (!isTailCall) { | |||
2790 | const uint32_t *Mask; | |||
2791 | const ARMBaseRegisterInfo *ARI = Subtarget->getRegisterInfo(); | |||
2792 | if (isThisReturn) { | |||
2793 | // For 'this' returns, use the R0-preserving mask if applicable | |||
2794 | Mask = ARI->getThisReturnPreservedMask(MF, CallConv); | |||
2795 | if (!Mask) { | |||
2796 | // Set isThisReturn to false if the calling convention is not one that | |||
2797 | // allows 'returned' to be modeled in this way, so LowerCallResult does | |||
2798 | // not try to pass 'this' straight through | |||
2799 | isThisReturn = false; | |||
2800 | Mask = ARI->getCallPreservedMask(MF, CallConv); | |||
2801 | } | |||
2802 | } else | |||
2803 | Mask = ARI->getCallPreservedMask(MF, CallConv); | |||
2804 | ||||
2805 | assert(Mask && "Missing call preserved mask for calling convention"); | |||
2806 | Ops.push_back(DAG.getRegisterMask(Mask)); | |||
2807 | } | |||
2808 | ||||
2809 | if (InFlag.getNode()) | |||
2810 | Ops.push_back(InFlag); | |||
2811 | ||||
2812 | SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); | |||
2813 | if (isTailCall) { | |||
2814 | MF.getFrameInfo().setHasTailCall(); | |||
2815 | SDValue Ret = DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, Ops); | |||
2816 | DAG.addCallSiteInfo(Ret.getNode(), std::move(CSInfo)); | |||
2817 | return Ret; | |||
2818 | } | |||
2819 | ||||
2820 | // Returns a chain and a flag for retval copy to use. | |||
2821 | Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops); | |||
2822 | DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge); | |||
2823 | InFlag = Chain.getValue(1); | |||
2824 | DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo)); | |||
2825 | ||||
2826 | // If we're guaranteeing tail-calls will be honoured, the callee must | |||
2827 | // pop its own argument stack on return. But this call is *not* a tail call so | |||
2828 | // we need to undo that after it returns to restore the status-quo. | |||
2829 | bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt; | |||
2830 | uint64_t CalleePopBytes = | |||
2831 | canGuaranteeTCO(CallConv, TailCallOpt) ? alignTo(NumBytes, 16) : -1ULL; | |||
2832 | ||||
2833 | Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), | |||
2834 | DAG.getIntPtrConstant(CalleePopBytes, dl, true), | |||
2835 | InFlag, dl); | |||
2836 | if (!Ins.empty()) | |||
2837 | InFlag = Chain.getValue(1); | |||
2838 | ||||
2839 | // Handle result values, copying them out of physregs into vregs that we | |||
2840 | // return. | |||
2841 | return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG, | |||
2842 | InVals, isThisReturn, | |||
2843 | isThisReturn ? OutVals[0] : SDValue()); | |||
2844 | } | |||
2845 | ||||
2846 | /// HandleByVal - Every parameter *after* a byval parameter is passed | |||
2847 | /// on the stack. Remember the next parameter register to allocate, | |||
2848 | /// and then confiscate the rest of the parameter registers to ensure | |||
2849 | /// this. | |||
2850 | void ARMTargetLowering::HandleByVal(CCState *State, unsigned &Size, | |||
2851 | Align Alignment) const { | |||
2852 | // Byval (as with any stack) slots are always at least 4 byte aligned. | |||
2853 | Alignment = std::max(Alignment, Align(4)); | |||
2854 | ||||
2855 | unsigned Reg = State->AllocateReg(GPRArgRegs); | |||
2856 | if (!Reg) | |||
2857 | return; | |||
2858 | ||||
2859 | unsigned AlignInRegs = Alignment.value() / 4; | |||
2860 | unsigned Waste = (ARM::R4 - Reg) % AlignInRegs; | |||
2861 | for (unsigned i = 0; i < Waste; ++i) | |||
2862 | Reg = State->AllocateReg(GPRArgRegs); | |||
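// E.g. (illustrative) for an 8-byte-aligned byval with R1 as the next free
// register, AlignInRegs is 2 and one register is wasted, so the aggregate
// starts in R2.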
2863 | ||||
2864 | if (!Reg) | |||
2865 | return; | |||
2866 | ||||
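// Excess is the number of bytes the remaining registers [Reg, R4) can still
// hold, at 4 bytes per GPR.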
2867 | unsigned Excess = 4 * (ARM::R4 - Reg); | |||
2868 | ||||
2869 | // Special case when NSAA != SP and the parameter is larger than all the | |||
2870 | // remaining GPR regs. In that case we cannot split the parameter; it must | |||
2871 | // be sent to the stack in its entirety. We must also set the NCRN to R4, | |||
2872 | // thereby wasting all remaining registers. | |||
2873 | const unsigned NSAAOffset = State->getNextStackOffset(); | |||
2874 | if (NSAAOffset != 0 && Size > Excess) { | |||
2875 | while (State->AllocateReg(GPRArgRegs)) | |||
2876 | ; | |||
2877 | return; | |||
2878 | } | |||
2879 | ||||
2880 | // The first register for the byval parameter is the first register that | |||
2881 | // wasn't allocated before this method call, i.e. "Reg". | |||
2882 | // If the parameter is small enough to be saved in the range [Reg, R4), the | |||
2883 | // end (one past the last) register is Reg + param-size-in-regs; otherwise | |||
2884 | // the parameter is split between registers and the stack, and the end | |||
2885 | // register is R4. | |||
2886 | unsigned ByValRegBegin = Reg; | |||
2887 | unsigned ByValRegEnd = std::min<unsigned>(Reg + Size / 4, ARM::R4); | |||
2888 | State->addInRegsParamInfo(ByValRegBegin, ByValRegEnd); | |||
2889 | // Note: the first register was already allocated at the start of this | |||
2890 | // method; allocate the remaining registers we need. | |||
2891 | for (unsigned i = Reg + 1; i != ByValRegEnd; ++i) | |||
2892 | State->AllocateReg(GPRArgRegs); | |||
2893 | // A byval parameter that is split between registers and memory needs its | |||
2894 | // size truncated here. | |||
2895 | // In the case where the entire structure fits in registers, we set the | |||
2896 | // size in memory to zero. | |||
2897 | Size = std::max<int>(Size - Excess, 0); | |||
2898 | } | |||
2899 | ||||
2900 | /// MatchingStackOffset - Return true if the given stack call argument is | |||
2901 | /// already available in the same position (relatively) of the caller's | |||
2902 | /// incoming argument stack. | |||
2903 | static | |||
2904 | bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, | |||
2905 | MachineFrameInfo &MFI, const MachineRegisterInfo *MRI, | |||
2906 | const TargetInstrInfo *TII) { | |||
2907 | unsigned Bytes = Arg.getValueSizeInBits() / 8; | |||
2908 | int FI = std::numeric_limits<int>::max(); | |||
2909 | if (Arg.getOpcode() == ISD::CopyFromReg) { | |||
2910 | Register VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg(); | |||
2911 | if (!Register::isVirtualRegister(VR)) | |||
2912 | return false; | |||
2913 | MachineInstr *Def = MRI->getVRegDef(VR); | |||
2914 | if (!Def) | |||
2915 | return false; | |||
2916 | if (!Flags.isByVal()) { | |||
2917 | if (!TII->isLoadFromStackSlot(*Def, FI)) | |||
2918 | return false; | |||
2919 | } else { | |||
2920 | return false; | |||
2921 | } | |||
2922 | } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) { | |||
2923 | if (Flags.isByVal()) | |||
2924 | // ByVal argument is passed in as a pointer but it's now being | |||
2925 | // dereferenced. e.g. | |||
2926 | // define @foo(%struct.X* %A) { | |||
2927 | // tail call @bar(%struct.X* byval %A) | |||
2928 | // } | |||
2929 | return false; | |||
2930 | SDValue Ptr = Ld->getBasePtr(); | |||
2931 | FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr); | |||
2932 | if (!FINode) | |||
2933 | return false; | |||
2934 | FI = FINode->getIndex(); | |||
2935 | } else | |||
2936 | return false; | |||
2937 | ||||
2938 | assert(FI != std::numeric_limits<int>::max()); | |||
2939 | if (!MFI.isFixedObjectIndex(FI)) | |||
2940 | return false; | |||
2941 | return Offset == MFI.getObjectOffset(FI) && Bytes == MFI.getObjectSize(FI); | |||
2942 | } | |||
2943 | ||||
2944 | /// IsEligibleForTailCallOptimization - Check whether the call is eligible | |||
2945 | /// for tail call optimization. Targets which want to do tail call | |||
2946 | /// optimization should implement this function. | |||
2947 | bool ARMTargetLowering::IsEligibleForTailCallOptimization( | |||
2948 | SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg, | |||
2949 | bool isCalleeStructRet, bool isCallerStructRet, | |||
2950 | const SmallVectorImpl<ISD::OutputArg> &Outs, | |||
2951 | const SmallVectorImpl<SDValue> &OutVals, | |||
2952 | const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG, | |||
2953 | const bool isIndirect) const { | |||
2954 | MachineFunction &MF = DAG.getMachineFunction(); | |||
2955 | const Function &CallerF = MF.getFunction(); | |||
2956 | CallingConv::ID CallerCC = CallerF.getCallingConv(); | |||
2957 | ||||
2958 | assert(Subtarget->supportsTailCall()); | |||
2959 | ||||
2960 | // Indirect tail calls cannot be optimized for Thumb1 if the args | |||
2961 | // to the call take up r0-r3. The reason is that there are no legal registers | |||
2962 | // left to hold the pointer to the function to be called. | |||
2963 | // Similarly, if the function uses return address sign and authentication, | |||
2964 | // r12 is needed to hold the PAC and is not available to hold the callee | |||
2965 | // address. | |||
2966 | if (Outs.size() >= 4 && | |||
2967 | (!isa<GlobalAddressSDNode>(Callee.getNode()) || isIndirect)) { | |||
2968 | if (Subtarget->isThumb1Only()) | |||
2969 | return false; | |||
2970 | // Conservatively assume the function spills LR. | |||
2971 | if (MF.getInfo<ARMFunctionInfo>()->shouldSignReturnAddress(true)) | |||
2972 | return false; | |||
2973 | } | |||
2974 | ||||
2975 | // Look for obvious safe cases to perform tail call optimization that do not | |||
2976 | // require ABI changes. This is what gcc calls sibcall. | |||
2977 | ||||
2978 | // Exception-handling functions need a special set of instructions to indicate | |||
2979 | // a return to the hardware. Tail-calling another function would probably | |||
2980 | // break this. | |||
2981 | if (CallerF.hasFnAttribute("interrupt")) | |||
2982 | return false; | |||
2983 | ||||
2984 | if (canGuaranteeTCO(CalleeCC, getTargetMachine().Options.GuaranteedTailCallOpt)) | |||
2985 | return CalleeCC == CallerCC; | |||
2986 | ||||
2987 | // Also avoid sibcall optimization if either caller or callee uses struct | |||
2988 | // return semantics. | |||
2989 | if (isCalleeStructRet || isCallerStructRet) | |||
2990 | return false; | |||
2991 | ||||
2992 | // Externally-defined functions with weak linkage should not be | |||
2993 | // tail-called on ARM when the OS does not support dynamic | |||
2994 | // pre-emption of symbols, as the AAELF spec requires normal calls | |||
2995 | // to undefined weak functions to be replaced with a NOP or jump to the | |||
2996 | // next instruction. The behaviour of branch instructions in this | |||
2997 | // situation (as used for tail calls) is implementation-defined, so we | |||
2998 | // cannot rely on the linker replacing the tail call with a return. | |||
2999 | if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { | |||
3000 | const GlobalValue *GV = G->getGlobal(); | |||
3001 | const Triple &TT = getTargetMachine().getTargetTriple(); | |||
3002 | if (GV->hasExternalWeakLinkage() && | |||
3003 | (!TT.isOSWindows() || TT.isOSBinFormatELF() || TT.isOSBinFormatMachO())) | |||
3004 | return false; | |||
3005 | } | |||
3006 | ||||
3007 | // Check that the call results are passed in the same way. | |||
3008 | LLVMContext &C = *DAG.getContext(); | |||
3009 | if (!CCState::resultsCompatible( | |||
3010 | getEffectiveCallingConv(CalleeCC, isVarArg), | |||
3011 | getEffectiveCallingConv(CallerCC, CallerF.isVarArg()), MF, C, Ins, | |||
3012 | CCAssignFnForReturn(CalleeCC, isVarArg), | |||
3013 | CCAssignFnForReturn(CallerCC, CallerF.isVarArg()))) | |||
3014 | return false; | |||
3015 | // The callee has to preserve all registers the caller needs to preserve. | |||
3016 | const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo(); | |||
3017 | const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC); | |||
3018 | if (CalleeCC != CallerCC) { | |||
3019 | const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC); | |||
3020 | if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved)) | |||
3021 | return false; | |||
3022 | } | |||
3023 | ||||
3024 | // If Caller's vararg or byval argument has been split between registers and | |||
3025 | // stack, do not perform tail call, since part of the argument is in caller's | |||
3026 | // local frame. | |||
3027 | const ARMFunctionInfo *AFI_Caller = MF.getInfo<ARMFunctionInfo>(); | |||
3028 | if (AFI_Caller->getArgRegsSaveSize()) | |||
3029 | return false; | |||
3030 | ||||
3031 | // If the callee takes no arguments then go on to check the results of the | |||
3032 | // call. | |||
3033 | if (!Outs.empty()) { | |||
3034 | // Check if stack adjustment is needed. For now, do not do this if any | |||
3035 | // argument is passed on the stack. | |||
3036 | SmallVector<CCValAssign, 16> ArgLocs; | |||
3037 | CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C); | |||
3038 | CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, isVarArg)); | |||
3039 | if (CCInfo.getNextStackOffset()) { | |||
3040 | // Check if the arguments are already laid out in the right way as | |||
3041 | // the caller's fixed stack objects. | |||
3042 | MachineFrameInfo &MFI = MF.getFrameInfo(); | |||
3043 | const MachineRegisterInfo *MRI = &MF.getRegInfo(); | |||
3044 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); | |||
3045 | for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); | |||
3046 | i != e; | |||
3047 | ++i, ++realArgIdx) { | |||
3048 | CCValAssign &VA = ArgLocs[i]; | |||
3049 | EVT RegVT = VA.getLocVT(); | |||
3050 | SDValue Arg = OutVals[realArgIdx]; | |||
3051 | ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; | |||
3052 | if (VA.getLocInfo() == CCValAssign::Indirect) | |||
3053 | return false; | |||
3054 | if (VA.needsCustom() && (RegVT == MVT::f64 || RegVT == MVT::v2f64)) { | |||
3055 | // f64 and vector types are split into multiple registers or | |||
3056 | // register/stack-slot combinations. The types will not match | |||
3057 | // the registers; give up on memory f64 refs until we figure | |||
3058 | // out what to do about this. | |||
3059 | if (!VA.isRegLoc()) | |||
3060 | return false; | |||
3061 | if (!ArgLocs[++i].isRegLoc()) | |||
3062 | return false; | |||
3063 | if (RegVT == MVT::v2f64) { | |||
3064 | if (!ArgLocs[++i].isRegLoc()) | |||
3065 | return false; | |||
3066 | if (!ArgLocs[++i].isRegLoc()) | |||
3067 | return false; | |||
3068 | } | |||
3069 | } else if (!VA.isRegLoc()) { | |||
3070 | if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, | |||
3071 | MFI, MRI, TII)) | |||
3072 | return false; | |||
3073 | } | |||
3074 | } | |||
3075 | } | |||
3076 | ||||
3077 | const MachineRegisterInfo &MRI = MF.getRegInfo(); | |||
3078 | if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals)) | |||
3079 | return false; | |||
3080 | } | |||
3081 | ||||
3082 | return true; | |||
3083 | } | |||
3084 | ||||
3085 | bool | |||
3086 | ARMTargetLowering::CanLowerReturn(CallingConv::ID CallConv, | |||
3087 | MachineFunction &MF, bool isVarArg, | |||
3088 | const SmallVectorImpl<ISD::OutputArg> &Outs, | |||
3089 | LLVMContext &Context) const { | |||
3090 | SmallVector<CCValAssign, 16> RVLocs; | |||
3091 | CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context); | |||
3092 | return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg)); | |||
3093 | } | |||
3094 | ||||
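/// LowerInterruptReturn - Emit the special return used by hardware exception
/// handlers ("subs pc, lr, #N"), where the LR adjustment N is implied by the
/// kind of the function's "interrupt" attribute.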
3095 | static SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps, | |||
3096 | const SDLoc &DL, SelectionDAG &DAG) { | |||
3097 | const MachineFunction &MF = DAG.getMachineFunction(); | |||
3098 | const Function &F = MF.getFunction(); | |||
3099 | ||||
3100 | StringRef IntKind = F.getFnAttribute("interrupt").getValueAsString(); | |||
3101 | ||||
3102 | // See ARM ARM v7 B1.8.3. On exception entry LR is set to a possibly offset | |||
3103 | // version of the "preferred return address". These offsets affect the return | |||
3104 | // instruction if this is a return from PL1 without hypervisor extensions. | |||
3105 | // IRQ/FIQ: +4 "subs pc, lr, #4" | |||
3106 | // SWI: 0 "subs pc, lr, #0" | |||
3107 | // ABORT: +4 "subs pc, lr, #4" | |||
3108 | // UNDEF: +4/+2 "subs pc, lr, #0" | |||
3109 | // UNDEF varies depending on whether the exception came from ARM or Thumb | |||
3110 | // mode. Alongside GCC, we throw our hands up in disgust and pretend it's 0. | |||
3111 | ||||
3112 | int64_t LROffset; | |||
3113 | if (IntKind == "" || IntKind == "IRQ" || IntKind == "FIQ" || | |||
3114 | IntKind == "ABORT") | |||
3115 | LROffset = 4; | |||
3116 | else if (IntKind == "SWI" || IntKind == "UNDEF") | |||
3117 | LROffset = 0; | |||
3118 | else | |||
3119 | report_fatal_error("Unsupported interrupt attribute. If present, value " | |||
3120 | "must be one of: IRQ, FIQ, SWI, ABORT or UNDEF"); | |||
3121 | ||||
3122 | RetOps.insert(RetOps.begin() + 1, | |||
3123 | DAG.getConstant(LROffset, DL, MVT::i32, false)); | |||
3124 | ||||
3125 | return DAG.getNode(ARMISD::INTRET_FLAG, DL, MVT::Other, RetOps); | |||
3126 | } | |||
3127 | ||||
3128 | SDValue | |||
3129 | ARMTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, | |||
3130 | bool isVarArg, | |||
3131 | const SmallVectorImpl<ISD::OutputArg> &Outs, | |||
3132 | const SmallVectorImpl<SDValue> &OutVals, | |||
3133 | const SDLoc &dl, SelectionDAG &DAG) const { | |||
3134 | // CCValAssign - represent the assignment of the return value to a location. | |||
3135 | SmallVector<CCValAssign, 16> RVLocs; | |||
3136 | ||||
3137 | // CCState - Info about the registers and stack slots. | |||
3138 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, | |||
3139 | *DAG.getContext()); | |||
3140 | ||||
3141 | // Analyze outgoing return values. | |||
3142 | CCInfo.AnalyzeReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg)); | |||
3143 | ||||
3144 | SDValue Flag; | |||
3145 | SmallVector<SDValue, 4> RetOps; | |||
3146 | RetOps.push_back(Chain); // Operand #0 = Chain (updated below) | |||
3147 | bool isLittleEndian = Subtarget->isLittle(); | |||
3148 | ||||
3149 | MachineFunction &MF = DAG.getMachineFunction(); | |||
3150 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); | |||
3151 | AFI->setReturnRegsCount(RVLocs.size()); | |||
3152 | ||||
3153 | // Report error if cmse entry function returns structure through first ptr arg. | |||
3154 | if (AFI->isCmseNSEntryFunction() && MF.getFunction().hasStructRetAttr()) { | |||
3155 | // Note: using an empty SDLoc(), as the first line of the function is a | |||
3156 | // better place to report than the last line. | |||
3157 | DiagnosticInfoUnsupported Diag( | |||
3158 | DAG.getMachineFunction().getFunction(), | |||
3159 | "secure entry function would return value through pointer", | |||
3160 | SDLoc().getDebugLoc()); | |||
3161 | DAG.getContext()->diagnose(Diag); | |||
3162 | } | |||
3163 | ||||
3164 | // Copy the result values into the output registers. | |||
3165 | for (unsigned i = 0, realRVLocIdx = 0; | |||
3166 | i != RVLocs.size(); | |||
3167 | ++i, ++realRVLocIdx) { | |||
3168 | CCValAssign &VA = RVLocs[i]; | |||
3169 | assert(VA.isRegLoc() && "Can only return in registers!"); | |||
3170 | ||||
3171 | SDValue Arg = OutVals[realRVLocIdx]; | |||
3172 | bool ReturnF16 = false; | |||
3173 | ||||
3174 | if (Subtarget->hasFullFP16() && Subtarget->isTargetHardFloat()) { | |||
3175 | // Half-precision return values can be returned like this: | |||
3176 | // | |||
3177 | // t11 f16 = fadd ... | |||
3178 | // t12: i16 = bitcast t11 | |||
3179 | // t13: i32 = zero_extend t12 | |||
3180 | // t14: f32 = bitcast t13 <~~~~~~~ Arg | |||
3181 | // | |||
3182 | // to avoid code generation for bitcasts, we simply set Arg to the node | |||
3183 | // that produces the f16 value, t11 in this case. | |||
3184 | // | |||
3185 | if (Arg.getValueType() == MVT::f32 && Arg.getOpcode() == ISD::BITCAST) { | |||
3186 | SDValue ZE = Arg.getOperand(0); | |||
3187 | if (ZE.getOpcode() == ISD::ZERO_EXTEND && ZE.getValueType() == MVT::i32) { | |||
3188 | SDValue BC = ZE.getOperand(0); | |||
3189 | if (BC.getOpcode() == ISD::BITCAST && BC.getValueType() == MVT::i16) { | |||
3190 | Arg = BC.getOperand(0); | |||
3191 | ReturnF16 = true; | |||
3192 | } | |||
3193 | } | |||
3194 | } | |||
3195 | } | |||
3196 | ||||
3197 | switch (VA.getLocInfo()) { | |||
3198 | default: llvm_unreachable("Unknown loc info!"); | |||
3199 | case CCValAssign::Full: break; | |||
3200 | case CCValAssign::BCvt: | |||
3201 | if (!ReturnF16) | |||
3202 | Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); | |||
3203 | break; | |||
3204 | } | |||
3205 | ||||
3206 | // Mask f16 arguments if this is a CMSE nonsecure entry. | |||
3207 | auto RetVT = Outs[realRVLocIdx].ArgVT; | |||
3208 | if (AFI->isCmseNSEntryFunction() && (RetVT == MVT::f16)) { | |||
3209 | if (VA.needsCustom() && VA.getValVT() == MVT::f16) { | |||
3210 | Arg = MoveFromHPR(dl, DAG, VA.getLocVT(), VA.getValVT(), Arg); | |||
3211 | } else { | |||
3212 | auto LocBits = VA.getLocVT().getSizeInBits(); | |||
3213 | auto MaskValue = APInt::getLowBitsSet(LocBits, RetVT.getSizeInBits()); | |||
3214 | SDValue Mask = | |||
3215 | DAG.getConstant(MaskValue, dl, MVT::getIntegerVT(LocBits)); | |||
3216 | Arg = DAG.getNode(ISD::BITCAST, dl, MVT::getIntegerVT(LocBits), Arg); | |||
3217 | Arg = DAG.getNode(ISD::AND, dl, MVT::getIntegerVT(LocBits), Arg, Mask); | |||
3218 | Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); | |||
3219 | } | |||
3220 | } | |||
3221 | ||||
3222 | if (VA.needsCustom() && | |||
3223 | (VA.getLocVT() == MVT::v2f64 || VA.getLocVT() == MVT::f64)) { | |||
3224 | if (VA.getLocVT() == MVT::v2f64) { | |||
3225 | // Extract the first half and return it in two registers. | |||
3226 | SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, | |||
3227 | DAG.getConstant(0, dl, MVT::i32)); | |||
3228 | SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl, | |||
3229 | DAG.getVTList(MVT::i32, MVT::i32), Half); | |||
3230 | ||||
3231 | Chain = | |||
3232 | DAG.getCopyToReg(Chain, dl, VA.getLocReg(), | |||
3233 | HalfGPRs.getValue(isLittleEndian ? 0 : 1), Flag); | |||
3234 | Flag = Chain.getValue(1); | |||
3235 | RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); | |||
3236 | VA = RVLocs[++i]; // skip ahead to next loc | |||
3237 | Chain = | |||
3238 | DAG.getCopyToReg(Chain, dl, VA.getLocReg(), | |||
3239 | HalfGPRs.getValue(isLittleEndian ? 1 : 0), Flag); | |||
3240 | Flag = Chain.getValue(1); | |||
3241 | RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); | |||
3242 | VA = RVLocs[++i]; // skip ahead to next loc | |||
3243 | ||||
3244 | // Extract the 2nd half and fall through to handle it as an f64 value. | |||
3245 | Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, | |||
3246 | DAG.getConstant(1, dl, MVT::i32)); | |||
3247 | } | |||
3248 | // Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is | |||
3249 | // available. | |||
3250 | SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl, | |||
3251 | DAG.getVTList(MVT::i32, MVT::i32), Arg); | |||
3252 | Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), | |||
3253 | fmrrd.getValue(isLittleEndian ? 0 : 1), Flag); | |||
3254 | Flag = Chain.getValue(1); | |||
3255 | RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); | |||
3256 | VA = RVLocs[++i]; // skip ahead to next loc | |||
3257 | Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), | |||
3258 | fmrrd.getValue(isLittleEndian ? 1 : 0), Flag); | |||
3259 | } else | |||
3260 | Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag); | |||
3261 | ||||
3262 | // Guarantee that all emitted copies are glued together, | |||
3263 | // so they cannot be scheduled apart. | |||
3264 | Flag = Chain.getValue(1); | |||
3265 | RetOps.push_back(DAG.getRegister( | |||
3266 | VA.getLocReg(), ReturnF16 ? Arg.getValueType() : VA.getLocVT())); | |||
3267 | } | |||
3268 | const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo(); | |||
3269 | const MCPhysReg *I = | |||
3270 | TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction()); | |||
3271 | if (I) { | |||
3272 | for (; *I; ++I) { | |||
3273 | if (ARM::GPRRegClass.contains(*I)) | |||
3274 | RetOps.push_back(DAG.getRegister(*I, MVT::i32)); | |||
3275 | else if (ARM::DPRRegClass.contains(*I)) | |||
3276 | RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64))); | |||
3277 | else | |||
3278 | llvm_unreachable("Unexpected register class in CSRsViaCopy!")::llvm::llvm_unreachable_internal("Unexpected register class in CSRsViaCopy!" , "llvm/lib/Target/ARM/ARMISelLowering.cpp", 3278); | |||
3279 | } | |||
3280 | } | |||
3281 | ||||
3282 | // Update chain and glue. | |||
3283 | RetOps[0] = Chain; | |||
3284 | if (Flag.getNode()) | |||
3285 | RetOps.push_back(Flag); | |||
3286 | ||||
3287 | // CPUs which aren't M-class use a special sequence to return from | |||
3288 | // exceptions (roughly, any instruction setting pc and cpsr simultaneously, | |||
3289 | // though we use "subs pc, lr, #N"). | |||
3290 | // | |||
3291 | // M-class CPUs actually use a normal return sequence with a special | |||
3292 | // (hardware-provided) value in LR, so the normal code path works. | |||
3293 | if (DAG.getMachineFunction().getFunction().hasFnAttribute("interrupt") && | |||
3294 | !Subtarget->isMClass()) { | |||
3295 | if (Subtarget->isThumb1Only()) | |||
3296 | report_fatal_error("interrupt attribute is not supported in Thumb1"); | |||
3297 | return LowerInterruptReturn(RetOps, dl, DAG); | |||
3298 | } | |||
3299 | ||||
3300 | ARMISD::NodeType RetNode = AFI->isCmseNSEntryFunction() ? ARMISD::SERET_FLAG : | |||
3301 | ARMISD::RET_FLAG; | |||
3302 | return DAG.getNode(RetNode, dl, MVT::Other, RetOps); | |||
3303 | } | |||
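// A brief orientation sketch of the convention handled above: when an f64 is
// returned in GPRs, VMOVRRD moves one DPR into two GPRs, so the value comes
// back in r0/r1 (low word in r0 on little-endian, swapped on big-endian),
// and a v2f64 occupies r0-r3 in the same fashion.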
3304 | ||||
3305 | bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const { | |||
3306 | if (N->getNumValues() != 1) | |||
3307 | return false; | |||
3308 | if (!N->hasNUsesOfValue(1, 0)) | |||
3309 | return false; | |||
3310 | ||||
3311 | SDValue TCChain = Chain; | |||
3312 | SDNode *Copy = *N->use_begin(); | |||
3313 | if (Copy->getOpcode() == ISD::CopyToReg) { | |||
3314 | // If the copy has a glue operand, we conservatively assume it isn't safe to | |||
3315 | // perform a tail call. | |||
3316 | if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue) | |||
3317 | return false; | |||
3318 | TCChain = Copy->getOperand(0); | |||
3319 | } else if (Copy->getOpcode() == ARMISD::VMOVRRD) { | |||
3320 | SDNode *VMov = Copy; | |||
3321 | // f64 returned in a pair of GPRs. | |||
3322 | SmallPtrSet<SDNode*, 2> Copies; | |||
3323 | for (SDNode *U : VMov->uses()) { | |||
3324 | if (U->getOpcode() != ISD::CopyToReg) | |||
3325 | return false; | |||
3326 | Copies.insert(U); | |||
3327 | } | |||
3328 | if (Copies.size() > 2) | |||
3329 | return false; | |||
3330 | ||||
3331 | for (SDNode *U : VMov->uses()) { | |||
3332 | SDValue UseChain = U->getOperand(0); | |||
3333 | if (Copies.count(UseChain.getNode())) | |||
3334 | // Second CopyToReg | |||
3335 | Copy = U; | |||
3336 | else { | |||
3337 | // We are at the top of this chain. | |||
3338 | // If the copy has a glue operand, we conservatively assume it | |||
3339 | // isn't safe to perform a tail call. | |||
3340 | if (U->getOperand(U->getNumOperands() - 1).getValueType() == MVT::Glue) | |||
3341 | return false; | |||
3342 | // First CopyToReg | |||
3343 | TCChain = UseChain; | |||
3344 | } | |||
3345 | } | |||
3346 | } else if (Copy->getOpcode() == ISD::BITCAST) { | |||
3347 | // f32 returned in a single GPR. | |||
3348 | if (!Copy->hasOneUse()) | |||
3349 | return false; | |||
3350 | Copy = *Copy->use_begin(); | |||
3351 | if (Copy->getOpcode() != ISD::CopyToReg || !Copy->hasNUsesOfValue(1, 0)) | |||
3352 | return false; | |||
3353 | // If the copy has a glue operand, we conservatively assume it isn't safe to | |||
3354 | // perform a tail call. | |||
3355 | if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue) | |||
3356 | return false; | |||
3357 | TCChain = Copy->getOperand(0); | |||
3358 | } else { | |||
3359 | return false; | |||
3360 | } | |||
3361 | ||||
3362 | bool HasRet = false; | |||
3363 | for (const SDNode *U : Copy->uses()) { | |||
3364 | if (U->getOpcode() != ARMISD::RET_FLAG && | |||
3365 | U->getOpcode() != ARMISD::INTRET_FLAG) | |||
3366 | return false; | |||
3367 | HasRet = true; | |||
3368 | } | |||
3369 | ||||
3370 | if (!HasRet) | |||
3371 | return false; | |||
3372 | ||||
3373 | Chain = TCChain; | |||
3374 | return true; | |||
3375 | } | |||
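// Shapes accepted above, summarized: the candidate value must reach the
// return through a plain CopyToReg, a VMOVRRD whose two halves each feed a
// CopyToReg (f64 split across a GPR pair), or a BITCAST followed by a
// CopyToReg (f32 in a single GPR), and every user of the final copy must be
// a RET_FLAG or INTRET_FLAG node.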
3376 | ||||
3377 | bool ARMTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { | |||
3378 | if (!Subtarget->supportsTailCall()) | |||
3379 | return false; | |||
3380 | ||||
3381 | if (!CI->isTailCall()) | |||
3382 | return false; | |||
3383 | ||||
3384 | return true; | |||
3385 | } | |||
3386 | ||||
3387 | // Writing a 64-bit value requires splitting it into two 32-bit halves | |||
3388 | // first, then passing the low and high parts through separately. | |||
3389 | static SDValue LowerWRITE_REGISTER(SDValue Op, SelectionDAG &DAG) { | |||
3390 | SDLoc DL(Op); | |||
3391 | SDValue WriteValue = Op->getOperand(2); | |||
3392 | ||||
3393 | // This function is only supposed to be called for i64 type argument. | |||
3394 | assert(WriteValue.getValueType() == MVT::i64 && | |||
3395 | "LowerWRITE_REGISTER called for non-i64 type argument."); | |||
3396 | ||||
3397 | SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, WriteValue, | |||
3398 | DAG.getConstant(0, DL, MVT::i32)); | |||
3399 | SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, WriteValue, | |||
3400 | DAG.getConstant(1, DL, MVT::i32)); | |||
3401 | SDValue Ops[] = { Op->getOperand(0), Op->getOperand(1), Lo, Hi }; | |||
3402 | return DAG.getNode(ISD::WRITE_REGISTER, DL, MVT::Other, Ops); | |||
3403 | } | |||
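// For reference, the same lo/hi split on plain integers; a minimal sketch
// (sketchSplitI64 is illustrative, not an LLVM API, and assumes <cstdint>):
static inline void sketchSplitI64(uint64_t V, uint32_t &Lo, uint32_t &Hi) {
  Lo = static_cast<uint32_t>(V);        // EXTRACT_ELEMENT index 0: low half
  Hi = static_cast<uint32_t>(V >> 32);  // EXTRACT_ELEMENT index 1: high half
}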
3404 | ||||
3405 | // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as | |||
3406 | // their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is | |||
3407 | // one of the above-mentioned nodes. It has to be wrapped because otherwise | |||
3408 | // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only | |||
3409 | // be used to form an addressing mode. These wrapped nodes will be selected | |||
3410 | // into MOVi. | |||
3411 | SDValue ARMTargetLowering::LowerConstantPool(SDValue Op, | |||
3412 | SelectionDAG &DAG) const { | |||
3413 | EVT PtrVT = Op.getValueType(); | |||
3414 | // FIXME there is no actual debug info here | |||
3415 | SDLoc dl(Op); | |||
3416 | ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); | |||
3417 | SDValue Res; | |||
3418 | ||||
3419 | // When generating execute-only code Constant Pools must be promoted to the | |||
3420 | // global data section. It's a bit ugly that we can't share them across basic | |||
3421 | // blocks, but this way we guarantee that execute-only behaves correctly | |||
3422 | // with position-independent addressing modes. | |||
3423 | if (Subtarget->genExecuteOnly()) { | |||
3424 | auto AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>(); | |||
3425 | auto T = const_cast<Type*>(CP->getType()); | |||
3426 | auto C = const_cast<Constant*>(CP->getConstVal()); | |||
3427 | auto M = const_cast<Module*>(DAG.getMachineFunction(). | |||
3428 | getFunction().getParent()); | |||
3429 | auto GV = new GlobalVariable( | |||
3430 | *M, T, /*isConstant=*/true, GlobalVariable::InternalLinkage, C, | |||
3431 | Twine(DAG.getDataLayout().getPrivateGlobalPrefix()) + "CP" + | |||
3432 | Twine(DAG.getMachineFunction().getFunctionNumber()) + "_" + | |||
3433 | Twine(AFI->createPICLabelUId()) | |||
3434 | ); | |||
3435 | SDValue GA = DAG.getTargetGlobalAddress(dyn_cast<GlobalValue>(GV), | |||
3436 | dl, PtrVT); | |||
3437 | return LowerGlobalAddress(GA, DAG); | |||
3438 | } | |||
3439 | ||||
3440 | if (CP->isMachineConstantPoolEntry()) | |||
3441 | Res = | |||
3442 | DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, CP->getAlign()); | |||
3443 | else | |||
3444 | Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, CP->getAlign()); | |||
3445 | return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res); | |||
3446 | } | |||
3447 | ||||
3448 | unsigned ARMTargetLowering::getJumpTableEncoding() const { | |||
3449 | return MachineJumpTableInfo::EK_Inline; | |||
3450 | } | |||
3451 | ||||
3452 | SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op, | |||
3453 | SelectionDAG &DAG) const { | |||
3454 | MachineFunction &MF = DAG.getMachineFunction(); | |||
3455 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); | |||
3456 | unsigned ARMPCLabelIndex = 0; | |||
3457 | SDLoc DL(Op); | |||
3458 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); | |||
3459 | const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); | |||
3460 | SDValue CPAddr; | |||
3461 | bool IsPositionIndependent = isPositionIndependent() || Subtarget->isROPI(); | |||
3462 | if (!IsPositionIndependent) { | |||
3463 | CPAddr = DAG.getTargetConstantPool(BA, PtrVT, Align(4)); | |||
3464 | } else { | |||
3465 | unsigned PCAdj = Subtarget->isThumb() ? 4 : 8; | |||
3466 | ARMPCLabelIndex = AFI->createPICLabelUId(); | |||
3467 | ARMConstantPoolValue *CPV = | |||
3468 | ARMConstantPoolConstant::Create(BA, ARMPCLabelIndex, | |||
3469 | ARMCP::CPBlockAddress, PCAdj); | |||
3470 | CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, Align(4)); | |||
3471 | } | |||
3472 | CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr); | |||
3473 | SDValue Result = DAG.getLoad( | |||
3474 | PtrVT, DL, DAG.getEntryNode(), CPAddr, | |||
3475 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); | |||
3476 | if (!IsPositionIndependent) | |||
3477 | return Result; | |||
3478 | SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, DL, MVT::i32); | |||
3479 | return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel); | |||
3480 | } | |||
3481 | ||||
3482 | /// Convert a TLS address reference into the correct sequence of loads | |||
3483 | /// and calls to compute the variable's address for Darwin, and return an | |||
3484 | /// SDValue containing the final node. | |||
3485 | ||||
3486 | /// Darwin only has one TLS scheme which must be capable of dealing with the | |||
3487 | /// fully general situation, in the worst case. This means: | |||
3488 | /// + "extern __thread" declaration. | |||
3489 | /// + Defined in a possibly unknown dynamic library. | |||
3490 | /// | |||
3491 | /// The general system is that each __thread variable has a [3 x i32] descriptor | |||
3492 | /// which contains information used by the runtime to calculate the address. The | |||
3493 | /// only part of this the compiler needs to know about is the first word, which | |||
3494 | /// contains a function pointer that must be called with the address of the | |||
3495 | /// entire descriptor in "r0". | |||
3496 | /// | |||
3497 | /// Since this descriptor may be in a different unit, in general access must | |||
3498 | /// proceed along the usual ARM rules. A common sequence to produce is: | |||
3499 | /// | |||
3500 | /// movw rT1, :lower16:_var$non_lazy_ptr | |||
3501 | /// movt rT1, :upper16:_var$non_lazy_ptr | |||
3502 | /// ldr r0, [rT1] | |||
3503 | /// ldr rT2, [r0] | |||
3504 | /// blx rT2 | |||
3505 | /// [...address now in r0...] | |||
3506 | SDValue | |||
3507 | ARMTargetLowering::LowerGlobalTLSAddressDarwin(SDValue Op, | |||
3508 | SelectionDAG &DAG) const { | |||
3509 | assert(Subtarget->isTargetDarwin() && | |||
3510 | "This function expects a Darwin target"); | |||
3511 | SDLoc DL(Op); | |||
3512 | ||||
3513 | // The first step is to get the address of the actual global symbol. This | |||
3514 | // is where the TLS descriptor lives. | |||
3515 | SDValue DescAddr = LowerGlobalAddressDarwin(Op, DAG); | |||
3516 | ||||
3517 | // The first entry in the descriptor is a function pointer that we must call | |||
3518 | // to obtain the address of the variable. | |||
3519 | SDValue Chain = DAG.getEntryNode(); | |||
3520 | SDValue FuncTLVGet = DAG.getLoad( | |||
3521 | MVT::i32, DL, Chain, DescAddr, | |||
3522 | MachinePointerInfo::getGOT(DAG.getMachineFunction()), Align(4), | |||
3523 | MachineMemOperand::MONonTemporal | MachineMemOperand::MODereferenceable | | |||
3524 | MachineMemOperand::MOInvariant); | |||
3525 | Chain = FuncTLVGet.getValue(1); | |||
3526 | ||||
3527 | MachineFunction &F = DAG.getMachineFunction(); | |||
3528 | MachineFrameInfo &MFI = F.getFrameInfo(); | |||
3529 | MFI.setAdjustsStack(true); | |||
3530 | ||||
3531 | // TLS calls preserve all registers except those that absolutely must be | |||
3532 | // trashed: R0 (it takes an argument), LR (it's a call) and CPSR (let's not be | |||
3533 | // silly). | |||
3534 | auto TRI = | |||
3535 | getTargetMachine().getSubtargetImpl(F.getFunction())->getRegisterInfo(); | |||
3536 | auto ARI = static_cast<const ARMRegisterInfo *>(TRI); | |||
3537 | const uint32_t *Mask = ARI->getTLSCallPreservedMask(DAG.getMachineFunction()); | |||
3538 | ||||
3539 | // Finally, we can make the call. This is just a degenerate version of a | |||
3540 | // normal ARM call node: r0 takes the address of the descriptor, and | |||
3541 | // returns the address of the variable in this thread. | |||
3542 | Chain = DAG.getCopyToReg(Chain, DL, ARM::R0, DescAddr, SDValue()); | |||
3543 | Chain = | |||
3544 | DAG.getNode(ARMISD::CALL, DL, DAG.getVTList(MVT::Other, MVT::Glue), | |||
3545 | Chain, FuncTLVGet, DAG.getRegister(ARM::R0, MVT::i32), | |||
3546 | DAG.getRegisterMask(Mask), Chain.getValue(1)); | |||
3547 | return DAG.getCopyFromReg(Chain, DL, ARM::R0, MVT::i32, Chain.getValue(1)); | |||
3548 | } | |||
3549 | ||||
3550 | SDValue | |||
3551 | ARMTargetLowering::LowerGlobalTLSAddressWindows(SDValue Op, | |||
3552 | SelectionDAG &DAG) const { | |||
3553 | assert(Subtarget->isTargetWindows() && "Windows specific TLS lowering"); | |||
3554 | ||||
3555 | SDValue Chain = DAG.getEntryNode(); | |||
3556 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); | |||
3557 | SDLoc DL(Op); | |||
3558 | ||||
3559 | // Load the current TEB (thread environment block) | |||
3560 | SDValue Ops[] = {Chain, | |||
3561 | DAG.getTargetConstant(Intrinsic::arm_mrc, DL, MVT::i32), | |||
3562 | DAG.getTargetConstant(15, DL, MVT::i32), | |||
3563 | DAG.getTargetConstant(0, DL, MVT::i32), | |||
3564 | DAG.getTargetConstant(13, DL, MVT::i32), | |||
3565 | DAG.getTargetConstant(0, DL, MVT::i32), | |||
3566 | DAG.getTargetConstant(2, DL, MVT::i32)}; | |||
3567 | SDValue CurrentTEB = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, | |||
3568 | DAG.getVTList(MVT::i32, MVT::Other), Ops); | |||
3569 | ||||
3570 | SDValue TEB = CurrentTEB.getValue(0); | |||
3571 | Chain = CurrentTEB.getValue(1); | |||
3572 | ||||
3573 | // Load the ThreadLocalStoragePointer from the TEB | |||
3574 | // A pointer to the TLS array is located at offset 0x2c from the TEB. | |||
3575 | SDValue TLSArray = | |||
3576 | DAG.getNode(ISD::ADD, DL, PtrVT, TEB, DAG.getIntPtrConstant(0x2c, DL)); | |||
3577 | TLSArray = DAG.getLoad(PtrVT, DL, Chain, TLSArray, MachinePointerInfo()); | |||
3578 | ||||
3579 | // The pointer to the thread's TLS data area is at offset (TLS index * 4) | |||
3580 | // into the TLS array. | |||
3581 | ||||
3582 | // Load the TLS index from the C runtime | |||
3583 | SDValue TLSIndex = | |||
3584 | DAG.getTargetExternalSymbol("_tls_index", PtrVT, ARMII::MO_NO_FLAG); | |||
3585 | TLSIndex = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, TLSIndex); | |||
3586 | TLSIndex = DAG.getLoad(PtrVT, DL, Chain, TLSIndex, MachinePointerInfo()); | |||
3587 | ||||
3588 | SDValue Slot = DAG.getNode(ISD::SHL, DL, PtrVT, TLSIndex, | |||
3589 | DAG.getConstant(2, DL, MVT::i32)); | |||
3590 | SDValue TLS = DAG.getLoad(PtrVT, DL, Chain, | |||
3591 | DAG.getNode(ISD::ADD, DL, PtrVT, TLSArray, Slot), | |||
3592 | MachinePointerInfo()); | |||
3593 | ||||
3594 | // Get the offset of the start of the .tls section (section base) | |||
3595 | const auto *GA = cast<GlobalAddressSDNode>(Op); | |||
3596 | auto *CPV = ARMConstantPoolConstant::Create(GA->getGlobal(), ARMCP::SECREL); | |||
3597 | SDValue Offset = DAG.getLoad( | |||
3598 | PtrVT, DL, Chain, | |||
3599 | DAG.getNode(ARMISD::Wrapper, DL, MVT::i32, | |||
3600 | DAG.getTargetConstantPool(CPV, PtrVT, Align(4))), | |||
3601 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); | |||
3602 | ||||
3603 | return DAG.getNode(ISD::ADD, DL, PtrVT, TLS, Offset); | |||
3604 | } | |||
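// The chain of loads above, restated as scalar pointer arithmetic. A sketch
// under assumed names (sketchWindowsTLSAddr, TEB, TlsIndex and SecRelOffset
// are illustrative, not real APIs).
static inline char *sketchWindowsTLSAddr(char *TEB, unsigned TlsIndex,
                                         unsigned SecRelOffset) {
  // TEB+0x2c holds ThreadLocalStoragePointer, the per-thread TLS array.
  char **TLSArray = *reinterpret_cast<char ***>(TEB + 0x2c);
  // _tls_index selects this module's TLS block within the array.
  char *TLSBase = TLSArray[TlsIndex];
  // Finally add the variable's SECREL offset within the .tls section.
  return TLSBase + SecRelOffset;
}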
3605 | ||||
3606 | // Lower ISD::GlobalTLSAddress using the "general dynamic" model | |||
3607 | SDValue | |||
3608 | ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, | |||
3609 | SelectionDAG &DAG) const { | |||
3610 | SDLoc dl(GA); | |||
3611 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); | |||
3612 | unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; | |||
3613 | MachineFunction &MF = DAG.getMachineFunction(); | |||
3614 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); | |||
3615 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); | |||
3616 | ARMConstantPoolValue *CPV = | |||
3617 | ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex, | |||
3618 | ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true); | |||
3619 | SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, Align(4)); | |||
3620 | Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument); | |||
3621 | Argument = DAG.getLoad( | |||
3622 | PtrVT, dl, DAG.getEntryNode(), Argument, | |||
3623 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); | |||
3624 | SDValue Chain = Argument.getValue(1); | |||
3625 | ||||
3626 | SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32); | |||
3627 | Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel); | |||
3628 | ||||
3629 | // call __tls_get_addr. | |||
3630 | ArgListTy Args; | |||
3631 | ArgListEntry Entry; | |||
3632 | Entry.Node = Argument; | |||
3633 | Entry.Ty = (Type *) Type::getInt32Ty(*DAG.getContext()); | |||
3634 | Args.push_back(Entry); | |||
3635 | ||||
3636 | // FIXME: is there useful debug info available here? | |||
3637 | TargetLowering::CallLoweringInfo CLI(DAG); | |||
3638 | CLI.setDebugLoc(dl).setChain(Chain).setLibCallee( | |||
3639 | CallingConv::C, Type::getInt32Ty(*DAG.getContext()), | |||
3640 | DAG.getExternalSymbol("__tls_get_addr", PtrVT), std::move(Args)); | |||
3641 | ||||
3642 | std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); | |||
3643 | return CallResult.first; | |||
3644 | } | |||
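// What the libcall amounts to at run time, sketched; the prototype follows
// the ELF TLS ABI, and sketchGeneralDynamic is illustrative only:
extern "C" void *__tls_get_addr(void *Descriptor);
static inline void *sketchGeneralDynamic(void *TLSGDDescriptor) {
  // The descriptor address is built from a constant-pool entry plus a PIC
  // add, then passed in r0; the runtime returns the variable's address.
  return __tls_get_addr(TLSGDDescriptor);
}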
3645 | ||||
3646 | // Lower ISD::GlobalTLSAddress using the "initial exec" or | |||
3647 | // "local exec" model. | |||
3648 | SDValue | |||
3649 | ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA, | |||
3650 | SelectionDAG &DAG, | |||
3651 | TLSModel::Model model) const { | |||
3652 | const GlobalValue *GV = GA->getGlobal(); | |||
3653 | SDLoc dl(GA); | |||
3654 | SDValue Offset; | |||
3655 | SDValue Chain = DAG.getEntryNode(); | |||
3656 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); | |||
3657 | // Get the Thread Pointer | |||
3658 | SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); | |||
3659 | ||||
3660 | if (model == TLSModel::InitialExec) { | |||
3661 | MachineFunction &MF = DAG.getMachineFunction(); | |||
3662 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); | |||
3663 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); | |||
3664 | // Initial exec model. | |||
3665 | unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; | |||
3666 | ARMConstantPoolValue *CPV = | |||
3667 | ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex, | |||
3668 | ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF, | |||
3669 | true); | |||
3670 | Offset = DAG.getTargetConstantPool(CPV, PtrVT, Align(4)); | |||
3671 | Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); | |||
3672 | Offset = DAG.getLoad( | |||
3673 | PtrVT, dl, Chain, Offset, | |||
3674 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); | |||
3675 | Chain = Offset.getValue(1); | |||
3676 | ||||
3677 | SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32); | |||
3678 | Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel); | |||
3679 | ||||
3680 | Offset = DAG.getLoad( | |||
3681 | PtrVT, dl, Chain, Offset, | |||
3682 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); | |||
3683 | } else { | |||
3684 | // local exec model | |||
3685 | assert(model == TLSModel::LocalExec); | |||
3686 | ARMConstantPoolValue *CPV = | |||
3687 | ARMConstantPoolConstant::Create(GV, ARMCP::TPOFF); | |||
3688 | Offset = DAG.getTargetConstantPool(CPV, PtrVT, Align(4)); | |||
3689 | Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); | |||
3690 | Offset = DAG.getLoad( | |||
3691 | PtrVT, dl, Chain, Offset, | |||
3692 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); | |||
3693 | } | |||
3694 | ||||
3695 | // The address of the thread local variable is the add of the thread | |||
3696 | // pointer with the offset of the variable. | |||
3697 | return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset); | |||
3698 | } | |||
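// Both exec models reduce to thread-pointer plus offset; a scalar sketch
// (names illustrative): InitialExec loads the offset from the GOT at run
// time, while LocalExec bakes it in as a link-time constant.
static inline char *sketchTLSExecAddr(char *ThreadPointer, long TPOffset) {
  return ThreadPointer + TPOffset;
}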
3699 | ||||
3700 | SDValue | |||
3701 | ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { | |||
3702 | GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); | |||
3703 | if (DAG.getTarget().useEmulatedTLS()) | |||
3704 | return LowerToTLSEmulatedModel(GA, DAG); | |||
3705 | ||||
3706 | if (Subtarget->isTargetDarwin()) | |||
3707 | return LowerGlobalTLSAddressDarwin(Op, DAG); | |||
3708 | ||||
3709 | if (Subtarget->isTargetWindows()) | |||
3710 | return LowerGlobalTLSAddressWindows(Op, DAG); | |||
3711 | ||||
3712 | // TODO: implement the "local dynamic" model | |||
3713 | assert(Subtarget->isTargetELF() && "Only ELF implemented here"); | |||
3714 | TLSModel::Model model = getTargetMachine().getTLSModel(GA->getGlobal()); | |||
3715 | ||||
3716 | switch (model) { | |||
3717 | case TLSModel::GeneralDynamic: | |||
3718 | case TLSModel::LocalDynamic: | |||
3719 | return LowerToTLSGeneralDynamicModel(GA, DAG); | |||
3720 | case TLSModel::InitialExec: | |||
3721 | case TLSModel::LocalExec: | |||
3722 | return LowerToTLSExecModels(GA, DAG, model); | |||
3723 | } | |||
3724 | llvm_unreachable("bogus TLS model")::llvm::llvm_unreachable_internal("bogus TLS model", "llvm/lib/Target/ARM/ARMISelLowering.cpp" , 3724); | |||
3725 | } | |||
3726 | ||||
3727 | /// Return true if all users of V are within function F, looking through | |||
3728 | /// ConstantExprs. | |||
3729 | static bool allUsersAreInFunction(const Value *V, const Function *F) { | |||
3730 | SmallVector<const User*,4> Worklist(V->users()); | |||
3731 | while (!Worklist.empty()) { | |||
3732 | auto *U = Worklist.pop_back_val(); | |||
3733 | if (isa<ConstantExpr>(U)) { | |||
3734 | append_range(Worklist, U->users()); | |||
3735 | continue; | |||
3736 | } | |||
3737 | ||||
3738 | auto *I = dyn_cast<Instruction>(U); | |||
3739 | if (!I || I->getParent()->getParent() != F) | |||
3740 | return false; | |||
3741 | } | |||
3742 | return true; | |||
3743 | } | |||
3744 | ||||
3745 | static SDValue promoteToConstantPool(const ARMTargetLowering *TLI, | |||
3746 | const GlobalValue *GV, SelectionDAG &DAG, | |||
3747 | EVT PtrVT, const SDLoc &dl) { | |||
3748 | // If we're creating a pool entry for a constant global with unnamed address, | |||
3749 | // and the global is small enough, we can emit it inline into the constant pool | |||
3750 | // to save ourselves an indirection. | |||
3751 | // | |||
3752 | // This is a win if the constant is only used in one function (so it doesn't | |||
3753 | // need to be duplicated) or duplicating the constant wouldn't increase code | |||
3754 | // size (implying the constant is no larger than 4 bytes). | |||
3755 | const Function &F = DAG.getMachineFunction().getFunction(); | |||
3756 | ||||
3757 | // We rely on this decision to inline being idempotent and unrelated to the | |||
3758 | // use-site. We know that if we inline a variable at one use site, we'll | |||
3759 | // inline it elsewhere too (and reuse the constant pool entry). Fast-isel | |||
3760 | // doesn't know about this optimization, so bail out if it's enabled; else | |||
3761 | // we could decide to inline here (and thus never emit the GV) while the | |||
3762 | // fast-isel generated code still requires the GV. | |||
3763 | if (!EnableConstpoolPromotion || | |||
3764 | DAG.getMachineFunction().getTarget().Options.EnableFastISel) | |||
3765 | return SDValue(); | |||
3766 | ||||
3767 | auto *GVar = dyn_cast<GlobalVariable>(GV); | |||
3768 | if (!GVar || !GVar->hasInitializer() || | |||
3769 | !GVar->isConstant() || !GVar->hasGlobalUnnamedAddr() || | |||
3770 | !GVar->hasLocalLinkage()) | |||
3771 | return SDValue(); | |||
3772 | ||||
3773 | // If we inline a value that contains relocations, we move the relocations | |||
3774 | // from .data to .text. This is not allowed in position-independent code. | |||
3775 | auto *Init = GVar->getInitializer(); | |||
3776 | if ((TLI->isPositionIndependent() || TLI->getSubtarget()->isROPI()) && | |||
3777 | Init->needsDynamicRelocation()) | |||
3778 | return SDValue(); | |||
3779 | ||||
3780 | // The constant islands pass can only really deal with alignment requests | |||
3781 | // <= 4 bytes and cannot pad constants itself. Therefore we cannot promote | |||
3782 | // any type wanting greater alignment requirements than 4 bytes. We also | |||
3783 | // can only promote constants that are multiples of 4 bytes in size or | |||
3784 | // are paddable to a multiple of 4. Currently we only try to pad constants | |||
3785 | // that are strings, for simplicity. | |||
3786 | auto *CDAInit = dyn_cast<ConstantDataArray>(Init); | |||
3787 | unsigned Size = DAG.getDataLayout().getTypeAllocSize(Init->getType()); | |||
3788 | Align PrefAlign = DAG.getDataLayout().getPreferredAlign(GVar); | |||
3789 | unsigned RequiredPadding = 4 - (Size % 4); | |||
3790 | bool PaddingPossible = | |||
3791 | RequiredPadding == 4 || (CDAInit && CDAInit->isString()); | |||
3792 | if (!PaddingPossible || PrefAlign > 4 || Size > ConstpoolPromotionMaxSize || | |||
3793 | Size == 0) | |||
3794 | return SDValue(); | |||
3795 | ||||
3796 | unsigned PaddedSize = Size + ((RequiredPadding == 4) ? 0 : RequiredPadding); | |||
3797 | MachineFunction &MF = DAG.getMachineFunction(); | |||
3798 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); | |||
3799 | ||||
3800 | // We can't bloat the constant pool too much, else the ConstantIslands pass | |||
3801 | // may fail to converge. If we haven't promoted this global yet (it may have | |||
3802 | // multiple uses), and promoting it would increase the constant pool size (Sz | |||
3803 | // > 4), ensure we have space to do so up to MaxTotal. | |||
3804 | if (!AFI->getGlobalsPromotedToConstantPool().count(GVar) && Size > 4) | |||
3805 | if (AFI->getPromotedConstpoolIncrease() + PaddedSize - 4 >= | |||
3806 | ConstpoolPromotionMaxTotal) | |||
3807 | return SDValue(); | |||
3808 | ||||
3809 | // This is only valid if all users are in a single function; we can't clone | |||
3810 | // the constant in general. The LLVM IR unnamed_addr allows merging | |||
3811 | // constants, but not cloning them. | |||
3812 | // | |||
3813 | // We could potentially allow cloning if we could prove all uses of the | |||
3814 | // constant in the current function don't care about the address, like | |||
3815 | // printf format strings. But that isn't implemented for now. | |||
3816 | if (!allUsersAreInFunction(GVar, &F)) | |||
3817 | return SDValue(); | |||
3818 | ||||
3819 | // We're going to inline this global. Pad it out if needed. | |||
3820 | if (RequiredPadding != 4) { | |||
3821 | StringRef S = CDAInit->getAsString(); | |||
3822 | ||||
3823 | SmallVector<uint8_t,16> V(S.size()); | |||
3824 | std::copy(S.bytes_begin(), S.bytes_end(), V.begin()); | |||
3825 | while (RequiredPadding--) | |||
3826 | V.push_back(0); | |||
3827 | Init = ConstantDataArray::get(*DAG.getContext(), V); | |||
3828 | } | |||
3829 | ||||
3830 | auto CPVal = ARMConstantPoolConstant::Create(GVar, Init); | |||
3831 | SDValue CPAddr = DAG.getTargetConstantPool(CPVal, PtrVT, Align(4)); | |||
3832 | if (!AFI->getGlobalsPromotedToConstantPool().count(GVar)) { | |||
3833 | AFI->markGlobalAsPromotedToConstantPool(GVar); | |||
3834 | AFI->setPromotedConstpoolIncrease(AFI->getPromotedConstpoolIncrease() + | |||
3835 | PaddedSize - 4); | |||
3836 | } | |||
3837 | ++NumConstpoolPromoted; | |||
3838 | return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); | |||
3839 | } | |||
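// The padding arithmetic above, in isolation: e.g. a 6-byte string yields
// RequiredPadding == 2 and a padded size of 8, while an already-aligned
// 8-byte constant yields RequiredPadding == 4, meaning no padding at all.
// (sketchPaddedSize is illustrative, not part of this file's interface.)
static inline unsigned sketchPaddedSize(unsigned Size) {
  unsigned RequiredPadding = 4 - (Size % 4); // 4 means "already a multiple"
  return Size + (RequiredPadding == 4 ? 0 : RequiredPadding);
}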
3840 | ||||
3841 | bool ARMTargetLowering::isReadOnly(const GlobalValue *GV) const { | |||
3842 | if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV)) | |||
3843 | if (!(GV = GA->getAliaseeObject())) | |||
3844 | return false; | |||
3845 | if (const auto *V = dyn_cast<GlobalVariable>(GV)) | |||
3846 | return V->isConstant(); | |||
3847 | return isa<Function>(GV); | |||
3848 | } | |||
3849 | ||||
3850 | SDValue ARMTargetLowering::LowerGlobalAddress(SDValue Op, | |||
3851 | SelectionDAG &DAG) const { | |||
3852 | switch (Subtarget->getTargetTriple().getObjectFormat()) { | |||
3853 | default: llvm_unreachable("unknown object format"); | |||
3854 | case Triple::COFF: | |||
3855 | return LowerGlobalAddressWindows(Op, DAG); | |||
3856 | case Triple::ELF: | |||
3857 | return LowerGlobalAddressELF(Op, DAG); | |||
3858 | case Triple::MachO: | |||
3859 | return LowerGlobalAddressDarwin(Op, DAG); | |||
3860 | } | |||
3861 | } | |||
3862 | ||||
3863 | SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op, | |||
3864 | SelectionDAG &DAG) const { | |||
3865 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); | |||
3866 | SDLoc dl(Op); | |||
3867 | const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); | |||
3868 | const TargetMachine &TM = getTargetMachine(); | |||
3869 | bool IsRO = isReadOnly(GV); | |||
3870 | ||||
3871 | // Call promoteToConstantPool only if not generating an execute-only (XO) text section. | |||
3872 | if (TM.shouldAssumeDSOLocal(*GV->getParent(), GV) && !Subtarget->genExecuteOnly()) | |||
3873 | if (SDValue V = promoteToConstantPool(this, GV, DAG, PtrVT, dl)) | |||
3874 | return V; | |||
3875 | ||||
3876 | if (isPositionIndependent()) { | |||
3877 | bool UseGOT_PREL = !TM.shouldAssumeDSOLocal(*GV->getParent(), GV); | |||
3878 | SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, | |||
3879 | UseGOT_PREL ? ARMII::MO_GOT : 0); | |||
3880 | SDValue Result = DAG.getNode(ARMISD::WrapperPIC, dl, PtrVT, G); | |||
3881 | if (UseGOT_PREL) | |||
3882 | Result = | |||
3883 | DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result, | |||
3884 | MachinePointerInfo::getGOT(DAG.getMachineFunction())); | |||
3885 | return Result; | |||
3886 | } else if (Subtarget->isROPI() && IsRO) { | |||
3887 | // PC-relative. | |||
3888 | SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT); | |||
3889 | SDValue Result = DAG.getNode(ARMISD::WrapperPIC, dl, PtrVT, G); | |||
3890 | return Result; | |||
3891 | } else if (Subtarget->isRWPI() && !IsRO) { | |||
3892 | // SB-relative. | |||
3893 | SDValue RelAddr; | |||
3894 | if (Subtarget->useMovt()) { | |||
3895 | ++NumMovwMovt; | |||
3896 | SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, ARMII::MO_SBREL); | |||
3897 | RelAddr = DAG.getNode(ARMISD::Wrapper, dl, PtrVT, G); | |||
3898 | } else { // use literal pool for address constant | |||
3899 | ARMConstantPoolValue *CPV = | |||
3900 | ARMConstantPoolConstant::Create(GV, ARMCP::SBREL); | |||
3901 | SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, Align(4)); | |||
3902 | CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); | |||
3903 | RelAddr = DAG.getLoad( | |||
3904 | PtrVT, dl, DAG.getEntryNode(), CPAddr, | |||
3905 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); | |||
3906 | } | |||
3907 | SDValue SB = DAG.getCopyFromReg(DAG.getEntryNode(), dl, ARM::R9, PtrVT); | |||
3908 | SDValue Result = DAG.getNode(ISD::ADD, dl, PtrVT, SB, RelAddr); | |||
3909 | return Result; | |||
3910 | } | |||
3911 | ||||
3912 | // If we have T2 ops, we can materialize the address directly via movt/movw | |||
3913 | // pair. This is always cheaper. | |||
3914 | if (Subtarget->useMovt()) { | |||
3915 | ++NumMovwMovt; | |||
3916 | // FIXME: Once remat is capable of dealing with instructions with register | |||
3917 | // operands, expand this into two nodes. | |||
3918 | return DAG.getNode(ARMISD::Wrapper, dl, PtrVT, | |||
3919 | DAG.getTargetGlobalAddress(GV, dl, PtrVT)); | |||
3920 | } else { | |||
3921 | SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, Align(4)); | |||
3922 | CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); | |||
3923 | return DAG.getLoad( | |||
3924 | PtrVT, dl, DAG.getEntryNode(), CPAddr, | |||
3925 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); | |||
3926 | } | |||
3927 | } | |||
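// The tiers above, summarized as address arithmetic (a sketch, not code):
//   PIC, preemptible:  addr = *(GOT entry), reached PC-relatively
//   PIC, DSO-local:    addr = PC + pcrel offset (WrapperPIC, no load)
//   ROPI, read-only:   addr = PC + pcrel offset
//   RWPI, writable:    addr = R9 (SB) + sbrel offset
//   otherwise:         addr = movw/movt immediate, or a constant-pool load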
3928 | ||||
3929 | SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op, | |||
3930 | SelectionDAG &DAG) const { | |||
3931 | assert(!Subtarget->isROPI() && !Subtarget->isRWPI() && | |||
3932 | "ROPI/RWPI not currently supported for Darwin"); | |||
3933 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); | |||
3934 | SDLoc dl(Op); | |||
3935 | const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); | |||
3936 | ||||
3937 | if (Subtarget->useMovt()) | |||
3938 | ++NumMovwMovt; | |||
3939 | ||||
3940 | // FIXME: Once remat is capable of dealing with instructions with register | |||
3941 | // operands, expand this into multiple nodes | |||
3942 | unsigned Wrapper = | |||
3943 | isPositionIndependent() ? ARMISD::WrapperPIC : ARMISD::Wrapper; | |||
3944 | ||||
3945 | SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, ARMII::MO_NONLAZY); | |||
3946 | SDValue Result = DAG.getNode(Wrapper, dl, PtrVT, G); | |||
3947 | ||||
3948 | if (Subtarget->isGVIndirectSymbol(GV)) | |||
3949 | Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result, | |||
3950 | MachinePointerInfo::getGOT(DAG.getMachineFunction())); | |||
3951 | return Result; | |||
3952 | } | |||
3953 | ||||
3954 | SDValue ARMTargetLowering::LowerGlobalAddressWindows(SDValue Op, | |||
3955 | SelectionDAG &DAG) const { | |||
3956 | assert(Subtarget->isTargetWindows() && "non-Windows COFF is not supported"); | |||
3957 | assert(Subtarget->useMovt() && | |||
3958 | "Windows on ARM expects to use movw/movt"); | |||
3959 | assert(!Subtarget->isROPI() && !Subtarget->isRWPI() && | |||
3960 | "ROPI/RWPI not currently supported for Windows"); | |||
3961 | ||||
3962 | const TargetMachine &TM = getTargetMachine(); | |||
3963 | const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); | |||
3964 | ARMII::TOF TargetFlags = ARMII::MO_NO_FLAG; | |||
3965 | if (GV->hasDLLImportStorageClass()) | |||
3966 | TargetFlags = ARMII::MO_DLLIMPORT; | |||
3967 | else if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV)) | |||
3968 | TargetFlags = ARMII::MO_COFFSTUB; | |||
3969 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); | |||
3970 | SDValue Result; | |||
3971 | SDLoc DL(Op); | |||
3972 | ||||
3973 | ++NumMovwMovt; | |||
3974 | ||||
3975 | // FIXME: Once remat is capable of dealing with instructions with register | |||
3976 | // operands, expand this into two nodes. | |||
3977 | Result = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, | |||
3978 | DAG.getTargetGlobalAddress(GV, DL, PtrVT, /*offset=*/0, | |||
3979 | TargetFlags)); | |||
3980 | if (TargetFlags & (ARMII::MO_DLLIMPORT | ARMII::MO_COFFSTUB)) | |||
3981 | Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result, | |||
3982 | MachinePointerInfo::getGOT(DAG.getMachineFunction())); | |||
3983 | return Result; | |||
3984 | } | |||
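// Net effect, roughly sketched: DSO-local globals are materialized directly
// with a movw/movt pair, while dllimport globals and non-local globals
// reached through COFF stubs pay one extra load through their import/stub
// pointer.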
3985 | ||||
3986 | SDValue | |||
3987 | ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const { | |||
3988 | SDLoc dl(Op); | |||
3989 | SDValue Val = DAG.getConstant(0, dl, MVT::i32); | |||
3990 | return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl, | |||
3991 | DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0), | |||
3992 | Op.getOperand(1), Val); | |||
3993 | } | |||
3994 | ||||
3995 | SDValue | |||
3996 | ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const { | |||
3997 | SDLoc dl(Op); | |||
3998 | return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0), | |||
3999 | Op.getOperand(1), DAG.getConstant(0, dl, MVT::i32)); | |||
4000 | } | |||
4001 | ||||
4002 | SDValue ARMTargetLowering::LowerEH_SJLJ_SETUP_DISPATCH(SDValue Op, | |||
4003 | SelectionDAG &DAG) const { | |||
4004 | SDLoc dl(Op); | |||
4005 | return DAG.getNode(ARMISD::EH_SJLJ_SETUP_DISPATCH, dl, MVT::Other, | |||
4006 | Op.getOperand(0)); | |||
4007 | } | |||
4008 | ||||
4009 | SDValue ARMTargetLowering::LowerINTRINSIC_VOID( | |||
4010 | SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget) const { | |||
4011 | unsigned IntNo = | |||
4012 | cast<ConstantSDNode>( | |||
4013 | Op.getOperand(Op.getOperand(0).getValueType() == MVT::Other)) | |||
4014 | ->getZExtValue(); | |||
4015 | switch (IntNo) { | |||
4016 | default: | |||
4017 | return SDValue(); // Don't custom lower most intrinsics. | |||
4018 | case Intrinsic::arm_gnu_eabi_mcount: { | |||
4019 | MachineFunction &MF = DAG.getMachineFunction(); | |||
4020 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); | |||
4021 | SDLoc dl(Op); | |||
4022 | SDValue Chain = Op.getOperand(0); | |||
4023 | // call "\01__gnu_mcount_nc" | |||
4024 | const ARMBaseRegisterInfo *ARI = Subtarget->getRegisterInfo(); | |||
4025 | const uint32_t *Mask = | |||
4026 | ARI->getCallPreservedMask(DAG.getMachineFunction(), CallingConv::C); | |||
4027 | assert(Mask && "Missing call preserved mask for calling convention"); | |||
4028 | // Mark LR an implicit live-in. | |||
4029 | Register Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32)); | |||
4030 | SDValue ReturnAddress = | |||
4031 | DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, PtrVT); | |||
4032 | constexpr EVT ResultTys[] = {MVT::Other, MVT::Glue}; | |||
4033 | SDValue Callee = | |||
4034 | DAG.getTargetExternalSymbol("\01__gnu_mcount_nc", PtrVT, 0); | |||
4035 | SDValue RegisterMask = DAG.getRegisterMask(Mask); | |||
4036 | if (Subtarget->isThumb()) | |||
4037 | return SDValue( | |||
4038 | DAG.getMachineNode( | |||
4039 | ARM::tBL_PUSHLR, dl, ResultTys, | |||
4040 | {ReturnAddress, DAG.getTargetConstant(ARMCC::AL, dl, PtrVT), | |||
4041 | DAG.getRegister(0, PtrVT), Callee, RegisterMask, Chain}), | |||
4042 | 0); | |||
4043 | return SDValue( | |||
4044 | DAG.getMachineNode(ARM::BL_PUSHLR, dl, ResultTys, | |||
4045 | {ReturnAddress, Callee, RegisterMask, Chain}), | |||
4046 | 0); | |||
4047 | } | |||
4048 | } | |||
4049 | } | |||
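// The BL_PUSHLR/tBL_PUSHLR pseudos above model the __gnu_mcount_nc ABI,
// which expects the caller's lr to be pushed before the call (roughly
// "push {lr}; bl __gnu_mcount_nc") and popped again by the callee.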
4050 | ||||
4051 | SDValue | |||
4052 | ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG, | |||
4053 | const ARMSubtarget *Subtarget) const { | |||
4054 | unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); | |||
4055 | SDLoc dl(Op); | |||
4056 | switch (IntNo) { | |||
4057 | default: return SDValue(); // Don't custom lower most intrinsics. | |||
4058 | case Intrinsic::thread_pointer: { | |||
4059 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); | |||
4060 | return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); | |||
4061 | } | |||
4062 | case Intrinsic::arm_cls: { // see the scalar sketch after this function | |||
4063 | const SDValue &Operand = Op.getOperand(1); | |||
4064 | const EVT VTy = Op.getValueType(); | |||
4065 | SDValue SRA = | |||
4066 | DAG.getNode(ISD::SRA, dl, VTy, Operand, DAG.getConstant(31, dl, VTy)); | |||
4067 | SDValue XOR = DAG.getNode(ISD::XOR, dl, VTy, SRA, Operand); | |||
4068 | SDValue SHL = | |||
4069 | DAG.getNode(ISD::SHL, dl, VTy, XOR, DAG.getConstant(1, dl, VTy)); | |||
4070 | SDValue OR = | |||
4071 | DAG.getNode(ISD::OR, dl, VTy, SHL, DAG.getConstant(1, dl, VTy)); | |||
4072 | SDValue Result = DAG.getNode(ISD::CTLZ, dl, VTy, OR); | |||
4073 | return Result; | |||
4074 | } | |||
4075 | case Intrinsic::arm_cls64: { | |||
4076 | // cls(x) = if cls(hi(x)) != 31 then cls(hi(x)) | |||
4077 | // else 31 + clz(if hi(x) == 0 then lo(x) else not(lo(x))) | |||
4078 | const SDValue &Operand = Op.getOperand(1); | |||
4079 | const EVT VTy = Op.getValueType(); | |||
4080 | ||||
4081 | SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VTy, Operand, | |||
4082 | DAG.getConstant(1, dl, VTy)); | |||
4083 | SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VTy, Operand, | |||
4084 | DAG.getConstant(0, dl, VTy)); | |||
4085 | SDValue Constant0 = DAG.getConstant(0, dl, VTy); | |||
4086 | SDValue Constant1 = DAG.getConstant(1, dl, VTy); | |||
4087 | SDValue Constant31 = DAG.getConstant(31, dl, VTy); | |||
4088 | SDValue SRAHi = DAG.getNode(ISD::SRA, dl, VTy, Hi, Constant31); | |||
4089 | SDValue XORHi = DAG.getNode(ISD::XOR, dl, VTy, SRAHi, Hi); | |||
4090 | SDValue SHLHi = DAG.getNode(ISD::SHL, dl, VTy, XORHi, Constant1); | |||
4091 | SDValue ORHi = DAG.getNode(ISD::OR, dl, VTy, SHLHi, Constant1); | |||
4092 | SDValue CLSHi = DAG.getNode(ISD::CTLZ, dl, VTy, ORHi); | |||
4093 | SDValue CheckLo = | |||
4094 | DAG.getSetCC(dl, MVT::i1, CLSHi, Constant31, ISD::CondCode::SETEQ); | |||
4095 | SDValue HiIsZero = | |||
4096 | DAG.getSetCC(dl, MVT::i1, Hi, Constant0, ISD::CondCode::SETEQ); | |||
4097 | SDValue AdjustedLo = | |||
4098 | DAG.getSelect(dl, VTy, HiIsZero, Lo, DAG.getNOT(dl, Lo, VTy)); | |||
4099 | SDValue CLZAdjustedLo = DAG.getNode(ISD::CTLZ, dl, VTy, AdjustedLo); | |||
4100 | SDValue Result = | |||
4101 | DAG.getSelect(dl, VTy, CheckLo, | |||
4102 | DAG.getNode(ISD::ADD, dl, VTy, CLZAdjustedLo, Constant31), CLSHi); | |||
4103 | return Result; | |||
4104 | } | |||
4105 | case Intrinsic::eh_sjlj_lsda: { | |||
4106 | MachineFunction &MF = DAG.getMachineFunction(); | |||
4107 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); | |||
4108 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); | |||
4109 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); | |||
4110 | SDValue CPAddr; | |||
4111 | bool IsPositionIndependent = isPositionIndependent(); | |||
4112 | unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0; | |||
4113 | ARMConstantPoolValue *CPV = | |||
4114 | ARMConstantPoolConstant::Create(&MF.getFunction(), ARMPCLabelIndex, | |||
4115 | ARMCP::CPLSDA, PCAdj); | |||
4116 | CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, Align(4)); | |||
4117 | CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); | |||
4118 | SDValue Result = DAG.getLoad( | |||
4119 | PtrVT, dl, DAG.getEntryNode(), CPAddr, | |||
4120 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); | |||
4121 | ||||
4122 | if (IsPositionIndependent) { | |||
4123 | SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32); | |||
4124 | Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); | |||
4125 | } | |||
4126 | return Result; | |||
4127 | } | |||
4128 | case Intrinsic::arm_neon_vabs: | |||
4129 | return DAG.getNode(ISD::ABS, SDLoc(Op), Op.getValueType(), | |||
4130 | Op.getOperand(1)); | |||
4131 | case Intrinsic::arm_neon_vmulls: | |||
4132 | case Intrinsic::arm_neon_vmullu: { | |||
4133 | unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls) | |||
4134 | ? ARMISD::VMULLs : ARMISD::VMULLu; | |||
4135 | return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(), | |||
4136 | Op.getOperand(1), Op.getOperand(2)); | |||
4137 | } | |||
4138 | case Intrinsic::arm_neon_vminnm: | |||
4139 | case Intrinsic::arm_neon_vmaxnm: { | |||
4140 | unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminnm) | |||
4141 | ? ISD::FMINNUM : ISD::FMAXNUM; | |||
4142 | return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(), | |||
4143 | Op.getOperand(1), Op.getOperand(2)); | |||
4144 | } | |||
4145 | case Intrinsic::arm_neon_vminu: | |||
4146 | case Intrinsic::arm_neon_vmaxu: { | |||
4147 | if (Op.getValueType().isFloatingPoint()) | |||
4148 | return SDValue(); | |||
4149 | unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminu) | |||
4150 | ? ISD::UMIN : ISD::UMAX; | |||
4151 | return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(), | |||
4152 | Op.getOperand(1), Op.getOperand(2)); | |||
4153 | } | |||
4154 | case Intrinsic::arm_neon_vmins: | |||
4155 | case Intrinsic::arm_neon_vmaxs: { | |||
4156 | // v{min,max}s is overloaded between signed integers and floats. | |||
4157 | if (!Op.getValueType().isFloatingPoint()) { | |||
4158 | unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins) | |||
4159 | ? ISD::SMIN : ISD::SMAX; | |||
4160 | return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(), | |||
4161 | Op.getOperand(1), Op.getOperand(2)); | |||
4162 | } | |||
4163 | unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins) | |||
4164 | ? ISD::FMINIMUM : ISD::FMAXIMUM; | |||
4165 | return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(), | |||
4166 | Op.getOperand(1), Op.getOperand(2)); | |||
4167 | } | |||
4168 | case Intrinsic::arm_neon_vtbl1: | |||
4169 | return DAG.getNode(ARMISD::VTBL1, SDLoc(Op), Op.getValueType(), | |||
4170 | Op.getOperand(1), Op.getOperand(2)); | |||
4171 | case Intrinsic::arm_neon_vtbl2: | |||
4172 | return DAG.getNode(ARMISD::VTBL2, SDLoc(Op), Op.getValueType(), | |||
4173 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); | |||
4174 | case Intrinsic::arm_mve_pred_i2v: | |||
4175 | case Intrinsic::arm_mve_pred_v2i: | |||
4176 | return DAG.getNode(ARMISD::PREDICATE_CAST, SDLoc(Op), Op.getValueType(), | |||
4177 | Op.getOperand(1)); | |||
4178 | case Intrinsic::arm_mve_vreinterpretq: | |||
4179 | return DAG.getNode(ARMISD::VECTOR_REG_CAST, SDLoc(Op), Op.getValueType(), | |||
4180 | Op.getOperand(1)); | |||
4181 | case Intrinsic::arm_mve_lsll: | |||
4182 | return DAG.getNode(ARMISD::LSLL, SDLoc(Op), Op->getVTList(), | |||
4183 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); | |||
4184 | case Intrinsic::arm_mve_asrl: | |||
4185 | return DAG.getNode(ARMISD::ASRL, SDLoc(Op), Op->getVTList(), | |||
4186 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); | |||
4187 | } | |||
4188 | } | |||
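// For reference, the Intrinsic::arm_cls expansion above computes, in scalar
// terms, cls(x) = clz(((x ^ (x >> 31)) << 1) | 1): the count of leading bits
// equal to the sign bit, excluding the sign bit itself. A sketch, assuming
// <cstdint> and a GCC/Clang-style __builtin_clz:
static inline unsigned sketchCls(int32_t X) {
  uint32_t Y = (static_cast<uint32_t>(X ^ (X >> 31)) << 1) | 1u;
  return static_cast<unsigned>(__builtin_clz(Y)); // e.g. sketchCls(0) == 31
}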
4189 | ||||
4190 | static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG, | |||
4191 | const ARMSubtarget *Subtarget) { | |||
4192 | SDLoc dl(Op); | |||
4193 | ConstantSDNode *SSIDNode = cast<ConstantSDNode>(Op.getOperand(2)); | |||
4194 | auto SSID = static_cast<SyncScope::ID>(SSIDNode->getZExtValue()); | |||
4195 | if (SSID == SyncScope::SingleThread) | |||
4196 | return Op; | |||
4197 | ||||
4198 | if (!Subtarget->hasDataBarrier()) { | |||
4199 | // Some ARMv6 CPUs can support data barriers with an mcr instruction. | |||
4200 | // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get | |||
4201 | // here. | |||
4202 | assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() && | |||
4203 | "Unexpected ISD::ATOMIC_FENCE encountered. Should be libcall!"); | |||
4204 | return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0), | |||
4205 | DAG.getConstant(0, dl, MVT::i32)); | |||
4206 | } | |||
4207 | ||||
4208 | ConstantSDNode *OrdN = cast<ConstantSDNode>(Op.getOperand(1)); | |||
4209 | AtomicOrdering Ord = static_cast<AtomicOrdering>(OrdN->getZExtValue()); | |||
4210 | ARM_MB::MemBOpt Domain = ARM_MB::ISH; | |||
4211 | if (Subtarget->isMClass()) { | |||
4212 | // Only a full system barrier exists in the M-class architectures. | |||
4213 | Domain = ARM_MB::SY; | |||
4214 | } else if (Subtarget->preferISHSTBarriers() && | |||
4215 | Ord == AtomicOrdering::Release) { | |||
4216 | // Swift happens to implement ISHST barriers in a way that's compatible with | |||
4217 | // Release semantics but weaker than ISH so we'd be fools not to use | |||
4218 | // it. Beware: other processors probably don't! | |||
4219 | Domain = ARM_MB::ISHST; | |||
4220 | } | |||
4221 | ||||
4222 | return DAG.getNode(ISD::INTRINSIC_VOID, dl, MVT::Other, Op.getOperand(0), | |||
4223 | DAG.getConstant(Intrinsic::arm_dmb, dl, MVT::i32), | |||
4224 | DAG.getConstant(Domain, dl, MVT::i32)); | |||
4225 | } | |||
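// Net effect, sketched: most fences become "dmb ish"; M-class cores only
// have the full-system barrier, so they get "dmb sy"; release fences on
// Swift cores use the cheaper "dmb ishst"; and single-thread fences need no
// hardware barrier at all.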
4226 | ||||
4227 | static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG, | |||
4228 | const ARMSubtarget *Subtarget) { | |||
4229 | // ARM before v5TE and Thumb1 do not have preload instructions. | |||
4230 | if (!(Subtarget->isThumb2() || | |||
4231 | (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps()))) | |||
4232 | // Just preserve the chain. | |||
4233 | return Op.getOperand(0); | |||
4234 | ||||
4235 | SDLoc dl(Op); | |||
4236 | unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1; | |||
4237 | if (!isRead && | |||
4238 | (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension())) | |||
4239 | // ARMv7 with MP extension has PLDW. | |||
4240 | return Op.getOperand(0); | |||
4241 | ||||
4242 | unsigned isData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue(); | |||
4243 | if (Subtarget->isThumb()) { | |||
4244 | // Invert the bits. | |||
4245 | isRead = ~isRead & 1; | |||
4246 | isData = ~isData & 1; | |||
4247 | } | |||
4248 | ||||
4249 | return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0), | |||
4250 | Op.getOperand(1), DAG.getConstant(isRead, dl, MVT::i32), | |||
4251 | DAG.getConstant(isData, dl, MVT::i32)); | |||
4252 | } | |||
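// Operand layout consumed above, for reference: ISD::PREFETCH carries
// (chain, address, rw, locality, cache type), where rw is 1 for a write
// prefetch and cache type is 1 for data; the ARM and Thumb encodings of the
// PRELOAD flags have opposite polarity, hence the bit inversions above.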
4253 | ||||
4254 | static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) { | |||
4255 | MachineFunction &MF = DAG.getMachineFunction(); | |||
4256 | ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>(); | |||
4257 | ||||
4258 | // vastart just stores the address of the VarArgsFrameIndex slot into the | |||
4259 | // memory location argument. | |||
4260 | SDLoc dl(Op); | |||
4261 | EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); | |||
4262 | SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); | |||
4263 | const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); | |||
4264 | return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), | |||
4265 | MachinePointerInfo(SV)); | |||
4266 | } | |||
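// In effect (a sketch): va_start(ap) stores the address of the save area at
// VarArgsFrameIndex into ap, so subsequent va_arg reads walk upward through
// the saved registers and then the caller's stack arguments.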
4267 | ||||
4268 | SDValue ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, | |||
4269 | CCValAssign &NextVA, | |||
4270 | SDValue &Root, | |||
4271 | SelectionDAG &DAG, | |||
4272 | const SDLoc &dl) const { | |||
4273 | MachineFunction &MF = DAG.getMachineFunction(); | |||
4274 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); | |||
4275 | ||||
4276 | const TargetRegisterClass *RC; | |||
4277 | if (AFI->isThumb1OnlyFunction()) | |||
4278 | RC = &ARM::tGPRRegClass; | |||
4279 | else | |||
4280 | RC = &ARM::GPRRegClass; | |||
4281 | ||||
4282 | // Transform the arguments stored in physical registers into virtual ones. | |||
4283 | Register Reg = MF.addLiveIn(VA.getLocReg(), RC); | |||
4284 | SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32); | |||
4285 | ||||
4286 | SDValue ArgValue2; | |||
4287 | if (NextVA.isMemLoc()) { | |||
4288 | MachineFrameInfo &MFI = MF.getFrameInfo(); | |||
4289 | int FI = MFI.CreateFixedObject(4, NextVA.getLocMemOffset(), true); | |||
4290 | ||||
4291 | // Create load node to retrieve arguments from the stack. | |||
4292 | SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout())); | |||
4293 | ArgValue2 = DAG.getLoad( | |||
4294 | MVT::i32, dl, Root, FIN, | |||
4295 | MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)); | |||
4296 | } else { | |||
4297 | Reg = MF.addLiveIn(NextVA.getLocReg(), RC); | |||
4298 | ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32); | |||
4299 | } | |||
4300 | if (!Subtarget->isLittle()) | |||
4301 | std::swap (ArgValue, ArgValue2); | |||
4302 | return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2); | |||
4303 | } | |||
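// The VMOVDRR reassembly above, sketched on plain values (assumes <cstdint>,
// <cstring> and <utility>; sketchBuildF64 is illustrative only):
static inline double sketchBuildF64(uint32_t Lo, uint32_t Hi, bool Little) {
  if (!Little)
    std::swap(Lo, Hi);                // big-endian: the halves arrive swapped
  uint64_t Bits = (static_cast<uint64_t>(Hi) << 32) | Lo;
  double D;
  std::memcpy(&D, &Bits, sizeof D);   // VMOVDRR: two GPRs -> one DPR
  return D;
}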
4304 | ||||
4305 | // The remaining GPRs hold either the beginning of variable-argument | |||
4306 | // data, or the beginning of an aggregate passed by value (usually | |||
4307 | // byval). Either way, we allocate stack slots adjacent to the data | |||
4308 | // provided by our caller, and store the unallocated registers there. | |||
4309 | // If this is a variadic function, the va_list pointer will begin with | |||
4310 | // these values; otherwise, this reassembles a (byval) structure that | |||
4311 | // was split between registers and memory. | |||
4312 | // Return: the frame index that the registers were stored into. | |||
4313 | int ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG, | |||
4314 | const SDLoc &dl, SDValue &Chain, | |||
4315 | const Value *OrigArg, | |||
4316 | unsigned InRegsParamRecordIdx, | |||
4317 | int ArgOffset, unsigned ArgSize) const { | |||
4318 | // Currently, two use-cases are possible: | |||
4319 | // Case #1. Non-var-args function, and we meet the first byval parameter. | |||
4320 | //          Set up the first unallocated register as the first byval register; | |||
4321 | //          eat all remaining registers | |||
4322 | //          (these two actions are performed by the HandleByVal method). | |||
4323 | //          Then, here, we initialize the stack frame with | |||
4324 | //          "store-reg" instructions. | |||
4325 | // Case #2. Var-args function that doesn't contain byval parameters. | |||
4326 | //          The same: eat all remaining unallocated registers and | |||
4327 | //          initialize the stack frame. | |||
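| // Illustrative example (hypothetical): for "void f(int a, ...)" under | |||
| // AAPCS, r0 holds 'a' and r1-r3 are unallocated; the loop below spills | |||
| // them into a 12-byte slot placed directly below the incoming stack | |||
| // arguments, so va_arg can walk registers and stack contiguously. | |||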
4328 | ||||
4329 | MachineFunction &MF = DAG.getMachineFunction(); | |||
4330 | MachineFrameInfo &MFI = MF.getFrameInfo(); | |||
4331 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); | |||
4332 | unsigned RBegin, REnd; | |||
4333 | if (InRegsParamRecordIdx < CCInfo.getInRegsParamsCount()) { | |||
4334 | CCInfo.getInRegsParamInfo(InRegsParamRecordIdx, RBegin, REnd); | |||
4335 | } else { | |||
4336 | unsigned RBeginIdx = CCInfo.getFirstUnallocated(GPRArgRegs); | |||
4337 | RBegin = RBeginIdx == 4 ? (unsigned)ARM::R4 : GPRArgRegs[RBeginIdx]; | |||
4338 | REnd = ARM::R4; | |||
4339 | } | |||
4340 | ||||
4341 | if (REnd != RBegin) | |||
4342 | ArgOffset = -4 * (ARM::R4 - RBegin); | |||
4343 | ||||
4344 | auto PtrVT = getPointerTy(DAG.getDataLayout()); | |||
4345 | int FrameIndex = MFI.CreateFixedObject(ArgSize, ArgOffset, false); | |||
4346 | SDValue FIN = DAG.getFrameIndex(FrameIndex, PtrVT); | |||
4347 | ||||
4348 | SmallVector<SDValue, 4> MemOps; | |||
4349 | const TargetRegisterClass *RC = | |||
4350 | AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass : &ARM::GPRRegClass; | |||
4351 | ||||
4352 | for (unsigned Reg = RBegin, i = 0; Reg < REnd; ++Reg, ++i) { | |||
4353 | Register VReg = MF.addLiveIn(Reg, RC); | |||
4354 | SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); | |||
4355 | SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, | |||
4356 | MachinePointerInfo(OrigArg, 4 * i)); | |||
4357 | MemOps.push_back(Store); | |||
4358 | FIN = DAG.getNode(ISD::ADD, dl, PtrVT, FIN, DAG.getConstant(4, dl, PtrVT)); | |||
4359 | } | |||
4360 | ||||
4361 | if (!MemOps.empty()) | |||
4362 | Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); | |||
4363 | return FrameIndex; | |||
4364 | } | |||
4365 | ||||
4366 | // Set up the stack frame that the va_list pointer will start from. | |||
4367 | void ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG, | |||
4368 | const SDLoc &dl, SDValue &Chain, | |||
4369 | unsigned ArgOffset, | |||
4370 | unsigned TotalArgRegsSaveSize, | |||
4371 | bool ForceMutable) const { | |||
4372 | MachineFunction &MF = DAG.getMachineFunction(); | |||
4373 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); | |||
4374 | ||||
4375 | // Try to store any remaining integer argument regs | |||
4376 | // to their spots on the stack so that they may be loaded by dereferencing | |||
4377 | // the result of va_next. | |||
4378 | // If there are no regs to be stored, just point the address after the | |||
4379 | // last argument passed via the stack. | |||
4380 | int FrameIndex = StoreByValRegs(CCInfo, DAG, dl, Chain, nullptr, | |||
4381 | CCInfo.getInRegsParamsCount(), | |||
4382 | CCInfo.getNextStackOffset(), | |||
4383 | std::max(4U, TotalArgRegsSaveSize)); | |||
4384 | AFI->setVarArgsFrameIndex(FrameIndex); | |||
4385 | } | |||
4386 | ||||
4387 | bool ARMTargetLowering::splitValueIntoRegisterParts( | |||
4388 | SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, | |||
4389 | unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const { | |||
4390 | bool IsABIRegCopy = CC.hasValue(); | |||
4391 | EVT ValueVT = Val.getValueType(); | |||
4392 | if (IsABIRegCopy && (ValueVT == MVT::f16 || ValueVT == MVT::bf16) && | |||
4393 | PartVT == MVT::f32) { | |||
4394 | unsigned ValueBits = ValueVT.getSizeInBits(); | |||
4395 | unsigned PartBits = PartVT.getSizeInBits(); | |||
4396 | Val = DAG.getNode(ISD::BITCAST, DL, MVT::getIntegerVT(ValueBits), Val); | |||
4397 | Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::getIntegerVT(PartBits), Val); | |||
4398 | Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val); | |||
4399 | Parts[0] = Val; | |||
4400 | return true; | |||
4401 | } | |||
4402 | return false; | |||
4403 | } | |||
4404 | ||||
4405 | SDValue ARMTargetLowering::joinRegisterPartsIntoValue( | |||
4406 | SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, | |||
4407 | MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const { | |||
4408 | bool IsABIRegCopy = CC.hasValue(); | |||
4409 | if (IsABIRegCopy && (ValueVT == MVT::f16 || ValueVT == MVT::bf16) && | |||
4410 | PartVT == MVT::f32) { | |||
4411 | unsigned ValueBits = ValueVT.getSizeInBits(); | |||
4412 | unsigned PartBits = PartVT.getSizeInBits(); | |||
4413 | SDValue Val = Parts[0]; | |||
4414 | ||||
4415 | Val = DAG.getNode(ISD::BITCAST, DL, MVT::getIntegerVT(PartBits), Val); | |||
4416 | Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::getIntegerVT(ValueBits), Val); | |||
4417 | Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val); | |||
4418 | return Val; | |||
4419 | } | |||
4420 | return SDValue(); | |||
4421 | } | |||
4422 | ||||
4423 | SDValue ARMTargetLowering::LowerFormalArguments( | |||
4424 | SDValue Chain, CallingConv::ID CallConv, bool isVarArg, | |||
4425 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, | |||
4426 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { | |||
4427 | MachineFunction &MF = DAG.getMachineFunction(); | |||
4428 | MachineFrameInfo &MFI = MF.getFrameInfo(); | |||
4429 | ||||
4430 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); | |||
4431 | ||||
4432 | // Assign locations to all of the incoming arguments. | |||
4433 | SmallVector<CCValAssign, 16> ArgLocs; | |||
4434 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, | |||
4435 | *DAG.getContext()); | |||
4436 | CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForCall(CallConv, isVarArg)); | |||
4437 | ||||
4438 | SmallVector<SDValue, 16> ArgValues; | |||
4439 | SDValue ArgValue; | |||
4440 | Function::const_arg_iterator CurOrigArg = MF.getFunction().arg_begin(); | |||
4441 | unsigned CurArgIdx = 0; | |||
4442 | ||||
4443 | // Initially ArgRegsSaveSize is zero. | |||
4444 | // Then we increase this value each time we meet byval parameter. | |||
4445 | // We also increase this value in case of varargs function. | |||
4446 | AFI->setArgRegsSaveSize(0); | |||
4447 | ||||
4448 | // Calculate the amount of stack space that we need to allocate to store | |||
4449 | // byval and variadic arguments that are passed in registers. | |||
4450 | // We need to know this before we allocate the first byval or variadic | |||
4451 | // argument, as they will be allocated a stack slot below the CFA (Canonical | |||
4452 | // Frame Address, the stack pointer at entry to the function). | |||
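| // For example (illustrative): in a variadic function whose only fixed | |||
| // argument lives in r0, the first unallocated register is r1, so | |||
| // 4 * (R4 - R1) = 12 bytes are reserved below the CFA to spill r1-r3. | |||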
4453 | unsigned ArgRegBegin = ARM::R4; | |||
4454 | for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { | |||
4455 | if (CCInfo.getInRegsParamsProcessed() >= CCInfo.getInRegsParamsCount()) | |||
4456 | break; | |||
4457 | ||||
4458 | CCValAssign &VA = ArgLocs[i]; | |||
4459 | unsigned Index = VA.getValNo(); | |||
4460 | ISD::ArgFlagsTy Flags = Ins[Index].Flags; | |||
4461 | if (!Flags.isByVal()) | |||
4462 | continue; | |||
4463 | ||||
4464 | assert(VA.isMemLoc() && "unexpected byval pointer in reg"); | |||
4465 | unsigned RBegin, REnd; | |||
4466 | CCInfo.getInRegsParamInfo(CCInfo.getInRegsParamsProcessed(), RBegin, REnd); | |||
4467 | ArgRegBegin = std::min(ArgRegBegin, RBegin); | |||
4468 | ||||
4469 | CCInfo.nextInRegsParam(); | |||
4470 | } | |||
4471 | CCInfo.rewindByValRegsInfo(); | |||
4472 | ||||
4473 | int lastInsIndex = -1; | |||
4474 | if (isVarArg && MFI.hasVAStart()) { | |||
4475 | unsigned RegIdx = CCInfo.getFirstUnallocated(GPRArgRegs); | |||
4476 | if (RegIdx != array_lengthof(GPRArgRegs)) | |||
4477 | ArgRegBegin = std::min(ArgRegBegin, (unsigned)GPRArgRegs[RegIdx]); | |||
4478 | } | |||
4479 | ||||
4480 | unsigned TotalArgRegsSaveSize = 4 * (ARM::R4 - ArgRegBegin); | |||
4481 | AFI->setArgRegsSaveSize(TotalArgRegsSaveSize); | |||
4482 | auto PtrVT = getPointerTy(DAG.getDataLayout()); | |||
4483 | ||||
4484 | for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { | |||
4485 | CCValAssign &VA = ArgLocs[i]; | |||
4486 | if (Ins[VA.getValNo()].isOrigArg()) { | |||
4487 | std::advance(CurOrigArg, | |||
4488 | Ins[VA.getValNo()].getOrigArgIndex() - CurArgIdx); | |||
4489 | CurArgIdx = Ins[VA.getValNo()].getOrigArgIndex(); | |||
4490 | } | |||
4491 | // Arguments stored in registers. | |||
4492 | if (VA.isRegLoc()) { | |||
4493 | EVT RegVT = VA.getLocVT(); | |||
4494 | ||||
4495 | if (VA.needsCustom() && VA.getLocVT() == MVT::v2f64) { | |||
4496 | // f64 and vector types are split up into multiple registers or | |||
4497 | // combinations of registers and stack slots. | |||
4498 | SDValue ArgValue1 = | |||
4499 | GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl); | |||
4500 | VA = ArgLocs[++i]; // skip ahead to next loc | |||
4501 | SDValue ArgValue2; | |||
4502 | if (VA.isMemLoc()) { | |||
4503 | int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), true); | |||
4504 | SDValue FIN = DAG.getFrameIndex(FI, PtrVT); | |||
4505 | ArgValue2 = DAG.getLoad( | |||
4506 | MVT::f64, dl, Chain, FIN, | |||
4507 | MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)); | |||
4508 | } else { | |||
4509 | ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl); | |||
4510 | } | |||
4511 | ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64); | |||
4512 | ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, ArgValue, | |||
4513 | ArgValue1, DAG.getIntPtrConstant(0, dl)); | |||
4514 | ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, ArgValue, | |||
4515 | ArgValue2, DAG.getIntPtrConstant(1, dl)); | |||
4516 | } else if (VA.needsCustom() && VA.getLocVT() == MVT::f64) { | |||
4517 | ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl); | |||
4518 | } else { | |||
4519 | const TargetRegisterClass *RC; | |||
4520 | ||||
4521 | if (RegVT == MVT::f16 || RegVT == MVT::bf16) | |||
4522 | RC = &ARM::HPRRegClass; | |||
4523 | else if (RegVT == MVT::f32) | |||
4524 | RC = &ARM::SPRRegClass; | |||
4525 | else if (RegVT == MVT::f64 || RegVT == MVT::v4f16 || | |||
4526 | RegVT == MVT::v4bf16) | |||
4527 | RC = &ARM::DPRRegClass; | |||
4528 | else if (RegVT == MVT::v2f64 || RegVT == MVT::v8f16 || | |||
4529 | RegVT == MVT::v8bf16) | |||
4530 | RC = &ARM::QPRRegClass; | |||
4531 | else if (RegVT == MVT::i32) | |||
4532 | RC = AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass | |||
4533 | : &ARM::GPRRegClass; | |||
4534 | else | |||
4535 | llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering")::llvm::llvm_unreachable_internal("RegVT not supported by FORMAL_ARGUMENTS Lowering" , "llvm/lib/Target/ARM/ARMISelLowering.cpp", 4535); | |||
4536 | ||||
4537 | // Transform the arguments in physical registers into virtual ones. | |||
4538 | Register Reg = MF.addLiveIn(VA.getLocReg(), RC); | |||
4539 | ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT); | |||
4540 | ||||
4541 | // If this value is passed in r0 and has the returned attribute (e.g. | |||
4542 | // C++ 'structors), record this fact for later use. | |||
4543 | if (VA.getLocReg() == ARM::R0 && Ins[VA.getValNo()].Flags.isReturned()) { | |||
4544 | AFI->setPreservesR0(); | |||
4545 | } | |||
4546 | } | |||
4547 | ||||
4548 | // If this is an 8 or 16-bit value, it is really passed promoted | |||
4549 | // to 32 bits. Insert an assert[sz]ext to capture this, then | |||
4550 | // truncate to the right size. | |||
4551 | switch (VA.getLocInfo()) { | |||
4552 | default: llvm_unreachable("Unknown loc info!"); | |||
4553 | case CCValAssign::Full: break; | |||
4554 | case CCValAssign::BCvt: | |||
4555 | ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue); | |||
4556 | break; | |||
4557 | case CCValAssign::SExt: | |||
4558 | ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue, | |||
4559 | DAG.getValueType(VA.getValVT())); | |||
4560 | ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); | |||
4561 | break; | |||
4562 | case CCValAssign::ZExt: | |||
4563 | ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue, | |||
4564 | DAG.getValueType(VA.getValVT())); | |||
4565 | ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); | |||
4566 | break; | |||
4567 | } | |||
4568 | ||||
4569 | // f16 arguments have their size extended to 4 bytes and passed as if they | |||
4570 | // had been copied to the LSBs of a 32-bit register. | |||
4571 | // For that, it's passed extended to i32 (soft ABI) or to f32 (hard ABI) | |||
4572 | if (VA.needsCustom() && | |||
4573 | (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16)) | |||
4574 | ArgValue = MoveToHPR(dl, DAG, VA.getLocVT(), VA.getValVT(), ArgValue); | |||
4575 | ||||
4576 | InVals.push_back(ArgValue); | |||
4577 | } else { // VA.isRegLoc() | |||
4578 | // Only arguments passed on the stack should make it here. | |||
4579 | assert(VA.isMemLoc()); | |||
4580 | assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered"); | |||
4581 | ||||
4582 | int index = VA.getValNo(); | |||
4583 | ||||
4584 | // Some Ins[] entries become multiple ArgLoc[] entries. | |||
4585 | // Process them only once. | |||
4586 | if (index != lastInsIndex) | |||
4587 | { | |||
4588 | ISD::ArgFlagsTy Flags = Ins[index].Flags; | |||
4589 | // FIXME: For now, all byval parameter objects are marked mutable. | |||
4590 | // This can be changed with more analysis. | |||
4591 | // In case of tail call optimization mark all arguments mutable. | |||
4592 | // Since they could be overwritten by lowering of arguments in case of | |||
4593 | // a tail call. | |||
4594 | if (Flags.isByVal()) { | |||
4595 | assert(Ins[index].isOrigArg() && | |||
4596 | "Byval arguments cannot be implicit"); | |||
4597 | unsigned CurByValIndex = CCInfo.getInRegsParamsProcessed(); | |||
4598 | ||||
4599 | int FrameIndex = StoreByValRegs( | |||
4600 | CCInfo, DAG, dl, Chain, &*CurOrigArg, CurByValIndex, | |||
4601 | VA.getLocMemOffset(), Flags.getByValSize()); | |||
4602 | InVals.push_back(DAG.getFrameIndex(FrameIndex, PtrVT)); | |||
4603 | CCInfo.nextInRegsParam(); | |||
4604 | } else { | |||
4605 | unsigned FIOffset = VA.getLocMemOffset(); | |||
4606 | int FI = MFI.CreateFixedObject(VA.getLocVT().getSizeInBits()/8, | |||
4607 | FIOffset, true); | |||
4608 | ||||
4609 | // Create load nodes to retrieve arguments from the stack. | |||
4610 | SDValue FIN = DAG.getFrameIndex(FI, PtrVT); | |||
4611 | InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, | |||
4612 | MachinePointerInfo::getFixedStack( | |||
4613 | DAG.getMachineFunction(), FI))); | |||
4614 | } | |||
4615 | lastInsIndex = index; | |||
4616 | } | |||
4617 | } | |||
4618 | } | |||
4619 | ||||
4620 | // varargs | |||
4621 | if (isVarArg && MFI.hasVAStart()) { | |||
4622 | VarArgStyleRegisters(CCInfo, DAG, dl, Chain, CCInfo.getNextStackOffset(), | |||
4623 | TotalArgRegsSaveSize); | |||
4624 | if (AFI->isCmseNSEntryFunction()) { | |||
4625 | DiagnosticInfoUnsupported Diag( | |||
4626 | DAG.getMachineFunction().getFunction(), | |||
4627 | "secure entry function must not be variadic", dl.getDebugLoc()); | |||
4628 | DAG.getContext()->diagnose(Diag); | |||
4629 | } | |||
4630 | } | |||
4631 | ||||
4632 | unsigned StackArgSize = CCInfo.getNextStackOffset(); | |||
4633 | bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt; | |||
4634 | if (canGuaranteeTCO(CallConv, TailCallOpt)) { | |||
4635 | // The only way to guarantee a tail call is if the callee restores its | |||
4636 | // argument area, but it must also keep the stack aligned when doing so. | |||
4637 | const DataLayout &DL = DAG.getDataLayout(); | |||
4638 | StackArgSize = alignTo(StackArgSize, DL.getStackAlignment()); | |||
4639 | ||||
4640 | AFI->setArgumentStackToRestore(StackArgSize); | |||
4641 | } | |||
4642 | AFI->setArgumentStackSize(StackArgSize); | |||
4643 | ||||
4644 | if (CCInfo.getNextStackOffset() > 0 && AFI->isCmseNSEntryFunction()) { | |||
4645 | DiagnosticInfoUnsupported Diag( | |||
4646 | DAG.getMachineFunction().getFunction(), | |||
4647 | "secure entry function requires arguments on stack", dl.getDebugLoc()); | |||
4648 | DAG.getContext()->diagnose(Diag); | |||
4649 | } | |||
4650 | ||||
4651 | return Chain; | |||
4652 | } | |||
4653 | ||||
4654 | /// isFloatingPointZero - Return true if this is +0.0. | |||
4655 | static bool isFloatingPointZero(SDValue Op) { | |||
4656 | if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) | |||
4657 | return CFP->getValueAPF().isPosZero(); | |||
4658 | else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) { | |||
4659 | // Maybe this has already been legalized into the constant pool? | |||
4660 | if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) { | |||
4661 | SDValue WrapperOp = Op.getOperand(1).getOperand(0); | |||
4662 | if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp)) | |||
4663 | if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal())) | |||
4664 | return CFP->getValueAPF().isPosZero(); | |||
4665 | } | |||
4666 | } else if (Op->getOpcode() == ISD::BITCAST && | |||
4667 | Op->getValueType(0) == MVT::f64) { | |||
4668 | // Handle (ISD::BITCAST (ARMISD::VMOVIMM (ISD::TargetConstant 0)) MVT::f64) | |||
4669 | // created by LowerConstantFP(). | |||
4670 | SDValue BitcastOp = Op->getOperand(0); | |||
4671 | if (BitcastOp->getOpcode() == ARMISD::VMOVIMM && | |||
4672 | isNullConstant(BitcastOp->getOperand(0))) | |||
4673 | return true; | |||
4674 | } | |||
4675 | return false; | |||
4676 | } | |||
4677 | ||||
4678 | /// Returns appropriate ARM CMP (cmp) and corresponding condition code for | |||
4679 | /// the given operands. | |||
4680 | SDValue ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, | |||
4681 | SDValue &ARMcc, SelectionDAG &DAG, | |||
4682 | const SDLoc &dl) const { | |||
4683 | if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) { | |||
4684 | unsigned C = RHSC->getZExtValue(); | |||
4685 | if (!isLegalICmpImmediate((int32_t)C)) { | |||
4686 | // Constant does not fit, try adjusting it by one. | |||
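| // For example (illustrative): 0x101 is not a valid ARM modified | |||
| // immediate but 0x100 is, so "x < 0x101" (SETLT) can be rewritten as | |||
| // "x <= 0x100" (SETLE). | |||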
4687 | switch (CC) { | |||
4688 | default: break; | |||
4689 | case ISD::SETLT: | |||
4690 | case ISD::SETGE: | |||
4691 | if (C != 0x80000000 && isLegalICmpImmediate(C-1)) { | |||
4692 | CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT; | |||
4693 | RHS = DAG.getConstant(C - 1, dl, MVT::i32); | |||
4694 | } | |||
4695 | break; | |||
4696 | case ISD::SETULT: | |||
4697 | case ISD::SETUGE: | |||
4698 | if (C != 0 && isLegalICmpImmediate(C-1)) { | |||
4699 | CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT; | |||
4700 | RHS = DAG.getConstant(C - 1, dl, MVT::i32); | |||
4701 | } | |||
4702 | break; | |||
4703 | case ISD::SETLE: | |||
4704 | case ISD::SETGT: | |||
4705 | if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) { | |||
4706 | CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE; | |||
4707 | RHS = DAG.getConstant(C + 1, dl, MVT::i32); | |||
4708 | } | |||
4709 | break; | |||
4710 | case ISD::SETULE: | |||
4711 | case ISD::SETUGT: | |||
4712 | if (C != 0xffffffff && isLegalICmpImmediate(C+1)) { | |||
4713 | CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE; | |||
4714 | RHS = DAG.getConstant(C + 1, dl, MVT::i32); | |||
4715 | } | |||
4716 | break; | |||
4717 | } | |||
4718 | } | |||
4719 | } else if ((ARM_AM::getShiftOpcForNode(LHS.getOpcode()) != ARM_AM::no_shift) && | |||
4720 | (ARM_AM::getShiftOpcForNode(RHS.getOpcode()) == ARM_AM::no_shift)) { | |||
4721 | // In ARM and Thumb-2, the compare instructions can shift their second | |||
4722 | // operand. | |||
4723 | CC = ISD::getSetCCSwappedOperands(CC); | |||
4724 | std::swap(LHS, RHS); | |||
4725 | } | |||
4726 | ||||
4727 | // Thumb1 has very limited immediate modes, so turning an "and" into a | |||
4728 | // shift can save multiple instructions. | |||
4729 | // | |||
4730 | // If we have (x & C1), and C1 is an appropriate mask, we can transform it | |||
4731 | // into "((x << n) >> n)". But that isn't necessarily profitable on its | |||
4732 | // own. If it's the operand to an unsigned comparison with an immediate, | |||
4733 | // we can eliminate one of the shifts: we transform | |||
4734 | // "((x << n) >> n) == C2" to "(x << n) == (C2 << n)". | |||
4735 | // | |||
4736 | // We avoid transforming cases which aren't profitable due to encoding | |||
4737 | // details: | |||
4738 | // | |||
4739 | // 1. C2 fits into the immediate field of a cmp, and the transformed version | |||
4740 | // would not; in that case, we're essentially trading one immediate load for | |||
4741 | // another. | |||
4742 | // 2. C1 is 255 or 65535, so we can use uxtb or uxth. | |||
4743 | // 3. C2 is zero; we have other code for this special case. | |||
4744 | // | |||
4745 | // FIXME: Figure out profitability for Thumb2; we usually can't save an | |||
4746 | // instruction, since the AND is always one instruction anyway, but we could | |||
4747 | // use narrow instructions in some cases. | |||
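| // Worked example (illustrative): with C1 = 0x3ff and C2 = 0x200, | |||
| // "(x & 0x3ff) == 0x200" becomes "(x << 22) == 0x80000000", since | |||
| // countLeadingZeros(0x3ff) == 22 and 0x200 << 22 == 0x80000000; the | |||
| // explicit mask disappears into the shift. | |||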
4748 | if (Subtarget->isThumb1Only() && LHS->getOpcode() == ISD::AND && | |||
4749 | LHS->hasOneUse() && isa<ConstantSDNode>(LHS.getOperand(1)) && | |||
4750 | LHS.getValueType() == MVT::i32 && isa<ConstantSDNode>(RHS) && | |||
4751 | !isSignedIntSetCC(CC)) { | |||
4752 | unsigned Mask = cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue(); | |||
4753 | auto *RHSC = cast<ConstantSDNode>(RHS.getNode()); | |||
4754 | uint64_t RHSV = RHSC->getZExtValue(); | |||
4755 | if (isMask_32(Mask) && (RHSV & ~Mask) == 0 && Mask != 255 && Mask != 65535) { | |||
4756 | unsigned ShiftBits = countLeadingZeros(Mask); | |||
4757 | if (RHSV && (RHSV > 255 || (RHSV << ShiftBits) <= 255)) { | |||
4758 | SDValue ShiftAmt = DAG.getConstant(ShiftBits, dl, MVT::i32); | |||
4759 | LHS = DAG.getNode(ISD::SHL, dl, MVT::i32, LHS.getOperand(0), ShiftAmt); | |||
4760 | RHS = DAG.getConstant(RHSV << ShiftBits, dl, MVT::i32); | |||
4761 | } | |||
4762 | } | |||
4763 | } | |||
4764 | ||||
4765 | // The specific comparison "(x<<c) > 0x80000000U" can be optimized to a | |||
4766 | // single "lsls x, c+1". The shift sets the "C" and "Z" flags the same | |||
4767 | // way a cmp would. | |||
4768 | // FIXME: Add support for ARM/Thumb2; this would need isel patterns, and | |||
4769 | // some tweaks to the heuristics for the previous and->shift transform. | |||
4770 | // FIXME: Optimize cases where the LHS isn't a shift. | |||
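| // For example (illustrative): "(x << 2) > 0x80000000u" becomes | |||
| // "lsls x, 3" with an HI condition: C receives the sign bit of | |||
| // (x << 2) and Z tests whether its remaining bits are all zero, which | |||
| // together decide the unsigned comparison exactly as a cmp would. | |||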
4771 | if (Subtarget->isThumb1Only() && LHS->getOpcode() == ISD::SHL && | |||
4772 | isa<ConstantSDNode>(RHS) && | |||
4773 | cast<ConstantSDNode>(RHS)->getZExtValue() == 0x80000000U && | |||
4774 | CC == ISD::SETUGT && isa<ConstantSDNode>(LHS.getOperand(1)) && | |||
4775 | cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() < 31) { | |||
4776 | unsigned ShiftAmt = | |||
4777 | cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() + 1; | |||
4778 | SDValue Shift = DAG.getNode(ARMISD::LSLS, dl, | |||
4779 | DAG.getVTList(MVT::i32, MVT::i32), | |||
4780 | LHS.getOperand(0), | |||
4781 | DAG.getConstant(ShiftAmt, dl, MVT::i32)); | |||
4782 | SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, ARM::CPSR, | |||
4783 | Shift.getValue(1), SDValue()); | |||
4784 | ARMcc = DAG.getConstant(ARMCC::HI, dl, MVT::i32); | |||
4785 | return Chain.getValue(1); | |||
4786 | } | |||
4787 | ||||
4788 | ARMCC::CondCodes CondCode = IntCCToARMCC(CC); | |||
4789 | ||||
4790 | // If the RHS is a constant zero then the V (overflow) flag will never be | |||
4791 | // set. This can allow us to simplify GE to PL or LT to MI, which can be | |||
4792 | // simpler for other passes (like the peephole optimiser) to deal with. | |||
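| // For example (illustrative): a compare against zero can never set V, | |||
| // so GE (N == V) degenerates to N == 0, i.e. PL, and LT (N != V) | |||
| // degenerates to N == 1, i.e. MI. | |||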
4793 | if (isNullConstant(RHS)) { | |||
4794 | switch (CondCode) { | |||
4795 | default: break; | |||
4796 | case ARMCC::GE: | |||
4797 | CondCode = ARMCC::PL; | |||
4798 | break; | |||
4799 | case ARMCC::LT: | |||
4800 | CondCode = ARMCC::MI; | |||
4801 | break; | |||
4802 | } | |||
4803 | } | |||
4804 | ||||
4805 | ARMISD::NodeType CompareType; | |||
4806 | switch (CondCode) { | |||
4807 | default: | |||
4808 | CompareType = ARMISD::CMP; | |||
4809 | break; | |||
4810 | case ARMCC::EQ: | |||
4811 | case ARMCC::NE: | |||
4812 | // Uses only Z Flag | |||
4813 | CompareType = ARMISD::CMPZ; | |||
4814 | break; | |||
4815 | } | |||
4816 | ARMcc = DAG.getConstant(CondCode, dl, MVT::i32); | |||
4817 | return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS); | |||
4818 | } | |||
4819 | ||||
4820 | /// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands. | |||
4821 | SDValue ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, | |||
4822 | SelectionDAG &DAG, const SDLoc &dl, | |||
4823 | bool Signaling) const { | |||
4824 | assert(Subtarget->hasFP64() || RHS.getValueType() != MVT::f64); | |||
4825 | SDValue Cmp; | |||
4826 | if (!isFloatingPointZero(RHS)) | |||
4827 | Cmp = DAG.getNode(Signaling ? ARMISD::CMPFPE : ARMISD::CMPFP, | |||
4828 | dl, MVT::Glue, LHS, RHS); | |||
4829 | else | |||
4830 | Cmp = DAG.getNode(Signaling ? ARMISD::CMPFPEw0 : ARMISD::CMPFPw0, | |||
4831 | dl, MVT::Glue, LHS); | |||
4832 | return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp); | |||
4833 | } | |||
4834 | ||||
4835 | /// duplicateCmp - Glue values can have only one use, so this function | |||
4836 | /// duplicates a comparison node. | |||
4837 | SDValue | |||
4838 | ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const { | |||
4839 | unsigned Opc = Cmp.getOpcode(); | |||
4840 | SDLoc DL(Cmp); | |||
4841 | if (Opc == ARMISD::CMP || Opc == ARMISD::CMPZ) | |||
4842 | return DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1)); | |||
4843 | ||||
4844 | assert(Opc == ARMISD::FMSTAT && "unexpected comparison operation"); | |||
4845 | Cmp = Cmp.getOperand(0); | |||
4846 | Opc = Cmp.getOpcode(); | |||
4847 | if (Opc == ARMISD::CMPFP) | |||
4848 | Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1)); | |||
4849 | else { | |||
4850 | assert(Opc == ARMISD::CMPFPw0 && "unexpected operand of FMSTAT"); | |||
4851 | Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0)); | |||
4852 | } | |||
4853 | return DAG.getNode(ARMISD::FMSTAT, DL, MVT::Glue, Cmp); | |||
4854 | } | |||
4855 | ||||
4856 | // This function returns three things: the arithmetic computation itself | |||
4857 | // (Value), a comparison (OverflowCmp), and a condition code (ARMcc). The | |||
4858 | // comparison and the condition code define the case in which the arithmetic | |||
4859 | // computation *does not* overflow. | |||
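| // For example (illustrative): for ISD::SADDO the code below returns | |||
| // Value = ADD(LHS, RHS) and OverflowCmp = CMP(Value, LHS) with | |||
| // ARMcc = VC; the subtraction Value - LHS re-derives RHS and sets V | |||
| // exactly when the original addition overflowed. | |||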
4860 | std::pair<SDValue, SDValue> | |||
4861 | ARMTargetLowering::getARMXALUOOp(SDValue Op, SelectionDAG &DAG, | |||
4862 | SDValue &ARMcc) const { | |||
4863 | assert(Op.getValueType() == MVT::i32 && "Unsupported value type"); | |||
4864 | ||||
4865 | SDValue Value, OverflowCmp; | |||
4866 | SDValue LHS = Op.getOperand(0); | |||
4867 | SDValue RHS = Op.getOperand(1); | |||
4868 | SDLoc dl(Op); | |||
4869 | ||||
4870 | // FIXME: We are currently always generating CMPs because we don't support | |||
4871 | // generating CMN through the backend. This is not as good as the natural | |||
4872 | // CMP case because it causes a register dependency and cannot be folded | |||
4873 | // later. | |||
4874 | ||||
4875 | switch (Op.getOpcode()) { | |||
4876 | default: | |||
4877 | llvm_unreachable("Unknown overflow instruction!")::llvm::llvm_unreachable_internal("Unknown overflow instruction!" , "llvm/lib/Target/ARM/ARMISelLowering.cpp", 4877); | |||
4878 | case ISD::SADDO: | |||
4879 | ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32); | |||
4880 | Value = DAG.getNode(ISD::ADD, dl, Op.getValueType(), LHS, RHS); | |||
4881 | OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS); | |||
4882 | break; | |||
4883 | case ISD::UADDO: | |||
4884 | ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32); | |||
4885 | // We use ADDC here to correspond to its use in LowerUnsignedALUO. | |||
4886 | // We do not use it in the USUBO case as Value may not be used. | |||
4887 | Value = DAG.getNode(ARMISD::ADDC, dl, | |||
4888 | DAG.getVTList(Op.getValueType(), MVT::i32), LHS, RHS) | |||
4889 | .getValue(0); | |||
4890 | OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS); | |||
4891 | break; | |||
4892 | case ISD::SSUBO: | |||
4893 | ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32); | |||
4894 | Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS); | |||
4895 | OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS); | |||
4896 | break; | |||
4897 | case ISD::USUBO: | |||
4898 | ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32); | |||
4899 | Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS); | |||
4900 | OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS); | |||
4901 | break; | |||
4902 | case ISD::UMULO: | |||
4903 | // We generate a UMUL_LOHI and then check if the high word is 0. | |||
4904 | ARMcc = DAG.getConstant(ARMCC::EQ, dl, MVT::i32); | |||
4905 | Value = DAG.getNode(ISD::UMUL_LOHI, dl, | |||
4906 | DAG.getVTList(Op.getValueType(), Op.getValueType()), | |||
4907 | LHS, RHS); | |||
4908 | OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value.getValue(1), | |||
4909 | DAG.getConstant(0, dl, MVT::i32)); | |||
4910 | Value = Value.getValue(0); // We only want the low 32 bits for the result. | |||
4911 | break; | |||
4912 | case ISD::SMULO: | |||
4913 | // We generate a SMUL_LOHI and then check if all the bits of the high word | |||
4914 | // are the same as the sign bit of the low word. | |||
4915 | ARMcc = DAG.getConstant(ARMCC::EQ, dl, MVT::i32); | |||
4916 | Value = DAG.getNode(ISD::SMUL_LOHI, dl, | |||
4917 | DAG.getVTList(Op.getValueType(), Op.getValueType()), | |||
4918 | LHS, RHS); | |||
4919 | OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value.getValue(1), | |||
4920 | DAG.getNode(ISD::SRA, dl, Op.getValueType(), | |||
4921 | Value.getValue(0), | |||
4922 | DAG.getConstant(31, dl, MVT::i32))); | |||
4923 | Value = Value.getValue(0); // We only want the low 32 bits for the result. | |||
4924 | break; | |||
4925 | } // switch (...) | |||
4926 | ||||
4927 | return std::make_pair(Value, OverflowCmp); | |||
4928 | } | |||
4929 | ||||
4930 | SDValue | |||
4931 | ARMTargetLowering::LowerSignedALUO(SDValue Op, SelectionDAG &DAG) const { | |||
4932 | // Let legalize expand this if it isn't a legal type yet. | |||
4933 | if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType())) | |||
4934 | return SDValue(); | |||
4935 | ||||
4936 | SDValue Value, OverflowCmp; | |||
4937 | SDValue ARMcc; | |||
4938 | std::tie(Value, OverflowCmp) = getARMXALUOOp(Op, DAG, ARMcc); | |||
4939 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); | |||
4940 | SDLoc dl(Op); | |||
4941 | // We use 0 and 1 as false and true values. | |||
4942 | SDValue TVal = DAG.getConstant(1, dl, MVT::i32); | |||
4943 | SDValue FVal = DAG.getConstant(0, dl, MVT::i32); | |||
4944 | EVT VT = Op.getValueType(); | |||
4945 | ||||
4946 | SDValue Overflow = DAG.getNode(ARMISD::CMOV, dl, VT, TVal, FVal, | |||
4947 | ARMcc, CCR, OverflowCmp); | |||
4948 | ||||
4949 | SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32); | |||
4950 | return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow); | |||
4951 | } | |||
4952 | ||||
4953 | static SDValue ConvertBooleanCarryToCarryFlag(SDValue BoolCarry, | |||
4954 | SelectionDAG &DAG) { | |||
4955 | SDLoc DL(BoolCarry); | |||
4956 | EVT CarryVT = BoolCarry.getValueType(); | |||
4957 | ||||
4958 | // This converts the boolean value carry into the carry flag by doing | |||
4959 | // ARMISD::SUBC Carry, 1 | |||
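| // Concretely (illustrative): ARM's carry flag is the inverse of borrow, | |||
| // so SUBC(1, 1) leaves C set (no borrow) while SUBC(0, 1) borrows and | |||
| // clears C, reproducing the boolean carry in the flag. | |||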
4960 | SDValue Carry = DAG.getNode(ARMISD::SUBC, DL, | |||
4961 | DAG.getVTList(CarryVT, MVT::i32), | |||
4962 | BoolCarry, DAG.getConstant(1, DL, CarryVT)); | |||
4963 | return Carry.getValue(1); | |||
4964 | } | |||
4965 | ||||
4966 | static SDValue ConvertCarryFlagToBooleanCarry(SDValue Flags, EVT VT, | |||
4967 | SelectionDAG &DAG) { | |||
4968 | SDLoc DL(Flags); | |||
4969 | ||||
4970 | // Now convert the carry flag into a boolean carry. We do this | |||
4971 | // using ARMISD::ADDE 0, 0, Carry | |||
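| // Concretely (illustrative): ADDE computes 0 + 0 + C, so the integer | |||
| // result is exactly the carry flag as 0 or 1. | |||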
4972 | return DAG.getNode(ARMISD::ADDE, DL, DAG.getVTList(VT, MVT::i32), | |||
4973 | DAG.getConstant(0, DL, MVT::i32), | |||
4974 | DAG.getConstant(0, DL, MVT::i32), Flags); | |||
4975 | } | |||
4976 | ||||
4977 | SDValue ARMTargetLowering::LowerUnsignedALUO(SDValue Op, | |||
4978 | SelectionDAG &DAG) const { | |||
4979 | // Let legalize expand this if it isn't a legal type yet. | |||
4980 | if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType())) | |||
4981 | return SDValue(); | |||
4982 | ||||
4983 | SDValue LHS = Op.getOperand(0); | |||
4984 | SDValue RHS = Op.getOperand(1); | |||
4985 | SDLoc dl(Op); | |||
4986 | ||||
4987 | EVT VT = Op.getValueType(); | |||
4988 | SDVTList VTs = DAG.getVTList(VT, MVT::i32); | |||
4989 | SDValue Value; | |||
4990 | SDValue Overflow; | |||
4991 | switch (Op.getOpcode()) { | |||
4992 | default: | |||
4993 | llvm_unreachable("Unknown overflow instruction!")::llvm::llvm_unreachable_internal("Unknown overflow instruction!" , "llvm/lib/Target/ARM/ARMISelLowering.cpp", 4993); | |||
4994 | case ISD::UADDO: | |||
4995 | Value = DAG.getNode(ARMISD::ADDC, dl, VTs, LHS, RHS); | |||
4996 | // Convert the carry flag into a boolean value. | |||
4997 | Overflow = ConvertCarryFlagToBooleanCarry(Value.getValue(1), VT, DAG); | |||
4998 | break; | |||
4999 | case ISD::USUBO: { | |||
5000 | Value = DAG.getNode(ARMISD::SUBC, dl, VTs, LHS, RHS); | |||
5001 | // Convert the carry flag into a boolean value. | |||
5002 | Overflow = ConvertCarryFlagToBooleanCarry(Value.getValue(1), VT, DAG); | |||
5003 | // ARMISD::SUBC returns 0 when we have to borrow, so make it an overflow | |||
5004 | // value. So compute 1 - C. | |||
5005 | Overflow = DAG.getNode(ISD::SUB, dl, MVT::i32, | |||
5006 | DAG.getConstant(1, dl, MVT::i32), Overflow); | |||
5007 | break; | |||
5008 | } | |||
5009 | } | |||
5010 | ||||
5011 | return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow); | |||
5012 | } | |||
5013 | ||||
5014 | static SDValue LowerADDSUBSAT(SDValue Op, SelectionDAG &DAG, | |||
5015 | const ARMSubtarget *Subtarget) { | |||
5016 | EVT VT = Op.getValueType(); | |||
5017 | if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP()) | |||
5018 | return SDValue(); | |||
5019 | if (!VT.isSimple()) | |||
5020 | return SDValue(); | |||
5021 | ||||
5022 | unsigned NewOpcode = 0; // Every saturating opcode that reaches this function is handled by the switches below; the explicit initializer keeps the later DAG.getNode call well-defined. | |||
5023 | switch (VT.getSimpleVT().SimpleTy) { | |||
5024 | default: | |||
5025 | return SDValue(); | |||
5026 | case MVT::i8: | |||
5027 | switch (Op->getOpcode()) { | |||
5028 | case ISD::UADDSAT: | |||
5029 | NewOpcode = ARMISD::UQADD8b; | |||
5030 | break; | |||
5031 | case ISD::SADDSAT: | |||
5032 | NewOpcode = ARMISD::QADD8b; | |||
5033 | break; | |||
5034 | case ISD::USUBSAT: | |||
5035 | NewOpcode = ARMISD::UQSUB8b; | |||
5036 | break; | |||
5037 | case ISD::SSUBSAT: | |||
5038 | NewOpcode = ARMISD::QSUB8b; | |||
5039 | break; | |||
5040 | } | |||
5041 | break; | |||
5042 | case MVT::i16: | |||
5043 | switch (Op->getOpcode()) { | |||
5044 | case ISD::UADDSAT: | |||
5045 | NewOpcode = ARMISD::UQADD16b; | |||
5046 | break; | |||
5047 | case ISD::SADDSAT: | |||
5048 | NewOpcode = ARMISD::QADD16b; | |||
5049 | break; | |||
5050 | case ISD::USUBSAT: | |||
5051 | NewOpcode = ARMISD::UQSUB16b; | |||
5052 | break; | |||
5053 | case ISD::SSUBSAT: | |||
5054 | NewOpcode = ARMISD::QSUB16b; | |||
5055 | break; | |||
5056 | } | |||
5057 | break; | |||
5058 | } | |||
5059 | ||||
5060 | SDLoc dl(Op); | |||
5061 | SDValue Add = | |||
5062 | DAG.getNode(NewOpcode, dl, MVT::i32, | |||
| ||||
5063 | DAG.getSExtOrTrunc(Op->getOperand(0), dl, MVT::i32), | |||
5064 | DAG.getSExtOrTrunc(Op->getOperand(1), dl, MVT::i32)); | |||
5065 | return DAG.getNode(ISD::TRUNCATE, dl, VT, Add); | |||
5066 | } | |||
5067 | ||||
5068 | SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { | |||
5069 | SDValue Cond = Op.getOperand(0); | |||
5070 | SDValue SelectTrue = Op.getOperand(1); | |||
5071 | SDValue SelectFalse = Op.getOperand(2); | |||
5072 | SDLoc dl(Op); | |||
5073 | unsigned Opc = Cond.getOpcode(); | |||
5074 | ||||
5075 | if (Cond.getResNo() == 1 && | |||
5076 | (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO || | |||
5077 | Opc == ISD::USUBO)) { | |||
5078 | if (!DAG.getTargetLoweringInfo().isTypeLegal(Cond->getValueType(0))) | |||
5079 | return SDValue(); | |||
5080 | ||||
5081 | SDValue Value, OverflowCmp; | |||
5082 | SDValue ARMcc; | |||
5083 | std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc); | |||
5084 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); | |||
5085 | EVT VT = Op.getValueType(); | |||
5086 | ||||
5087 | return getCMOV(dl, VT, SelectTrue, SelectFalse, ARMcc, CCR, | |||
5088 | OverflowCmp, DAG); | |||
5089 | } | |||
5090 | ||||
5091 | // Convert: | |||
5092 | // | |||
5093 | // (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond) | |||
5094 | // (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond) | |||
5095 | // | |||
5096 | if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) { | |||
5097 | const ConstantSDNode *CMOVTrue = | |||
5098 | dyn_cast<ConstantSDNode>(Cond.getOperand(0)); | |||
5099 | const ConstantSDNode *CMOVFalse = | |||
5100 | dyn_cast<ConstantSDNode>(Cond.getOperand(1)); | |||
5101 | ||||
5102 | if (CMOVTrue && CMOVFalse) { | |||
5103 | unsigned CMOVTrueVal = CMOVTrue->getZExtValue(); | |||
5104 | unsigned CMOVFalseVal = CMOVFalse->getZExtValue(); | |||
5105 | ||||
5106 | SDValue True; | |||
5107 | SDValue False; | |||
5108 | if (CMOVTrueVal == 1 && CMOVFalseVal == 0) { | |||
5109 | True = SelectTrue; | |||
5110 | False = SelectFalse; | |||
5111 | } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) { | |||
5112 | True = SelectFalse; | |||
5113 | False = SelectTrue; | |||
5114 | } | |||
5115 | ||||
5116 | if (True.getNode() && False.getNode()) { | |||
5117 | EVT VT = Op.getValueType(); | |||
5118 | SDValue ARMcc = Cond.getOperand(2); | |||
5119 | SDValue CCR = Cond.getOperand(3); | |||
5120 | SDValue Cmp = duplicateCmp(Cond.getOperand(4), DAG); | |||
5121 | assert(True.getValueType() == VT); | |||
5122 | return getCMOV(dl, VT, True, False, ARMcc, CCR, Cmp, DAG); | |||
5123 | } | |||
5124 | } | |||
5125 | } | |||
5126 | ||||
5127 | // ARM's BooleanContents value is UndefinedBooleanContent. Mask out the | |||
5128 | // undefined bits before doing a full-word comparison with zero. | |||
5129 | Cond = DAG.getNode(ISD::AND, dl, Cond.getValueType(), Cond, | |||
5130 | DAG.getConstant(1, dl, Cond.getValueType())); | |||
5131 | ||||
5132 | return DAG.getSelectCC(dl, Cond, | |||
5133 | DAG.getConstant(0, dl, Cond.getValueType()), | |||
5134 | SelectTrue, SelectFalse, ISD::SETNE); | |||
5135 | } | |||
5136 | ||||
5137 | static void checkVSELConstraints(ISD::CondCode CC, ARMCC::CondCodes &CondCode, | |||
5138 | bool &swpCmpOps, bool &swpVselOps) { | |||
5139 | // Start by selecting the GE condition code for opcodes that return true for | |||
5140 | // 'equality' | |||
5141 | if (CC == ISD::SETUGE || CC == ISD::SETOGE || CC == ISD::SETOLE || | |||
5142 | CC == ISD::SETULE || CC == ISD::SETGE || CC == ISD::SETLE) | |||
5143 | CondCode = ARMCC::GE; | |||
5144 | ||||
5145 | // and GT for opcodes that return false for 'equality'. | |||
5146 | else if (CC == ISD::SETUGT || CC == ISD::SETOGT || CC == ISD::SETOLT || | |||
5147 | CC == ISD::SETULT || CC == ISD::SETGT || CC == ISD::SETLT) | |||
5148 | CondCode = ARMCC::GT; | |||
5149 | ||||
5150 | // Since we are constrained to GE/GT, if the opcode contains 'less', we need | |||
5151 | // to swap the compare operands. | |||
5152 | if (CC == ISD::SETOLE || CC == ISD::SETULE || CC == ISD::SETOLT || | |||
5153 | CC == ISD::SETULT || CC == ISD::SETLE || CC == ISD::SETLT) | |||
5154 | swpCmpOps = true; | |||
5155 | ||||
5156 | // Both GT and GE are ordered comparisons, and return false for 'unordered'. | |||
5157 | // If we have an unordered opcode, we need to swap the operands to the VSEL | |||
5158 | // instruction (effectively negating the condition). | |||
5159 | // | |||
5160 | // This also has the effect of swapping which one of 'less' or 'greater' | |||
5161 | // returns true, so we also swap the compare operands. It also switches | |||
5162 | // whether we return true for 'equality', so we compensate by picking the | |||
5163 | // opposite condition code to our original choice. | |||
5164 | if (CC == ISD::SETULE || CC == ISD::SETULT || CC == ISD::SETUGE || | |||
5165 | CC == ISD::SETUGT) { | |||
5166 | swpCmpOps = !swpCmpOps; | |||
5167 | swpVselOps = !swpVselOps; | |||
5168 | CondCode = CondCode == ARMCC::GT ? ARMCC::GE : ARMCC::GT; | |||
5169 | } | |||
5170 | ||||
5171 | // 'ordered' is 'anything but unordered', so use the VS condition code and | |||
5172 | // swap the VSEL operands. | |||
5173 | if (CC == ISD::SETO) { | |||
5174 | CondCode = ARMCC::VS; | |||
5175 | swpVselOps = true; | |||
5176 | } | |||
5177 | ||||
5178 | // 'unordered or not equal' is 'anything but equal', so use the EQ condition | |||
5179 | // code and swap the VSEL operands. Also do this if we don't care about the | |||
5180 | // unordered case. | |||
5181 | if (CC == ISD::SETUNE || CC == ISD::SETNE) { | |||
5182 | CondCode = ARMCC::EQ; | |||
5183 | swpVselOps = true; | |||
5184 | } | |||
5185 | } | |||
5186 | ||||
5187 | SDValue ARMTargetLowering::getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal, | |||
5188 | SDValue TrueVal, SDValue ARMcc, SDValue CCR, | |||
5189 | SDValue Cmp, SelectionDAG &DAG) const { | |||
5190 | if (!Subtarget->hasFP64() && VT == MVT::f64) { | |||
5191 | FalseVal = DAG.getNode(ARMISD::VMOVRRD, dl, | |||
5192 | DAG.getVTList(MVT::i32, MVT::i32), FalseVal); | |||
5193 | TrueVal = DAG.getNode(ARMISD::VMOVRRD, dl, | |||
5194 | DAG.getVTList(MVT::i32, MVT::i32), TrueVal); | |||
5195 | ||||
5196 | SDValue TrueLow = TrueVal.getValue(0); | |||
5197 | SDValue TrueHigh = TrueVal.getValue(1); | |||
5198 | SDValue FalseLow = FalseVal.getValue(0); | |||
5199 | SDValue FalseHigh = FalseVal.getValue(1); | |||
5200 | ||||
5201 | SDValue Low = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseLow, TrueLow, | |||
5202 | ARMcc, CCR, Cmp); | |||
5203 | SDValue High = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseHigh, TrueHigh, | |||
5204 | ARMcc, CCR, duplicateCmp(Cmp, DAG)); | |||
5205 | ||||
5206 | return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Low, High); | |||
5207 | } else { | |||
5208 | return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR, | |||
5209 | Cmp); | |||
5210 | } | |||
5211 | } | |||
5212 | ||||
5213 | static bool isGTorGE(ISD::CondCode CC) { | |||
5214 | return CC == ISD::SETGT || CC == ISD::SETGE; | |||
5215 | } | |||
5216 | ||||
5217 | static bool isLTorLE(ISD::CondCode CC) { | |||
5218 | return CC == ISD::SETLT || CC == ISD::SETLE; | |||
5219 | } | |||
5220 | ||||
5221 | // See if a conditional (LHS CC RHS ? TrueVal : FalseVal) is lower-saturating. | |||
5222 | // All of these conditions (and their <= and >= counterparts) will do: | |||
5223 | // x < k ? k : x | |||
5224 | // x > k ? x : k | |||
5225 | // k < x ? x : k | |||
5226 | // k > x ? k : x | |||
5227 | static bool isLowerSaturate(const SDValue LHS, const SDValue RHS, | |||
5228 | const SDValue TrueVal, const SDValue FalseVal, | |||
5229 | const ISD::CondCode CC, const SDValue K) { | |||
5230 | return (isGTorGE(CC) && | |||
5231 | ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal))) || | |||
5232 | (isLTorLE(CC) && | |||
5233 | ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal))); | |||
5234 | } | |||
5235 | ||||
5236 | // Check if two chained conditionals could be converted into SSAT or USAT. | |||
5237 | // | |||
5238 | // SSAT can replace a set of two conditional selectors that bound a number to an | |||
5239 | // interval of type [k, ~k] when k + 1 is a power of 2. Here are some examples: | |||
5240 | // | |||
5241 | // x < -k ? -k : (x > k ? k : x) | |||
5242 | // x < -k ? -k : (x < k ? x : k) | |||
5243 | // x > -k ? (x > k ? k : x) : -k | |||
5244 | // x < k ? (x < -k ? -k : x) : k | |||
5245 | // etc. | |||
5246 | // | |||
5247 | // LLVM canonicalizes these to either a min(max()) or a max(min()) | |||
5248 | // pattern. This function tries to match one of these and will return a SSAT | |||
5249 | // node if successful. | |||
5250 | // | |||
5252 | // USAT works similarly to SSAT but bounds to the interval [0, k], where k + 1 | |||
5252 | // is a power of 2. | |||
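| // Worked example (illustrative): with k = 127 (k + 1 = 128 = 2^7), | |||
| // "x < -128 ? -128 : (x > 127 ? 127 : x)" satisfies Val1 == ~Val2 | |||
| // (-128 == ~127) and collapses to a single signed saturate to 8 bits, | |||
| // while "x < 0 ? 0 : (x > 127 ? 127 : x)" hits the NegVal == 0 case | |||
| // and becomes an unsigned saturate. | |||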
5253 | static SDValue LowerSaturatingConditional(SDValue Op, SelectionDAG &DAG) { | |||
5254 | EVT VT = Op.getValueType(); | |||
5255 | SDValue V1 = Op.getOperand(0); | |||
5256 | SDValue K1 = Op.getOperand(1); | |||
5257 | SDValue TrueVal1 = Op.getOperand(2); | |||
5258 | SDValue FalseVal1 = Op.getOperand(3); | |||
5259 | ISD::CondCode CC1 = cast<CondCodeSDNode>(Op.getOperand(4))->get(); | |||
5260 | ||||
5261 | const SDValue Op2 = isa<ConstantSDNode>(TrueVal1) ? FalseVal1 : TrueVal1; | |||
5262 | if (Op2.getOpcode() != ISD::SELECT_CC) | |||
5263 | return SDValue(); | |||
5264 | ||||
5265 | SDValue V2 = Op2.getOperand(0); | |||
5266 | SDValue K2 = Op2.getOperand(1); | |||
5267 | SDValue TrueVal2 = Op2.getOperand(2); | |||
5268 | SDValue FalseVal2 = Op2.getOperand(3); | |||
5269 | ISD::CondCode CC2 = cast<CondCodeSDNode>(Op2.getOperand(4))->get(); | |||
5270 | ||||
5271 | SDValue V1Tmp = V1; | |||
5272 | SDValue V2Tmp = V2; | |||
5273 | ||||
5274 | // Check that the registers and the constants match a max(min()) or min(max()) | |||
5275 | // pattern | |||
5276 | if (V1Tmp != TrueVal1 || V2Tmp != TrueVal2 || K1 != FalseVal1 || | |||
5277 | K2 != FalseVal2 || | |||
5278 | !((isGTorGE(CC1) && isLTorLE(CC2)) || (isLTorLE(CC1) && isGTorGE(CC2)))) | |||
5279 | return SDValue(); | |||
5280 | ||||
5281 | // Check that the constant in the lower-bound check is | |||
5282 | // the opposite of the constant in the upper-bound check | |||
5283 | // in 1's complement. | |||
5284 | if (!isa<ConstantSDNode>(K1) || !isa<ConstantSDNode>(K2)) | |||
5285 | return SDValue(); | |||
5286 | ||||
5287 | int64_t Val1 = cast<ConstantSDNode>(K1)->getSExtValue(); | |||
5288 | int64_t Val2 = cast<ConstantSDNode>(K2)->getSExtValue(); | |||
5289 | int64_t PosVal = std::max(Val1, Val2); | |||
5290 | int64_t NegVal = std::min(Val1, Val2); | |||
5291 | ||||
5292 | if (!((Val1 > Val2 && isLTorLE(CC1)) || (Val1 < Val2 && isLTorLE(CC2))) || | |||
5293 | !isPowerOf2_64(PosVal + 1)) | |||
5294 | return SDValue(); | |||
5295 | ||||
5296 | // Handle the difference between USAT (unsigned) and SSAT (signed) | |||
5297 | // saturation | |||
5298 | // At this point, PosVal is guaranteed to be positive | |||
5299 | uint64_t K = PosVal; | |||
5300 | SDLoc dl(Op); | |||
5301 | if (Val1 == ~Val2) | |||
5302 | return DAG.getNode(ARMISD::SSAT, dl, VT, V2Tmp, | |||
5303 | DAG.getConstant(countTrailingOnes(K), dl, VT)); | |||
5304 | if (NegVal == 0) | |||
5305 | return DAG.getNode(ARMISD::USAT, dl, VT, V2Tmp, | |||
5306 | DAG.getConstant(countTrailingOnes(K), dl, VT)); | |||
5307 | ||||
5308 | return SDValue(); | |||
5309 | } | |||
5310 | ||||
5311 | // Check if a condition of the type x < k ? k : x can be converted into a | |||
5312 | // bit operation instead of conditional moves. | |||
5313 | // Currently this is allowed given: | |||
5314 | // - The conditions and values match up | |||
5315 | // - k is 0 or -1 (all ones) | |||
5317 | // This function will not check the last condition; that's up to the caller. | |||
5318 | // It returns true if the transformation can be made, and in such a case | |||
5319 | // returns x in V, and k in SatK. | |||
5319 | static bool isLowerSaturatingConditional(const SDValue &Op, SDValue &V, | |||
5320 | SDValue &SatK) | |||
5321 | { | |||
5322 | SDValue LHS = Op.getOperand(0); | |||
5323 | SDValue RHS = Op.getOperand(1); | |||
5324 | ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); | |||
5325 | SDValue TrueVal = Op.getOperand(2); | |||
5326 | SDValue FalseVal = Op.getOperand(3); | |||
5327 | ||||
5328 | SDValue *K = isa<ConstantSDNode>(LHS) ? &LHS : isa<ConstantSDNode>(RHS) | |||
5329 | ? &RHS | |||
5330 | : nullptr; | |||
5331 | ||||
5333 | // No constant operand in the comparison, early out | |||
5333 | if (!K) | |||
5334 | return false; | |||
5335 | ||||
5336 | SDValue KTmp = isa<ConstantSDNode>(TrueVal) ? TrueVal : FalseVal; | |||
5337 | V = (KTmp == TrueVal) ? FalseVal : TrueVal; | |||
5338 | SDValue VTmp = (K && *K == LHS) ? RHS : LHS; | |||
5339 | ||||
5340 | // If the constant in the comparison and the constant in the select arms, | |||
5341 | // or the variable in each, do not match, early out | |||
5342 | if (*K != KTmp || V != VTmp) | |||
5343 | return false; | |||
5344 | ||||
5345 | if (isLowerSaturate(LHS, RHS, TrueVal, FalseVal, CC, *K)) { | |||
5346 | SatK = *K; | |||
5347 | return true; | |||
5348 | } | |||
5349 | ||||
5350 | return false; | |||
5351 | } | |||
5352 | ||||
5353 | bool ARMTargetLowering::isUnsupportedFloatingType(EVT VT) const { | |||
5354 | if (VT == MVT::f32) | |||
5355 | return !Subtarget->hasVFP2Base(); | |||
5356 | if (VT == MVT::f64) | |||
5357 | return !Subtarget->hasFP64(); | |||
5358 | if (VT == MVT::f16) | |||
5359 | return !Subtarget->hasFullFP16(); | |||
5360 | return false; | |||
5361 | } | |||
5362 | ||||
5363 | SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { | |||
5364 | EVT VT = Op.getValueType(); | |||
5365 | SDLoc dl(Op); | |||
5366 | ||||
5367 | // Try to convert two saturating conditional selects into a single SSAT | |||
5368 | if ((!Subtarget->isThumb() && Subtarget->hasV6Ops()) || Subtarget->isThumb2()) | |||
5369 | if (SDValue SatValue = LowerSaturatingConditional(Op, DAG)) | |||
5370 | return SatValue; | |||
5371 | ||||
5372 | // Try to convert expressions of the form x < k ? k : x (and similar forms) | |||
5373 | // into more efficient bit operations, which is possible when k is 0 or -1 | |||
5374 | // On ARM and Thumb-2 which have flexible operand 2 this will result in | |||
5375 | // single instructions. On Thumb the shift and the bit operation will be two | |||
5376 | // instructions. | |||
5377 | // Only allow this transformation on full-width (32-bit) operations | |||
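| // Worked example (illustrative): "x < 0 ? 0 : x" (max(x, 0)) becomes | |||
| // "x & ~(x >> 31)" and "x < -1 ? -1 : x" (max(x, -1)) becomes | |||
| // "x | (x >> 31)"; the arithmetic shift is all-ones for negative x and | |||
| // zero otherwise. | |||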
5378 | SDValue LowerSatConstant; | |||
5379 | SDValue SatValue; | |||
5380 | if (VT == MVT::i32 && | |||
5381 | isLowerSaturatingConditional(Op, SatValue, LowerSatConstant)) { | |||
5382 | SDValue ShiftV = DAG.getNode(ISD::SRA, dl, VT, SatValue, | |||
5383 | DAG.getConstant(31, dl, VT)); | |||
5384 | if (isNullConstant(LowerSatConstant)) { | |||
5385 | SDValue NotShiftV = DAG.getNode(ISD::XOR, dl, VT, ShiftV, | |||
5386 | DAG.getAllOnesConstant(dl, VT)); | |||
5387 | return DAG.getNode(ISD::AND, dl, VT, SatValue, NotShiftV); | |||
5388 | } else if (isAllOnesConstant(LowerSatConstant)) | |||
5389 | return DAG.getNode(ISD::OR, dl, VT, SatValue, ShiftV); | |||
5390 | } | |||
5391 | ||||
5392 | SDValue LHS = Op.getOperand(0); | |||
5393 | SDValue RHS = Op.getOperand(1); | |||
5394 | ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); | |||
5395 | SDValue TrueVal = Op.getOperand(2); | |||
5396 | SDValue FalseVal = Op.getOperand(3); | |||
5397 | ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(FalseVal); | |||
5398 | ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(TrueVal); | |||
5399 | ||||
5400 | if (Subtarget->hasV8_1MMainlineOps() && CFVal && CTVal && | |||
5401 | LHS.getValueType() == MVT::i32 && RHS.getValueType() == MVT::i32) { | |||
5402 | unsigned TVal = CTVal->getZExtValue(); | |||
5403 | unsigned FVal = CFVal->getZExtValue(); | |||
5404 | unsigned Opcode = 0; | |||
5405 | ||||
5406 | if (TVal == ~FVal) { | |||
5407 | Opcode = ARMISD::CSINV; | |||
5408 | } else if (TVal == ~FVal + 1) { | |||
5409 | Opcode = ARMISD::CSNEG; | |||
5410 | } else if (TVal + 1 == FVal) { | |||
5411 | Opcode = ARMISD::CSINC; | |||
5412 | } else if (TVal == FVal + 1) { | |||
5413 | Opcode = ARMISD::CSINC; | |||
5414 | std::swap(TrueVal, FalseVal); | |||
5415 | std::swap(TVal, FVal); | |||
5416 | CC = ISD::getSetCCInverse(CC, LHS.getValueType()); | |||
5417 | } | |||
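| // For example (illustrative): TrueVal = 5, FalseVal = 6 matches | |||
| // "TVal + 1 == FVal", so the select becomes a CSINC that materialises | |||
| // 5 once and increments it when the condition fails. | |||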
5418 | ||||
5419 | if (Opcode) { | |||
5420 | // If one of the constants is cheaper than another, materialise the | |||
5421 | // cheaper one and let the csel generate the other. | |||
5422 | if (Opcode != ARMISD::CSINC && | |||
5423 | HasLowerConstantMaterializationCost(FVal, TVal, Subtarget)) { | |||
5424 | std::swap(TrueVal, FalseVal); | |||
5425 | std::swap(TVal, FVal); | |||
5426 | CC = ISD::getSetCCInverse(CC, LHS.getValueType()); | |||
5427 | } | |||
5428 | ||||
5429 | // Attempt to use ZR, checking whether TVal is 0, possibly inverting the | |||
5430 | // condition to get there. CSINC is not invertible like the other two | |||
5431 | // (~(~a) == a, -(-a) == a, but (a+1)+1 != a). | |||
5432 | if (FVal == 0 && Opcode != ARMISD::CSINC) { | |||
5433 | std::swap(TrueVal, FalseVal); | |||
5434 | std::swap(TVal, FVal); | |||
5435 | CC = ISD::getSetCCInverse(CC, LHS.getValueType()); | |||
5436 | } | |||
5437 | ||||
5438 | // Drops F's value because we can get it by inverting/negating TVal. | |||
5439 | FalseVal = TrueVal; | |||
5440 | ||||
5441 | SDValue ARMcc; | |||
5442 | SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); | |||
5443 | EVT VT = TrueVal.getValueType(); | |||
5444 | return DAG.getNode(Opcode, dl, VT, TrueVal, FalseVal, ARMcc, Cmp); | |||
5445 | } | |||
5446 | } | |||
5447 | ||||
5448 | if (isUnsupportedFloatingType(LHS.getValueType())) { | |||
5449 | DAG.getTargetLoweringInfo().softenSetCCOperands( | |||
5450 | DAG, LHS.getValueType(), LHS, RHS, CC, dl, LHS, RHS); | |||
5451 | ||||
5452 | // If softenSetCCOperands only returned one value, we should compare it to | |||
5453 | // zero. | |||
5454 | if (!RHS.getNode()) { | |||
5455 | RHS = DAG.getConstant(0, dl, LHS.getValueType()); | |||
5456 | CC = ISD::SETNE; | |||
5457 | } | |||
5458 | } | |||
5459 | ||||
5460 | if (LHS.getValueType() == MVT::i32) { | |||
5461 | // Try to generate VSEL on ARMv8. | |||
5462 | // The VSEL instruction can't use all the usual ARM condition | |||
5463 | // codes: it only has two bits to select the condition code, so it's | |||
5464 | // constrained to use only GE, GT, VS and EQ. | |||
5465 | // | |||
5466 | // To implement all the various ISD::SETXXX opcodes, we sometimes need to | |||
5467 | // swap the operands of the previous compare instruction (effectively | |||
5468 | // inverting the compare condition, swapping 'less' and 'greater') and | |||
5469 | // sometimes need to swap the operands to the VSEL (which inverts the | |||
5470 | // condition in the sense of firing whenever the previous condition didn't) | |||
5471 | if (Subtarget->hasFPARMv8Base() && (TrueVal.getValueType() == MVT::f16 || | |||
5472 | TrueVal.getValueType() == MVT::f32 || | |||
5473 | TrueVal.getValueType() == MVT::f64)) { | |||
5474 | ARMCC::CondCodes CondCode = IntCCToARMCC(CC); | |||
5475 | if (CondCode == ARMCC::LT || CondCode == ARMCC::LE || | |||
5476 | CondCode == ARMCC::VC || CondCode == ARMCC::NE) { | |||
5477 | CC = ISD::getSetCCInverse(CC, LHS.getValueType()); | |||
5478 | std::swap(TrueVal, FalseVal); | |||
5479 | } | |||
5480 | } | |||
5481 | ||||
5482 | SDValue ARMcc; | |||
5483 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); | |||
5484 | SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); | |||
5485 | // Choose GE over PL, which vsel does not support. | |||
5486 | if (cast<ConstantSDNode>(ARMcc)->getZExtValue() == ARMCC::PL) | |||
5487 | ARMcc = DAG.getConstant(ARMCC::GE, dl, MVT::i32); | |||
5488 | return getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG); | |||
5489 | } | |||
5490 | ||||
5491 | ARMCC::CondCodes CondCode, CondCode2; | |||
5492 | FPCCToARMCC(CC, CondCode, CondCode2); | |||
5493 | ||||
5494 | // Normalize the fp compare. If RHS is zero we prefer to keep it there so we | |||
5495 | // match CMPFPw0 instead of CMPFP, though we don't do this for f16 because we | |||
5496 | // must use VSEL (limited condition codes), due to not having conditional f16 | |||
5497 | // moves. | |||
5498 | if (Subtarget->hasFPARMv8Base() && | |||
5499 | !(isFloatingPointZero(RHS) && TrueVal.getValueType() != MVT::f16) && | |||
5500 | (TrueVal.getValueType() == MVT::f16 || | |||
5501 | TrueVal.getValueType() == MVT::f32 || | |||
5502 | TrueVal.getValueType() == MVT::f64)) { | |||
5503 | bool swpCmpOps = false; | |||
5504 | bool swpVselOps = false; | |||
5505 | checkVSELConstraints(CC, CondCode, swpCmpOps, swpVselOps); | |||
5506 | ||||
5507 | if (CondCode == ARMCC::GT || CondCode == ARMCC::GE || | |||
5508 | CondCode == ARMCC::VS || CondCode == ARMCC::EQ) { | |||
5509 | if (swpCmpOps) | |||
5510 | std::swap(LHS, RHS); | |||
5511 | if (swpVselOps) | |||
5512 | std::swap(TrueVal, FalseVal); | |||
5513 | } | |||
5514 | } | |||
5515 | ||||
5516 | SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32); | |||
5517 | SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); | |||
5518 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); | |||
5519 | SDValue Result = getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG); | |||
5520 | if (CondCode2 != ARMCC::AL) { | |||
5521 | SDValue ARMcc2 = DAG.getConstant(CondCode2, dl, MVT::i32); | |||
5522 | // FIXME: Needs another CMP because flag can have but one use. | |||
5523 | SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl); | |||
5524 | Result = getCMOV(dl, VT, Result, TrueVal, ARMcc2, CCR, Cmp2, DAG); | |||
5525 | } | |||
5526 | return Result; | |||
5527 | } | |||
5528 | ||||
5529 | /// canChangeToInt - Given the fp compare operand, return true if it is suitable | |||
5530 | /// to morph to an integer compare sequence. | |||
5531 | static bool canChangeToInt(SDValue Op, bool &SeenZero, | |||
5532 | const ARMSubtarget *Subtarget) { | |||
5533 | SDNode *N = Op.getNode(); | |||
5534 | if (!N->hasOneUse()) | |||
5535 | // Otherwise it requires moving the value from fp to integer registers. | |||
5536 | return false; | |||
5537 | if (!N->getNumValues()) | |||
5538 | return false; | |||
5539 | EVT VT = Op.getValueType(); | |||
5540 | if (VT != MVT::f32 && !Subtarget->isFPBrccSlow()) | |||
5541 | // f32 case is generally profitable. f64 case only makes sense when vcmpe + | |||
5542 | // vmrs are very slow, e.g. cortex-a8. | |||
5543 | return false; | |||
5544 | ||||
5545 | if (isFloatingPointZero(Op)) { | |||
5546 | SeenZero = true; | |||
5547 | return true; | |||
5548 | } | |||
5549 | return ISD::isNormalLoad(N); | |||
5550 | } | |||
5551 | ||||
5552 | static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) { | |||
5553 | if (isFloatingPointZero(Op)) | |||
5554 | return DAG.getConstant(0, SDLoc(Op), MVT::i32); | |||
5555 | ||||
5556 | if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) | |||
5557 | return DAG.getLoad(MVT::i32, SDLoc(Op), Ld->getChain(), Ld->getBasePtr(), | |||
5558 | Ld->getPointerInfo(), Ld->getAlignment(), | |||
5559 | Ld->getMemOperand()->getFlags()); | |||
5560 | ||||
5561 | llvm_unreachable("Unknown VFP cmp argument!")::llvm::llvm_unreachable_internal("Unknown VFP cmp argument!" , "llvm/lib/Target/ARM/ARMISelLowering.cpp", 5561); | |||
5562 | } | |||
5563 | ||||
5564 | static void expandf64Toi32(SDValue Op, SelectionDAG &DAG, | |||
5565 | SDValue &RetVal1, SDValue &RetVal2) { | |||
5566 | SDLoc dl(Op); | |||
5567 | ||||
5568 | if (isFloatingPointZero(Op)) { | |||
5569 | RetVal1 = DAG.getConstant(0, dl, MVT::i32); | |||
5570 | RetVal2 = DAG.getConstant(0, dl, MVT::i32); | |||
5571 | return; | |||
5572 | } | |||
5573 | ||||
5574 | if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) { | |||
5575 | SDValue Ptr = Ld->getBasePtr(); | |||
5576 | RetVal1 = | |||
5577 | DAG.getLoad(MVT::i32, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(), | |||
5578 | Ld->getAlignment(), Ld->getMemOperand()->getFlags()); | |||
5579 | ||||
5580 | EVT PtrType = Ptr.getValueType(); | |||
5581 | unsigned NewAlign = MinAlign(Ld->getAlignment(), 4); | |||
5582 | SDValue NewPtr = DAG.getNode(ISD::ADD, dl, | |||
5583 | PtrType, Ptr, DAG.getConstant(4, dl, PtrType)); | |||
5584 | RetVal2 = DAG.getLoad(MVT::i32, dl, Ld->getChain(), NewPtr, | |||
5585 | Ld->getPointerInfo().getWithOffset(4), NewAlign, | |||
5586 | Ld->getMemOperand()->getFlags()); | |||
5587 | return; | |||
5588 | } | |||
5589 | ||||
5590 | llvm_unreachable("Unknown VFP cmp argument!")::llvm::llvm_unreachable_internal("Unknown VFP cmp argument!" , "llvm/lib/Target/ARM/ARMISelLowering.cpp", 5590); | |||
5591 | } | |||
5592 | ||||
5593 | /// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some | |||
5594 | /// f32 and even f64 comparisons to integer ones. | |||
5595 | SDValue | |||
5596 | ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const { | |||
5597 | SDValue Chain = Op.getOperand(0); | |||
5598 | ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); | |||
5599 | SDValue LHS = Op.getOperand(2); | |||
5600 | SDValue RHS = Op.getOperand(3); | |||
5601 | SDValue Dest = Op.getOperand(4); | |||
5602 | SDLoc dl(Op); | |||
5603 | ||||
5604 | bool LHSSeenZero = false; | |||
5605 | bool LHSOk = canChangeToInt(LHS, LHSSeenZero, Subtarget); | |||
5606 | bool RHSSeenZero = false; | |||
5607 | bool RHSOk = canChangeToInt(RHS, RHSSeenZero, Subtarget); | |||
5608 | if (LHSOk && RHSOk && (LHSSeenZero || RHSSeenZero)) { | |||
5609 | // If unsafe fp math optimization is enabled and there are no other uses of | |||
5610 | // the CMP operands, and the condition code is EQ or NE, we can optimize it | |||
5611 | // to an integer comparison. | |||
5612 | if (CC == ISD::SETOEQ) | |||
5613 | CC = ISD::SETEQ; | |||
5614 | else if (CC == ISD::SETUNE) | |||
5615 | CC = ISD::SETNE; | |||
5616 | ||||
5617 | SDValue Mask = DAG.getConstant(0x7fffffff, dl, MVT::i32); | |||
5618 | SDValue ARMcc; | |||
5619 | if (LHS.getValueType() == MVT::f32) { | |||
5620 | LHS = DAG.getNode(ISD::AND, dl, MVT::i32, | |||
5621 | bitcastf32Toi32(LHS, DAG), Mask); | |||
5622 | RHS = DAG.getNode(ISD::AND, dl, MVT::i32, | |||
5623 | bitcastf32Toi32(RHS, DAG), Mask); | |||
5624 | SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); | |||
5625 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); | |||
5626 | return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, | |||
5627 | Chain, Dest, ARMcc, CCR, Cmp); | |||
5628 | } | |||
5629 | ||||
5630 | SDValue LHS1, LHS2; | |||
5631 | SDValue RHS1, RHS2; | |||
5632 | expandf64Toi32(LHS, DAG, LHS1, LHS2); | |||
5633 | expandf64Toi32(RHS, DAG, RHS1, RHS2); | |||
5634 | LHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, LHS2, Mask); | |||
5635 | RHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, RHS2, Mask); | |||
5636 | ARMCC::CondCodes CondCode = IntCCToARMCC(CC); | |||
5637 | ARMcc = DAG.getConstant(CondCode, dl, MVT::i32); | |||
5638 | SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue); | |||
5639 | SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest }; | |||
5640 | return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops); | |||
5641 | } | |||
5642 | ||||
5643 | return SDValue(); | |||
5644 | } | |||
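// Worked example (exposition only): under unsafe-fp-math, (f32)X == 0.0f
// can be tested on the integer side by masking off the sign bit, so that
// -0.0f and +0.0f compare equal. A scalar sketch (hypothetical helper,
// assuming IEEE-754 binary32 and <cstdint>/<cstring>):
//
//   bool IsFPZero(float X) {
//     uint32_t Bits;
//     std::memcpy(&Bits, &X, sizeof(Bits));
//     return (Bits & 0x7fffffffu) == 0;  // clear sign, keep exponent+mantissa
//   }
//
// which is exactly the AND with 0x7fffffff applied to both operands above.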
5645 | ||||
5646 | SDValue ARMTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { | |||
5647 | SDValue Chain = Op.getOperand(0); | |||
5648 | SDValue Cond = Op.getOperand(1); | |||
5649 | SDValue Dest = Op.getOperand(2); | |||
5650 | SDLoc dl(Op); | |||
5651 | ||||
5652 | // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch | |||
5653 | // instruction. | |||
5654 | unsigned Opc = Cond.getOpcode(); | |||
5655 | bool OptimizeMul = (Opc == ISD::SMULO || Opc == ISD::UMULO) && | |||
5656 | !Subtarget->isThumb1Only(); | |||
5657 | if (Cond.getResNo() == 1 && | |||
5658 | (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO || | |||
5659 | Opc == ISD::USUBO || OptimizeMul)) { | |||
5660 | // Only lower legal XALUO ops. | |||
5661 | if (!DAG.getTargetLoweringInfo().isTypeLegal(Cond->getValueType(0))) | |||
5662 | return SDValue(); | |||
5663 | ||||
5664 | // The actual operation with overflow check. | |||
5665 | SDValue Value, OverflowCmp; | |||
5666 | SDValue ARMcc; | |||
5667 | std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc); | |||
5668 | ||||
5669 | // Reverse the condition code. | |||
5670 | ARMCC::CondCodes CondCode = | |||
5671 | (ARMCC::CondCodes)cast<const ConstantSDNode>(ARMcc)->getZExtValue(); | |||
5672 | CondCode = ARMCC::getOppositeCondition(CondCode); | |||
5673 | ARMcc = DAG.getConstant(CondCode, SDLoc(ARMcc), MVT::i32); | |||
5674 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); | |||
5675 | ||||
5676 | return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, Chain, Dest, ARMcc, CCR, | |||
5677 | OverflowCmp); | |||
5678 | } | |||
5679 | ||||
5680 | return SDValue(); | |||
5681 | } | |||
5682 | ||||
5683 | SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { | |||
5684 | SDValue Chain = Op.getOperand(0); | |||
5685 | ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); | |||
5686 | SDValue LHS = Op.getOperand(2); | |||
5687 | SDValue RHS = Op.getOperand(3); | |||
5688 | SDValue Dest = Op.getOperand(4); | |||
5689 | SDLoc dl(Op); | |||
5690 | ||||
5691 | if (isUnsupportedFloatingType(LHS.getValueType())) { | |||
5692 | DAG.getTargetLoweringInfo().softenSetCCOperands( | |||
5693 | DAG, LHS.getValueType(), LHS, RHS, CC, dl, LHS, RHS); | |||
5694 | ||||
5695 | // If softenSetCCOperands only returned one value, we should compare it to | |||
5696 | // zero. | |||
5697 | if (!RHS.getNode()) { | |||
5698 | RHS = DAG.getConstant(0, dl, LHS.getValueType()); | |||
5699 | CC = ISD::SETNE; | |||
5700 | } | |||
5701 | } | |||
5702 | ||||
5703 | // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch | |||
5704 | // instruction. | |||
5705 | unsigned Opc = LHS.getOpcode(); | |||
5706 | bool OptimizeMul = (Opc == ISD::SMULO || Opc == ISD::UMULO) && | |||
5707 | !Subtarget->isThumb1Only(); | |||
5708 | if (LHS.getResNo() == 1 && (isOneConstant(RHS) || isNullConstant(RHS)) && | |||
5709 | (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO || | |||
5710 | Opc == ISD::USUBO || OptimizeMul) && | |||
5711 | (CC == ISD::SETEQ || CC == ISD::SETNE)) { | |||
5712 | // Only lower legal XALUO ops. | |||
5713 | if (!DAG.getTargetLoweringInfo().isTypeLegal(LHS->getValueType(0))) | |||
5714 | return SDValue(); | |||
5715 | ||||
5716 | // The actual operation with overflow check. | |||
5717 | SDValue Value, OverflowCmp; | |||
5718 | SDValue ARMcc; | |||
5719 | std::tie(Value, OverflowCmp) = getARMXALUOOp(LHS.getValue(0), DAG, ARMcc); | |||
5720 | ||||
5721 | if ((CC == ISD::SETNE) != isOneConstant(RHS)) { | |||
5722 | // Reverse the condition code. | |||
5723 | ARMCC::CondCodes CondCode = | |||
5724 | (ARMCC::CondCodes)cast<const ConstantSDNode>(ARMcc)->getZExtValue(); | |||
5725 | CondCode = ARMCC::getOppositeCondition(CondCode); | |||
5726 | ARMcc = DAG.getConstant(CondCode, SDLoc(ARMcc), MVT::i32); | |||
5727 | } | |||
5728 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); | |||
5729 | ||||
5730 | return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, Chain, Dest, ARMcc, CCR, | |||
5731 | OverflowCmp); | |||
5732 | } | |||
5733 | ||||
5734 | if (LHS.getValueType() == MVT::i32) { | |||
5735 | SDValue ARMcc; | |||
5736 | SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); | |||
5737 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); | |||
5738 | return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, | |||
5739 | Chain, Dest, ARMcc, CCR, Cmp); | |||
5740 | } | |||
5741 | ||||
5742 | if (getTargetMachine().Options.UnsafeFPMath && | |||
5743 | (CC == ISD::SETEQ || CC == ISD::SETOEQ || | |||
5744 | CC == ISD::SETNE || CC == ISD::SETUNE)) { | |||
5745 | if (SDValue Result = OptimizeVFPBrcond(Op, DAG)) | |||
5746 | return Result; | |||
5747 | } | |||
5748 | ||||
5749 | ARMCC::CondCodes CondCode, CondCode2; | |||
5750 | FPCCToARMCC(CC, CondCode, CondCode2); | |||
5751 | ||||
5752 | SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32); | |||
5753 | SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); | |||
5754 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); | |||
5755 | SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue); | |||
5756 | SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp }; | |||
5757 | SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops); | |||
5758 | if (CondCode2 != ARMCC::AL) { | |||
5759 | ARMcc = DAG.getConstant(CondCode2, dl, MVT::i32); | |||
5760 | SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) }; | |||
5761 | Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops); | |||
5762 | } | |||
5763 | return Res; | |||
5764 | } | |||
5765 | ||||
5766 | SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const { | |||
5767 | SDValue Chain = Op.getOperand(0); | |||
5768 | SDValue Table = Op.getOperand(1); | |||
5769 | SDValue Index = Op.getOperand(2); | |||
5770 | SDLoc dl(Op); | |||
5771 | ||||
5772 | EVT PTy = getPointerTy(DAG.getDataLayout()); | |||
5773 | JumpTableSDNode *JT = cast<JumpTableSDNode>(Table); | |||
5774 | SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy); | |||
5775 | Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI); | |||
5776 | Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, dl, PTy)); | |||
5777 | SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Table, Index); | |||
5778 | if (Subtarget->isThumb2() || (Subtarget->hasV8MBaselineOps() && Subtarget->isThumb())) { | |||
5779 | // Thumb2 and ARMv8-M use a two-level jump. That is, it jumps into the jump table | |||
5780 | // which does another jump to the destination. This also makes it easier | |||
5781 | // to translate it to TBB / TBH later (Thumb2 only). | |||
5782 | // FIXME: This might not work if the function is extremely large. | |||
5783 | return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain, | |||
5784 | Addr, Op.getOperand(2), JTI); | |||
5785 | } | |||
5786 | if (isPositionIndependent() || Subtarget->isROPI()) { | |||
5787 | Addr = | |||
5788 | DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr, | |||
5789 | MachinePointerInfo::getJumpTable(DAG.getMachineFunction())); | |||
5790 | Chain = Addr.getValue(1); | |||
5791 | Addr = DAG.getNode(ISD::ADD, dl, PTy, Table, Addr); | |||
5792 | return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI); | |||
5793 | } else { | |||
5794 | Addr = | |||
5795 | DAG.getLoad(PTy, dl, Chain, Addr, | |||
5796 | MachinePointerInfo::getJumpTable(DAG.getMachineFunction())); | |||
5797 | Chain = Addr.getValue(1); | |||
5798 | return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI); | |||
5799 | } | |||
5800 | } | |||
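// Illustrative sketch (exposition only): the address arithmetic above is
// the classic word-sized jump table walk, roughly
//
//   Entry  = Table + Index * 4;          // each jump table entry is 4 bytes
//   Target = PIC/ROPI ? Table + *Entry   // entries hold table-relative offsets
//                     : *Entry;          // entries hold absolute addresses
//
// with Thumb2/v8-M Baseline instead branching into the table itself
// (BR2_JT), which can later become TBB/TBH.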
5801 | ||||
5802 | static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) { | |||
5803 | EVT VT = Op.getValueType(); | |||
5804 | SDLoc dl(Op); | |||
5805 | ||||
5806 | if (Op.getValueType().getVectorElementType() == MVT::i32) { | |||
5807 | if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::f32) | |||
5808 | return Op; | |||
5809 | return DAG.UnrollVectorOp(Op.getNode()); | |||
5810 | } | |||
5811 | ||||
5812 | const bool HasFullFP16 = | |||
5813 | static_cast<const ARMSubtarget&>(DAG.getSubtarget()).hasFullFP16(); | |||
5814 | ||||
5815 | EVT NewTy; | |||
5816 | const EVT OpTy = Op.getOperand(0).getValueType(); | |||
5817 | if (OpTy == MVT::v4f32) | |||
5818 | NewTy = MVT::v4i32; | |||
5819 | else if (OpTy == MVT::v4f16 && HasFullFP16) | |||
5820 | NewTy = MVT::v4i16; | |||
5821 | else if (OpTy == MVT::v8f16 && HasFullFP16) | |||
5822 | NewTy = MVT::v8i16; | |||
5823 | else | |||
5824 | llvm_unreachable("Invalid type for custom lowering!")::llvm::llvm_unreachable_internal("Invalid type for custom lowering!" , "llvm/lib/Target/ARM/ARMISelLowering.cpp", 5824); | |||
5825 | ||||
5826 | if (VT != MVT::v4i16 && VT != MVT::v8i16) | |||
5827 | return DAG.UnrollVectorOp(Op.getNode()); | |||
5828 | ||||
5829 | Op = DAG.getNode(Op.getOpcode(), dl, NewTy, Op.getOperand(0)); | |||
5830 | return DAG.getNode(ISD::TRUNCATE, dl, VT, Op); | |||
5831 | } | |||
5832 | ||||
5833 | SDValue ARMTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const { | |||
5834 | EVT VT = Op.getValueType(); | |||
5835 | if (VT.isVector()) | |||
5836 | return LowerVectorFP_TO_INT(Op, DAG); | |||
5837 | ||||
5838 | bool IsStrict = Op->isStrictFPOpcode(); | |||
5839 | SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0); | |||
5840 | ||||
5841 | if (isUnsupportedFloatingType(SrcVal.getValueType())) { | |||
5842 | RTLIB::Libcall LC; | |||
5843 | if (Op.getOpcode() == ISD::FP_TO_SINT || | |||
5844 | Op.getOpcode() == ISD::STRICT_FP_TO_SINT) | |||
5845 | LC = RTLIB::getFPTOSINT(SrcVal.getValueType(), | |||
5846 | Op.getValueType()); | |||
5847 | else | |||
5848 | LC = RTLIB::getFPTOUINT(SrcVal.getValueType(), | |||
5849 | Op.getValueType()); | |||
5850 | SDLoc Loc(Op); | |||
5851 | MakeLibCallOptions CallOptions; | |||
5852 | SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue(); | |||
5853 | SDValue Result; | |||
5854 | std::tie(Result, Chain) = makeLibCall(DAG, LC, Op.getValueType(), SrcVal, | |||
5855 | CallOptions, Loc, Chain); | |||
5856 | return IsStrict ? DAG.getMergeValues({Result, Chain}, Loc) : Result; | |||
5857 | } | |||
5858 | ||||
5859 | // FIXME: Remove this when we have strict fp instruction selection patterns | |||
5860 | if (IsStrict) { | |||
5861 | SDLoc Loc(Op); | |||
5862 | SDValue Result = | |||
5863 | DAG.getNode(Op.getOpcode() == ISD::STRICT_FP_TO_SINT ? ISD::FP_TO_SINT | |||
5864 | : ISD::FP_TO_UINT, | |||
5865 | Loc, Op.getValueType(), SrcVal); | |||
5866 | return DAG.getMergeValues({Result, Op.getOperand(0)}, Loc); | |||
5867 | } | |||
5868 | ||||
5869 | return Op; | |||
5870 | } | |||
5871 | ||||
5872 | static SDValue LowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG, | |||
5873 | const ARMSubtarget *Subtarget) { | |||
5874 | EVT VT = Op.getValueType(); | |||
5875 | EVT ToVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); | |||
5876 | EVT FromVT = Op.getOperand(0).getValueType(); | |||
5877 | ||||
5878 | if (VT == MVT::i32 && ToVT == MVT::i32 && FromVT == MVT::f32) | |||
5879 | return Op; | |||
5880 | if (VT == MVT::i32 && ToVT == MVT::i32 && FromVT == MVT::f64 && | |||
5881 | Subtarget->hasFP64()) | |||
5882 | return Op; | |||
5883 | if (VT == MVT::i32 && ToVT == MVT::i32 && FromVT == MVT::f16 && | |||
5884 | Subtarget->hasFullFP16()) | |||
5885 | return Op; | |||
5886 | if (VT == MVT::v4i32 && ToVT == MVT::i32 && FromVT == MVT::v4f32 && | |||
5887 | Subtarget->hasMVEFloatOps()) | |||
5888 | return Op; | |||
5889 | if (VT == MVT::v8i16 && ToVT == MVT::i16 && FromVT == MVT::v8f16 && | |||
5890 | Subtarget->hasMVEFloatOps()) | |||
5891 | return Op; | |||
5892 | ||||
5893 | if (FromVT != MVT::v4f32 && FromVT != MVT::v8f16) | |||
5894 | return SDValue(); | |||
5895 | ||||
5896 | SDLoc DL(Op); | |||
5897 | bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT_SAT; | |||
5898 | unsigned BW = ToVT.getScalarSizeInBits() - IsSigned; | |||
5899 | SDValue CVT = DAG.getNode(Op.getOpcode(), DL, VT, Op.getOperand(0), | |||
5900 | DAG.getValueType(VT.getScalarType())); | |||
5901 | SDValue Max = DAG.getNode(IsSigned ? ISD::SMIN : ISD::UMIN, DL, VT, CVT, | |||
5902 | DAG.getConstant((1 << BW) - 1, DL, VT)); | |||
5903 | if (IsSigned) | |||
5904 | Max = DAG.getNode(ISD::SMAX, DL, VT, Max, | |||
5905 | DAG.getConstant(-(1 << BW), DL, VT)); | |||
5906 | return Max; | |||
5907 | } | |||
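// Worked example (exposition only): saturating to signed i16 lanes uses
// BW = 16 - 1 = 15, so the converted value is clamped as
//
//   SMIN(CVT, (1 << 15) - 1)  ->  upper bound  32767
//   SMAX(...,  -(1 << 15))    ->  lower bound -32768
//
// i.e. the full i16 range; the unsigned case keeps BW = 16 and needs only
// the UMIN against (1 << 16) - 1 = 65535.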
5908 | ||||
5909 | static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) { | |||
5910 | EVT VT = Op.getValueType(); | |||
5911 | SDLoc dl(Op); | |||
5912 | ||||
5913 | if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i32) { | |||
5914 | if (VT.getVectorElementType() == MVT::f32) | |||
5915 | return Op; | |||
5916 | return DAG.UnrollVectorOp(Op.getNode()); | |||
5917 | } | |||
5918 | ||||
5919 | assert((Op.getOperand(0).getValueType() == MVT::v4i16 || | |||
5920 | Op.getOperand(0).getValueType() == MVT::v8i16) && | |||
5921 | "Invalid type for custom lowering!"); | |||
5922 | ||||
5923 | const bool HasFullFP16 = | |||
5924 | static_cast<const ARMSubtarget&>(DAG.getSubtarget()).hasFullFP16(); | |||
5925 | ||||
5926 | EVT DestVecType; | |||
5927 | if (VT == MVT::v4f32) | |||
5928 | DestVecType = MVT::v4i32; | |||
5929 | else if (VT == MVT::v4f16 && HasFullFP16) | |||
5930 | DestVecType = MVT::v4i16; | |||
5931 | else if (VT == MVT::v8f16 && HasFullFP16) | |||
5932 | DestVecType = MVT::v8i16; | |||
5933 | else | |||
5934 | return DAG.UnrollVectorOp(Op.getNode()); | |||
5935 | ||||
5936 | unsigned CastOpc; | |||
5937 | unsigned Opc; | |||
5938 | switch (Op.getOpcode()) { | |||
5939 | default: llvm_unreachable("Invalid opcode!"); | |||
5940 | case ISD::SINT_TO_FP: | |||
5941 | CastOpc = ISD::SIGN_EXTEND; | |||
5942 | Opc = ISD::SINT_TO_FP; | |||
5943 | break; | |||
5944 | case ISD::UINT_TO_FP: | |||
5945 | CastOpc = ISD::ZERO_EXTEND; | |||
5946 | Opc = ISD::UINT_TO_FP; | |||
5947 | break; | |||
5948 | } | |||
5949 | ||||
5950 | Op = DAG.getNode(CastOpc, dl, DestVecType, Op.getOperand(0)); | |||
5951 | return DAG.getNode(Opc, dl, VT, Op); | |||
5952 | } | |||
5953 | ||||
5954 | SDValue ARMTargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const { | |||
5955 | EVT VT = Op.getValueType(); | |||
5956 | if (VT.isVector()) | |||
5957 | return LowerVectorINT_TO_FP(Op, DAG); | |||
5958 | if (isUnsupportedFloatingType(VT)) { | |||
5959 | RTLIB::Libcall LC; | |||
5960 | if (Op.getOpcode() == ISD::SINT_TO_FP) | |||
5961 | LC = RTLIB::getSINTTOFP(Op.getOperand(0).getValueType(), | |||
5962 | Op.getValueType()); | |||
5963 | else | |||
5964 | LC = RTLIB::getUINTTOFP(Op.getOperand(0).getValueType(), | |||
5965 | Op.getValueType()); | |||
5966 | MakeLibCallOptions CallOptions; | |||
5967 | return makeLibCall(DAG, LC, Op.getValueType(), Op.getOperand(0), | |||
5968 | CallOptions, SDLoc(Op)).first; | |||
5969 | } | |||
5970 | ||||
5971 | return Op; | |||
5972 | } | |||
5973 | ||||
5974 | SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { | |||
5975 | // Implement fcopysign with a fabs and a conditional fneg. | |||
5976 | SDValue Tmp0 = Op.getOperand(0); | |||
5977 | SDValue Tmp1 = Op.getOperand(1); | |||
5978 | SDLoc dl(Op); | |||
5979 | EVT VT = Op.getValueType(); | |||
5980 | EVT SrcVT = Tmp1.getValueType(); | |||
5981 | bool InGPR = Tmp0.getOpcode() == ISD::BITCAST || | |||
5982 | Tmp0.getOpcode() == ARMISD::VMOVDRR; | |||
5983 | bool UseNEON = !InGPR && Subtarget->hasNEON(); | |||
5984 | ||||
5985 | if (UseNEON) { | |||
5986 | // Use VBSL to copy the sign bit. | |||
5987 | unsigned EncodedVal = ARM_AM::createVMOVModImm(0x6, 0x80); | |||
5988 | SDValue Mask = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v2i32, | |||
5989 | DAG.getTargetConstant(EncodedVal, dl, MVT::i32)); | |||
5990 | EVT OpVT = (VT == MVT::f32) ? MVT::v2i32 : MVT::v1i64; | |||
5991 | if (VT == MVT::f64) | |||
5992 | Mask = DAG.getNode(ARMISD::VSHLIMM, dl, OpVT, | |||
5993 | DAG.getNode(ISD::BITCAST, dl, OpVT, Mask), | |||
5994 | DAG.getConstant(32, dl, MVT::i32)); | |||
5995 | else /*if (VT == MVT::f32)*/ | |||
5996 | Tmp0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp0); | |||
5997 | if (SrcVT == MVT::f32) { | |||
5998 | Tmp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp1); | |||
5999 | if (VT == MVT::f64) | |||
6000 | Tmp1 = DAG.getNode(ARMISD::VSHLIMM, dl, OpVT, | |||
6001 | DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1), | |||
6002 | DAG.getConstant(32, dl, MVT::i32)); | |||
6003 | } else if (VT == MVT::f32) | |||
6004 | Tmp1 = DAG.getNode(ARMISD::VSHRuIMM, dl, MVT::v1i64, | |||
6005 | DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Tmp1), | |||
6006 | DAG.getConstant(32, dl, MVT::i32)); | |||
6007 | Tmp0 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp0); | |||
6008 | Tmp1 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1); | |||
6009 | ||||
6010 | SDValue AllOnes = DAG.getTargetConstant(ARM_AM::createVMOVModImm(0xe, 0xff), | |||
6011 | dl, MVT::i32); | |||
6012 | AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v8i8, AllOnes); | |||
6013 | SDValue MaskNot = DAG.getNode(ISD::XOR, dl, OpVT, Mask, | |||
6014 | DAG.getNode(ISD::BITCAST, dl, OpVT, AllOnes)); | |||
6015 | ||||
6016 | SDValue Res = DAG.getNode(ISD::OR, dl, OpVT, | |||
6017 | DAG.getNode(ISD::AND, dl, OpVT, Tmp1, Mask), | |||
6018 | DAG.getNode(ISD::AND, dl, OpVT, Tmp0, MaskNot)); | |||
6019 | if (VT == MVT::f32) { | |||
6020 | Res = DAG.getNode(ISD::BITCAST, dl, MVT::v2f32, Res); | |||
6021 | Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res, | |||
6022 | DAG.getConstant(0, dl, MVT::i32)); | |||
6023 | } else { | |||
6024 | Res = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Res); | |||
6025 | } | |||
6026 | ||||
6027 | return Res; | |||
6028 | } | |||
6029 | ||||
6030 | // Bitcast operand 1 to i32. | |||
6031 | if (SrcVT == MVT::f64) | |||
6032 | Tmp1 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32), | |||
6033 | Tmp1).getValue(1); | |||
6034 | Tmp1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp1); | |||
6035 | ||||
6036 | // Or in the signbit with integer operations. | |||
6037 | SDValue Mask1 = DAG.getConstant(0x80000000, dl, MVT::i32); | |||
6038 | SDValue Mask2 = DAG.getConstant(0x7fffffff, dl, MVT::i32); | |||
6039 | Tmp1 = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp1, Mask1); | |||
6040 | if (VT == MVT::f32) { | |||
6041 | Tmp0 = DAG.getNode(ISD::AND, dl, MVT::i32, | |||
6042 | DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp0), Mask2); | |||
6043 | return DAG.getNode(ISD::BITCAST, dl, MVT::f32, | |||
6044 | DAG.getNode(ISD::OR, dl, MVT::i32, Tmp0, Tmp1)); | |||
6045 | } | |||
6046 | ||||
6047 | // f64: Or the high part with signbit and then combine two parts. | |||
6048 | Tmp0 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32), | |||
6049 | Tmp0); | |||
6050 | SDValue Lo = Tmp0.getValue(0); | |||
6051 | SDValue Hi = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp0.getValue(1), Mask2); | |||
6052 | Hi = DAG.getNode(ISD::OR, dl, MVT::i32, Hi, Tmp1); | |||
6053 | return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi); | |||
6054 | } | |||
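// Illustrative sketch (exposition only): the integer fallback implements
// f32 copysign with the two masks above. As plain C (hypothetical helper,
// operating on the raw IEEE-754 bits):
//
//   uint32_t CopySignBits(uint32_t XBits, uint32_t YBits) {
//     return (XBits & 0x7fffffffu)     // magnitude of X
//          | (YBits & 0x80000000u);    // sign of Y
//   }
//
// e.g. copysign(1.0f, -2.0f): XBits = 0x3f800000, YBits = 0xc0000000,
// result = 0xbf800000 == bits of -1.0f. The f64 path applies the same
// masks to the high word only.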
6055 | ||||
6056 | SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{ | |||
6057 | MachineFunction &MF = DAG.getMachineFunction(); | |||
6058 | MachineFrameInfo &MFI = MF.getFrameInfo(); | |||
6059 | MFI.setReturnAddressIsTaken(true); | |||
6060 | ||||
6061 | if (verifyReturnAddressArgumentIsConstant(Op, DAG)) | |||
6062 | return SDValue(); | |||
6063 | ||||
6064 | EVT VT = Op.getValueType(); | |||
6065 | SDLoc dl(Op); | |||
6066 | unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); | |||
6067 | if (Depth) { | |||
6068 | SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); | |||
6069 | SDValue Offset = DAG.getConstant(4, dl, MVT::i32); | |||
6070 | return DAG.getLoad(VT, dl, DAG.getEntryNode(), | |||
6071 | DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset), | |||
6072 | MachinePointerInfo()); | |||
6073 | } | |||
6074 | ||||
6075 | // Return LR, which contains the return address. Mark it an implicit live-in. | |||
6076 | Register Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32)); | |||
6077 | return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT); | |||
6078 | } | |||
6079 | ||||
6080 | SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { | |||
6081 | const ARMBaseRegisterInfo &ARI = | |||
6082 | *static_cast<const ARMBaseRegisterInfo*>(RegInfo); | |||
6083 | MachineFunction &MF = DAG.getMachineFunction(); | |||
6084 | MachineFrameInfo &MFI = MF.getFrameInfo(); | |||
6085 | MFI.setFrameAddressIsTaken(true); | |||
6086 | ||||
6087 | EVT VT = Op.getValueType(); | |||
6088 | SDLoc dl(Op); // FIXME probably not meaningful | |||
6089 | unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); | |||
6090 | Register FrameReg = ARI.getFrameRegister(MF); | |||
6091 | SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); | |||
6092 | while (Depth--) | |||
6093 | FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, | |||
6094 | MachinePointerInfo()); | |||
6095 | return FrameAddr; | |||
6096 | } | |||
6097 | ||||
6098 | // FIXME? Maybe this could be a TableGen attribute on some registers and | |||
6099 | // this table could be generated automatically from RegInfo. | |||
6100 | Register ARMTargetLowering::getRegisterByName(const char* RegName, LLT VT, | |||
6101 | const MachineFunction &MF) const { | |||
6102 | Register Reg = StringSwitch<unsigned>(RegName) | |||
6103 | .Case("sp", ARM::SP) | |||
6104 | .Default(0); | |||
6105 | if (Reg) | |||
6106 | return Reg; | |||
6107 | report_fatal_error(Twine("Invalid register name \"" | |||
6108 | + StringRef(RegName) + "\".")); | |||
6109 | } | |||
6110 | ||||
6111 | // The result is a 64-bit value, so split it into two 32-bit values and | |||
6112 | // return them as a pair of values. | |||
6113 | static void ExpandREAD_REGISTER(SDNode *N, SmallVectorImpl<SDValue> &Results, | |||
6114 | SelectionDAG &DAG) { | |||
6115 | SDLoc DL(N); | |||
6116 | ||||
6117 | // This function is only supposed to be called for i64 type destination. | |||
6118 | assert(N->getValueType(0) == MVT::i64 | |||
6119 | && "ExpandREAD_REGISTER called for non-i64 type result."); | |||
6120 | ||||
6121 | SDValue Read = DAG.getNode(ISD::READ_REGISTER, DL, | |||
6122 | DAG.getVTList(MVT::i32, MVT::i32, MVT::Other), | |||
6123 | N->getOperand(0), | |||
6124 | N->getOperand(1)); | |||
6125 | ||||
6126 | Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Read.getValue(0), | |||
6127 | Read.getValue(1))); | |||
6128 | Results.push_back(Read.getOperand(0)); | |||
6129 | } | |||
6130 | ||||
6131 | /// \p BC is a bitcast that is about to be turned into a VMOVDRR. | |||
6132 | /// When \p DstVT, the destination type of \p BC, is on the vector | |||
6133 | /// register bank and the source of bitcast, \p Op, operates on the same bank, | |||
6134 | /// it might be possible to combine them, such that everything stays on the | |||
6135 | /// vector register bank. | |||
6136 | /// \return The node that would replace \p BC, if the combine | |||
6137 | /// is possible. | |||
6138 | static SDValue CombineVMOVDRRCandidateWithVecOp(const SDNode *BC, | |||
6139 | SelectionDAG &DAG) { | |||
6140 | SDValue Op = BC->getOperand(0); | |||
6141 | EVT DstVT = BC->getValueType(0); | |||
6142 | ||||
6143 | // The only vector instruction that can produce a scalar (remember, | |||
6144 | // since the bitcast was about to be turned into VMOVDRR, the source | |||
6145 | // type is i64) from a vector is EXTRACT_VECTOR_ELT. | |||
6146 | // Moreover, we can do this combine only if there is one use. | |||
6147 | // Finally, if the destination type is not a vector, there is not | |||
6148 | // much point in forcing everything onto the vector bank. | |||
6149 | if (!DstVT.isVector() || Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT || | |||
6150 | !Op.hasOneUse()) | |||
6151 | return SDValue(); | |||
6152 | ||||
6153 | // If the index is not constant, we will introduce an additional | |||
6154 | // multiply that will stick. | |||
6155 | // Give up in that case. | |||
6156 | ConstantSDNode *Index = dyn_cast<ConstantSDNode>(Op.getOperand(1)); | |||
6157 | if (!Index) | |||
6158 | return SDValue(); | |||
6159 | unsigned DstNumElt = DstVT.getVectorNumElements(); | |||
6160 | ||||
6161 | // Compute the new index. | |||
6162 | const APInt &APIntIndex = Index->getAPIntValue(); | |||
6163 | APInt NewIndex(APIntIndex.getBitWidth(), DstNumElt); | |||
6164 | NewIndex *= APIntIndex; | |||
6165 | // Check if the new constant index fits into i32. | |||
6166 | if (NewIndex.getBitWidth() > 32) | |||
6167 | return SDValue(); | |||
6168 | ||||
6169 | // vMTy bitcast(i64 extractelt vNi64 src, i32 index) -> | |||
6170 | // vMTy extractsubvector vNxMTy (bitcast vNi64 src), i32 index*M) | |||
6171 | SDLoc dl(Op); | |||
6172 | SDValue ExtractSrc = Op.getOperand(0); | |||
6173 | EVT VecVT = EVT::getVectorVT( | |||
6174 | *DAG.getContext(), DstVT.getScalarType(), | |||
6175 | ExtractSrc.getValueType().getVectorNumElements() * DstNumElt); | |||
6176 | SDValue BitCast = DAG.getNode(ISD::BITCAST, dl, VecVT, ExtractSrc); | |||
6177 | return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DstVT, BitCast, | |||
6178 | DAG.getConstant(NewIndex.getZExtValue(), dl, MVT::i32)); | |||
6179 | } | |||
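// Worked example (exposition only): for
//   v2f32 (bitcast (i64 extractelt v2i64 %src, 1))
// DstNumElt = 2, so NewIndex = 1 * 2 = 2 and the combine produces
//   v2f32 (extract_subvector (v4f32 (bitcast v2i64 %src)), 2)
// keeping the value on the vector register bank instead of bouncing it
// through a GPR pair.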
6180 | ||||
6181 | /// ExpandBITCAST - If the target supports VFP, this function is called to | |||
6182 | /// expand a bit convert where either the source or destination type is i64 to | |||
6183 | /// use a VMOVDRR or VMOVRRD node. This should not be done when the non-i64 | |||
6184 | /// operand type is illegal (e.g., v2f32 for a target that doesn't support | |||
6185 | /// vectors), since the legalizer won't know what to do with that. | |||
6186 | SDValue ARMTargetLowering::ExpandBITCAST(SDNode *N, SelectionDAG &DAG, | |||
6187 | const ARMSubtarget *Subtarget) const { | |||
6188 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | |||
6189 | SDLoc dl(N); | |||
6190 | SDValue Op = N->getOperand(0); | |||
6191 | ||||
6192 | // This function is only supposed to be called for i16 and i64 types, either | |||
6193 | // as the source or destination of the bit convert. | |||
6194 | EVT SrcVT = Op.getValueType(); | |||
6195 | EVT DstVT = N->getValueType(0); | |||
6196 | ||||
6197 | if ((SrcVT == MVT::i16 || SrcVT == MVT::i32) && | |||
6198 | (DstVT == MVT::f16 || DstVT == MVT::bf16)) | |||
6199 | return MoveToHPR(SDLoc(N), DAG, MVT::i32, DstVT.getSimpleVT(), | |||
6200 | DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), MVT::i32, Op)); | |||
6201 | ||||
6202 | if ((DstVT == MVT::i16 || DstVT == MVT::i32) && | |||
6203 | (SrcVT == MVT::f16 || SrcVT == MVT::bf16)) | |||
6204 | return DAG.getNode( | |||
6205 | ISD::TRUNCATE, SDLoc(N), DstVT, | |||
6206 | MoveFromHPR(SDLoc(N), DAG, MVT::i32, SrcVT.getSimpleVT(), Op)); | |||
6207 | ||||
6208 | if (!(SrcVT == MVT::i64 || DstVT == MVT::i64)) | |||
6209 | return SDValue(); | |||
6210 | ||||
6211 | // Turn i64->f64 into VMOVDRR. | |||
6212 | if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) { | |||
6213 | // Do not force values to GPRs (this is what VMOVDRR does for the inputs) | |||
6214 | // if we can combine the bitcast with its source. | |||
6215 | if (SDValue Val = CombineVMOVDRRCandidateWithVecOp(N, DAG)) | |||
6216 | return Val; | |||
6217 | ||||
6218 | SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, | |||
6219 | DAG.getConstant(0, dl, MVT::i32)); | |||
6220 | SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, | |||
6221 | DAG.getConstant(1, dl, MVT::i32)); | |||
6222 | return DAG.getNode(ISD::BITCAST, dl, DstVT, | |||
6223 | DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi)); | |||
6224 | } | |||
6225 | ||||
6226 | // Turn f64->i64 into VMOVRRD. | |||
6227 | if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) { | |||
6228 | SDValue Cvt; | |||
6229 | if (DAG.getDataLayout().isBigEndian() && SrcVT.isVector() && | |||
6230 | SrcVT.getVectorNumElements() > 1) | |||
6231 | Cvt = DAG.getNode(ARMISD::VMOVRRD, dl, | |||
6232 | DAG.getVTList(MVT::i32, MVT::i32), | |||
6233 | DAG.getNode(ARMISD::VREV64, dl, SrcVT, Op)); | |||
6234 | else | |||
6235 | Cvt = DAG.getNode(ARMISD::VMOVRRD, dl, | |||
6236 | DAG.getVTList(MVT::i32, MVT::i32), Op); | |||
6237 | // Merge the pieces into a single i64 value. | |||
6238 | return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1)); | |||
6239 | } | |||
6240 | ||||
6241 | return SDValue(); | |||
6242 | } | |||
6243 | ||||
6244 | /// getZeroVector - Returns a vector of specified type with all zero elements. | |||
6245 | /// Zero vectors are used to represent vector negation and in those cases | |||
6246 | /// will be implemented with the NEON VNEG instruction. However, VNEG does | |||
6247 | /// not support i64 elements, so sometimes the zero vectors will need to be | |||
6248 | /// explicitly constructed. Regardless, use a canonical VMOV to create the | |||
6249 | /// zero vector. | |||
6250 | static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) { | |||
6251 | assert(VT.isVector() && "Expected a vector type"); | |||
6252 | // The canonical modified immediate encoding of a zero vector is....0! | |||
6253 | SDValue EncodedVal = DAG.getTargetConstant(0, dl, MVT::i32); | |||
6254 | EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32; | |||
6255 | SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal); | |||
6256 | return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); | |||
6257 | } | |||
6258 | ||||
6259 | /// LowerShiftRightParts - Lower SRA_PARTS, which returns two | |||
6260 | /// i32 values and take a 2 x i32 value to shift plus a shift amount. | |||
6261 | SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op, | |||
6262 | SelectionDAG &DAG) const { | |||
6263 | assert(Op.getNumOperands() == 3 && "Not a double-shift!"); | |||
6264 | EVT VT = Op.getValueType(); | |||
6265 | unsigned VTBits = VT.getSizeInBits(); | |||
6266 | SDLoc dl(Op); | |||
6267 | SDValue ShOpLo = Op.getOperand(0); | |||
6268 | SDValue ShOpHi = Op.getOperand(1); | |||
6269 | SDValue ShAmt = Op.getOperand(2); | |||
6270 | SDValue ARMcc; | |||
6271 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); | |||
6272 | unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL; | |||
6273 | ||||
6274 | assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS); | |||
6275 | ||||
6276 | SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, | |||
6277 | DAG.getConstant(VTBits, dl, MVT::i32), ShAmt); | |||
6278 | SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt); | |||
6279 | SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt, | |||
6280 | DAG.getConstant(VTBits, dl, MVT::i32)); | |||
6281 | SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt); | |||
6282 | SDValue LoSmallShift = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); | |||
6283 | SDValue LoBigShift = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt); | |||
6284 | SDValue CmpLo = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32), | |||
6285 | ISD::SETGE, ARMcc, DAG, dl); | |||
6286 | SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, LoSmallShift, LoBigShift, | |||
6287 | ARMcc, CCR, CmpLo); | |||
6288 | ||||
6289 | SDValue HiSmallShift = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt); | |||
6290 | SDValue HiBigShift = Opc == ISD::SRA | |||
6291 | ? DAG.getNode(Opc, dl, VT, ShOpHi, | |||
6292 | DAG.getConstant(VTBits - 1, dl, VT)) | |||
6293 | : DAG.getConstant(0, dl, VT); | |||
6294 | SDValue CmpHi = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32), | |||
6295 | ISD::SETGE, ARMcc, DAG, dl); | |||
6296 | SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, HiSmallShift, HiBigShift, | |||
6297 | ARMcc, CCR, CmpHi); | |||
6298 | ||||
6299 | SDValue Ops[2] = { Lo, Hi }; | |||
6300 | return DAG.getMergeValues(Ops, dl); | |||
6301 | } | |||
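// Illustrative sketch (exposition only): the scalar recipe the nodes above
// encode for a 64-bit arithmetic shift right by S, with the value split
// into Lo/Hi i32 halves (ignoring the S == 0 edge, which the ISD nodes
// define):
//
//   if (S < 32) {                            // ExtraShAmt = S - 32 < 0
//     Lo = (Lo >> S) | (Hi << (32 - S));     // funnel bits down from Hi
//     Hi = (int32_t)Hi >> S;
//   } else {                                 // ExtraShAmt >= 0
//     Lo = (int32_t)Hi >> (S - 32);          // Hi supplies all of Lo
//     Hi = (int32_t)Hi >> 31;                // sign fill (0 for SRL_PARTS)
//   }
//
// the two CMOVs select the matching arm via the ExtraShAmt >= 0 compare.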
6302 | ||||
6303 | /// LowerShiftLeftParts - Lower SHL_PARTS, which returns two | |||
6304 | /// i32 values and take a 2 x i32 value to shift plus a shift amount. | |||
6305 | SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op, | |||
6306 | SelectionDAG &DAG) const { | |||
6307 | assert(Op.getNumOperands() == 3 && "Not a double-shift!"); | |||
6308 | EVT VT = Op.getValueType(); | |||
6309 | unsigned VTBits = VT.getSizeInBits(); | |||
6310 | SDLoc dl(Op); | |||
6311 | SDValue ShOpLo = Op.getOperand(0); | |||
6312 | SDValue ShOpHi = Op.getOperand(1); | |||
6313 | SDValue ShAmt = Op.getOperand(2); | |||
6314 | SDValue ARMcc; | |||
6315 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); | |||
6316 | ||||
6317 | assert(Op.getOpcode() == ISD::SHL_PARTS); | |||
6318 | SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, | |||
6319 | DAG.getConstant(VTBits, dl, MVT::i32), ShAmt); | |||
6320 | SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt); | |||
6321 | SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt); | |||
6322 | SDValue HiSmallShift = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); | |||
6323 | ||||
6324 | SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt, | |||
6325 | DAG.getConstant(VTBits, dl, MVT::i32)); | |||
6326 | SDValue HiBigShift = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt); | |||
6327 | SDValue CmpHi = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32), | |||
6328 | ISD::SETGE, ARMcc, DAG, dl); | |||
6329 | SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, HiSmallShift, HiBigShift, | |||
6330 | ARMcc, CCR, CmpHi); | |||
6331 | ||||
6332 | SDValue CmpLo = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32), | |||
6333 | ISD::SETGE, ARMcc, DAG, dl); | |||
6334 | SDValue LoSmallShift = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt); | |||
6335 | SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, LoSmallShift, | |||
6336 | DAG.getConstant(0, dl, VT), ARMcc, CCR, CmpLo); | |||
6337 | ||||
6338 | SDValue Ops[2] = { Lo, Hi }; | |||
6339 | return DAG.getMergeValues(Ops, dl); | |||
6340 | } | |||
6341 | ||||
6342 | SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op, | |||
6343 | SelectionDAG &DAG) const { | |||
6344 | // The rounding mode is in bits 23:22 of the FPSCR. | |||
6345 | // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0. | |||
6346 | // The formula we use to implement this is (((FPSCR + (1 << 22)) >> 22) & 3), | |||
6347 | // written so that the shift and the AND get folded into a bitfield extract. | |||
6348 | SDLoc dl(Op); | |||
6349 | SDValue Chain = Op.getOperand(0); | |||
6350 | SDValue Ops[] = {Chain, | |||
6351 | DAG.getConstant(Intrinsic::arm_get_fpscr, dl, MVT::i32)}; | |||
6352 | ||||
6353 | SDValue FPSCR = | |||
6354 | DAG.getNode(ISD::INTRINSIC_W_CHAIN, dl, {MVT::i32, MVT::Other}, Ops); | |||
6355 | Chain = FPSCR.getValue(1); | |||
6356 | SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR, | |||
6357 | DAG.getConstant(1U << 22, dl, MVT::i32)); | |||
6358 | SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds, | |||
6359 | DAG.getConstant(22, dl, MVT::i32)); | |||
6360 | SDValue And = DAG.getNode(ISD::AND, dl, MVT::i32, RMODE, | |||
6361 | DAG.getConstant(3, dl, MVT::i32)); | |||
6362 | return DAG.getMergeValues({And, Chain}, dl); | |||
6363 | } | |||
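// Worked example (exposition only): the add-then-extract trick as plain C
// (hypothetical helper, assuming <cstdint>):
//
//   unsigned FltRounds(uint32_t FPSCR) {
//     return ((FPSCR + (1u << 22)) >> 22) & 3;  // maps 0->1, 1->2, 2->3, 3->0
//   }
//
// e.g. FPSCR rounding bits RM = 3 (round toward zero) give (3 + 1) & 3 == 0,
// the FLT_ROUNDS encoding for "toward zero".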
6364 | ||||
6365 | SDValue ARMTargetLowering::LowerSET_ROUNDING(SDValue Op, | |||
6366 | SelectionDAG &DAG) const { | |||
6367 | SDLoc DL(Op); | |||
6368 | SDValue Chain = Op->getOperand(0); | |||
6369 | SDValue RMValue = Op->getOperand(1); | |||
6370 | ||||
6371 | // The rounding mode is in bits 23:22 of the FPSCR. | |||
6372 | // The llvm.set.rounding argument value to ARM rounding mode value mapping | |||
6373 | // is 0->3, 1->0, 2->1, 3->2. The formula we use to implement this is | |||
6374 | // (((arg - 1) & 3) << 22). | |||
6375 | // | |||
6376 | // It is expected that the argument of llvm.set.rounding is within the | |||
6377 | // range [0, 3], so NearestTiesToAway (4) is not handled here. It is the | |||
6378 | // responsibility of the code that generates llvm.set.rounding to ensure | |||
6379 | // this condition. | |||
6380 | ||||
6381 | // Calculate new value of FPSCR[23:22]. | |||
6382 | RMValue = DAG.getNode(ISD::SUB, DL, MVT::i32, RMValue, | |||
6383 | DAG.getConstant(1, DL, MVT::i32)); | |||
6384 | RMValue = DAG.getNode(ISD::AND, DL, MVT::i32, RMValue, | |||
6385 | DAG.getConstant(0x3, DL, MVT::i32)); | |||
6386 | RMValue = DAG.getNode(ISD::SHL, DL, MVT::i32, RMValue, | |||
6387 | DAG.getConstant(ARM::RoundingBitsPos, DL, MVT::i32)); | |||
6388 | ||||
6389 | // Get current value of FPSCR. | |||
6390 | SDValue Ops[] = {Chain, | |||
6391 | DAG.getConstant(Intrinsic::arm_get_fpscr, DL, MVT::i32)}; | |||
6392 | SDValue FPSCR = | |||
6393 | DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, {MVT::i32, MVT::Other}, Ops); | |||
6394 | Chain = FPSCR.getValue(1); | |||
6395 | FPSCR = FPSCR.getValue(0); | |||
6396 | ||||
6397 | // Put new rounding mode into FPSCR[23:22]. | |||
6398 | const unsigned RMMask = ~(ARM::Rounding::rmMask << ARM::RoundingBitsPos); | |||
6399 | FPSCR = DAG.getNode(ISD::AND, DL, MVT::i32, FPSCR, | |||
6400 | DAG.getConstant(RMMask, DL, MVT::i32)); | |||
6401 | FPSCR = DAG.getNode(ISD::OR, DL, MVT::i32, FPSCR, RMValue); | |||
6402 | SDValue Ops2[] = { | |||
6403 | Chain, DAG.getConstant(Intrinsic::arm_set_fpscr, DL, MVT::i32), FPSCR}; | |||
6404 | return DAG.getNode(ISD::INTRINSIC_VOID, DL, MVT::Other, Ops2); | |||
6405 | } | |||
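// Worked example (exposition only): the inverse of the FLT_ROUNDS mapping,
//
//   NewBits = ((Arg - 1) & 3) << 22;           // maps 0->3, 1->0, 2->1, 3->2
//   FPSCR   = (FPSCR & ~(3u << 22)) | NewBits; // replace bits 23:22
//
// e.g. Arg = 0 (toward zero) gives (0 - 1) & 3 == 3, the ARM RM encoding
// for "toward zero".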
6406 | ||||
6407 | static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG, | |||
6408 | const ARMSubtarget *ST) { | |||
6409 | SDLoc dl(N); | |||
6410 | EVT VT = N->getValueType(0); | |||
6411 | if (VT.isVector() && ST->hasNEON()) { | |||
6412 | ||||
6413 | // Compute the least significant set bit: LSB = X & -X | |||
6414 | SDValue X = N->getOperand(0); | |||
6415 | SDValue NX = DAG.getNode(ISD::SUB, dl, VT, getZeroVector(VT, DAG, dl), X); | |||
6416 | SDValue LSB = DAG.getNode(ISD::AND, dl, VT, X, NX); | |||
6417 | ||||
6418 | EVT ElemTy = VT.getVectorElementType(); | |||
6419 | ||||
6420 | if (ElemTy == MVT::i8) { | |||
6421 | // Compute with: cttz(x) = ctpop(lsb - 1) | |||
6422 | SDValue One = DAG.getNode(ARMISD::VMOVIMM, dl, VT, | |||
6423 | DAG.getTargetConstant(1, dl, ElemTy)); | |||
6424 | SDValue Bits = DAG.getNode(ISD::SUB, dl, VT, LSB, One); | |||
6425 | return DAG.getNode(ISD::CTPOP, dl, VT, Bits); | |||
6426 | } | |||
6427 | ||||
6428 | if ((ElemTy == MVT::i16 || ElemTy == MVT::i32) && | |||
6429 | (N->getOpcode() == ISD::CTTZ_ZERO_UNDEF)) { | |||
6430 | // Compute with: cttz(x) = (width - 1) - ctlz(lsb), if x != 0 | |||
6431 | unsigned NumBits = ElemTy.getSizeInBits(); | |||
6432 | SDValue WidthMinus1 = | |||
6433 | DAG.getNode(ARMISD::VMOVIMM, dl, VT, | |||
6434 | DAG.getTargetConstant(NumBits - 1, dl, ElemTy)); | |||
6435 | SDValue CTLZ = DAG.getNode(ISD::CTLZ, dl, VT, LSB); | |||
6436 | return DAG.getNode(ISD::SUB, dl, VT, WidthMinus1, CTLZ); | |||
6437 | } | |||
6438 | ||||
6439 | // Compute with: cttz(x) = ctpop(lsb - 1) | |||
6440 | ||||
6441 | // Compute LSB - 1. | |||
6442 | SDValue Bits; | |||
6443 | if (ElemTy == MVT::i64) { | |||
6444 | // Load constant 0xffff'ffff'ffff'ffff to register. | |||
6445 | SDValue FF = DAG.getNode(ARMISD::VMOVIMM, dl, VT, | |||
6446 | DAG.getTargetConstant(0x1eff, dl, MVT::i32)); | |||
6447 | Bits = DAG.getNode(ISD::ADD, dl, VT, LSB, FF); | |||
6448 | } else { | |||
6449 | SDValue One = DAG.getNode(ARMISD::VMOVIMM, dl, VT, | |||
6450 | DAG.getTargetConstant(1, dl, ElemTy)); | |||
6451 | Bits = DAG.getNode(ISD::SUB, dl, VT, LSB, One); | |||
6452 | } | |||
6453 | return DAG.getNode(ISD::CTPOP, dl, VT, Bits); | |||
6454 | } | |||
6455 | ||||
6456 | if (!ST->hasV6T2Ops()) | |||
6457 | return SDValue(); | |||
6458 | ||||
6459 | SDValue rbit = DAG.getNode(ISD::BITREVERSE, dl, VT, N->getOperand(0)); | |||
6460 | return DAG.getNode(ISD::CTLZ, dl, VT, rbit); | |||
6461 | } | |||
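// Worked example (exposition only): the identities used above, for X != 0:
//
//   LSB = X & -X;                        // isolate the lowest set bit
//   cttz(X) == popcount(LSB - 1)         // X = 0b01100: LSB = 0b00100,
//                                        //   LSB - 1 = 0b00011, popcount = 2
//   cttz(X) == (Width - 1) - clz(LSB)    // same X on i32: clz(0b00100) = 29,
//                                        //   31 - 29 = 2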
6462 | ||||
6463 | static SDValue LowerCTPOP(SDNode *N, SelectionDAG &DAG, | |||
6464 | const ARMSubtarget *ST) { | |||
6465 | EVT VT = N->getValueType(0); | |||
6466 | SDLoc DL(N); | |||
6467 | ||||
6468 | assert(ST->hasNEON() && "Custom ctpop lowering requires NEON."); | |||
6469 | assert((VT == MVT::v1i64 || VT == MVT::v2i64 || VT == MVT::v2i32 || | |||
6470 | VT == MVT::v4i32 || VT == MVT::v4i16 || VT == MVT::v8i16) && | |||
6471 | "Unexpected type for custom ctpop lowering"); | |||
6472 | ||||
6473 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | |||
6474 | EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8; | |||
6475 | SDValue Res = DAG.getBitcast(VT8Bit, N->getOperand(0)); | |||
6476 | Res = DAG.getNode(ISD::CTPOP, DL, VT8Bit, Res); | |||
6477 | ||||
6478 | // Widen v8i8/v16i8 CTPOP result to VT by repeatedly widening pairwise adds. | |||
6479 | unsigned EltSize = 8; | |||
6480 | unsigned NumElts = VT.is64BitVector() ? 8 : 16; | |||
6481 | while (EltSize != VT.getScalarSizeInBits()) { | |||
6482 | SmallVector<SDValue, 8> Ops; | |||
6483 | Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddlu, DL, | |||
6484 | TLI.getPointerTy(DAG.getDataLayout()))); | |||
6485 | Ops.push_back(Res); | |||
6486 | ||||
6487 | EltSize *= 2; | |||
6488 | NumElts /= 2; | |||
6489 | MVT WidenVT = MVT::getVectorVT(MVT::getIntegerVT(EltSize), NumElts); | |||
6490 | Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, WidenVT, Ops); | |||
6491 | } | |||
6492 | ||||
6493 | return Res; | |||
6494 | } | |||
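// Worked example (exposition only): widening a v16i8 popcount to v4i32
// takes two rounds of unsigned pairwise add-long (vpaddl.u8 then
// vpaddl.u16):
//
//   v16i8 counts : c0    c1    c2    c3   ...
//   v8i16 round 1: c0+c1       c2+c3      ...
//   v4i32 round 2: c0+c1+c2+c3            ...
//
// each round doubles the element size and halves the element count until
// the scalar size of VT is reached (a third round handles v2i64).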
6495 | ||||
6496 | /// getVShiftImm - Check if this is a valid build_vector for the immediate | |||
6497 | /// operand of a vector shift operation, where all the elements of the | |||
6498 | /// build_vector must have the same constant integer value. | |||
6499 | static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) { | |||
6500 | // Ignore bit_converts. | |||
6501 | while (Op.getOpcode() == ISD::BITCAST) | |||
6502 | Op = Op.getOperand(0); | |||
6503 | BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); | |||
6504 | APInt SplatBits, SplatUndef; | |||
6505 | unsigned SplatBitSize; | |||
6506 | bool HasAnyUndefs; | |||
6507 | if (!BVN || | |||
6508 | !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs, | |||
6509 | ElementBits) || | |||
6510 | SplatBitSize > ElementBits) | |||
6511 | return false; | |||
6512 | Cnt = SplatBits.getSExtValue(); | |||
6513 | return true; | |||
6514 | } | |||
6515 | ||||
6516 | /// isVShiftLImm - Check if this is a valid build_vector for the immediate | |||
6517 | /// operand of a vector shift left operation. That value must be in the range: | |||
6518 | /// 0 <= Value < ElementBits for a left shift; or | |||
6519 | /// 0 <= Value <= ElementBits for a long left shift. | |||
6520 | static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) { | |||
6521 | assert(VT.isVector() && "vector shift count is not a vector type"); | |||
6522 | int64_t ElementBits = VT.getScalarSizeInBits(); | |||
6523 | if (!getVShiftImm(Op, ElementBits, Cnt)) | |||
6524 | return false; | |||
6525 | return (Cnt >= 0 && (isLong ? Cnt - 1 : Cnt) < ElementBits); | |||
6526 | } | |||
6527 | ||||
6528 | /// isVShiftRImm - Check if this is a valid build_vector for the immediate | |||
6529 | /// operand of a vector shift right operation. For a shift opcode, the value | |||
6530 | /// is positive, but for an intrinsic the count must be negative. The | |||
6531 | /// absolute value must be in the range: | |||
6532 | /// 1 <= |Value| <= ElementBits for a right shift; or | |||
6533 | /// 1 <= |Value| <= ElementBits/2 for a narrow right shift. | |||
6534 | static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic, | |||
6535 | int64_t &Cnt) { | |||
6536 | assert(VT.isVector() && "vector shift count is not a vector type"); | |||
6537 | int64_t ElementBits = VT.getScalarSizeInBits(); | |||
6538 | if (!getVShiftImm(Op, ElementBits, Cnt)) | |||
6539 | return false; | |||
6540 | if (!isIntrinsic) | |||
6541 | return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits / 2 : ElementBits)); | |||
6542 | if (Cnt >= -(isNarrow ? ElementBits / 2 : ElementBits) && Cnt <= -1) { | |||
6543 | Cnt = -Cnt; | |||
6544 | return true; | |||
6545 | } | |||
6546 | return false; | |||
6547 | } | |||
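// Worked example (illustrative): for v8i16 (ElementBits == 16) a splat of 16
// is a valid right-shift immediate but not a valid plain left-shift one; the
// intrinsic form encodes the same shift as a splat of -16, which isVShiftRImm
// flips back to Cnt == 16 before returning.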
6548 | ||||
6549 | static SDValue LowerShift(SDNode *N, SelectionDAG &DAG, | |||
6550 | const ARMSubtarget *ST) { | |||
6551 | EVT VT = N->getValueType(0); | |||
6552 | SDLoc dl(N); | |||
6553 | int64_t Cnt; | |||
6554 | ||||
6555 | if (!VT.isVector()) | |||
6556 | return SDValue(); | |||
6557 | ||||
6558 | // We essentially have two forms here. Shift by an immediate and shift by a | |||
6559 | // vector register (there is also shift by a GPR, but that is handled | |||
6560 | // with a tablegen pattern). We cannot easily match shift by an immediate in | |||
6561 | // tablegen so we do that here and generate a VSHLIMM/VSHRsIMM/VSHRuIMM. | |||
6562 | // For shifting by a vector, we don't have VSHR, only VSHL (which can be | |||
6563 | // signed or unsigned, and a negative shift indicates a shift right). | |||
6564 | if (N->getOpcode() == ISD::SHL) { | |||
6565 | if (isVShiftLImm(N->getOperand(1), VT, false, Cnt)) | |||
6566 | return DAG.getNode(ARMISD::VSHLIMM, dl, VT, N->getOperand(0), | |||
6567 | DAG.getConstant(Cnt, dl, MVT::i32)); | |||
6568 | return DAG.getNode(ARMISD::VSHLu, dl, VT, N->getOperand(0), | |||
6569 | N->getOperand(1)); | |||
6570 | } | |||
6571 | ||||
6572 | assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) && | |||
6573 | "unexpected vector shift opcode"); | |||
6574 | ||||
6575 | if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) { | |||
6576 | unsigned VShiftOpc = | |||
6577 | (N->getOpcode() == ISD::SRA ? ARMISD::VSHRsIMM : ARMISD::VSHRuIMM); | |||
6578 | return DAG.getNode(VShiftOpc, dl, VT, N->getOperand(0), | |||
6579 | DAG.getConstant(Cnt, dl, MVT::i32)); | |||
6580 | } | |||
6581 | ||||
6582 | // We don't have operations for the remaining right shifts, so use a shift | |||
6583 | // left by a negated amount instead. | |||
6584 | EVT ShiftVT = N->getOperand(1).getValueType(); | |||
6585 | SDValue NegatedCount = DAG.getNode( | |||
6586 | ISD::SUB, dl, ShiftVT, getZeroVector(ShiftVT, DAG, dl), N->getOperand(1)); | |||
6587 | unsigned VShiftOpc = | |||
6588 | (N->getOpcode() == ISD::SRA ? ARMISD::VSHLs : ARMISD::VSHLu); | |||
6589 | return DAG.getNode(VShiftOpc, dl, VT, N->getOperand(0), NegatedCount); | |||
6590 | } | |||
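// Sketch of the register-shift path (illustrative): a v4i32 SRL by a vector
// %amt becomes
//
//   t1: v4i32 = sub zerovec, %amt
//   t2: v4i32 = ARMISD::VSHLu %src, t1
//
// relying on the NEON/MVE semantics that a VSHL with negative per-lane counts
// performs a right shift.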
6591 | ||||
6592 | static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG, | |||
6593 | const ARMSubtarget *ST) { | |||
6594 | EVT VT = N->getValueType(0); | |||
6595 | SDLoc dl(N); | |||
6596 | ||||
6597 | // We can get here for a node like i32 = ISD::SHL i32, i64 | |||
6598 | if (VT != MVT::i64) | |||
6599 | return SDValue(); | |||
6600 | ||||
6601 | assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA || | |||
6602 | N->getOpcode() == ISD::SHL) && | |||
6603 | "Unknown shift to lower!"); | |||
6604 | ||||
6605 | unsigned ShOpc = N->getOpcode(); | |||
6606 | if (ST->hasMVEIntegerOps()) { | |||
6607 | SDValue ShAmt = N->getOperand(1); | |||
6608 | unsigned ShPartsOpc = ARMISD::LSLL; | |||
6609 | ConstantSDNode *Con = dyn_cast<ConstantSDNode>(ShAmt); | |||
6610 | ||||
6611 | // If the shift amount's type is wider than 64 bits, or it is a constant equal | |||
6612 | // to zero or at least 32, fall back to the default lowering. | |||
6613 | if (ShAmt->getValueType(0).getSizeInBits() > 64 || | |||
6614 | (Con && (Con->getZExtValue() == 0 || Con->getZExtValue() >= 32))) | |||
6615 | return SDValue(); | |||
6616 | ||||
6617 | // Extract the lower 32 bits of the shift amount if it's not an i32 | |||
6618 | if (ShAmt->getValueType(0) != MVT::i32) | |||
6619 | ShAmt = DAG.getZExtOrTrunc(ShAmt, dl, MVT::i32); | |||
6620 | ||||
6621 | if (ShOpc == ISD::SRL) { | |||
6622 | if (!Con) | |||
6623 | // There is no t2LSRLr instruction, so if the shift amount is in a register, | |||
6624 | // negate it and perform an lsll to emulate the right shift. | |||
6625 | ShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, | |||
6626 | DAG.getConstant(0, dl, MVT::i32), ShAmt); | |||
6627 | else | |||
6628 | // Else generate an lsrl on the immediate shift amount | |||
6629 | ShPartsOpc = ARMISD::LSRL; | |||
6630 | } else if (ShOpc == ISD::SRA) | |||
6631 | ShPartsOpc = ARMISD::ASRL; | |||
6632 | ||||
6633 | // Lower 32 bits of the destination/source | |||
6634 | SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), | |||
6635 | DAG.getConstant(0, dl, MVT::i32)); | |||
6636 | // Upper 32 bits of the destination/source | |||
6637 | SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), | |||
6638 | DAG.getConstant(1, dl, MVT::i32)); | |||
6639 | ||||
6640 | // Generate the shift operation as computed above | |||
6641 | Lo = DAG.getNode(ShPartsOpc, dl, DAG.getVTList(MVT::i32, MVT::i32), Lo, Hi, | |||
6642 | ShAmt); | |||
6643 | // The upper 32 bits come from the second return value of the shift node | |||
6644 | Hi = SDValue(Lo.getNode(), 1); | |||
6645 | return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); | |||
6646 | } | |||
6647 | ||||
6648 | // We only lower SRA, SRL of 1 here, all others use generic lowering. | |||
6649 | if (!isOneConstant(N->getOperand(1)) || N->getOpcode() == ISD::SHL) | |||
6650 | return SDValue(); | |||
6651 | ||||
6652 | // If we are in thumb mode, we don't have RRX. | |||
6653 | if (ST->isThumb1Only()) | |||
6654 | return SDValue(); | |||
6655 | ||||
6656 | // Okay, we have a 64-bit SRA or SRL of 1. Lower this to an RRX expr. | |||
6657 | SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), | |||
6658 | DAG.getConstant(0, dl, MVT::i32)); | |||
6659 | SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), | |||
6660 | DAG.getConstant(1, dl, MVT::i32)); | |||
6661 | ||||
6662 | // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and | |||
6663 | // captures the result into a carry flag. | |||
6664 | unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG; | |||
6665 | Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), Hi); | |||
6666 | ||||
6667 | // The low part is an ARMISD::RRX operand, which shifts the carry in. | |||
6668 | Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1)); | |||
6669 | ||||
6670 | // Merge the pieces into a single i64 value. | |||
6671 | return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); | |||
6672 | } | |||
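// Worked example (illustrative, non-MVE path): an i64 lshr by 1 becomes
//
//   Hi' = SRL_FLAG Hi      // Hi >> 1; the shifted-out bit lands in the carry
//   Lo' = RRX Lo           // Lo >> 1 with the carry rotated into bit 31
//   res = BUILD_PAIR Lo', Hi'
//
// so the single bit shifted out of the high word flows into the low word.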
6673 | ||||
6674 | static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG, | |||
6675 | const ARMSubtarget *ST) { | |||
6676 | bool Invert = false; | |||
6677 | bool Swap = false; | |||
6678 | unsigned Opc = ARMCC::AL; | |||
6679 | ||||
6680 | SDValue Op0 = Op.getOperand(0); | |||
6681 | SDValue Op1 = Op.getOperand(1); | |||
6682 | SDValue CC = Op.getOperand(2); | |||
6683 | EVT VT = Op.getValueType(); | |||
6684 | ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); | |||
6685 | SDLoc dl(Op); | |||
6686 | ||||
6687 | EVT CmpVT; | |||
6688 | if (ST->hasNEON()) | |||
6689 | CmpVT = Op0.getValueType().changeVectorElementTypeToInteger(); | |||
6690 | else { | |||
6691 | assert(ST->hasMVEIntegerOps() && | |||
6692 | "No hardware support for integer vector comparison!"); | |||
6693 | ||||
6694 | if (Op.getValueType().getVectorElementType() != MVT::i1) | |||
6695 | return SDValue(); | |||
6696 | ||||
6697 | // Make sure we expand floating point setcc to scalar if we do not have | |||
6698 | // mve.fp, so that we can handle them from there. | |||
6699 | if (Op0.getValueType().isFloatingPoint() && !ST->hasMVEFloatOps()) | |||
6700 | return SDValue(); | |||
6701 | ||||
6702 | CmpVT = VT; | |||
6703 | } | |||
6704 | ||||
6705 | if (Op0.getValueType().getVectorElementType() == MVT::i64 && | |||
6706 | (SetCCOpcode == ISD::SETEQ || SetCCOpcode == ISD::SETNE)) { | |||
6707 | // Special-case integer 64-bit equality comparisons. They aren't legal, | |||
6708 | // but they can be lowered with a few vector instructions. | |||
6709 | unsigned CmpElements = CmpVT.getVectorNumElements() * 2; | |||
6710 | EVT SplitVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, CmpElements); | |||
6711 | SDValue CastOp0 = DAG.getNode(ISD::BITCAST, dl, SplitVT, Op0); | |||
6712 | SDValue CastOp1 = DAG.getNode(ISD::BITCAST, dl, SplitVT, Op1); | |||
6713 | SDValue Cmp = DAG.getNode(ISD::SETCC, dl, SplitVT, CastOp0, CastOp1, | |||
6714 | DAG.getCondCode(ISD::SETEQ)); | |||
6715 | SDValue Reversed = DAG.getNode(ARMISD::VREV64, dl, SplitVT, Cmp); | |||
6716 | SDValue Merged = DAG.getNode(ISD::AND, dl, SplitVT, Cmp, Reversed); | |||
6717 | Merged = DAG.getNode(ISD::BITCAST, dl, CmpVT, Merged); | |||
6718 | if (SetCCOpcode == ISD::SETNE) | |||
6719 | Merged = DAG.getNOT(dl, Merged, CmpVT); | |||
6720 | Merged = DAG.getSExtOrTrunc(Merged, dl, VT); | |||
6721 | return Merged; | |||
6722 | } | |||
6723 | ||||
6724 | if (CmpVT.getVectorElementType() == MVT::i64) | |||
6725 | // 64-bit comparisons are not legal in general. | |||
6726 | return SDValue(); | |||
6727 | ||||
6728 | if (Op1.getValueType().isFloatingPoint()) { | |||
6729 | switch (SetCCOpcode) { | |||
6730 | default: llvm_unreachable("Illegal FP comparison"); | |||
6731 | case ISD::SETUNE: | |||
6732 | case ISD::SETNE: | |||
6733 | if (ST->hasMVEFloatOps()) { | |||
6734 | Opc = ARMCC::NE; break; | |||
6735 | } else { | |||
6736 | Invert = true; LLVM_FALLTHROUGH; | |||
6737 | } | |||
6738 | case ISD::SETOEQ: | |||
6739 | case ISD::SETEQ: Opc = ARMCC::EQ; break; | |||
6740 | case ISD::SETOLT: | |||
6741 | case ISD::SETLT: Swap = true; LLVM_FALLTHROUGH; | |||
6742 | case ISD::SETOGT: | |||
6743 | case ISD::SETGT: Opc = ARMCC::GT; break; | |||
6744 | case ISD::SETOLE: | |||
6745 | case ISD::SETLE: Swap = true; LLVM_FALLTHROUGH; | |||
6746 | case ISD::SETOGE: | |||
6747 | case ISD::SETGE: Opc = ARMCC::GE; break; | |||
6748 | case ISD::SETUGE: Swap = true; LLVM_FALLTHROUGH; | |||
6749 | case ISD::SETULE: Invert = true; Opc = ARMCC::GT; break; | |||
6750 | case ISD::SETUGT: Swap = true; LLVM_FALLTHROUGH; | |||
6751 | case ISD::SETULT: Invert = true; Opc = ARMCC::GE; break; | |||
6752 | case ISD::SETUEQ: Invert = true; LLVM_FALLTHROUGH; | |||
6753 | case ISD::SETONE: { | |||
6754 | // Expand this to (OLT | OGT). | |||
6755 | SDValue TmpOp0 = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op1, Op0, | |||
6756 | DAG.getConstant(ARMCC::GT, dl, MVT::i32)); | |||
6757 | SDValue TmpOp1 = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op0, Op1, | |||
6758 | DAG.getConstant(ARMCC::GT, dl, MVT::i32)); | |||
6759 | SDValue Result = DAG.getNode(ISD::OR, dl, CmpVT, TmpOp0, TmpOp1); | |||
6760 | if (Invert) | |||
6761 | Result = DAG.getNOT(dl, Result, VT); | |||
6762 | return Result; | |||
6763 | } | |||
6764 | case ISD::SETUO: Invert = true; LLVM_FALLTHROUGH; | |||
6765 | case ISD::SETO: { | |||
6766 | // Expand this to (OLT | OGE). | |||
6767 | SDValue TmpOp0 = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op1, Op0, | |||
6768 | DAG.getConstant(ARMCC::GT, dl, MVT::i32)); | |||
6769 | SDValue TmpOp1 = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op0, Op1, | |||
6770 | DAG.getConstant(ARMCC::GE, dl, MVT::i32)); | |||
6771 | SDValue Result = DAG.getNode(ISD::OR, dl, CmpVT, TmpOp0, TmpOp1); | |||
6772 | if (Invert) | |||
6773 | Result = DAG.getNOT(dl, Result, VT); | |||
6774 | return Result; | |||
6775 | } | |||
6776 | } | |||
6777 | } else { | |||
6778 | // Integer comparisons. | |||
6779 | switch (SetCCOpcode) { | |||
6780 | default: llvm_unreachable("Illegal integer comparison"); | |||
6781 | case ISD::SETNE: | |||
6782 | if (ST->hasMVEIntegerOps()) { | |||
6783 | Opc = ARMCC::NE; break; | |||
6784 | } else { | |||
6785 | Invert = true; LLVM_FALLTHROUGH; | |||
6786 | } | |||
6787 | case ISD::SETEQ: Opc = ARMCC::EQ; break; | |||
6788 | case ISD::SETLT: Swap = true; LLVM_FALLTHROUGH; | |||
6789 | case ISD::SETGT: Opc = ARMCC::GT; break; | |||
6790 | case ISD::SETLE: Swap = true; LLVM_FALLTHROUGH; | |||
6791 | case ISD::SETGE: Opc = ARMCC::GE; break; | |||
6792 | case ISD::SETULT: Swap = true; LLVM_FALLTHROUGH; | |||
6793 | case ISD::SETUGT: Opc = ARMCC::HI; break; | |||
6794 | case ISD::SETULE: Swap = true; LLVM_FALLTHROUGH; | |||
6795 | case ISD::SETUGE: Opc = ARMCC::HS; break; | |||
6796 | } | |||
6797 | ||||
6798 | // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero). | |||
6799 | if (ST->hasNEON() && Opc == ARMCC::EQ) { | |||
6800 | SDValue AndOp; | |||
6801 | if (ISD::isBuildVectorAllZeros(Op1.getNode())) | |||
6802 | AndOp = Op0; | |||
6803 | else if (ISD::isBuildVectorAllZeros(Op0.getNode())) | |||
6804 | AndOp = Op1; | |||
6805 | ||||
6806 | // Ignore bitconvert. | |||
6807 | if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST) | |||
6808 | AndOp = AndOp.getOperand(0); | |||
6809 | ||||
6810 | if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) { | |||
6811 | Op0 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(0)); | |||
6812 | Op1 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(1)); | |||
6813 | SDValue Result = DAG.getNode(ARMISD::VTST, dl, CmpVT, Op0, Op1); | |||
6814 | if (!Invert) | |||
6815 | Result = DAG.getNOT(dl, Result, VT); | |||
6816 | return Result; | |||
6817 | } | |||
6818 | } | |||
6819 | } | |||
6820 | ||||
6821 | if (Swap) | |||
6822 | std::swap(Op0, Op1); | |||
6823 | ||||
6824 | // If one of the operands is a constant vector zero, attempt to fold the | |||
6825 | // comparison to a specialized compare-against-zero form. | |||
6826 | SDValue SingleOp; | |||
6827 | if (ISD::isBuildVectorAllZeros(Op1.getNode())) | |||
6828 | SingleOp = Op0; | |||
6829 | else if (ISD::isBuildVectorAllZeros(Op0.getNode())) { | |||
6830 | if (Opc == ARMCC::GE) | |||
6831 | Opc = ARMCC::LE; | |||
6832 | else if (Opc == ARMCC::GT) | |||
6833 | Opc = ARMCC::LT; | |||
6834 | SingleOp = Op1; | |||
6835 | } | |||
6836 | ||||
6837 | SDValue Result; | |||
6838 | if (SingleOp.getNode()) { | |||
6839 | Result = DAG.getNode(ARMISD::VCMPZ, dl, CmpVT, SingleOp, | |||
6840 | DAG.getConstant(Opc, dl, MVT::i32)); | |||
6841 | } else { | |||
6842 | Result = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op0, Op1, | |||
6843 | DAG.getConstant(Opc, dl, MVT::i32)); | |||
6844 | } | |||
6845 | ||||
6846 | Result = DAG.getSExtOrTrunc(Result, dl, VT); | |||
6847 | ||||
6848 | if (Invert) | |||
6849 | Result = DAG.getNOT(dl, Result, VT); | |||
6850 | ||||
6851 | return Result; | |||
6852 | } | |||
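// Worked example (illustrative): an integer "setult a, b" has no direct NEON
// condition code, so Swap is set and ARMCC::HI is used; the node emitted is
// VCMP(b, a, HI), i.e. b >u a, which equals a <u b. Unordered FP predicates
// are instead built from the ordered ones plus Invert, e.g.
// SETULT(a, b) => NOT(VCGE(a, b)).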
6853 | ||||
6854 | static SDValue LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) { | |||
6855 | SDValue LHS = Op.getOperand(0); | |||
6856 | SDValue RHS = Op.getOperand(1); | |||
6857 | SDValue Carry = Op.getOperand(2); | |||
6858 | SDValue Cond = Op.getOperand(3); | |||
6859 | SDLoc DL(Op); | |||
6860 | ||||
6861 | assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only."); | |||
6862 | ||||
6863 | // ARMISD::SUBE expects a carry not a borrow like ISD::SUBCARRY so we | |||
6864 | // have to invert the carry first. | |||
6865 | Carry = DAG.getNode(ISD::SUB, DL, MVT::i32, | |||
6866 | DAG.getConstant(1, DL, MVT::i32), Carry); | |||
6867 | // This converts the boolean value carry into the carry flag. | |||
6868 | Carry = ConvertBooleanCarryToCarryFlag(Carry, DAG); | |||
6869 | ||||
6870 | SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32); | |||
6871 | SDValue Cmp = DAG.getNode(ARMISD::SUBE, DL, VTs, LHS, RHS, Carry); | |||
6872 | ||||
6873 | SDValue FVal = DAG.getConstant(0, DL, MVT::i32); | |||
6874 | SDValue TVal = DAG.getConstant(1, DL, MVT::i32); | |||
6875 | SDValue ARMcc = DAG.getConstant( | |||
6876 | IntCCToARMCC(cast<CondCodeSDNode>(Cond)->get()), DL, MVT::i32); | |||
6877 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); | |||
6878 | SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), DL, ARM::CPSR, | |||
6879 | Cmp.getValue(1), SDValue()); | |||
6880 | return DAG.getNode(ARMISD::CMOV, DL, Op.getValueType(), FVal, TVal, ARMcc, | |||
6881 | CCR, Chain.getValue(1)); | |||
6882 | } | |||
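// The subtraction above is just "carry = 1 - borrow", since ARM's SBC-style
// SUBE consumes a carry rather than a borrow. A rough sketch of the resulting
// DAG (value names are hypothetical):
//
//   t1 = sub 1, borrow
//   t2 = <carry flag from t1>         // ConvertBooleanCarryToCarryFlag
//   t3 = ARMISD::SUBE lhs, rhs, t2    // second result feeds CPSR
//   t4 = ARMISD::CMOV 0, 1, <cond from IntCCToARMCC>, CPSR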
6883 | ||||
6884 | /// isVMOVModifiedImm - Check if the specified splat value corresponds to a | |||
6885 | /// valid vector constant for a NEON or MVE instruction with a "modified | |||
6886 | /// immediate" operand (e.g., VMOV). If so, return the encoded value. | |||
6887 | static SDValue isVMOVModifiedImm(uint64_t SplatBits, uint64_t SplatUndef, | |||
6888 | unsigned SplatBitSize, SelectionDAG &DAG, | |||
6889 | const SDLoc &dl, EVT &VT, EVT VectorVT, | |||
6890 | VMOVModImmType type) { | |||
6891 | unsigned OpCmode, Imm; | |||
6892 | bool is128Bits = VectorVT.is128BitVector(); | |||
6893 | ||||
6894 | // SplatBitSize is set to the smallest size that splats the vector, so a | |||
6895 | // zero vector will always have SplatBitSize == 8. However, NEON modified | |||
6896 | // immediate instructions other than VMOV do not support the 8-bit encoding | |||
6897 | // of a zero vector, and the default encoding of zero is supposed to be the | |||
6898 | // 32-bit version. | |||
6899 | if (SplatBits == 0) | |||
6900 | SplatBitSize = 32; | |||
6901 | ||||
6902 | switch (SplatBitSize) { | |||
6903 | case 8: | |||
6904 | if (type != VMOVModImm) | |||
6905 | return SDValue(); | |||
6906 | // Any 1-byte value is OK. Op=0, Cmode=1110. | |||
6907 | assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big"); | |||
6908 | OpCmode = 0xe; | |||
6909 | Imm = SplatBits; | |||
6910 | VT = is128Bits ? MVT::v16i8 : MVT::v8i8; | |||
6911 | break; | |||
6912 | ||||
6913 | case 16: | |||
6914 | // NEON's 16-bit VMOV supports splat values where only one byte is nonzero. | |||
6915 | VT = is128Bits ? MVT::v8i16 : MVT::v4i16; | |||
6916 | if ((SplatBits & ~0xff) == 0) { | |||
6917 | // Value = 0x00nn: Op=x, Cmode=100x. | |||
6918 | OpCmode = 0x8; | |||
6919 | Imm = SplatBits; | |||
6920 | break; | |||
6921 | } | |||
6922 | if ((SplatBits & ~0xff00) == 0) { | |||
6923 | // Value = 0xnn00: Op=x, Cmode=101x. | |||
6924 | OpCmode = 0xa; | |||
6925 | Imm = SplatBits >> 8; | |||
6926 | break; | |||
6927 | } | |||
6928 | return SDValue(); | |||
6929 | ||||
6930 | case 32: | |||
6931 | // NEON's 32-bit VMOV supports splat values where: | |||
6932 | // * only one byte is nonzero, or | |||
6933 | // * the least significant byte is 0xff and the second byte is nonzero, or | |||
6934 | // * the least significant 2 bytes are 0xff and the third is nonzero. | |||
6935 | VT = is128Bits ? MVT::v4i32 : MVT::v2i32; | |||
6936 | if ((SplatBits & ~0xff) == 0) { | |||
6937 | // Value = 0x000000nn: Op=x, Cmode=000x. | |||
6938 | OpCmode = 0; | |||
6939 | Imm = SplatBits; | |||
6940 | break; | |||
6941 | } | |||
6942 | if ((SplatBits & ~0xff00) == 0) { | |||
6943 | // Value = 0x0000nn00: Op=x, Cmode=001x. | |||
6944 | OpCmode = 0x2; | |||
6945 | Imm = SplatBits >> 8; | |||
6946 | break; | |||
6947 | } | |||
6948 | if ((SplatBits & ~0xff0000) == 0) { | |||
6949 | // Value = 0x00nn0000: Op=x, Cmode=010x. | |||
6950 | OpCmode = 0x4; | |||
6951 | Imm = SplatBits >> 16; | |||
6952 | break; | |||
6953 | } | |||
6954 | if ((SplatBits & ~0xff000000) == 0) { | |||
6955 | // Value = 0xnn000000: Op=x, Cmode=011x. | |||
6956 | OpCmode = 0x6; | |||
6957 | Imm = SplatBits >> 24; | |||
6958 | break; | |||
6959 | } | |||
6960 | ||||
6961 | // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC | |||
6962 | if (type == OtherModImm) return SDValue(); | |||
6963 | ||||
6964 | if ((SplatBits & ~0xffff) == 0 && | |||
6965 | ((SplatBits | SplatUndef) & 0xff) == 0xff) { | |||
6966 | // Value = 0x0000nnff: Op=x, Cmode=1100. | |||
6967 | OpCmode = 0xc; | |||
6968 | Imm = SplatBits >> 8; | |||
6969 | break; | |||
6970 | } | |||
6971 | ||||
6972 | // cmode == 0b1101 is not supported for MVE VMVN | |||
6973 | if (type == MVEVMVNModImm) | |||
6974 | return SDValue(); | |||
6975 | ||||
6976 | if ((SplatBits & ~0xffffff) == 0 && | |||
6977 | ((SplatBits | SplatUndef) & 0xffff) == 0xffff) { | |||
6978 | // Value = 0x00nnffff: Op=x, Cmode=1101. | |||
6979 | OpCmode = 0xd; | |||
6980 | Imm = SplatBits >> 16; | |||
6981 | break; | |||
6982 | } | |||
6983 | ||||
6984 | // Note: there are a few 32-bit splat values (specifically: 00ffff00, | |||
6985 | // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not | |||
6986 | // VMOV.I32. A (very) minor optimization would be to replicate the value | |||
6987 | // and fall through here to test for a valid 64-bit splat. But, then the | |||
6988 | // caller would also need to check and handle the change in size. | |||
6989 | return SDValue(); | |||
6990 | ||||
6991 | case 64: { | |||
6992 | if (type != VMOVModImm) | |||
6993 | return SDValue(); | |||
6994 | // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff. | |||
6995 | uint64_t BitMask = 0xff; | |||
6996 | unsigned ImmMask = 1; | |||
6997 | Imm = 0; | |||
6998 | for (int ByteNum = 0; ByteNum < 8; ++ByteNum) { | |||
6999 | if (((SplatBits | SplatUndef) & BitMask) == BitMask) { | |||
7000 | Imm |= ImmMask; | |||
7001 | } else if ((SplatBits & BitMask) != 0) { | |||
7002 | return SDValue(); | |||
7003 | } | |||
7004 | BitMask <<= 8; | |||
7005 | ImmMask <<= 1; | |||
7006 | } | |||
7007 | ||||
7008 | if (DAG.getDataLayout().isBigEndian()) { | |||
7009 | // Reverse the order of elements within the vector. | |||
7010 | unsigned BytesPerElem = VectorVT.getScalarSizeInBits() / 8; | |||
7011 | unsigned Mask = (1 << BytesPerElem) - 1; | |||
7012 | unsigned NumElems = 8 / BytesPerElem; | |||
7013 | unsigned NewImm = 0; | |||
7014 | for (unsigned ElemNum = 0; ElemNum < NumElems; ++ElemNum) { | |||
7015 | unsigned Elem = ((Imm >> ElemNum * BytesPerElem) & Mask); | |||
7016 | NewImm |= Elem << (NumElems - ElemNum - 1) * BytesPerElem; | |||
7017 | } | |||
7018 | Imm = NewImm; | |||
7019 | } | |||
7020 | ||||
7021 | // Op=1, Cmode=1110. | |||
7022 | OpCmode = 0x1e; | |||
7023 | VT = is128Bits ? MVT::v2i64 : MVT::v1i64; | |||
7024 | break; | |||
7025 | } | |||
7026 | ||||
7027 | default: | |||
7028 | llvm_unreachable("unexpected size for isVMOVModifiedImm"); | |||
7029 | } | |||
7030 | ||||
7031 | unsigned EncodedVal = ARM_AM::createVMOVModImm(OpCmode, Imm); | |||
7032 | return DAG.getTargetConstant(EncodedVal, dl, MVT::i32); | |||
7033 | } | |||
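// Encoding sketch (illustrative): a v4i32 splat of 0x00004500 matches the
// "Value = 0x0000nn00" case above, giving OpCmode 0x2 and Imm 0x45; the
// returned target constant packs them via ARM_AM::createVMOVModImm so that
// instruction selection can emit "vmov.i32 qd, #0x4500".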
7034 | ||||
7035 | SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG, | |||
7036 | const ARMSubtarget *ST) const { | |||
7037 | EVT VT = Op.getValueType(); | |||
7038 | bool IsDouble = (VT == MVT::f64); | |||
7039 | ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Op); | |||
7040 | const APFloat &FPVal = CFP->getValueAPF(); | |||
7041 | ||||
7042 | // Prevent floating-point constants from using literal loads | |||
7043 | // when execute-only is enabled. | |||
7044 | if (ST->genExecuteOnly()) { | |||
7045 | // If we can represent the constant as an immediate, don't lower it | |||
7046 | if (isFPImmLegal(FPVal, VT)) | |||
7047 | return Op; | |||
7048 | // Otherwise, construct as integer, and move to float register | |||
7049 | APInt INTVal = FPVal.bitcastToAPInt(); | |||
7050 | SDLoc DL(CFP); | |||
7051 | switch (VT.getSimpleVT().SimpleTy) { | |||
7052 | default: | |||
7053 | llvm_unreachable("Unknown floating point type!"); | |||
7054 | break; | |||
7055 | case MVT::f64: { | |||
7056 | SDValue Lo = DAG.getConstant(INTVal.trunc(32), DL, MVT::i32); | |||
7057 | SDValue Hi = DAG.getConstant(INTVal.lshr(32).trunc(32), DL, MVT::i32); | |||
7058 | return DAG.getNode(ARMISD::VMOVDRR, DL, MVT::f64, Lo, Hi); | |||
7059 | } | |||
7060 | case MVT::f32: | |||
7061 | return DAG.getNode(ARMISD::VMOVSR, DL, VT, | |||
7062 | DAG.getConstant(INTVal, DL, MVT::i32)); | |||
7063 | } | |||
7064 | } | |||
7065 | ||||
7066 | if (!ST->hasVFP3Base()) | |||
7067 | return SDValue(); | |||
7068 | ||||
7069 | // Use the default (constant pool) lowering for double constants when we have | |||
7070 | // an SP-only FPU | |||
7071 | if (IsDouble && !Subtarget->hasFP64()) | |||
7072 | return SDValue(); | |||
7073 | ||||
7074 | // Try splatting with a VMOV.f32... | |||
7075 | int ImmVal = IsDouble ? ARM_AM::getFP64Imm(FPVal) : ARM_AM::getFP32Imm(FPVal); | |||
7076 | ||||
7077 | if (ImmVal != -1) { | |||
7078 | if (IsDouble || !ST->useNEONForSinglePrecisionFP()) { | |||
7079 | // We have code in place to select a valid ConstantFP already, no need to | |||
7080 | // do any mangling. | |||
7081 | return Op; | |||
7082 | } | |||
7083 | ||||
7084 | // It's a float and we are trying to use NEON operations where | |||
7085 | // possible. Lower it to a splat followed by an extract. | |||
7086 | SDLoc DL(Op); | |||
7087 | SDValue NewVal = DAG.getTargetConstant(ImmVal, DL, MVT::i32); | |||
7088 | SDValue VecConstant = DAG.getNode(ARMISD::VMOVFPIMM, DL, MVT::v2f32, | |||
7089 | NewVal); | |||
7090 | return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecConstant, | |||
7091 | DAG.getConstant(0, DL, MVT::i32)); | |||
7092 | } | |||
7093 | ||||
7094 | // The rest of our options are NEON-only; make sure that's allowed before | |||
7095 | // proceeding. | |||
7096 | if (!ST->hasNEON() || (!IsDouble && !ST->useNEONForSinglePrecisionFP())) | |||
7097 | return SDValue(); | |||
7098 | ||||
7099 | EVT VMovVT; | |||
7100 | uint64_t iVal = FPVal.bitcastToAPInt().getZExtValue(); | |||
7101 | ||||
7102 | // It wouldn't really be worth bothering for doubles except for one very | |||
7103 | // important value, which does happen to match: 0.0. So make sure we don't do | |||
7104 | // anything stupid. | |||
7105 | if (IsDouble && (iVal & 0xffffffff) != (iVal >> 32)) | |||
7106 | return SDValue(); | |||
7107 | ||||
7108 | // Try a VMOV.i32 (FIXME: i8, i16, or i64 could work too). | |||
7109 | SDValue NewVal = isVMOVModifiedImm(iVal & 0xffffffffU, 0, 32, DAG, SDLoc(Op), | |||
7110 | VMovVT, VT, VMOVModImm); | |||
7111 | if (NewVal != SDValue()) { | |||
7112 | SDLoc DL(Op); | |||
7113 | SDValue VecConstant = DAG.getNode(ARMISD::VMOVIMM, DL, VMovVT, | |||
7114 | NewVal); | |||
7115 | if (IsDouble) | |||
7116 | return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant); | |||
7117 | ||||
7118 | // It's a float: cast and extract a vector element. | |||
7119 | SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32, | |||
7120 | VecConstant); | |||
7121 | return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant, | |||
7122 | DAG.getConstant(0, DL, MVT::i32)); | |||
7123 | } | |||
7124 | ||||
7125 | // Finally, try a VMVN.i32 | |||
7126 | NewVal = isVMOVModifiedImm(~iVal & 0xffffffffU, 0, 32, DAG, SDLoc(Op), VMovVT, | |||
7127 | VT, VMVNModImm); | |||
7128 | if (NewVal != SDValue()) { | |||
7129 | SDLoc DL(Op); | |||
7130 | SDValue VecConstant = DAG.getNode(ARMISD::VMVNIMM, DL, VMovVT, NewVal); | |||
7131 | ||||
7132 | if (IsDouble) | |||
7133 | return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant); | |||
7134 | ||||
7135 | // It's a float: cast and extract a vector element. | |||
7136 | SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32, | |||
7137 | VecConstant); | |||
7138 | return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant, | |||
7139 | DAG.getConstant(0, DL, MVT::i32)); | |||
7140 | } | |||
7141 | ||||
7142 | return SDValue(); | |||
7143 | } | |||
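// Worked example (illustrative): f32 -0.0 is not a valid VMOV.f32 immediate,
// but its bit pattern 0x80000000 has a single nonzero byte and so is a valid
// modified immediate. On a subtarget using NEON for single precision it
// becomes roughly
//
//   t1: v2i32 = ARMISD::VMOVIMM <encoding of 0x80000000>
//   t2: f32   = extract_vector_elt (bitcast t1 to v2f32), 0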
7144 | ||||
7145 | // Check whether a VEXT instruction can handle the shuffle mask when the | |||
7146 | // vector sources of the shuffle are the same. | |||
7147 | static bool isSingletonVEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) { | |||
7148 | unsigned NumElts = VT.getVectorNumElements(); | |||
7149 | ||||
7150 | // Assume that the first shuffle index is not UNDEF. Fail if it is. | |||
7151 | if (M[0] < 0) | |||
7152 | return false; | |||
7153 | ||||
7154 | Imm = M[0]; | |||
7155 | ||||
7156 | // If this is a VEXT shuffle, the immediate value is the index of the first | |||
7157 | // element. The other shuffle indices must be the successive elements after | |||
7158 | // the first one. | |||
7159 | unsigned ExpectedElt = Imm; | |||
7160 | for (unsigned i = 1; i < NumElts; ++i) { | |||
7161 | // Increment the expected index. If it wraps around, just follow it | |||
7162 | // back to index zero and keep going. | |||
7163 | ++ExpectedElt; | |||
7164 | if (ExpectedElt == NumElts) | |||
7165 | ExpectedElt = 0; | |||
7166 | ||||
7167 | if (M[i] < 0) continue; // ignore UNDEF indices | |||
7168 | if (ExpectedElt != static_cast<unsigned>(M[i])) | |||
7169 | return false; | |||
7170 | } | |||
7171 | ||||
7172 | return true; | |||
7173 | } | |||
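// Example (illustrative): for v4i32 with both shuffle sources equal, the
// rotation mask <2, 3, 0, 1> is accepted with Imm == 2, i.e. a single
// VEXT %v, %v, #2.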
7174 | ||||
7175 | static bool isVEXTMask(ArrayRef<int> M, EVT VT, | |||
7176 | bool &ReverseVEXT, unsigned &Imm) { | |||
7177 | unsigned NumElts = VT.getVectorNumElements(); | |||
7178 | ReverseVEXT = false; | |||
7179 | ||||
7180 | // Assume that the first shuffle index is not UNDEF. Fail if it is. | |||
7181 | if (M[0] < 0) | |||
7182 | return false; | |||
7183 | ||||
7184 | Imm = M[0]; | |||
7185 | ||||
7186 | // If this is a VEXT shuffle, the immediate value is the index of the first | |||
7187 | // element. The other shuffle indices must be the successive elements after | |||
7188 | // the first one. | |||
7189 | unsigned ExpectedElt = Imm; | |||
7190 | for (unsigned i = 1; i < NumElts; ++i) { | |||
7191 | // Increment the expected index. If it wraps around, it may still be | |||
7192 | // a VEXT but the source vectors must be swapped. | |||
7193 | ExpectedElt += 1; | |||
7194 | if (ExpectedElt == NumElts * 2) { | |||
7195 | ExpectedElt = 0; | |||
7196 | ReverseVEXT = true; | |||
7197 | } | |||
7198 | ||||
7199 | if (M[i] < 0) continue; // ignore UNDEF indices | |||
7200 | if (ExpectedElt != static_cast<unsigned>(M[i])) | |||
7201 | return false; | |||
7202 | } | |||
7203 | ||||
7204 | // Adjust the index value if the source operands will be swapped. | |||
7205 | if (ReverseVEXT) | |||
7206 | Imm -= NumElts; | |||
7207 | ||||
7208 | return true; | |||
7209 | } | |||
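// Mask examples (illustrative) for v8i8: <3,4,5,6,7,8,9,10> matches with
// Imm == 3 and ReverseVEXT == false, while <13,14,15,0,1,2,3,4> wraps past
// the end of both vectors, so ReverseVEXT is set and Imm is adjusted to
// 13 - 8 == 5 for the swapped operands.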
7210 | ||||
7211 | static bool isVTBLMask(ArrayRef<int> M, EVT VT) { | |||
7212 | // We can handle <8 x i8> vector shuffles. If the index in the mask is out of | |||
7213 | // range, then 0 is placed into the resulting vector. So pretty much any mask | |||
7214 | // of 8 elements can work here. | |||
7215 | return VT == MVT::v8i8 && M.size() == 8; | |||
7216 | } | |||
7217 | ||||
7218 | static unsigned SelectPairHalf(unsigned Elements, ArrayRef<int> Mask, | |||
7219 | unsigned Index) { | |||
7220 | if (Mask.size() == Elements * 2) | |||
7221 | return Index / Elements; | |||
7222 | return Mask[Index] == 0 ? 0 : 1; | |||
7223 | } | |||
7224 | ||||
7225 | // Checks whether the shuffle mask represents a vector transpose (VTRN) by | |||
7226 | // checking that pairs of elements in the shuffle mask represent the same index | |||
7227 | // in each vector, incrementing the expected index by 2 at each step. | |||
7228 | // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 2, 6] | |||
7229 | // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,c,g} | |||
7230 | // v2={e,f,g,h} | |||
7231 | // WhichResult gives the offset for each element in the mask based on which | |||
7232 | // of the two results it belongs to. | |||
7233 | // | |||
7234 | // The transpose can be represented either as: | |||
7235 | // result1 = shufflevector v1, v2, result1_shuffle_mask | |||
7236 | // result2 = shufflevector v1, v2, result2_shuffle_mask | |||
7237 | // where v1/v2 and the shuffle masks have the same number of elements | |||
7238 | // (here WhichResult (see below) indicates which result is being checked) | |||
7239 | // | |||
7240 | // or as: | |||
7241 | // results = shufflevector v1, v2, shuffle_mask | |||
7242 | // where both results are returned in one vector and the shuffle mask has twice | |||
7243 | // as many elements as v1/v2 (here WhichResult will always be 0 if true). In | |||
7244 | // this case we check the low half and high half of the shuffle mask as if it | |||
7245 | // were the two-result form above. | |||
7246 | static bool isVTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { | |||
7247 | unsigned EltSz = VT.getScalarSizeInBits(); | |||
7248 | if (EltSz == 64) | |||
7249 | return false; | |||
7250 | ||||
7251 | unsigned NumElts = VT.getVectorNumElements(); | |||
7252 | if (M.size() != NumElts && M.size() != NumElts*2) | |||
7253 | return false; | |||
7254 | ||||
7255 | // If the mask is twice as long as the input vector then we need to check the | |||
7256 | // upper and lower parts of the mask with a matching value for WhichResult | |||
7257 | // FIXME: A mask with only even values will be rejected in case the first | |||
7258 | // element is undefined, e.g. [-1, 4, 2, 6] will be rejected, because only | |||
7259 | // M[0] is used to determine WhichResult | |||
7260 | for (unsigned i = 0; i < M.size(); i += NumElts) { | |||
7261 | WhichResult = SelectPairHalf(NumElts, M, i); | |||
7262 | for (unsigned j = 0; j < NumElts; j += 2) { | |||
7263 | if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) || | |||
7264 | (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + NumElts + WhichResult)) | |||
7265 | return false; | |||
7266 | } | |||
7267 | } | |||
7268 | ||||
7269 | if (M.size() == NumElts*2) | |||
7270 | WhichResult = 0; | |||
7271 | ||||
7272 | return true; | |||
7273 | } | |||
7274 | ||||
7275 | /// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of | |||
7276 | /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". | |||
7277 | /// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>. | |||
7278 | static bool isVTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ | |||
7279 | unsigned EltSz = VT.getScalarSizeInBits(); | |||
7280 | if (EltSz == 64) | |||
7281 | return false; | |||
7282 | ||||
7283 | unsigned NumElts = VT.getVectorNumElements(); | |||
7284 | if (M.size() != NumElts && M.size() != NumElts*2) | |||
7285 | return false; | |||
7286 | ||||
7287 | for (unsigned i = 0; i < M.size(); i += NumElts) { | |||
7288 | WhichResult = SelectPairHalf(NumElts, M, i); | |||
7289 | for (unsigned j = 0; j < NumElts; j += 2) { | |||
7290 | if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) || | |||
7291 | (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + WhichResult)) | |||
7292 | return false; | |||
7293 | } | |||
7294 | } | |||
7295 | ||||
7296 | if (M.size() == NumElts*2) | |||
7297 | WhichResult = 0; | |||
7298 | ||||
7299 | return true; | |||
7300 | } | |||
7301 | ||||
7302 | // Checks whether the shuffle mask represents a vector unzip (VUZP) by checking | |||
7303 | // that the mask elements are either all even and in steps of size 2 or all odd | |||
7304 | // and in steps of size 2. | |||
7305 | // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 2, 4, 6] | |||
7306 | // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,c,e,g} | |||
7307 | // v2={e,f,g,h} | |||
7308 | // Requires checks similar to those of isVTRNMask with respect to how the | |||
7309 | // results are returned. | |||
7310 | static bool isVUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { | |||
7311 | unsigned EltSz = VT.getScalarSizeInBits(); | |||
7312 | if (EltSz == 64) | |||
7313 | return false; | |||
7314 | ||||
7315 | unsigned NumElts = VT.getVectorNumElements(); | |||
7316 | if (M.size() != NumElts && M.size() != NumElts*2) | |||
7317 | return false; | |||
7318 | ||||
7319 | for (unsigned i = 0; i < M.size(); i += NumElts) { | |||
7320 | WhichResult = SelectPairHalf(NumElts, M, i); | |||
7321 | for (unsigned j = 0; j < NumElts; ++j) { | |||
7322 | if (M[i+j] >= 0 && (unsigned) M[i+j] != 2 * j + WhichResult) | |||
7323 | return false; | |||
7324 | } | |||
7325 | } | |||
7326 | ||||
7327 | if (M.size() == NumElts*2) | |||
7328 | WhichResult = 0; | |||
7329 | ||||
7330 | // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. | |||
7331 | if (VT.is64BitVector() && EltSz == 32) | |||
7332 | return false; | |||
7333 | ||||
7334 | return true; | |||
7335 | } | |||
7336 | ||||
7337 | /// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of | |||
7338 | /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". | |||
7339 | /// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>. | |||
7340 | static bool isVUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ | |||
7341 | unsigned EltSz = VT.getScalarSizeInBits(); | |||
7342 | if (EltSz == 64) | |||
7343 | return false; | |||
7344 | ||||
7345 | unsigned NumElts = VT.getVectorNumElements(); | |||
7346 | if (M.size() != NumElts && M.size() != NumElts*2) | |||
7347 | return false; | |||
7348 | ||||
7349 | unsigned Half = NumElts / 2; | |||
7350 | for (unsigned i = 0; i < M.size(); i += NumElts) { | |||
7351 | WhichResult = SelectPairHalf(NumElts, M, i); | |||
7352 | for (unsigned j = 0; j < NumElts; j += Half) { | |||
7353 | unsigned Idx = WhichResult; | |||
7354 | for (unsigned k = 0; k < Half; ++k) { | |||
7355 | int MIdx = M[i + j + k]; | |||
7356 | if (MIdx >= 0 && (unsigned) MIdx != Idx) | |||
7357 | return false; | |||
7358 | Idx += 2; | |||
7359 | } | |||
7360 | } | |||
7361 | } | |||
7362 | ||||
7363 | if (M.size() == NumElts*2) | |||
7364 | WhichResult = 0; | |||
7365 | ||||
7366 | // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. | |||
7367 | if (VT.is64BitVector() && EltSz == 32) | |||
7368 | return false; | |||
7369 | ||||
7370 | return true; | |||
7371 | } | |||
7372 | ||||
7373 | // Checks whether the shuffle mask represents a vector zip (VZIP) by checking | |||
7374 | // that pairs of elements of the shufflemask represent the same index in each | |||
7375 | // vector incrementing sequentially through the vectors. | |||
7376 | // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 1, 5] | |||
7377 | // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,b,f} | |||
7378 | // v2={e,f,g,h} | |||
7379 | // Requires checks similar to those of isVTRNMask with respect to how the | |||
7380 | // results are returned. | |||
7381 | static bool isVZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { | |||
7382 | unsigned EltSz = VT.getScalarSizeInBits(); | |||
7383 | if (EltSz == 64) | |||
7384 | return false; | |||
7385 | ||||
7386 | unsigned NumElts = VT.getVectorNumElements(); | |||
7387 | if (M.size() != NumElts && M.size() != NumElts*2) | |||
7388 | return false; | |||
7389 | ||||
7390 | for (unsigned i = 0; i < M.size(); i += NumElts) { | |||
7391 | WhichResult = SelectPairHalf(NumElts, M, i); | |||
7392 | unsigned Idx = WhichResult * NumElts / 2; | |||
7393 | for (unsigned j = 0; j < NumElts; j += 2) { | |||
7394 | if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) || | |||
7395 | (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx + NumElts)) | |||
7396 | return false; | |||
7397 | Idx += 1; | |||
7398 | } | |||
7399 | } | |||
7400 | ||||
7401 | if (M.size() == NumElts*2) | |||
7402 | WhichResult = 0; | |||
7403 | ||||
7404 | // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. | |||
7405 | if (VT.is64BitVector() && EltSz == 32) | |||
7406 | return false; | |||
7407 | ||||
7408 | return true; | |||
7409 | } | |||
7410 | ||||
7411 | /// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of | |||
7412 | /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". | |||
7413 | /// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>. | |||
7414 | static bool isVZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ | |||
7415 | unsigned EltSz = VT.getScalarSizeInBits(); | |||
7416 | if (EltSz == 64) | |||
7417 | return false; | |||
7418 | ||||
7419 | unsigned NumElts = VT.getVectorNumElements(); | |||
7420 | if (M.size() != NumElts && M.size() != NumElts*2) | |||
7421 | return false; | |||
7422 | ||||
7423 | for (unsigned i = 0; i < M.size(); i += NumElts) { | |||
7424 | WhichResult = SelectPairHalf(NumElts, M, i); | |||
7425 | unsigned Idx = WhichResult * NumElts / 2; | |||
7426 | for (unsigned j = 0; j < NumElts; j += 2) { | |||
7427 | if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) || | |||
7428 | (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx)) | |||
7429 | return false; | |||
7430 | Idx += 1; | |||
7431 | } | |||
7432 | } | |||
7433 | ||||
7434 | if (M.size() == NumElts*2) | |||
7435 | WhichResult = 0; | |||
7436 | ||||
7437 | // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. | |||
7438 | if (VT.is64BitVector() && EltSz == 32) | |||
7439 | return false; | |||
7440 | ||||
7441 | return true; | |||
7442 | } | |||
7443 | ||||
7444 | /// Check if \p ShuffleMask is a NEON two-result shuffle (VZIP, VUZP, VTRN), | |||
7445 | /// and return the corresponding ARMISD opcode if it is, or 0 if it isn't. | |||
7446 | static unsigned isNEONTwoResultShuffleMask(ArrayRef<int> ShuffleMask, EVT VT, | |||
7447 | unsigned &WhichResult, | |||
7448 | bool &isV_UNDEF) { | |||
7449 | isV_UNDEF = false; | |||
7450 | if (isVTRNMask(ShuffleMask, VT, WhichResult)) | |||
7451 | return ARMISD::VTRN; | |||
7452 | if (isVUZPMask(ShuffleMask, VT, WhichResult)) | |||
7453 | return ARMISD::VUZP; | |||
7454 | if (isVZIPMask(ShuffleMask, VT, WhichResult)) | |||
7455 | return ARMISD::VZIP; | |||
7456 | ||||
7457 | isV_UNDEF = true; | |||
7458 | if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult)) | |||
7459 | return ARMISD::VTRN; | |||
7460 | if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult)) | |||
7461 | return ARMISD::VUZP; | |||
7462 | if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult)) | |||
7463 | return ARMISD::VZIP; | |||
7464 | ||||
7465 | return 0; | |||
7466 | } | |||
7467 | ||||
7468 | /// \return true if this is a reverse operation on a vector. | |||
7469 | static bool isReverseMask(ArrayRef<int> M, EVT VT) { | |||
7470 | unsigned NumElts = VT.getVectorNumElements(); | |||
7471 | // Make sure the mask has the right size. | |||
7472 | if (NumElts != M.size()) | |||
7473 | return false; | |||
7474 | ||||
7475 | // Look for <15, ..., 3, -1, 1, 0>. | |||
7476 | for (unsigned i = 0; i != NumElts; ++i) | |||
7477 | if (M[i] >= 0 && M[i] != (int) (NumElts - 1 - i)) | |||
7478 | return false; | |||
7479 | ||||
7480 | return true; | |||
7481 | } | |||
7482 | ||||
7483 | static bool isVMOVNMask(ArrayRef<int> M, EVT VT, bool Top, bool SingleSource) { | |||
7484 | unsigned NumElts = VT.getVectorNumElements(); | |||
7485 | // Make sure the mask has the right size. | |||
7486 | if (NumElts != M.size() || (VT != MVT::v8i16 && VT != MVT::v16i8)) | |||
7487 | return false; | |||
7488 | ||||
7489 | // If Top | |||
7490 | // Look for <0, N, 2, N+2, 4, N+4, ..>. | |||
7491 | // This inserts Input2 into Input1 | |||
7492 | // else if not Top | |||
7493 | // Look for <0, N+1, 2, N+3, 4, N+5, ..> | |||
7494 | // This inserts Input1 into Input2 | |||
7495 | unsigned Offset = Top ? 0 : 1; | |||
7496 | unsigned N = SingleSource ? 0 : NumElts; | |||
7497 | for (unsigned i = 0; i < NumElts; i += 2) { | |||
7498 | if (M[i] >= 0 && M[i] != (int)i) | |||
7499 | return false; | |||
7500 | if (M[i + 1] >= 0 && M[i + 1] != (int)(N + i + Offset)) | |||
7501 | return false; | |||
7502 | } | |||
7503 | ||||
7504 | return true; | |||
7505 | } | |||
7506 | ||||
7507 | static bool isVMOVNTruncMask(ArrayRef<int> M, EVT ToVT, bool rev) { | |||
7508 | unsigned NumElts = ToVT.getVectorNumElements(); | |||
7509 | if (NumElts != M.size()) | |||
7510 | return false; | |||
7511 | ||||
7512 | // Test if the Trunc can be converted to a VMOVN with this shuffle. We are | |||
7513 | // looking for patterns of: | |||
7514 | // !rev: 0 N/2 1 N/2+1 2 N/2+2 ... | |||
7515 | // rev: N/2 0 N/2+1 1 N/2+2 2 ... | |||
7516 | ||||
7517 | unsigned Off0 = rev ? NumElts / 2 : 0; | |||
7518 | unsigned Off1 = rev ? 0 : NumElts / 2; | |||
7519 | for (unsigned i = 0; i < NumElts; i += 2) { | |||
7520 | if (M[i] >= 0 && M[i] != (int)(Off0 + i / 2)) | |||
7521 | return false; | |||
7522 | if (M[i + 1] >= 0 && M[i + 1] != (int)(Off1 + i / 2)) | |||
7523 | return false; | |||
7524 | } | |||
7525 | ||||
7526 | return true; | |||
7527 | } | |||
7528 | ||||
7529 | // Reconstruct an MVE VCVT from a BuildVector of scalar fptrunc, all extracted | |||
7530 | // from a pair of inputs. For example: | |||
7531 | // BUILDVECTOR(FP_ROUND(EXTRACT_ELT(X, 0), | |||
7532 | // FP_ROUND(EXTRACT_ELT(Y, 0), | |||
7533 | // FP_ROUND(EXTRACT_ELT(X, 1), | |||
7534 | // FP_ROUND(EXTRACT_ELT(Y, 1), ...) | |||
7535 | static SDValue LowerBuildVectorOfFPTrunc(SDValue BV, SelectionDAG &DAG, | |||
7536 | const ARMSubtarget *ST) { | |||
7537 | assert(BV.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!"); | |||
7538 | if (!ST->hasMVEFloatOps()) | |||
7539 | return SDValue(); | |||
7540 | ||||
7541 | SDLoc dl(BV); | |||
7542 | EVT VT = BV.getValueType(); | |||
7543 | if (VT != MVT::v8f16) | |||
7544 | return SDValue(); | |||
7545 | ||||
7546 | // We are looking for a buildvector of fptrunc elements, where all the | |||
7547 | // elements are interleavingly extracted from two sources. Check the first two | |||
7548 | // items are valid enough and extract some info from them (they are checked | |||
7549 | // properly in the loop below). | |||
7550 | if (BV.getOperand(0).getOpcode() != ISD::FP_ROUND || | |||
7551 | BV.getOperand(0).getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT || | |||
7552 | BV.getOperand(0).getOperand(0).getConstantOperandVal(1) != 0) | |||
7553 | return SDValue(); | |||
7554 | if (BV.getOperand(1).getOpcode() != ISD::FP_ROUND || | |||
7555 | BV.getOperand(1).getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT || | |||
7556 | BV.getOperand(1).getOperand(0).getConstantOperandVal(1) != 0) | |||
7557 | return SDValue(); | |||
7558 | SDValue Op0 = BV.getOperand(0).getOperand(0).getOperand(0); | |||
7559 | SDValue Op1 = BV.getOperand(1).getOperand(0).getOperand(0); | |||
7560 | if (Op0.getValueType() != MVT::v4f32 || Op1.getValueType() != MVT::v4f32) | |||
7561 | return SDValue(); | |||
7562 | ||||
7563 | // Check all the values in the BuildVector line up with our expectations. | |||
7564 | for (unsigned i = 1; i < 4; i++) { | |||
7565 | auto Check = [](SDValue Trunc, SDValue Op, unsigned Idx) { | |||
7566 | return Trunc.getOpcode() == ISD::FP_ROUND && | |||
7567 | Trunc.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT && | |||
7568 | Trunc.getOperand(0).getOperand(0) == Op && | |||
7569 | Trunc.getOperand(0).getConstantOperandVal(1) == Idx; | |||
7570 | }; | |||
7571 | if (!Check(BV.getOperand(i * 2 + 0), Op0, i)) | |||
7572 | return SDValue(); | |||
7573 | if (!Check(BV.getOperand(i * 2 + 1), Op1, i)) | |||
7574 | return SDValue(); | |||
7575 | } | |||
7576 | ||||
7577 | SDValue N1 = DAG.getNode(ARMISD::VCVTN, dl, VT, DAG.getUNDEF(VT), Op0, | |||
7578 | DAG.getConstant(0, dl, MVT::i32)); | |||
7579 | return DAG.getNode(ARMISD::VCVTN, dl, VT, N1, Op1, | |||
7580 | DAG.getConstant(1, dl, MVT::i32)); | |||
7581 | } | |||
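// The two VCVTN nodes built above interleave the narrowed lanes: the first
// (last operand 0) fills the even f16 lanes from Op0 and the second (last
// operand 1) fills the odd lanes from Op1. In MVE assembly this is roughly:
//
//   vcvtb.f16.f32 q0, q1   // even lanes <- Op0
//   vcvtt.f16.f32 q0, q2   // odd lanes  <- Op1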
7582 | ||||
7583 | // Reconstruct an MVE VCVT from a BuildVector of scalar fpext, all extracted | |||
7584 | // from a single input on alternating lanes. For example: | |||
7585 | // BUILDVECTOR(FP_EXTEND(EXTRACT_ELT(X, 0), | |||
7586 | // FP_EXTEND(EXTRACT_ELT(X, 2), | |||
7587 | // FP_EXTEND(EXTRACT_ELT(X, 4), ...) | |||
7588 | static SDValue LowerBuildVectorOfFPExt(SDValue BV, SelectionDAG &DAG, | |||
7589 | const ARMSubtarget *ST) { | |||
7590 | assert(BV.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!"); | |||
7591 | if (!ST->hasMVEFloatOps()) | |||
7592 | return SDValue(); | |||
7593 | ||||
7594 | SDLoc dl(BV); | |||
7595 | EVT VT = BV.getValueType(); | |||
7596 | if (VT != MVT::v4f32) | |||
7597 | return SDValue(); | |||
7598 | ||||
7599 | // We are looking for a buildvector of fpext elements, where all the | |||
7600 | // elements are alternating lanes from a single source. For example <0,2,4,6> | |||
7601 | // or <1,3,5,7>. Check the first two items are valid enough and extract some | |||
7602 | // info from them (they are checked properly in the loop below). | |||
7603 | if (BV.getOperand(0).getOpcode() != ISD::FP_EXTEND || | |||
7604 | BV.getOperand(0).getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT) | |||
7605 | return SDValue(); | |||
7606 | SDValue Op0 = BV.getOperand(0).getOperand(0).getOperand(0); | |||
7607 | int Offset = BV.getOperand(0).getOperand(0).getConstantOperandVal(1); | |||
7608 | if (Op0.getValueType() != MVT::v8f16 || (Offset != 0 && Offset != 1)) | |||
7609 | return SDValue(); | |||
7610 | ||||
7611 | // Check all the values in the BuildVector line up with our expectations. | |||
7612 | for (unsigned i = 1; i < 4; i++) { | |||
7613 | auto Check = [](SDValue Trunc, SDValue Op, unsigned Idx) { | |||
7614 | return Trunc.getOpcode() == ISD::FP_EXTEND && | |||
7615 | Trunc.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT && | |||
7616 | Trunc.getOperand(0).getOperand(0) == Op && | |||
7617 | Trunc.getOperand(0).getConstantOperandVal(1) == Idx; | |||
7618 | }; | |||
7619 | if (!Check(BV.getOperand(i), Op0, 2 * i + Offset)) | |||
7620 | return SDValue(); | |||
7621 | } | |||
7622 | ||||
7623 | return DAG.getNode(ARMISD::VCVTL, dl, VT, Op0, | |||
7624 | DAG.getConstant(Offset, dl, MVT::i32)); | |||
7625 | } | |||
7626 | ||||
7627 | // If N is an integer constant that can be moved into a register in one | |||
7628 | // instruction, return an SDValue of such a constant (will become a MOV | |||
7629 | // instruction). Otherwise return null. | |||
7630 | static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG, | |||
7631 | const ARMSubtarget *ST, const SDLoc &dl) { | |||
7632 | uint64_t Val; | |||
7633 | if (!isa<ConstantSDNode>(N)) | |||
7634 | return SDValue(); | |||
7635 | Val = cast<ConstantSDNode>(N)->getZExtValue(); | |||
7636 | ||||
7637 | if (ST->isThumb1Only()) { | |||
7638 | if (Val <= 255 || ~Val <= 255) | |||
7639 | return DAG.getConstant(Val, dl, MVT::i32); | |||
7640 | } else { | |||
7641 | if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1) | |||
7642 | return DAG.getConstant(Val, dl, MVT::i32); | |||
7643 | } | |||
7644 | return SDValue(); | |||
7645 | } | |||
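// Examples (illustrative): on Thumb1 a value such as 200 is accepted (a
// single MOVS); on ARM a rotated 8-bit pattern such as 0xFF0 is accepted via
// ARM_AM::getSOImmVal. Anything else returns a null SDValue and the caller
// falls back to other strategies.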
7646 | ||||
7647 | static SDValue LowerBUILD_VECTOR_i1(SDValue Op, SelectionDAG &DAG, | |||
7648 | const ARMSubtarget *ST) { | |||
7649 | SDLoc dl(Op); | |||
7650 | EVT VT = Op.getValueType(); | |||
7651 | ||||
7652 | assert(ST->hasMVEIntegerOps() && "LowerBUILD_VECTOR_i1 called without MVE!"); | |||
7653 | ||||
7654 | unsigned NumElts = VT.getVectorNumElements(); | |||
7655 | unsigned BoolMask; | |||
7656 | unsigned BitsPerBool; | |||
7657 | if (NumElts == 2) { | |||
7658 | BitsPerBool = 8; | |||
7659 | BoolMask = 0xff; | |||
7660 | } else if (NumElts == 4) { | |||
7661 | BitsPerBool = 4; | |||
7662 | BoolMask = 0xf; | |||
7663 | } else if (NumElts == 8) { | |||
7664 | BitsPerBool = 2; | |||
7665 | BoolMask = 0x3; | |||
7666 | } else if (NumElts == 16) { | |||
7667 | BitsPerBool = 1; | |||
7668 | BoolMask = 0x1; | |||
7669 | } else | |||
7670 | return SDValue(); | |||
7671 | ||||
7672 | // If this is a single value copied into all lanes (a splat), we can just sign | |||
7673 | // extend that single value. | |||
7674 | SDValue FirstOp = Op.getOperand(0); | |||
7675 | if (!isa<ConstantSDNode>(FirstOp) && | |||
7676 | std::all_of(std::next(Op->op_begin()), Op->op_end(), | |||
7677 | [&FirstOp](SDUse &U) { | |||
7678 | return U.get().isUndef() || U.get() == FirstOp; | |||
7679 | })) { | |||
7680 | SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::i32, FirstOp, | |||
7681 | DAG.getValueType(MVT::i1)); | |||
7682 | return DAG.getNode(ARMISD::PREDICATE_CAST, dl, Op.getValueType(), Ext); | |||
7683 | } | |||
7684 | ||||
7685 | // First create base with bits set where known | |||
7686 | unsigned Bits32 = 0; | |||
7687 | for (unsigned i = 0; i < NumElts; ++i) { | |||
7688 | SDValue V = Op.getOperand(i); | |||
7689 | if (!isa<ConstantSDNode>(V) && !V.isUndef()) | |||
7690 | continue; | |||
7691 | bool BitSet = V.isUndef() ? false : cast<ConstantSDNode>(V)->getZExtValue(); | |||
7692 | if (BitSet) | |||
7693 | Bits32 |= BoolMask << (i * BitsPerBool); | |||
7694 | } | |||
7695 | ||||
7696 | // Add in unknown nodes | |||
7697 | SDValue Base = DAG.getNode(ARMISD::PREDICATE_CAST, dl, VT, | |||
7698 | DAG.getConstant(Bits32, dl, MVT::i32)); | |||
7699 | for (unsigned i = 0; i < NumElts; ++i) { | |||
7700 | SDValue V = Op.getOperand(i); | |||
7701 | if (isa<ConstantSDNode>(V) || V.isUndef()) | |||
7702 | continue; | |||
7703 | Base = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Base, V, | |||
7704 | DAG.getConstant(i, dl, MVT::i32)); | |||
7705 | } | |||
7706 | ||||
7707 | return Base; | |||
7708 | } | |||
7709 | ||||
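     | // Try to lower a BUILD_VECTOR of the form x, x+N, x+2N, ... to an MVE VIDUP, | |||
     | // which materialises an incrementing vector from a scalar base. The hardware | |||
     | // only supports increments of 1, 2, 4 or 8, so for example | |||
     | //   (build_vector x, (add x, 4), (add x, 8), (add x, 12)) | |||
     | // becomes (VIDUP x, 4). VIDUP also produces the updated scalar as a second | |||
     | // result, which simply goes unused here. | |||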
7710 | static SDValue LowerBUILD_VECTORToVIDUP(SDValue Op, SelectionDAG &DAG, | |||
7711 | const ARMSubtarget *ST) { | |||
7712 | if (!ST->hasMVEIntegerOps()) | |||
7713 | return SDValue(); | |||
7714 | ||||
7715 | // We are looking for a buildvector where each element is Op[0] + i*N | |||
7716 | EVT VT = Op.getValueType(); | |||
7717 | SDValue Op0 = Op.getOperand(0); | |||
7718 | unsigned NumElts = VT.getVectorNumElements(); | |||
7719 | ||||
7720 | // Get the increment value from operand 1 | |||
7721 | SDValue Op1 = Op.getOperand(1); | |||
7722 | if (Op1.getOpcode() != ISD::ADD || Op1.getOperand(0) != Op0 || | |||
7723 | !isa<ConstantSDNode>(Op1.getOperand(1))) | |||
7724 | return SDValue(); | |||
7725 | unsigned N = Op1.getConstantOperandVal(1); | |||
7726 | if (N != 1 && N != 2 && N != 4 && N != 8) | |||
7727 | return SDValue(); | |||
7728 | ||||
7729 | // Check that each other operand matches | |||
7730 | for (unsigned I = 2; I < NumElts; I++) { | |||
7731 | SDValue OpI = Op.getOperand(I); | |||
7732 | if (OpI.getOpcode() != ISD::ADD || OpI.getOperand(0) != Op0 || | |||
7733 | !isa<ConstantSDNode>(OpI.getOperand(1)) || | |||
7734 | OpI.getConstantOperandVal(1) != I * N) | |||
7735 | return SDValue(); | |||
7736 | } | |||
7737 | ||||
7738 | SDLoc DL(Op); | |||
7739 | return DAG.getNode(ARMISD::VIDUP, DL, DAG.getVTList(VT, MVT::i32), Op0, | |||
7740 | DAG.getConstant(N, DL, MVT::i32)); | |||
7741 | } | |||
7742 | ||||
7743 | // Returns true if the operation N can be treated as a qr instruction variant | |||
7744 | // at operand Op. | |||
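     | // These qr forms take one operand directly in a GPR (e.g. vadd.i32 q0, q1, r2), | |||
     | // so a constant splat feeding only such users is cheaper as a VDUP than as a | |||
     | // constant-pool load. For the non-commutative ops (sub/hsub/qsub) only the | |||
     | // second data operand may be the scalar, which is what the operand-position | |||
     | // checks below enforce. | |||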
7745 | static bool IsQRMVEInstruction(const SDNode *N, const SDNode *Op) { | |||
7746 | switch (N->getOpcode()) { | |||
7747 | case ISD::ADD: | |||
7748 | case ISD::MUL: | |||
7749 | case ISD::SADDSAT: | |||
7750 | case ISD::UADDSAT: | |||
7751 | return true; | |||
7752 | case ISD::SUB: | |||
7753 | case ISD::SSUBSAT: | |||
7754 | case ISD::USUBSAT: | |||
7755 | return N->getOperand(1).getNode() == Op; | |||
7756 | case ISD::INTRINSIC_WO_CHAIN: | |||
7757 | switch (N->getConstantOperandVal(0)) { | |||
7758 | case Intrinsic::arm_mve_add_predicated: | |||
7759 | case Intrinsic::arm_mve_mul_predicated: | |||
7760 | case Intrinsic::arm_mve_qadd_predicated: | |||
7761 | case Intrinsic::arm_mve_vhadd: | |||
7762 | case Intrinsic::arm_mve_hadd_predicated: | |||
7763 | case Intrinsic::arm_mve_vqdmulh: | |||
7764 | case Intrinsic::arm_mve_qdmulh_predicated: | |||
7765 | case Intrinsic::arm_mve_vqrdmulh: | |||
7766 | case Intrinsic::arm_mve_qrdmulh_predicated: | |||
7767 | case Intrinsic::arm_mve_vqdmull: | |||
7768 | case Intrinsic::arm_mve_vqdmull_predicated: | |||
7769 | return true; | |||
7770 | case Intrinsic::arm_mve_sub_predicated: | |||
7771 | case Intrinsic::arm_mve_qsub_predicated: | |||
7772 | case Intrinsic::arm_mve_vhsub: | |||
7773 | case Intrinsic::arm_mve_hsub_predicated: | |||
7774 | return N->getOperand(2).getNode() == Op; | |||
7775 | default: | |||
7776 | return false; | |||
7777 | } | |||
7778 | default: | |||
7779 | return false; | |||
7780 | } | |||
7781 | } | |||
7782 | ||||
7783 | // If this is a case we can't handle, return null and let the default | |||
7784 | // expansion code take care of it. | |||
7785 | SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, | |||
7786 | const ARMSubtarget *ST) const { | |||
7787 | BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode()); | |||
7788 | SDLoc dl(Op); | |||
7789 | EVT VT = Op.getValueType(); | |||
7790 | ||||
7791 | if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == 1) | |||
7792 | return LowerBUILD_VECTOR_i1(Op, DAG, ST); | |||
7793 | ||||
7794 | if (SDValue R = LowerBUILD_VECTORToVIDUP(Op, DAG, ST)) | |||
7795 | return R; | |||
7796 | ||||
7797 | APInt SplatBits, SplatUndef; | |||
7798 | unsigned SplatBitSize; | |||
7799 | bool HasAnyUndefs; | |||
7800 | if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { | |||
7801 | if (SplatUndef.isAllOnes()) | |||
7802 | return DAG.getUNDEF(VT); | |||
7803 | ||||
7804 | // If all the users of this constant splat are qr instruction variants, | |||
7805 | // generate a vdup of the constant. | |||
7806 | if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == SplatBitSize && | |||
7807 | (SplatBitSize == 8 || SplatBitSize == 16 || SplatBitSize == 32) && | |||
7808 | all_of(BVN->uses(), | |||
7809 | [BVN](const SDNode *U) { return IsQRMVEInstruction(U, BVN); })) { | |||
7810 | EVT DupVT = SplatBitSize == 32 ? MVT::v4i32 | |||
7811 | : SplatBitSize == 16 ? MVT::v8i16 | |||
7812 | : MVT::v16i8; | |||
7813 | SDValue Const = DAG.getConstant(SplatBits.getZExtValue(), dl, MVT::i32); | |||
7814 | SDValue VDup = DAG.getNode(ARMISD::VDUP, dl, DupVT, Const); | |||
7815 | return DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, VT, VDup); | |||
7816 | } | |||
7817 | ||||
7818 | if ((ST->hasNEON() || ST->hasMVEIntegerOps()) && | |||
7819 | SplatBitSize <= 64) { | |||
7820 | // Check if an immediate VMOV works. | |||
7821 | EVT VmovVT; | |||
7822 | SDValue Val = | |||
7823 | isVMOVModifiedImm(SplatBits.getZExtValue(), SplatUndef.getZExtValue(), | |||
7824 | SplatBitSize, DAG, dl, VmovVT, VT, VMOVModImm); | |||
7825 | ||||
7826 | if (Val.getNode()) { | |||
7827 | SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val); | |||
7828 | return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); | |||
7829 | } | |||
7830 | ||||
7831 | // Try an immediate VMVN. | |||
7832 | uint64_t NegatedImm = (~SplatBits).getZExtValue(); | |||
7833 | Val = isVMOVModifiedImm( | |||
7834 | NegatedImm, SplatUndef.getZExtValue(), SplatBitSize, DAG, dl, VmovVT, | |||
7835 | VT, ST->hasMVEIntegerOps() ? MVEVMVNModImm : VMVNModImm); | |||
7836 | if (Val.getNode()) { | |||
7837 | SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val); | |||
7838 | return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); | |||
7839 | } | |||
7840 | ||||
7841 | // Use vmov.f32 to materialize other v2f32 and v4f32 splats. | |||
7842 | if ((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) { | |||
7843 | int ImmVal = ARM_AM::getFP32Imm(SplatBits); | |||
7844 | if (ImmVal != -1) { | |||
7845 | SDValue Val = DAG.getTargetConstant(ImmVal, dl, MVT::i32); | |||
7846 | return DAG.getNode(ARMISD::VMOVFPIMM, dl, VT, Val); | |||
7847 | } | |||
7848 | } | |||
7849 | ||||
7850 | // If we are under MVE, generate a VDUP(constant), bitcast to the original | |||
7851 | // type. | |||
7852 | if (ST->hasMVEIntegerOps() && | |||
7853 | (SplatBitSize == 8 || SplatBitSize == 16 || SplatBitSize == 32)) { | |||
7854 | EVT DupVT = SplatBitSize == 32 ? MVT::v4i32 | |||
7855 | : SplatBitSize == 16 ? MVT::v8i16 | |||
7856 | : MVT::v16i8; | |||
7857 | SDValue Const = DAG.getConstant(SplatBits.getZExtValue(), dl, MVT::i32); | |||
7858 | SDValue VDup = DAG.getNode(ARMISD::VDUP, dl, DupVT, Const); | |||
7859 | return DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, VT, VDup); | |||
7860 | } | |||
7861 | } | |||
7862 | } | |||
7863 | ||||
7864 | // Scan through the operands to see if only one value is used. | |||
7865 | // | |||
7866 | // As an optimisation, even if more than one value is used it may be more | |||
7867 | // profitable to splat with one value and then change some lanes. | |||
7868 | // | |||
7869 | // Heuristically we decide to do this if the vector has a "dominant" value, | |||
7870 | // defined as splatted to more than half of the lanes. | |||
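     | // For example, in <a, b, a, a> the value 'a' is dominant (3 of 4 lanes): we | |||
     | // VDUP 'a' and then insert 'b' into lane 1. | |||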
7871 | unsigned NumElts = VT.getVectorNumElements(); | |||
7872 | bool isOnlyLowElement = true; | |||
7873 | bool usesOnlyOneValue = true; | |||
7874 | bool hasDominantValue = false; | |||
7875 | bool isConstant = true; | |||
7876 | ||||
7877 | // Map of the number of times a particular SDValue appears in the | |||
7878 | // element list. | |||
7879 | DenseMap<SDValue, unsigned> ValueCounts; | |||
7880 | SDValue Value; | |||
7881 | for (unsigned i = 0; i < NumElts; ++i) { | |||
7882 | SDValue V = Op.getOperand(i); | |||
7883 | if (V.isUndef()) | |||
7884 | continue; | |||
7885 | if (i > 0) | |||
7886 | isOnlyLowElement = false; | |||
7887 | if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V)) | |||
7888 | isConstant = false; | |||
7889 | ||||
7890 | ValueCounts.insert(std::make_pair(V, 0)); | |||
7891 | unsigned &Count = ValueCounts[V]; | |||
7892 | ||||
7893 | // Is this value dominant? (takes up more than half of the lanes) | |||
7894 | if (++Count > (NumElts / 2)) { | |||
7895 | hasDominantValue = true; | |||
7896 | Value = V; | |||
7897 | } | |||
7898 | } | |||
7899 | if (ValueCounts.size() != 1) | |||
7900 | usesOnlyOneValue = false; | |||
7901 | if (!Value.getNode() && !ValueCounts.empty()) | |||
7902 | Value = ValueCounts.begin()->first; | |||
7903 | ||||
7904 | if (ValueCounts.empty()) | |||
7905 | return DAG.getUNDEF(VT); | |||
7906 | ||||
7907 | // Loads are better lowered with insert_vector_elt/ARMISD::BUILD_VECTOR. | |||
7908 | // Keep going if we are hitting this case. | |||
7909 | if (isOnlyLowElement && !ISD::isNormalLoad(Value.getNode())) | |||
7910 | return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value); | |||
7911 | ||||
7912 | unsigned EltSize = VT.getScalarSizeInBits(); | |||
7913 | ||||
7914 | // Use VDUP for non-constant splats. For f32 constant splats, reduce to | |||
7915 | // i32 and try again. | |||
7916 | if (hasDominantValue && EltSize <= 32) { | |||
7917 | if (!isConstant) { | |||
7918 | SDValue N; | |||
7919 | ||||
7920 | // If we are VDUPing a value that comes directly from a vector, that will | |||
7921 | // cause an unnecessary move to and from a GPR, where instead we could | |||
7922 | // just use VDUPLANE. We can only do this if the lane being extracted | |||
7923 | // is at a constant index, as the VDUP from lane instructions only have | |||
7924 | // constant-index forms. | |||
7925 | ConstantSDNode *constIndex; | |||
7926 | if (Value->getOpcode() == ISD::EXTRACT_VECTOR_ELT && | |||
7927 | (constIndex = dyn_cast<ConstantSDNode>(Value->getOperand(1)))) { | |||
7928 | // We need to create a new undef vector to use for the VDUPLANE if the | |||
7929 | // size of the vector from which we get the value is different than the | |||
7930 | // size of the vector that we need to create. We will insert the element | |||
7931 | // such that the register coalescer will remove unnecessary copies. | |||
7932 | if (VT != Value->getOperand(0).getValueType()) { | |||
7933 | unsigned index = constIndex->getAPIntValue().getLimitedValue() % | |||
7934 | VT.getVectorNumElements(); | |||
7935 | N = DAG.getNode(ARMISD::VDUPLANE, dl, VT, | |||
7936 | DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DAG.getUNDEF(VT), | |||
7937 | Value, DAG.getConstant(index, dl, MVT::i32)), | |||
7938 | DAG.getConstant(index, dl, MVT::i32)); | |||
7939 | } else | |||
7940 | N = DAG.getNode(ARMISD::VDUPLANE, dl, VT, | |||
7941 | Value->getOperand(0), Value->getOperand(1)); | |||
7942 | } else | |||
7943 | N = DAG.getNode(ARMISD::VDUP, dl, VT, Value); | |||
7944 | ||||
7945 | if (!usesOnlyOneValue) { | |||
7946 | // The dominant value was splatted as 'N', but we now have to insert | |||
7947 | // all differing elements. | |||
7948 | for (unsigned I = 0; I < NumElts; ++I) { | |||
7949 | if (Op.getOperand(I) == Value) | |||
7950 | continue; | |||
7951 | SmallVector<SDValue, 3> Ops; | |||
7952 | Ops.push_back(N); | |||
7953 | Ops.push_back(Op.getOperand(I)); | |||
7954 | Ops.push_back(DAG.getConstant(I, dl, MVT::i32)); | |||
7955 | N = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ops); | |||
7956 | } | |||
7957 | } | |||
7958 | return N; | |||
7959 | } | |||
7960 | if (VT.getVectorElementType().isFloatingPoint()) { | |||
7961 | SmallVector<SDValue, 8> Ops; | |||
7962 | MVT FVT = VT.getVectorElementType().getSimpleVT(); | |||
7963 | assert(FVT == MVT::f32 || FVT == MVT::f16); | |||
7964 | MVT IVT = (FVT == MVT::f32) ? MVT::i32 : MVT::i16; | |||
7965 | for (unsigned i = 0; i < NumElts; ++i) | |||
7966 | Ops.push_back(DAG.getNode(ISD::BITCAST, dl, IVT, | |||
7967 | Op.getOperand(i))); | |||
7968 | EVT VecVT = EVT::getVectorVT(*DAG.getContext(), IVT, NumElts); | |||
7969 | SDValue Val = DAG.getBuildVector(VecVT, dl, Ops); | |||
7970 | Val = LowerBUILD_VECTOR(Val, DAG, ST); | |||
7971 | if (Val.getNode()) | |||
7972 | return DAG.getNode(ISD::BITCAST, dl, VT, Val); | |||
7973 | } | |||
7974 | if (usesOnlyOneValue) { | |||
7975 | SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl); | |||
7976 | if (isConstant && Val.getNode()) | |||
7977 | return DAG.getNode(ARMISD::VDUP, dl, VT, Val); | |||
7978 | } | |||
7979 | } | |||
7980 | ||||
7981 | // If all elements are constants and the case above didn't get hit, fall back | |||
7982 | // to the default expansion, which will generate a load from the constant | |||
7983 | // pool. | |||
7984 | if (isConstant) | |||
7985 | return SDValue(); | |||
7986 | ||||
7987 | // Reconstruct the BUILDVECTOR to one of the legal shuffles (such as vext and | |||
7988 | // vmovn). Empirical tests suggest this is rarely worth it for vectors of | |||
7989 | // length <= 2. | |||
7990 | if (NumElts >= 4) | |||
7991 | if (SDValue shuffle = ReconstructShuffle(Op, DAG)) | |||
7992 | return shuffle; | |||
7993 | ||||
7994 | // Attempt to turn a buildvector of scalar fptrunc's or fpext's back into | |||
7995 | // VCVT's | |||
7996 | if (SDValue VCVT = LowerBuildVectorOfFPTrunc(Op, DAG, Subtarget)) | |||
7997 | return VCVT; | |||
7998 | if (SDValue VCVT = LowerBuildVectorOfFPExt(Op, DAG, Subtarget)) | |||
7999 | return VCVT; | |||
8000 | ||||
8001 | if (ST->hasNEON() && VT.is128BitVector() && VT != MVT::v2f64 && VT != MVT::v4f32) { | |||
8002 | // If we haven't found an efficient lowering, try splitting a 128-bit vector | |||
8003 | // into two 64-bit vectors; we might discover a better way to lower it. | |||
8004 | SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElts); | |||
8005 | EVT ExtVT = VT.getVectorElementType(); | |||
8006 | EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElts / 2); | |||
8007 | SDValue Lower = | |||
8008 | DAG.getBuildVector(HVT, dl, makeArrayRef(&Ops[0], NumElts / 2)); | |||
8009 | if (Lower.getOpcode() == ISD::BUILD_VECTOR) | |||
8010 | Lower = LowerBUILD_VECTOR(Lower, DAG, ST); | |||
8011 | SDValue Upper = DAG.getBuildVector( | |||
8012 | HVT, dl, makeArrayRef(&Ops[NumElts / 2], NumElts / 2)); | |||
8013 | if (Upper.getOpcode() == ISD::BUILD_VECTOR) | |||
8014 | Upper = LowerBUILD_VECTOR(Upper, DAG, ST); | |||
8015 | if (Lower && Upper) | |||
8016 | return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lower, Upper); | |||
8017 | } | |||
8018 | ||||
8019 | // Vectors with 32- or 64-bit elements can be built by directly assigning | |||
8020 | // the subregisters. Lower it to an ARMISD::BUILD_VECTOR so the operands | |||
8021 | // will be legalized. | |||
8022 | if (EltSize >= 32) { | |||
8023 | // Do the expansion with floating-point types, since that is what the VFP | |||
8024 | // registers are defined to use, and since i64 is not legal. | |||
8025 | EVT EltVT = EVT::getFloatingPointVT(EltSize); | |||
8026 | EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); | |||
8027 | SmallVector<SDValue, 8> Ops; | |||
8028 | for (unsigned i = 0; i < NumElts; ++i) | |||
8029 | Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i))); | |||
8030 | SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops); | |||
8031 | return DAG.getNode(ISD::BITCAST, dl, VT, Val); | |||
8032 | } | |||
8033 | ||||
8034 | // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we | |||
8035 | // know the default expansion would otherwise fall back on something even | |||
8036 | // worse: for a vector with one or two non-undef values, that is | |||
8037 | // scalar_to_vector for the elements followed by a shuffle (provided the | |||
8038 | // shuffle is valid for the target); for everything else, it is | |||
8039 | // materialization element by element on the stack followed by a load. | |||
8040 | if (!isConstant && !usesOnlyOneValue) { | |||
8041 | SDValue Vec = DAG.getUNDEF(VT); | |||
8042 | for (unsigned i = 0 ; i < NumElts; ++i) { | |||
8043 | SDValue V = Op.getOperand(i); | |||
8044 | if (V.isUndef()) | |||
8045 | continue; | |||
8046 | SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i32); | |||
8047 | Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx); | |||
8048 | } | |||
8049 | return Vec; | |||
8050 | } | |||
8051 | ||||
8052 | return SDValue(); | |||
8053 | } | |||
8054 | ||||
8055 | // Gather data to see if the operation can be modelled as a | |||
8056 | // shuffle in combination with VEXTs. | |||
8057 | SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op, | |||
8058 | SelectionDAG &DAG) const { | |||
8059 | assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!"); | |||
8060 | SDLoc dl(Op); | |||
8061 | EVT VT = Op.getValueType(); | |||
8062 | unsigned NumElts = VT.getVectorNumElements(); | |||
8063 | ||||
8064 | struct ShuffleSourceInfo { | |||
8065 | SDValue Vec; | |||
8066 | unsigned MinElt = std::numeric_limits<unsigned>::max(); | |||
8067 | unsigned MaxElt = 0; | |||
8068 | ||||
8069 | // We may insert some combination of BITCASTs and VEXT nodes to force Vec to | |||
8070 | // be compatible with the shuffle we intend to construct. As a result | |||
8071 | // ShuffleVec will be some sliding window into the original Vec. | |||
8072 | SDValue ShuffleVec; | |||
8073 | ||||
8074 | // Code should guarantee that element i in Vec starts at element | |||
8075 | // "WindowBase + i * WindowScale" in ShuffleVec. | |||
8076 | int WindowBase = 0; | |||
8077 | int WindowScale = 1; | |||
8078 | ||||
8079 | ShuffleSourceInfo(SDValue Vec) : Vec(Vec), ShuffleVec(Vec) {} | |||
8080 | ||||
8081 | bool operator ==(SDValue OtherVec) { return Vec == OtherVec; } | |||
8082 | }; | |||
8083 | ||||
8084 | // First gather all vectors used as an immediate source for this BUILD_VECTOR | |||
8085 | // node. | |||
8086 | SmallVector<ShuffleSourceInfo, 2> Sources; | |||
8087 | for (unsigned i = 0; i < NumElts; ++i) { | |||
8088 | SDValue V = Op.getOperand(i); | |||
8089 | if (V.isUndef()) | |||
8090 | continue; | |||
8091 | else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) { | |||
8092 | // A shuffle can only come from building a vector from various | |||
8093 | // elements of other vectors. | |||
8094 | return SDValue(); | |||
8095 | } else if (!isa<ConstantSDNode>(V.getOperand(1))) { | |||
8096 | // Furthermore, shuffles require a constant mask, whereas extractelts | |||
8097 | // accept variable indices. | |||
8098 | return SDValue(); | |||
8099 | } | |||
8100 | ||||
8101 | // Add this element source to the list if it's not already there. | |||
8102 | SDValue SourceVec = V.getOperand(0); | |||
8103 | auto Source = llvm::find(Sources, SourceVec); | |||
8104 | if (Source == Sources.end()) | |||
8105 | Source = Sources.insert(Sources.end(), ShuffleSourceInfo(SourceVec)); | |||
8106 | ||||
8107 | // Update the minimum and maximum lane number seen. | |||
8108 | unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue(); | |||
8109 | Source->MinElt = std::min(Source->MinElt, EltNo); | |||
8110 | Source->MaxElt = std::max(Source->MaxElt, EltNo); | |||
8111 | } | |||
8112 | ||||
8113 | // Currently only do something sane when at most two source vectors | |||
8114 | // are involved. | |||
8115 | if (Sources.size() > 2) | |||
8116 | return SDValue(); | |||
8117 | ||||
8118 | // Find out the smallest element size among the result and the two sources, | |||
8119 | // and use it as the element size to build the shuffle_vector. | |||
8120 | EVT SmallestEltTy = VT.getVectorElementType(); | |||
8121 | for (auto &Source : Sources) { | |||
8122 | EVT SrcEltTy = Source.Vec.getValueType().getVectorElementType(); | |||
8123 | if (SrcEltTy.bitsLT(SmallestEltTy)) | |||
8124 | SmallestEltTy = SrcEltTy; | |||
8125 | } | |||
8126 | unsigned ResMultiplier = | |||
8127 | VT.getScalarSizeInBits() / SmallestEltTy.getSizeInBits(); | |||
8128 | NumElts = VT.getSizeInBits() / SmallestEltTy.getSizeInBits(); | |||
8129 | EVT ShuffleVT = EVT::getVectorVT(*DAG.getContext(), SmallestEltTy, NumElts); | |||
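     | // For example, a v4i32 result built from i16 source elements gives | |||
     | // SmallestEltTy == i16 and ResMultiplier == 2: the shuffle itself is carried | |||
     | // out as a v8i16 operation, with each result lane covering two shuffle lanes. | |||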
8130 | ||||
8131 | // If the source vector is too wide or too narrow, we may nevertheless be able | |||
8132 | // to construct a compatible shuffle either by concatenating it with UNDEF or | |||
8133 | // extracting a suitable range of elements. | |||
8134 | for (auto &Src : Sources) { | |||
8135 | EVT SrcVT = Src.ShuffleVec.getValueType(); | |||
8136 | ||||
8137 | uint64_t SrcVTSize = SrcVT.getFixedSizeInBits(); | |||
8138 | uint64_t VTSize = VT.getFixedSizeInBits(); | |||
8139 | if (SrcVTSize == VTSize) | |||
8140 | continue; | |||
8141 | ||||
8142 | // This stage of the search produces a source with the same element type as | |||
8143 | // the original, but with a total width matching the BUILD_VECTOR output. | |||
8144 | EVT EltVT = SrcVT.getVectorElementType(); | |||
8145 | unsigned NumSrcElts = VTSize / EltVT.getFixedSizeInBits(); | |||
8146 | EVT DestVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumSrcElts); | |||
8147 | ||||
8148 | if (SrcVTSize < VTSize) { | |||
8149 | if (2 * SrcVTSize != VTSize) | |||
8150 | return SDValue(); | |||
8151 | // We can pad out the smaller vector for free, so if it's part of a | |||
8152 | // shuffle... | |||
8153 | Src.ShuffleVec = | |||
8154 | DAG.getNode(ISD::CONCAT_VECTORS, dl, DestVT, Src.ShuffleVec, | |||
8155 | DAG.getUNDEF(Src.ShuffleVec.getValueType())); | |||
8156 | continue; | |||
8157 | } | |||
8158 | ||||
8159 | if (SrcVTSize != 2 * VTSize) | |||
8160 | return SDValue(); | |||
8161 | ||||
8162 | if (Src.MaxElt - Src.MinElt >= NumSrcElts) { | |||
8163 | // Span too large for a VEXT to cope | |||
8164 | return SDValue(); | |||
8165 | } | |||
8166 | ||||
8167 | if (Src.MinElt >= NumSrcElts) { | |||
8168 | // The extraction can just take the second half | |||
8169 | Src.ShuffleVec = | |||
8170 | DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec, | |||
8171 | DAG.getConstant(NumSrcElts, dl, MVT::i32)); | |||
8172 | Src.WindowBase = -NumSrcElts; | |||
8173 | } else if (Src.MaxElt < NumSrcElts) { | |||
8174 | // The extraction can just take the first half | |||
8175 | Src.ShuffleVec = | |||
8176 | DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec, | |||
8177 | DAG.getConstant(0, dl, MVT::i32)); | |||
8178 | } else { | |||
8179 | // An actual VEXT is needed | |||
8180 | SDValue VEXTSrc1 = | |||
8181 | DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec, | |||
8182 | DAG.getConstant(0, dl, MVT::i32)); | |||
8183 | SDValue VEXTSrc2 = | |||
8184 | DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec, | |||
8185 | DAG.getConstant(NumSrcElts, dl, MVT::i32)); | |||
8186 | ||||
8187 | Src.ShuffleVec = DAG.getNode(ARMISD::VEXT, dl, DestVT, VEXTSrc1, | |||
8188 | VEXTSrc2, | |||
8189 | DAG.getConstant(Src.MinElt, dl, MVT::i32)); | |||
8190 | Src.WindowBase = -Src.MinElt; | |||
8191 | } | |||
8192 | } | |||
8193 | ||||
8194 | // Another possible incompatibility occurs from the vector element types. We | |||
8195 | // can fix this by bitcasting the source vectors to the same type we intend | |||
8196 | // for the shuffle. | |||
8197 | for (auto &Src : Sources) { | |||
8198 | EVT SrcEltTy = Src.ShuffleVec.getValueType().getVectorElementType(); | |||
8199 | if (SrcEltTy == SmallestEltTy) | |||
8200 | continue; | |||
8201 | assert(ShuffleVT.getVectorElementType() == SmallestEltTy); | |||
8202 | Src.ShuffleVec = DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, ShuffleVT, Src.ShuffleVec); | |||
8203 | Src.WindowScale = SrcEltTy.getSizeInBits() / SmallestEltTy.getSizeInBits(); | |||
8204 | Src.WindowBase *= Src.WindowScale; | |||
8205 | } | |||
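     | // From here on, element i of an original source sits at shuffle lane | |||
     | // WindowBase + i * WindowScale; e.g. a source with i32 elements feeding an | |||
     | // i16-element shuffle has WindowScale == 2. | |||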
8206 | ||||
8207 | // Final check before we try to actually produce a shuffle. | |||
8208 | LLVM_DEBUG(for (auto Src | |||
8209 | : Sources) | |||
8210 | assert(Src.ShuffleVec.getValueType() == ShuffleVT);); | |||
8211 | ||||
8212 | // The stars all align; our next step is to produce the mask for the shuffle. | |||
8213 | SmallVector<int, 8> Mask(ShuffleVT.getVectorNumElements(), -1); | |||
8214 | int BitsPerShuffleLane = ShuffleVT.getScalarSizeInBits(); | |||
8215 | for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) { | |||
8216 | SDValue Entry = Op.getOperand(i); | |||
8217 | if (Entry.isUndef()) | |||
8218 | continue; | |||
8219 | ||||
8220 | auto Src = llvm::find(Sources, Entry.getOperand(0)); | |||
8221 | int EltNo = cast<ConstantSDNode>(Entry.getOperand(1))->getSExtValue(); | |||
8222 | ||||
8223 | // EXTRACT_VECTOR_ELT performs an implicit any_ext; BUILD_VECTOR an implicit | |||
8224 | // trunc. So only std::min(SrcBits, DestBits) actually get defined in this | |||
8225 | // segment. | |||
8226 | EVT OrigEltTy = Entry.getOperand(0).getValueType().getVectorElementType(); | |||
8227 | int BitsDefined = std::min(OrigEltTy.getScalarSizeInBits(), | |||
8228 | VT.getScalarSizeInBits()); | |||
8229 | int LanesDefined = BitsDefined / BitsPerShuffleLane; | |||
8230 | ||||
8231 | // This source is expected to fill ResMultiplier lanes of the final shuffle, | |||
8232 | // starting at the appropriate offset. | |||
8233 | int *LaneMask = &Mask[i * ResMultiplier]; | |||
8234 | ||||
8235 | int ExtractBase = EltNo * Src->WindowScale + Src->WindowBase; | |||
8236 | ExtractBase += NumElts * (Src - Sources.begin()); | |||
8237 | for (int j = 0; j < LanesDefined; ++j) | |||
8238 | LaneMask[j] = ExtractBase + j; | |||
8239 | } | |||
8240 | ||||
8241 | ||||
8242 | // We can't handle more than two sources. This should have already | |||
8243 | // been checked before this point. | |||
8244 | assert(Sources.size() <= 2 && "Too many sources!"); | |||
8245 | ||||
8246 | SDValue ShuffleOps[] = { DAG.getUNDEF(ShuffleVT), DAG.getUNDEF(ShuffleVT) }; | |||
8247 | for (unsigned i = 0; i < Sources.size(); ++i) | |||
8248 | ShuffleOps[i] = Sources[i].ShuffleVec; | |||
8249 | ||||
8250 | SDValue Shuffle = buildLegalVectorShuffle(ShuffleVT, dl, ShuffleOps[0], | |||
8251 | ShuffleOps[1], Mask, DAG); | |||
8252 | if (!Shuffle) | |||
8253 | return SDValue(); | |||
8254 | return DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, VT, Shuffle); | |||
8255 | } | |||
8256 | ||||
8257 | enum ShuffleOpCodes { | |||
8258 | OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> | |||
8259 | OP_VREV, | |||
8260 | OP_VDUP0, | |||
8261 | OP_VDUP1, | |||
8262 | OP_VDUP2, | |||
8263 | OP_VDUP3, | |||
8264 | OP_VEXT1, | |||
8265 | OP_VEXT2, | |||
8266 | OP_VEXT3, | |||
8267 | OP_VUZPL, // VUZP, left result | |||
8268 | OP_VUZPR, // VUZP, right result | |||
8269 | OP_VZIPL, // VZIP, left result | |||
8270 | OP_VZIPR, // VZIP, right result | |||
8271 | OP_VTRNL, // VTRN, left result | |||
8272 | OP_VTRNR // VTRN, right result | |||
8273 | }; | |||
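     | // Each 32-bit perfect-shuffle table entry packs: the cost in bits 31-30, one | |||
     | // of the ShuffleOpCodes above in bits 29-26, and two 13-bit operand IDs | |||
     | // (lane masks encoded as base-9 numbers, see GeneratePerfectShuffle) in bits | |||
     | // 25-13 and 12-0. | |||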
8274 | ||||
8275 | static bool isLegalMVEShuffleOp(unsigned PFEntry) { | |||
8276 | unsigned OpNum = (PFEntry >> 26) & 0x0F; | |||
8277 | switch (OpNum) { | |||
8278 | case OP_COPY: | |||
8279 | case OP_VREV: | |||
8280 | case OP_VDUP0: | |||
8281 | case OP_VDUP1: | |||
8282 | case OP_VDUP2: | |||
8283 | case OP_VDUP3: | |||
8284 | return true; | |||
8285 | } | |||
8286 | return false; | |||
8287 | } | |||
8288 | ||||
8289 | /// isShuffleMaskLegal - Targets can use this to indicate that they only | |||
8290 | /// support *some* VECTOR_SHUFFLE operations, those with specific masks. | |||
8291 | /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values | |||
8292 | /// are assumed to be legal. | |||
8293 | bool ARMTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const { | |||
8294 | if (VT.getVectorNumElements() == 4 && | |||
8295 | (VT.is128BitVector() || VT.is64BitVector())) { | |||
8296 | unsigned PFIndexes[4]; | |||
8297 | for (unsigned i = 0; i != 4; ++i) { | |||
8298 | if (M[i] < 0) | |||
8299 | PFIndexes[i] = 8; | |||
8300 | else | |||
8301 | PFIndexes[i] = M[i]; | |||
8302 | } | |||
8303 | ||||
8304 | // Compute the index in the perfect shuffle table. | |||
8305 | unsigned PFTableIndex = | |||
8306 | PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; | |||
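     | // Each index is a base-9 digit: lane values 0-7 plus 8 for an undef lane, | |||
     | // giving 9^4 entries covering every possible 4-element mask. | |||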
8307 | unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; | |||
8308 | unsigned Cost = (PFEntry >> 30); | |||
8309 | ||||
8310 | if (Cost <= 4 && (Subtarget->hasNEON() || isLegalMVEShuffleOp(PFEntry))) | |||
8311 | return true; | |||
8312 | } | |||
8313 | ||||
8314 | bool ReverseVEXT, isV_UNDEF; | |||
8315 | unsigned Imm, WhichResult; | |||
8316 | ||||
8317 | unsigned EltSize = VT.getScalarSizeInBits(); | |||
8318 | if (EltSize >= 32 || | |||
8319 | ShuffleVectorSDNode::isSplatMask(&M[0], VT) || | |||
8320 | ShuffleVectorInst::isIdentityMask(M) || | |||
8321 | isVREVMask(M, VT, 64) || | |||
8322 | isVREVMask(M, VT, 32) || | |||
8323 | isVREVMask(M, VT, 16)) | |||
8324 | return true; | |||
8325 | else if (Subtarget->hasNEON() && | |||
8326 | (isVEXTMask(M, VT, ReverseVEXT, Imm) || | |||
8327 | isVTBLMask(M, VT) || | |||
8328 | isNEONTwoResultShuffleMask(M, VT, WhichResult, isV_UNDEF))) | |||
8329 | return true; | |||
8330 | else if ((VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v16i8) && | |||
8331 | isReverseMask(M, VT)) | |||
8332 | return true; | |||
8333 | else if (Subtarget->hasMVEIntegerOps() && | |||
8334 | (isVMOVNMask(M, VT, true, false) || | |||
8335 | isVMOVNMask(M, VT, false, false) || isVMOVNMask(M, VT, true, true))) | |||
8336 | return true; | |||
8337 | else | |||
8338 | return false; | |||
8339 | } | |||
8340 | ||||
8341 | /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit | |||
8342 | /// the specified operations to build the shuffle. | |||
8343 | static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, | |||
8344 | SDValue RHS, SelectionDAG &DAG, | |||
8345 | const SDLoc &dl) { | |||
8346 | unsigned OpNum = (PFEntry >> 26) & 0x0F; | |||
8347 | unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); | |||
8348 | unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); | |||
8349 | ||||
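     | // An operand ID encodes a 4-lane mask as base-9 digits, so (1*9+2)*9+3 is | |||
     | // <0,1,2,3> (LHS passed through) and ((4*9+5)*9+6)*9+7 is <4,5,6,7> (RHS). | |||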
8350 | if (OpNum == OP_COPY) { | |||
8351 | if (LHSID == (1*9+2)*9+3) return LHS; | |||
8352 | assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); | |||
8353 | return RHS; | |||
8354 | } | |||
8355 | ||||
8356 | SDValue OpLHS, OpRHS; | |||
8357 | OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); | |||
8358 | OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); | |||
8359 | EVT VT = OpLHS.getValueType(); | |||
8360 | ||||
8361 | switch (OpNum) { | |||
8362 | default: llvm_unreachable("Unknown shuffle opcode!"); | |||
8363 | case OP_VREV: | |||
8364 | // VREV divides the vector in half and swaps within the half. | |||
8365 | if (VT.getVectorElementType() == MVT::i32 || | |||
8366 | VT.getVectorElementType() == MVT::f32) | |||
8367 | return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS); | |||
8368 | // vrev <4 x i16> -> VREV32 | |||
8369 | if (VT.getVectorElementType() == MVT::i16 || | |||
8370 | VT.getVectorElementType() == MVT::f16) | |||
8371 | return DAG.getNode(ARMISD::VREV32, dl, VT, OpLHS); | |||
8372 | // vrev <4 x i8> -> VREV16 | |||
8373 | assert(VT.getVectorElementType() == MVT::i8); | |||
8374 | return DAG.getNode(ARMISD::VREV16, dl, VT, OpLHS); | |||
8375 | case OP_VDUP0: | |||
8376 | case OP_VDUP1: | |||
8377 | case OP_VDUP2: | |||
8378 | case OP_VDUP3: | |||
8379 | return DAG.getNode(ARMISD::VDUPLANE, dl, VT, | |||
8380 | OpLHS, DAG.getConstant(OpNum-OP_VDUP0, dl, MVT::i32)); | |||
8381 | case OP_VEXT1: | |||
8382 | case OP_VEXT2: | |||
8383 | case OP_VEXT3: | |||
8384 | return DAG.getNode(ARMISD::VEXT, dl, VT, | |||
8385 | OpLHS, OpRHS, | |||
8386 | DAG.getConstant(OpNum - OP_VEXT1 + 1, dl, MVT::i32)); | |||
8387 | case OP_VUZPL: | |||
8388 | case OP_VUZPR: | |||
8389 | return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), | |||
8390 | OpLHS, OpRHS).getValue(OpNum-OP_VUZPL); | |||
8391 | case OP_VZIPL: | |||
8392 | case OP_VZIPR: | |||
8393 | return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), | |||
8394 | OpLHS, OpRHS).getValue(OpNum-OP_VZIPL); | |||
8395 | case OP_VTRNL: | |||
8396 | case OP_VTRNR: | |||
8397 | return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), | |||
8398 | OpLHS, OpRHS).getValue(OpNum-OP_VTRNL); | |||
8399 | } | |||
8400 | } | |||
8401 | ||||
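     | // Lower an arbitrary v8i8 shuffle via NEON VTBL: each byte of the mask vector | |||
     | // selects a byte from the one (VTBL1) or two (VTBL2) table registers, so any | |||
     | // 8x8-bit permutation takes a single instruction. | |||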
8402 | static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op, | |||
8403 | ArrayRef<int> ShuffleMask, | |||
8404 | SelectionDAG &DAG) { | |||
8405 | // Check to see if we can use the VTBL instruction. | |||
8406 | SDValue V1 = Op.getOperand(0); | |||
8407 | SDValue V2 = Op.getOperand(1); | |||
8408 | SDLoc DL(Op); | |||
8409 | ||||
8410 | SmallVector<SDValue, 8> VTBLMask; | |||
8411 | for (int I : ShuffleMask) | |||
8412 | VTBLMask.push_back(DAG.getConstant(I, DL, MVT::i32)); | |||
8413 | ||||
8414 | if (V2.getNode()->isUndef()) | |||
8415 | return DAG.getNode(ARMISD::VTBL1, DL, MVT::v8i8, V1, | |||
8416 | DAG.getBuildVector(MVT::v8i8, DL, VTBLMask)); | |||
8417 | ||||
8418 | return DAG.getNode(ARMISD::VTBL2, DL, MVT::v8i8, V1, V2, | |||
8419 | DAG.getBuildVector(MVT::v8i8, DL, VTBLMask)); | |||
8420 | } | |||
8421 | ||||
8422 | static SDValue LowerReverse_VECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) { | |||
8423 | SDLoc DL(Op); | |||
8424 | EVT VT = Op.getValueType(); | |||
8425 | ||||
8426 | assert((VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v16i8) && | |||
8427 | "Expect a v8i16/v8f16/v16i8 type"); | |||
8428 | SDValue OpLHS = DAG.getNode(ARMISD::VREV64, DL, VT, Op.getOperand(0)); | |||
8429 | // For a v16i8 type: After the VREV, we have got <7, ..., 0, 15, ..., 8>. Now, | |||
8430 | // extract the first 8 bytes into the top double word and the last 8 bytes | |||
8431 | // into the bottom double word, through a new vector shuffle that will be | |||
8432 | // turned into a VEXT on Neon, or a couple of VMOVDs on MVE. | |||
8433 | std::vector<int> NewMask; | |||
8434 | for (unsigned i = 0; i < VT.getVectorNumElements() / 2; i++) | |||
8435 | NewMask.push_back(VT.getVectorNumElements() / 2 + i); | |||
8436 | for (unsigned i = 0; i < VT.getVectorNumElements() / 2; i++) | |||
8437 | NewMask.push_back(i); | |||
8438 | return DAG.getVectorShuffle(VT, DL, OpLHS, OpLHS, NewMask); | |||
8439 | } | |||
8440 | ||||
8441 | static EVT getVectorTyFromPredicateVector(EVT VT) { | |||
8442 | switch (VT.getSimpleVT().SimpleTy) { | |||
8443 | case MVT::v2i1: | |||
8444 | return MVT::v2f64; | |||
8445 | case MVT::v4i1: | |||
8446 | return MVT::v4i32; | |||
8447 | case MVT::v8i1: | |||
8448 | return MVT::v8i16; | |||
8449 | case MVT::v16i1: | |||
8450 | return MVT::v16i8; | |||
8451 | default: | |||
8452 | llvm_unreachable("Unexpected vector predicate type")::llvm::llvm_unreachable_internal("Unexpected vector predicate type" , "llvm/lib/Target/ARM/ARMISelLowering.cpp", 8452); | |||
8453 | } | |||
8454 | } | |||
8455 | ||||
8456 | static SDValue PromoteMVEPredVector(SDLoc dl, SDValue Pred, EVT VT, | |||
8457 | SelectionDAG &DAG) { | |||
8458 | // Converting from boolean predicates to integers involves creating a vector | |||
8459 | // of all ones or all zeroes and selecting the lanes based upon the real | |||
8460 | // predicate. | |||
8461 | SDValue AllOnes = | |||
8462 | DAG.getTargetConstant(ARM_AM::createVMOVModImm(0xe, 0xff), dl, MVT::i32); | |||
8463 | AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v16i8, AllOnes); | |||
8464 | ||||
8465 | SDValue AllZeroes = | |||
8466 | DAG.getTargetConstant(ARM_AM::createVMOVModImm(0xe, 0x0), dl, MVT::i32); | |||
8467 | AllZeroes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v16i8, AllZeroes); | |||
8468 | ||||
8469 | // Get full vector type from predicate type | |||
8470 | EVT NewVT = getVectorTyFromPredicateVector(VT); | |||
8471 | ||||
8472 | SDValue RecastV1; | |||
8473 | // If the real predicate is a v8i1 or v4i1 (not v16i1) then we need to recast | |||
8474 | // this to a v16i1. This cannot be done with an ordinary bitcast because the | |||
8475 | // sizes are not the same. We have to use an MVE-specific PREDICATE_CAST node, | |||
8476 | // since we know in hardware the sizes are really the same. | |||
8477 | if (VT != MVT::v16i1) | |||
8478 | RecastV1 = DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::v16i1, Pred); | |||
8479 | else | |||
8480 | RecastV1 = Pred; | |||
8481 | ||||
8482 | // Select either all ones or zeroes depending upon the real predicate bits. | |||
8483 | SDValue PredAsVector = | |||
8484 | DAG.getNode(ISD::VSELECT, dl, MVT::v16i8, RecastV1, AllOnes, AllZeroes); | |||
8485 | ||||
8486 | // Recast our new predicate-as-integer v16i8 vector into something | |||
8487 | // appropriate for the shuffle, i.e. v4i32 for a real v4i1 predicate. | |||
8488 | return DAG.getNode(ISD::BITCAST, dl, NewVT, PredAsVector); | |||
8489 | } | |||
8490 | ||||
8491 | static SDValue LowerVECTOR_SHUFFLE_i1(SDValue Op, SelectionDAG &DAG, | |||
8492 | const ARMSubtarget *ST) { | |||
8493 | EVT VT = Op.getValueType(); | |||
8494 | ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode()); | |||
8495 | ArrayRef<int> ShuffleMask = SVN->getMask(); | |||
8496 | ||||
8497 | assert(ST->hasMVEIntegerOps() && | |||
8498 | "No support for vector shuffle of boolean predicates"); | |||
8499 | ||||
8500 | SDValue V1 = Op.getOperand(0); | |||
8501 | SDLoc dl(Op); | |||
8502 | if (isReverseMask(ShuffleMask, VT)) { | |||
8503 | SDValue cast = DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::i32, V1); | |||
8504 | SDValue rbit = DAG.getNode(ISD::BITREVERSE, dl, MVT::i32, cast); | |||
8505 | SDValue srl = DAG.getNode(ISD::SRL, dl, MVT::i32, rbit, | |||
8506 | DAG.getConstant(16, dl, MVT::i32)); | |||
8507 | return DAG.getNode(ARMISD::PREDICATE_CAST, dl, VT, srl); | |||
8508 | } | |||
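     | // This works because the predicate is a 16-bit mask held in a GPR: BITREVERSE | |||
     | // of the 32-bit value reverses the lane bits into the top half (each lane's | |||
     | // bits are replicated, so reversing within a lane is harmless), and the SRL | |||
     | // by 16 shifts them back down. | |||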
8509 | ||||
8510 | // Until we can come up with optimised cases for every single vector | |||
8511 | // shuffle in existence we have chosen the least painful strategy. This is | |||
8512 | // to essentially promote the boolean predicate to an 8-bit integer, where | |||
8513 | // each predicate represents a byte. Then we fall back on a normal integer | |||
8514 | // vector shuffle and convert the result back into a predicate vector. In | |||
8515 | // many cases the generated code might be even better than scalar code | |||
8516 | // operating on bits. Just imagine trying to shuffle 8 arbitrary 2-bit | |||
8517 | // fields in a register into 8 other arbitrary 2-bit fields! | |||
8518 | SDValue PredAsVector = PromoteMVEPredVector(dl, V1, VT, DAG); | |||
8519 | EVT NewVT = PredAsVector.getValueType(); | |||
8520 | ||||
8521 | // Do the shuffle! | |||
8522 | SDValue Shuffled = DAG.getVectorShuffle(NewVT, dl, PredAsVector, | |||
8523 | DAG.getUNDEF(NewVT), ShuffleMask); | |||
8524 | ||||
8525 | // Now return the result of comparing the shuffled vector with zero, | |||
8526 | // which will generate a real predicate, i.e. v4i1, v8i1 or v16i1. For a v2i1 | |||
8527 | // we convert to a v4i1 compare to fill in the two halves of the i64 as i32s. | |||
8528 | if (VT == MVT::v2i1) { | |||
8529 | SDValue BC = DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, MVT::v4i32, Shuffled); | |||
8530 | SDValue Cmp = DAG.getNode(ARMISD::VCMPZ, dl, MVT::v4i1, BC, | |||
8531 | DAG.getConstant(ARMCC::NE, dl, MVT::i32)); | |||
8532 | return DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::v2i1, Cmp); | |||
8533 | } | |||
8534 | return DAG.getNode(ARMISD::VCMPZ, dl, VT, Shuffled, | |||
8535 | DAG.getConstant(ARMCC::NE, dl, MVT::i32)); | |||
8536 | } | |||
8537 | ||||
8538 | static SDValue LowerVECTOR_SHUFFLEUsingMovs(SDValue Op, | |||
8539 | ArrayRef<int> ShuffleMask, | |||
8540 | SelectionDAG &DAG) { | |||
8541 | // Attempt to lower the vector shuffle using as many whole register movs as | |||
8542 | // possible. This is useful for types smaller than 32 bits, which would | |||
8543 | // often otherwise become a series of GPR movs. | |||
8544 | SDLoc dl(Op); | |||
8545 | EVT VT = Op.getValueType(); | |||
8546 | if (VT.getScalarSizeInBits() >= 32) | |||
8547 | return SDValue(); | |||
8548 | ||||
8549 | assert((VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v16i8) && | |||
8550 | "Unexpected vector type"); | |||
8551 | int NumElts = VT.getVectorNumElements(); | |||
8552 | int QuarterSize = NumElts / 4; | |||
8553 | // The four final 32-bit parts of the vector, kept as f32 lanes | |||
8554 | SDValue Parts[4]; | |||
8555 | ||||
8556 | // Look for full-lane vmovs like <0,1,2,3> or <u,5,6,7> etc. (but not | |||
8557 | // <u,u,u,u>), returning the vmov lane index | |||
8558 | auto getMovIdx = [](ArrayRef<int> ShuffleMask, int Start, int Length) { | |||
8559 | // Detect which mov lane this would be from the first non-undef element. | |||
8560 | int MovIdx = -1; | |||
8561 | for (int i = 0; i < Length; i++) { | |||
8562 | if (ShuffleMask[Start + i] >= 0) { | |||
8563 | if (ShuffleMask[Start + i] % Length != i) | |||
8564 | return -1; | |||
8565 | MovIdx = ShuffleMask[Start + i] / Length; | |||
8566 | break; | |||
8567 | } | |||
8568 | } | |||
8569 | // If all items are undef, leave this for other combines | |||
8570 | if (MovIdx == -1) | |||
8571 | return -1; | |||
8572 | // Check the remaining values are the correct part of the same mov | |||
8573 | for (int i = 1; i < Length; i++) { | |||
8574 | if (ShuffleMask[Start + i] >= 0 && | |||
8575 | (ShuffleMask[Start + i] / Length != MovIdx || | |||
8576 | ShuffleMask[Start + i] % Length != i)) | |||
8577 | return -1; | |||
8578 | } | |||
8579 | return MovIdx; | |||
8580 | }; | |||
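     | // For example, in a v8i16 shuffle QuarterSize is 2, and mask entries <4,5> in | |||
     | // a part denote 32-bit lane 2 (i.e. 4/2) of the first input, which can be | |||
     | // moved wholesale as one f32 lane. | |||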
8581 | ||||
8582 | for (int Part = 0; Part < 4; ++Part) { | |||
8583 | // Does this part look like a mov | |||
8584 | int Elt = getMovIdx(ShuffleMask, Part * QuarterSize, QuarterSize); | |||
8585 | if (Elt != -1) { | |||
8586 | SDValue Input = Op->getOperand(0); | |||
8587 | if (Elt >= 4) { | |||
8588 | Input = Op->getOperand(1); | |||
8589 | Elt -= 4; | |||
8590 | } | |||
8591 | SDValue BitCast = DAG.getBitcast(MVT::v4f32, Input); | |||
8592 | Parts[Part] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, BitCast, | |||
8593 | DAG.getConstant(Elt, dl, MVT::i32)); | |||
8594 | } | |||
8595 | } | |||
8596 | ||||
8597 | // Nothing interesting found, just return | |||
8598 | if (!Parts[0] && !Parts[1] && !Parts[2] && !Parts[3]) | |||
8599 | return SDValue(); | |||
8600 | ||||
8601 | // The other parts need to be built with the old shuffle vector, bitcast to | |||
8602 | // v4f32 and extract_vector_elts | |||
8603 | if (!Parts[0] || !Parts[1] || !Parts[2] || !Parts[3]) { | |||
8604 | SmallVector<int, 16> NewShuffleMask; | |||
8605 | for (int Part = 0; Part < 4; ++Part) | |||
8606 | for (int i = 0; i < QuarterSize; i++) | |||
8607 | NewShuffleMask.push_back( | |||
8608 | Parts[Part] ? -1 : ShuffleMask[Part * QuarterSize + i]); | |||
8609 | SDValue NewShuffle = DAG.getVectorShuffle( | |||
8610 | VT, dl, Op->getOperand(0), Op->getOperand(1), NewShuffleMask); | |||
8611 | SDValue BitCast = DAG.getBitcast(MVT::v4f32, NewShuffle); | |||
8612 | ||||
8613 | for (int Part = 0; Part < 4; ++Part) | |||
8614 | if (!Parts[Part]) | |||
8615 | Parts[Part] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, | |||
8616 | BitCast, DAG.getConstant(Part, dl, MVT::i32)); | |||
8617 | } | |||
8618 | // Build a vector out of the various parts and bitcast it back to the original | |||
8619 | // type. | |||
8620 | SDValue NewVec = DAG.getNode(ARMISD::BUILD_VECTOR, dl, MVT::v4f32, Parts); | |||
8621 | return DAG.getBitcast(VT, NewVec); | |||
8622 | } | |||
8623 | ||||
8624 | static SDValue LowerVECTOR_SHUFFLEUsingOneOff(SDValue Op, | |||
8625 | ArrayRef<int> ShuffleMask, | |||
8626 | SelectionDAG &DAG) { | |||
8627 | SDValue V1 = Op.getOperand(0); | |||
8628 | SDValue V2 = Op.getOperand(1); | |||
8629 | EVT VT = Op.getValueType(); | |||
8630 | unsigned NumElts = VT.getVectorNumElements(); | |||
8631 | ||||
8632 | // A one-off identity mask is one that is mostly an identity mask from a | |||
8633 | // single source but contains a single element out-of-place, either from a | |||
8634 | // different vector or from another position in the same vector. As opposed to | |||
8635 | // lowering this via an ARMISD::BUILD_VECTOR we can generate an extract/insert | |||
8636 | // pair directly. | |||
8637 | auto isOneOffIdentityMask = [](ArrayRef<int> Mask, EVT VT, int BaseOffset, | |||
8638 | int &OffElement) { | |||
8639 | OffElement = -1; | |||
8640 | int NonUndef = 0; | |||
8641 | for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) { | |||
8642 | if (Mask[i] == -1) | |||
8643 | continue; | |||
8644 | NonUndef++; | |||
8645 | if (Mask[i] != i + BaseOffset) { | |||
8646 | if (OffElement == -1) | |||
8647 | OffElement = i; | |||
8648 | else | |||
8649 | return false; | |||
8650 | } | |||
8651 | } | |||
8652 | return NonUndef > 2 && OffElement != -1; | |||
8653 | }; | |||
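     | // For example, with 4 lanes the mask <0,1,9,3> is a one-off identity of V1 | |||
     | // whose lane 2 comes from lane 1 of V2: we extract V2[1] and insert it into | |||
     | // V1 at index 2. | |||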
8654 | int OffElement; | |||
8655 | SDValue VInput; | |||
8656 | if (isOneOffIdentityMask(ShuffleMask, VT, 0, OffElement)) | |||
8657 | VInput = V1; | |||
8658 | else if (isOneOffIdentityMask(ShuffleMask, VT, NumElts, OffElement)) | |||
8659 | VInput = V2; | |||
8660 | else | |||
8661 | return SDValue(); | |||
8662 | ||||
8663 | SDLoc dl(Op); | |||
8664 | EVT SVT = VT.getScalarType() == MVT::i8 || VT.getScalarType() == MVT::i16 | |||
8665 | ? MVT::i32 | |||
8666 | : VT.getScalarType(); | |||
8667 | SDValue Elt = DAG.getNode( | |||
8668 | ISD::EXTRACT_VECTOR_ELT, dl, SVT, | |||
8669 | ShuffleMask[OffElement] < (int)NumElts ? V1 : V2, | |||
8670 | DAG.getVectorIdxConstant(ShuffleMask[OffElement] % NumElts, dl)); | |||
8671 | return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, VInput, Elt, | |||
8672 | DAG.getVectorIdxConstant(OffElement % NumElts, dl)); | |||
8673 | } | |||
8674 | ||||
8675 | static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG, | |||
8676 | const ARMSubtarget *ST) { | |||
8677 | SDValue V1 = Op.getOperand(0); | |||
8678 | SDValue V2 = Op.getOperand(1); | |||
8679 | SDLoc dl(Op); | |||
8680 | EVT VT = Op.getValueType(); | |||
8681 | ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode()); | |||
8682 | unsigned EltSize = VT.getScalarSizeInBits(); | |||
8683 | ||||
8684 | if (ST->hasMVEIntegerOps() && EltSize == 1) | |||
8685 | return LowerVECTOR_SHUFFLE_i1(Op, DAG, ST); | |||
8686 | ||||
8687 | // Convert shuffles that are directly supported on NEON to target-specific | |||
8688 | // DAG nodes, instead of keeping them as shuffles and matching them again | |||
8689 | // during code selection. This is more efficient and avoids the possibility | |||
8690 | // of inconsistencies between legalization and selection. | |||
8691 | // FIXME: floating-point vectors should be canonicalized to integer vectors | |||
8692 | // of the same type so that they get CSEd properly. | |||
8693 | ArrayRef<int> ShuffleMask = SVN->getMask(); | |||
8694 | ||||
8695 | if (EltSize <= 32) { | |||
8696 | if (SVN->isSplat()) { | |||
8697 | int Lane = SVN->getSplatIndex(); | |||
8698 | // If this is an undef splat, generate it via "just" vdup, if possible. | |||
8699 | if (Lane == -1) Lane = 0; | |||
8700 | ||||
8701 | // Test if V1 is a SCALAR_TO_VECTOR. | |||
8702 | if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) { | |||
8703 | return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0)); | |||
8704 | } | |||
8705 | // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR | |||
8706 | // (and probably will turn into a SCALAR_TO_VECTOR once legalization | |||
8707 | // reaches it). | |||
8708 | if (Lane == 0 && V1.getOpcode() == ISD::BUILD_VECTOR && | |||
8709 | !isa<ConstantSDNode>(V1.getOperand(0))) { | |||
8710 | bool IsScalarToVector = true; | |||
8711 | for (unsigned i = 1, e = V1.getNumOperands(); i != e; ++i) | |||
8712 | if (!V1.getOperand(i).isUndef()) { | |||
8713 | IsScalarToVector = false; | |||
8714 | break; | |||
8715 | } | |||
8716 | if (IsScalarToVector) | |||
8717 | return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0)); | |||
8718 | } | |||
8719 | return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1, | |||
8720 | DAG.getConstant(Lane, dl, MVT::i32)); | |||
8721 | } | |||
8722 | ||||
8723 | bool ReverseVEXT = false; | |||
8724 | unsigned Imm = 0; | |||
8725 | if (ST->hasNEON() && isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) { | |||
8726 | if (ReverseVEXT) | |||
8727 | std::swap(V1, V2); | |||
8728 | return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2, | |||
8729 | DAG.getConstant(Imm, dl, MVT::i32)); | |||
8730 | } | |||
8731 | ||||
8732 | if (isVREVMask(ShuffleMask, VT, 64)) | |||
8733 | return DAG.getNode(ARMISD::VREV64, dl, VT, V1); | |||
8734 | if (isVREVMask(ShuffleMask, VT, 32)) | |||
8735 | return DAG.getNode(ARMISD::VREV32, dl, VT, V1); | |||
8736 | if (isVREVMask(ShuffleMask, VT, 16)) | |||
8737 | return DAG.getNode(ARMISD::VREV16, dl, VT, V1); | |||
8738 | ||||
8739 | if (ST->hasNEON() && V2->isUndef() && isSingletonVEXTMask(ShuffleMask, VT, Imm)) { | |||
8740 | return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V1, | |||
8741 | DAG.getConstant(Imm, dl, MVT::i32)); | |||
8742 | } | |||
8743 | ||||
8744 | // Check for Neon shuffles that modify both input vectors in place. | |||
8745 | // If both results are used, i.e., if there are two shuffles with the same | |||
8746 | // source operands and with masks corresponding to both results of one of | |||
8747 | // these operations, DAG memoization will ensure that a single node is | |||
8748 | // used for both shuffles. | |||
8749 | unsigned WhichResult = 0; | |||
8750 | bool isV_UNDEF = false; | |||
8751 | if (ST->hasNEON()) { | |||
8752 | if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask( | |||
8753 | ShuffleMask, VT, WhichResult, isV_UNDEF)) { | |||
8754 | if (isV_UNDEF) | |||
8755 | V2 = V1; | |||
8756 | return DAG.getNode(ShuffleOpc, dl, DAG.getVTList(VT, VT), V1, V2) | |||
8757 | .getValue(WhichResult); | |||
8758 | } | |||
8759 | } | |||
8760 | if (ST->hasMVEIntegerOps()) { | |||
8761 | if (isVMOVNMask(ShuffleMask, VT, false, false)) | |||
8762 | return DAG.getNode(ARMISD::VMOVN, dl, VT, V2, V1, | |||
8763 | DAG.getConstant(0, dl, MVT::i32)); | |||
8764 | if (isVMOVNMask(ShuffleMask, VT, true, false)) | |||
8765 | return DAG.getNode(ARMISD::VMOVN, dl, VT, V1, V2, | |||
8766 | DAG.getConstant(1, dl, MVT::i32)); | |||
8767 | if (isVMOVNMask(ShuffleMask, VT, true, true)) | |||
8768 | return DAG.getNode(ARMISD::VMOVN, dl, VT, V1, V1, | |||
8769 | DAG.getConstant(1, dl, MVT::i32)); | |||
8770 | } | |||
8771 | ||||
8772 | // Also check for these shuffles through CONCAT_VECTORS: we canonicalize | |||
8773 | // shuffles that produce a result larger than their operands with: | |||
8774 | // shuffle(concat(v1, undef), concat(v2, undef)) | |||
8775 | // -> | |||
8776 | // shuffle(concat(v1, v2), undef) | |||
8777 | // because we can access quad vectors (see PerformVECTOR_SHUFFLECombine). | |||
8778 | // | |||
8779 | // This is useful in the general case, but there are special cases where | |||
8780 | // native shuffles produce larger results: the two-result ops. | |||
8781 | // | |||
8782 | // Look through the concat when lowering them: | |||
8783 | // shuffle(concat(v1, v2), undef) | |||
8784 | // -> | |||
8785 | // concat(VZIP(v1, v2):0, :1) | |||
8786 | // | |||
8787 | if (ST->hasNEON() && V1->getOpcode() == ISD::CONCAT_VECTORS && V2->isUndef()) { | |||
8788 | SDValue SubV1 = V1->getOperand(0); | |||
8789 | SDValue SubV2 = V1->getOperand(1); | |||
8790 | EVT SubVT = SubV1.getValueType(); | |||
8791 | ||||
8792 | // We expect these to have been canonicalized to -1. | |||
8793 | assert(llvm::all_of(ShuffleMask, [&](int i) { | |||
8794 | return i < (int)VT.getVectorNumElements(); | |||
8795 | }) && "Unexpected shuffle index into UNDEF operand!"); | |||
8796 | ||||
8797 | if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask( | |||
8798 | ShuffleMask, SubVT, WhichResult, isV_UNDEF)) { | |||
8799 | if (isV_UNDEF) | |||
8800 | SubV2 = SubV1; | |||
8801 | assert((WhichResult == 0) && | |||
8802 | "In-place shuffle of concat can only have one result!"); | |||
8803 | SDValue Res = DAG.getNode(ShuffleOpc, dl, DAG.getVTList(SubVT, SubVT), | |||
8804 | SubV1, SubV2); | |||
8805 | return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Res.getValue(0), | |||
8806 | Res.getValue(1)); | |||
8807 | } | |||
8808 | } | |||
8809 | } | |||
8810 | ||||
8811 | if (ST->hasMVEIntegerOps() && EltSize <= 32) | |||
8812 | if (SDValue V = LowerVECTOR_SHUFFLEUsingOneOff(Op, ShuffleMask, DAG)) | |||
8813 | return V; | |||
8814 | ||||
8815 | // If the shuffle is not directly supported and it has 4 elements, use | |||
8816 | // the PerfectShuffle-generated table to synthesize it from other shuffles. | |||
8817 | unsigned NumElts = VT.getVectorNumElements(); | |||
8818 | if (NumElts == 4) { | |||
8819 | unsigned PFIndexes[4]; | |||
8820 | for (unsigned i = 0; i != 4; ++i) { | |||
8821 | if (ShuffleMask[i] < 0) | |||
8822 | PFIndexes[i] = 8; | |||
8823 | else | |||
8824 | PFIndexes[i] = ShuffleMask[i]; | |||
8825 | } | |||
8826 | ||||
8827 | // Compute the index in the perfect shuffle table. | |||
8828 | unsigned PFTableIndex = | |||
8829 | PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; | |||
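| // For example, the mask <0,2,4,6> yields 0*729 + 2*81 + 4*9 + 6 = 204; | |||
| // each lane index is a base-9 digit, with 8 standing for a "don't care" | |||
| // (undef) lane. | |||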
8830 | unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; | |||
8831 | unsigned Cost = (PFEntry >> 30); | |||
8832 | ||||
8833 | if (Cost <= 4) { | |||
8834 | if (ST->hasNEON()) | |||
8835 | return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); | |||
8836 | else if (isLegalMVEShuffleOp(PFEntry)) { | |||
8837 | unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); | |||
8838 | unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); | |||
8839 | unsigned PFEntryLHS = PerfectShuffleTable[LHSID]; | |||
8840 | unsigned PFEntryRHS = PerfectShuffleTable[RHSID]; | |||
8841 | if (isLegalMVEShuffleOp(PFEntryLHS) && isLegalMVEShuffleOp(PFEntryRHS)) | |||
8842 | return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); | |||
8843 | } | |||
8844 | } | |||
8845 | } | |||
8846 | ||||
8847 | // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs. | |||
8848 | if (EltSize >= 32) { | |||
8849 | // Do the expansion with floating-point types, since that is what the VFP | |||
8850 | // registers are defined to use, and since i64 is not legal. | |||
8851 | EVT EltVT = EVT::getFloatingPointVT(EltSize); | |||
8852 | EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); | |||
8853 | V1 = DAG.getNode(ISD::BITCAST, dl, VecVT, V1); | |||
8854 | V2 = DAG.getNode(ISD::BITCAST, dl, VecVT, V2); | |||
8855 | SmallVector<SDValue, 8> Ops; | |||
8856 | for (unsigned i = 0; i < NumElts; ++i) { | |||
8857 | if (ShuffleMask[i] < 0) | |||
8858 | Ops.push_back(DAG.getUNDEF(EltVT)); | |||
8859 | else | |||
8860 | Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, | |||
8861 | ShuffleMask[i] < (int)NumElts ? V1 : V2, | |||
8862 | DAG.getConstant(ShuffleMask[i] & (NumElts-1), | |||
8863 | dl, MVT::i32))); | |||
8864 | } | |||
8865 | SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops); | |||
8866 | return DAG.getNode(ISD::BITCAST, dl, VT, Val); | |||
8867 | } | |||
8868 | ||||
8869 | if ((VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v16i8) && | |||
8870 | isReverseMask(ShuffleMask, VT)) | |||
8871 | return LowerReverse_VECTOR_SHUFFLE(Op, DAG); | |||
8872 | ||||
8873 | if (ST->hasNEON() && VT == MVT::v8i8) | |||
8874 | if (SDValue NewOp = LowerVECTOR_SHUFFLEv8i8(Op, ShuffleMask, DAG)) | |||
8875 | return NewOp; | |||
8876 | ||||
8877 | if (ST->hasMVEIntegerOps()) | |||
8878 | if (SDValue NewOp = LowerVECTOR_SHUFFLEUsingMovs(Op, ShuffleMask, DAG)) | |||
8879 | return NewOp; | |||
8880 | ||||
8881 | return SDValue(); | |||
8882 | } | |||
8883 | ||||
8884 | static SDValue LowerINSERT_VECTOR_ELT_i1(SDValue Op, SelectionDAG &DAG, | |||
8885 | const ARMSubtarget *ST) { | |||
8886 | EVT VecVT = Op.getOperand(0).getValueType(); | |||
8887 | SDLoc dl(Op); | |||
8888 | ||||
8889 | assert(ST->hasMVEIntegerOps() && | |||
8890 | "LowerINSERT_VECTOR_ELT_i1 called without MVE!"); | |||
8891 | ||||
8892 | SDValue Conv = | |||
8893 | DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::i32, Op->getOperand(0)); | |||
8894 | unsigned Lane = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); | |||
8895 | unsigned LaneWidth = | |||
8896 | getVectorTyFromPredicateVector(VecVT).getScalarSizeInBits() / 8; | |||
8897 | unsigned Mask = ((1 << LaneWidth) - 1) << Lane * LaneWidth; | |||
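| // For example, inserting into lane 3 of a v8i1 (LaneWidth 2, since the | |||
| // equivalent v8i16 has 2-byte lanes) gives Mask = 0b11 << 6, so the BFI | |||
| // below rewrites exactly the two predicate bits belonging to that lane. | |||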
8898 | SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::i32, | |||
8899 | Op.getOperand(1), DAG.getValueType(MVT::i1)); | |||
8900 | SDValue BFI = DAG.getNode(ARMISD::BFI, dl, MVT::i32, Conv, Ext, | |||
8901 | DAG.getConstant(~Mask, dl, MVT::i32)); | |||
8902 | return DAG.getNode(ARMISD::PREDICATE_CAST, dl, Op.getValueType(), BFI); | |||
8903 | } | |||
8904 | ||||
8905 | SDValue ARMTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, | |||
8906 | SelectionDAG &DAG) const { | |||
8907 | // INSERT_VECTOR_ELT is legal only for immediate indexes. | |||
8908 | SDValue Lane = Op.getOperand(2); | |||
8909 | if (!isa<ConstantSDNode>(Lane)) | |||
8910 | return SDValue(); | |||
8911 | ||||
8912 | SDValue Elt = Op.getOperand(1); | |||
8913 | EVT EltVT = Elt.getValueType(); | |||
8914 | ||||
8915 | if (Subtarget->hasMVEIntegerOps() && | |||
8916 | Op.getValueType().getScalarSizeInBits() == 1) | |||
8917 | return LowerINSERT_VECTOR_ELT_i1(Op, DAG, Subtarget); | |||
8918 | ||||
8919 | if (getTypeAction(*DAG.getContext(), EltVT) == | |||
8920 | TargetLowering::TypePromoteFloat) { | |||
8921 | // INSERT_VECTOR_ELT doesn't want f16 operands promoting to f32, | |||
8922 | // but the type system will try to do that if we don't intervene. | |||
8923 | // Reinterpret any such vector-element insertion as one with the | |||
8924 | // corresponding integer types. | |||
8925 | ||||
8926 | SDLoc dl(Op); | |||
8927 | ||||
8928 | EVT IEltVT = MVT::getIntegerVT(EltVT.getScalarSizeInBits()); | |||
8929 | assert(getTypeAction(*DAG.getContext(), IEltVT) != | |||
8930 | TargetLowering::TypePromoteFloat); | |||
8931 | ||||
8932 | SDValue VecIn = Op.getOperand(0); | |||
8933 | EVT VecVT = VecIn.getValueType(); | |||
8934 | EVT IVecVT = EVT::getVectorVT(*DAG.getContext(), IEltVT, | |||
8935 | VecVT.getVectorNumElements()); | |||
8936 | ||||
8937 | SDValue IElt = DAG.getNode(ISD::BITCAST, dl, IEltVT, Elt); | |||
8938 | SDValue IVecIn = DAG.getNode(ISD::BITCAST, dl, IVecVT, VecIn); | |||
8939 | SDValue IVecOut = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, IVecVT, | |||
8940 | IVecIn, IElt, Lane); | |||
8941 | return DAG.getNode(ISD::BITCAST, dl, VecVT, IVecOut); | |||
8942 | } | |||
8943 | ||||
8944 | return Op; | |||
8945 | } | |||
8946 | ||||
8947 | static SDValue LowerEXTRACT_VECTOR_ELT_i1(SDValue Op, SelectionDAG &DAG, | |||
8948 | const ARMSubtarget *ST) { | |||
8949 | EVT VecVT = Op.getOperand(0).getValueType(); | |||
8950 | SDLoc dl(Op); | |||
8951 | ||||
8952 | assert(ST->hasMVEIntegerOps() && | |||
8953 | "LowerEXTRACT_VECTOR_ELT_i1 called without MVE!"); | |||
8954 | ||||
8955 | SDValue Conv = | |||
8956 | DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::i32, Op->getOperand(0)); | |||
8957 | unsigned Lane = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); | |||
8958 | unsigned LaneWidth = | |||
8959 | getVectorTyFromPredicateVector(VecVT).getScalarSizeInBits() / 8; | |||
8960 | SDValue Shift = DAG.getNode(ISD::SRL, dl, MVT::i32, Conv, | |||
8961 | DAG.getConstant(Lane * LaneWidth, dl, MVT::i32)); | |||
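| // For example, lane 2 of a v4i1 (LaneWidth 4) shifts the predicate word | |||
| // right by 8, leaving that lane's bits at the bottom of the result. | |||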
8962 | return Shift; | |||
8963 | } | |||
8964 | ||||
8965 | static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG, | |||
8966 | const ARMSubtarget *ST) { | |||
8967 | // EXTRACT_VECTOR_ELT is legal only for immediate indexes. | |||
8968 | SDValue Lane = Op.getOperand(1); | |||
8969 | if (!isa<ConstantSDNode>(Lane)) | |||
8970 | return SDValue(); | |||
8971 | ||||
8972 | SDValue Vec = Op.getOperand(0); | |||
8973 | EVT VT = Vec.getValueType(); | |||
8974 | ||||
8975 | if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == 1) | |||
8976 | return LowerEXTRACT_VECTOR_ELT_i1(Op, DAG, ST); | |||
8977 | ||||
8978 | if (Op.getValueType() == MVT::i32 && Vec.getScalarValueSizeInBits() < 32) { | |||
8979 | SDLoc dl(Op); | |||
8980 | return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane); | |||
8981 | } | |||
8982 | ||||
8983 | return Op; | |||
8984 | } | |||
8985 | ||||
8986 | static SDValue LowerCONCAT_VECTORS_i1(SDValue Op, SelectionDAG &DAG, | |||
8987 | const ARMSubtarget *ST) { | |||
8988 | SDLoc dl(Op); | |||
8989 | assert(Op.getValueType().getScalarSizeInBits() == 1 && | |||
8990 | "Unexpected custom CONCAT_VECTORS lowering"); | |||
8991 | assert(isPowerOf2_32(Op.getNumOperands()) && | |||
8992 | "Unexpected custom CONCAT_VECTORS lowering"); | |||
8993 | assert(ST->hasMVEIntegerOps() && | |||
8994 | "CONCAT_VECTORS lowering only supported for MVE"); | |||
8995 | ||||
8996 | auto ConcatPair = [&](SDValue V1, SDValue V2) { | |||
8997 | EVT Op1VT = V1.getValueType(); | |||
8998 | EVT Op2VT = V2.getValueType(); | |||
8999 | assert(Op1VT == Op2VT && "Operand types don't match!"); | |||
9000 | EVT VT = Op1VT.getDoubleNumVectorElementsVT(*DAG.getContext()); | |||
9001 | ||||
9002 | SDValue NewV1 = PromoteMVEPredVector(dl, V1, Op1VT, DAG); | |||
9003 | SDValue NewV2 = PromoteMVEPredVector(dl, V2, Op2VT, DAG); | |||
9004 | ||||
9005 | // We now have Op1 + Op2 promoted to vectors of integers, where v8i1 gets | |||
9006 | // promoted to v8i16, etc. | |||
9007 | MVT ElType = | |||
9008 | getVectorTyFromPredicateVector(VT).getScalarType().getSimpleVT(); | |||
9009 | unsigned NumElts = 2 * Op1VT.getVectorNumElements(); | |||
9010 | ||||
9011 | // Extract the vector elements from Op1 and Op2 one by one and truncate them | |||
9012 | // to be the right size for the destination. For example, if Op1 is v4i1 | |||
9013 | // then the promoted vector is v4i32. The result of concatenation gives a | |||
9014 | // v8i1, which when promoted is v8i16. That means each i32 element from Op1 | |||
9015 | // needs truncating to i16 and inserting in the result. | |||
9016 | EVT ConcatVT = MVT::getVectorVT(ElType, NumElts); | |||
9017 | SDValue ConVec = DAG.getNode(ISD::UNDEF, dl, ConcatVT); | |||
9018 | auto ExtractInto = [&DAG, &dl](SDValue NewV, SDValue ConVec, unsigned &j) { | |||
9019 | EVT NewVT = NewV.getValueType(); | |||
9020 | EVT ConcatVT = ConVec.getValueType(); | |||
9021 | for (unsigned i = 0, e = NewVT.getVectorNumElements(); i < e; i++, j++) { | |||
9022 | SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, NewV, | |||
9023 | DAG.getIntPtrConstant(i, dl)); | |||
9024 | ConVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ConcatVT, ConVec, Elt, | |||
9025 | DAG.getConstant(j, dl, MVT::i32)); | |||
9026 | } | |||
9027 | return ConVec; | |||
9028 | }; | |||
9029 | unsigned j = 0; | |||
9030 | ConVec = ExtractInto(NewV1, ConVec, j); | |||
9031 | ConVec = ExtractInto(NewV2, ConVec, j); | |||
9032 | ||||
9033 | // Now return the result of comparing the subvector with zero, which will | |||
9034 | // generate a real predicate, i.e. v4i1, v8i1 or v16i1. For a v2i1 we | |||
9035 | // convert to a v4i1 compare to fill in the two halves of the i64 as i32s. | |||
9036 | if (VT == MVT::v2i1) { | |||
9037 | SDValue BC = DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, MVT::v4i32, ConVec); | |||
9038 | SDValue Cmp = DAG.getNode(ARMISD::VCMPZ, dl, MVT::v4i1, BC, | |||
9039 | DAG.getConstant(ARMCC::NE, dl, MVT::i32)); | |||
9040 | return DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::v2i1, Cmp); | |||
9041 | } | |||
9042 | return DAG.getNode(ARMISD::VCMPZ, dl, VT, ConVec, | |||
9043 | DAG.getConstant(ARMCC::NE, dl, MVT::i32)); | |||
9044 | }; | |||
9045 | ||||
9046 | // Concat each pair of subvectors and pack into the lower half of the array. | |||
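| // For example, four v4i1 operands become two v8i1 values on the first | |||
| // pass and a single v16i1 on the second. | |||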
9047 | SmallVector<SDValue> ConcatOps(Op->op_begin(), Op->op_end()); | |||
9048 | while (ConcatOps.size() > 1) { | |||
9049 | for (unsigned I = 0, E = ConcatOps.size(); I != E; I += 2) { | |||
9050 | SDValue V1 = ConcatOps[I]; | |||
9051 | SDValue V2 = ConcatOps[I + 1]; | |||
9052 | ConcatOps[I / 2] = ConcatPair(V1, V2); | |||
9053 | } | |||
9054 | ConcatOps.resize(ConcatOps.size() / 2); | |||
9055 | } | |||
9056 | return ConcatOps[0]; | |||
9057 | } | |||
9058 | ||||
9059 | static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG, | |||
9060 | const ARMSubtarget *ST) { | |||
9061 | EVT VT = Op->getValueType(0); | |||
9062 | if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == 1) | |||
9063 | return LowerCONCAT_VECTORS_i1(Op, DAG, ST); | |||
9064 | ||||
9065 | // The only time a CONCAT_VECTORS operation can have legal types is when | |||
9066 | // two 64-bit vectors are concatenated to a 128-bit vector. | |||
9067 | assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 && | |||
9068 | "unexpected CONCAT_VECTORS"); | |||
9069 | SDLoc dl(Op); | |||
9070 | SDValue Val = DAG.getUNDEF(MVT::v2f64); | |||
9071 | SDValue Op0 = Op.getOperand(0); | |||
9072 | SDValue Op1 = Op.getOperand(1); | |||
9073 | if (!Op0.isUndef()) | |||
9074 | Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, | |||
9075 | DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0), | |||
9076 | DAG.getIntPtrConstant(0, dl)); | |||
9077 | if (!Op1.isUndef()) | |||
9078 | Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, | |||
9079 | DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1), | |||
9080 | DAG.getIntPtrConstant(1, dl)); | |||
9081 | return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Val); | |||
9082 | } | |||
9083 | ||||
9084 | static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG, | |||
9085 | const ARMSubtarget *ST) { | |||
9086 | SDValue V1 = Op.getOperand(0); | |||
9087 | SDValue V2 = Op.getOperand(1); | |||
9088 | SDLoc dl(Op); | |||
9089 | EVT VT = Op.getValueType(); | |||
9090 | EVT Op1VT = V1.getValueType(); | |||
9091 | unsigned NumElts = VT.getVectorNumElements(); | |||
9092 | unsigned Index = cast<ConstantSDNode>(V2)->getZExtValue(); | |||
9093 | ||||
9094 | assert(VT.getScalarSizeInBits() == 1 && | |||
9095 | "Unexpected custom EXTRACT_SUBVECTOR lowering"); | |||
9096 | assert(ST->hasMVEIntegerOps() && | |||
9097 | "EXTRACT_SUBVECTOR lowering only supported for MVE"); | |||
9098 | ||||
9099 | SDValue NewV1 = PromoteMVEPredVector(dl, V1, Op1VT, DAG); | |||
9100 | ||||
9101 | // We now have Op1 promoted to a vector of integers, where v8i1 gets | |||
9102 | // promoted to v8i16, etc. | |||
9103 | ||||
9104 | MVT ElType = getVectorTyFromPredicateVector(VT).getScalarType().getSimpleVT(); | |||
9105 | ||||
9106 | if (NumElts == 2) { | |||
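| // A v2i1 result is built as a v4i1 with each element duplicated into two | |||
| // adjacent i32 lanes, compared against zero, and then PREDICATE_CAST back | |||
| // down to v2i1, similar to the v2i1 handling in LowerCONCAT_VECTORS_i1. | |||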
9107 | EVT SubVT = MVT::v4i32; | |||
9108 | SDValue SubVec = DAG.getNode(ISD::UNDEF, dl, SubVT); | |||
9109 | for (unsigned i = Index, j = 0; i < (Index + NumElts); i++, j += 2) { | |||
9110 | SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, NewV1, | |||
9111 | DAG.getIntPtrConstant(i, dl)); | |||
9112 | SubVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, SubVT, SubVec, Elt, | |||
9113 | DAG.getConstant(j, dl, MVT::i32)); | |||
9114 | SubVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, SubVT, SubVec, Elt, | |||
9115 | DAG.getConstant(j + 1, dl, MVT::i32)); | |||
9116 | } | |||
9117 | SDValue Cmp = DAG.getNode(ARMISD::VCMPZ, dl, MVT::v4i1, SubVec, | |||
9118 | DAG.getConstant(ARMCC::NE, dl, MVT::i32)); | |||
9119 | return DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::v2i1, Cmp); | |||
9120 | } | |||
9121 | ||||
9122 | EVT SubVT = MVT::getVectorVT(ElType, NumElts); | |||
9123 | SDValue SubVec = DAG.getNode(ISD::UNDEF, dl, SubVT); | |||
9124 | for (unsigned i = Index, j = 0; i < (Index + NumElts); i++, j++) { | |||
9125 | SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, NewV1, | |||
9126 | DAG.getIntPtrConstant(i, dl)); | |||
9127 | SubVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, SubVT, SubVec, Elt, | |||
9128 | DAG.getConstant(j, dl, MVT::i32)); | |||
9129 | } | |||
9130 | ||||
9131 | // Now return the result of comparing the subvector with zero, | |||
9132 | // which will generate a real predicate, i.e. v4i1, v8i1 or v16i1. | |||
9133 | return DAG.getNode(ARMISD::VCMPZ, dl, VT, SubVec, | |||
9134 | DAG.getConstant(ARMCC::NE, dl, MVT::i32)); | |||
9135 | } | |||
9136 | ||||
9137 | // Turn a truncate into a predicate (an i1 vector) into icmp(and(x, 1), 0). | |||
9138 | static SDValue LowerTruncatei1(SDNode *N, SelectionDAG &DAG, | |||
9139 | const ARMSubtarget *ST) { | |||
9140 | assert(ST->hasMVEIntegerOps() && "Expected MVE!"); | |||
9141 | EVT VT = N->getValueType(0); | |||
9142 | assert((VT == MVT::v16i1 || VT == MVT::v8i1 || VT == MVT::v4i1) && | |||
9143 | "Expected a vector i1 type!"); | |||
9144 | SDValue Op = N->getOperand(0); | |||
9145 | EVT FromVT = Op.getValueType(); | |||
9146 | SDLoc DL(N); | |||
9147 | ||||
9148 | SDValue And = | |||
9149 | DAG.getNode(ISD::AND, DL, FromVT, Op, DAG.getConstant(1, DL, FromVT)); | |||
9150 | return DAG.getNode(ISD::SETCC, DL, VT, And, DAG.getConstant(0, DL, FromVT), | |||
9151 | DAG.getCondCode(ISD::SETNE)); | |||
9152 | } | |||
9153 | ||||
9154 | static SDValue LowerTruncate(SDNode *N, SelectionDAG &DAG, | |||
9155 | const ARMSubtarget *Subtarget) { | |||
9156 | if (!Subtarget->hasMVEIntegerOps()) | |||
9157 | return SDValue(); | |||
9158 | ||||
9159 | EVT ToVT = N->getValueType(0); | |||
9160 | if (ToVT.getScalarType() == MVT::i1) | |||
9161 | return LowerTruncatei1(N, DAG, Subtarget); | |||
9162 | ||||
9163 | // MVE does not have a single instruction to perform the truncation of a v4i32 | |||
9164 | // into the lower half of a v8i16, in the same way that a NEON vmovn would. | |||
9165 | // Most of the instructions in MVE follow the 'Beats' system, where moving | |||
9166 | // values from different lanes is usually something that the instructions | |||
9167 | // avoid. | |||
9168 | // | |||
9169 | // Instead it has top/bottom instructions such as VMOVLT/B and VMOVNT/B, | |||
9170 | // which take the top/bottom half of a larger lane and extend it (or do the | |||
9171 | // opposite, truncating into the top/bottom lane from a larger lane). Note | |||
9172 | // that because of the way we widen lanes, a v4i16 is really a v4i32 using the | |||
9173 | // bottom 16 bits of each vector lane. This works really well with T/B | |||
9174 | // instructions, but it doesn't extend to v8i32->v8i16, where the lanes need | |||
9175 | // to change order. | |||
9176 | // | |||
9177 | // But truncates and sext/zext are always going to be fairly common in LLVM IR. | |||
9178 | // We have several options for how to deal with them: | |||
9179 | // - Wherever possible combine them into an instruction that makes them | |||
9180 | // "free". This includes loads/stores, which can perform the trunc as part | |||
9181 | // of the memory operation. Or certain shuffles that can be turned into | |||
9182 | // VMOVN/VMOVL. | |||
9183 | // - Lane Interleaving to transform blocks surrounded by ext/trunc. So | |||
9184 | // trunc(mul(sext(a), sext(b))) may become | |||
9185 | // VMOVNT(VMUL(VMOVLB(a), VMOVLB(b)), VMUL(VMOVLT(a), VMOVLT(b))). (Which in | |||
9186 | // this case can use VMULL). This is performed in the | |||
9187 | // MVELaneInterleavingPass. | |||
9188 | // - Otherwise we have an option. By default we would expand the | |||
9189 | // zext/sext/trunc into a series of lane extract/inserts going via GPR | |||
9190 | // registers, one per vector lane. This can obviously be very | |||
9191 | // expensive. | |||
9192 | // - The other option is to use the fact that loads/store can extend/truncate | |||
9193 | // to turn a trunc into two truncating stack stores and a stack reload. This | |||
9194 | // becomes 3 back-to-back memory operations, but at least that is less than | |||
9195 | // all the insert/extracts. | |||
9196 | // | |||
9197 | // In order to do the last, we convert certain trunc's into MVETRUNC, which | |||
9198 | // are either optimized where they can be, or eventually lowered into stack | |||
9199 | // stores/loads. This prevents us from splitting a v8i16 trunc into two stores | |||
9200 | // too early, where other instructions would be better, and stops us from | |||
9201 | // having to reconstruct multiple buildvector shuffles into loads/stores. | |||
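| // For example, trunc(v8i32 x) to v8i16 becomes MVETRUNC(lo(x), hi(x)); if | |||
| // no better combine is found, that later expands to two truncating stack | |||
| // stores followed by a single full-width reload. | |||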
9202 | if (ToVT != MVT::v8i16 && ToVT != MVT::v16i8) | |||
9203 | return SDValue(); | |||
9204 | EVT FromVT = N->getOperand(0).getValueType(); | |||
9205 | if (FromVT != MVT::v8i32 && FromVT != MVT::v16i16) | |||
9206 | return SDValue(); | |||
9207 | ||||
9208 | SDValue Lo, Hi; | |||
9209 | std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0); | |||
9210 | SDLoc DL(N); | |||
9211 | return DAG.getNode(ARMISD::MVETRUNC, DL, ToVT, Lo, Hi); | |||
9212 | } | |||
9213 | ||||
9214 | static SDValue LowerVectorExtend(SDNode *N, SelectionDAG &DAG, | |||
9215 | const ARMSubtarget *Subtarget) { | |||
9216 | if (!Subtarget->hasMVEIntegerOps()) | |||
9217 | return SDValue(); | |||
9218 | ||||
9219 | // See LowerTruncate above for an explanation of MVEEXT/MVETRUNC. | |||
9220 | ||||
9221 | EVT ToVT = N->getValueType(0); | |||
9222 | if (ToVT != MVT::v16i32 && ToVT != MVT::v8i32 && ToVT != MVT::v16i16) | |||
9223 | return SDValue(); | |||
9224 | SDValue Op = N->getOperand(0); | |||
9225 | EVT FromVT = Op.getValueType(); | |||
9226 | if (FromVT != MVT::v8i16 && FromVT != MVT::v16i8) | |||
9227 | return SDValue(); | |||
9228 | ||||
9229 | SDLoc DL(N); | |||
9230 | EVT ExtVT = ToVT.getHalfNumVectorElementsVT(*DAG.getContext()); | |||
9231 | if (ToVT.getScalarType() == MVT::i32 && FromVT.getScalarType() == MVT::i8) | |||
9232 | ExtVT = MVT::v8i16; | |||
9233 | ||||
9234 | unsigned Opcode = | |||
9235 | N->getOpcode() == ISD::SIGN_EXTEND ? ARMISD::MVESEXT : ARMISD::MVEZEXT; | |||
9236 | SDValue Ext = DAG.getNode(Opcode, DL, DAG.getVTList(ExtVT, ExtVT), Op); | |||
9237 | SDValue Ext1 = Ext.getValue(1); | |||
9238 | ||||
9239 | if (ToVT.getScalarType() == MVT::i32 && FromVT.getScalarType() == MVT::i8) { | |||
9240 | Ext = DAG.getNode(N->getOpcode(), DL, MVT::v8i32, Ext); | |||
9241 | Ext1 = DAG.getNode(N->getOpcode(), DL, MVT::v8i32, Ext1); | |||
9242 | } | |||
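| // For example, a v16i8 -> v16i32 extend is done in two hops: MVEEXT | |||
| // produces two v8i16 halves, and each half is extended again (itself | |||
| // lowered to a further MVEEXT pair) before the final CONCAT_VECTORS. | |||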
9243 | ||||
9244 | return DAG.getNode(ISD::CONCAT_VECTORS, DL, ToVT, Ext, Ext1); | |||
9245 | } | |||
9246 | ||||
9247 | /// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each | |||
9248 | /// element has been zero/sign-extended, depending on the isSigned parameter, | |||
9249 | /// from an integer type half its size. | |||
9250 | static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG, | |||
9251 | bool isSigned) { | |||
9252 | // A v2i64 BUILD_VECTOR will have been legalized to a BITCAST from v4i32. | |||
9253 | EVT VT = N->getValueType(0); | |||
9254 | if (VT == MVT::v2i64 && N->getOpcode() == ISD::BITCAST) { | |||
9255 | SDNode *BVN = N->getOperand(0).getNode(); | |||
9256 | if (BVN->getValueType(0) != MVT::v4i32 || | |||
9257 | BVN->getOpcode() != ISD::BUILD_VECTOR) | |||
9258 | return false; | |||
9259 | unsigned LoElt = DAG.getDataLayout().isBigEndian() ? 1 : 0; | |||
9260 | unsigned HiElt = 1 - LoElt; | |||
9261 | ConstantSDNode *Lo0 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt)); | |||
9262 | ConstantSDNode *Hi0 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt)); | |||
9263 | ConstantSDNode *Lo1 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt+2)); | |||
9264 | ConstantSDNode *Hi1 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt+2)); | |||
9265 | if (!Lo0 || !Hi0 || !Lo1 || !Hi1) | |||
9266 | return false; | |||
9267 | if (isSigned) { | |||
9268 | if (Hi0->getSExtValue() == Lo0->getSExtValue() >> 32 && | |||
9269 | Hi1->getSExtValue() == Lo1->getSExtValue() >> 32) | |||
9270 | return true; | |||
9271 | } else { | |||
9272 | if (Hi0->isZero() && Hi1->isZero()) | |||
9273 | return true; | |||
9274 | } | |||
9275 | return false; | |||
9276 | } | |||
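| // For example, the little-endian v4i32 constant pair (lo=0x7fffffff, hi=0) | |||
| // encodes a v2i64 element that fits in 32 bits both signed and unsigned, | |||
| // so the multiply can still use the narrow VMULL form. | |||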
9277 | ||||
9278 | if (N->getOpcode() != ISD::BUILD_VECTOR) | |||
9279 | return false; | |||
9280 | ||||
9281 | for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { | |||
9282 | SDNode *Elt = N->getOperand(i).getNode(); | |||
9283 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) { | |||
9284 | unsigned EltSize = VT.getScalarSizeInBits(); | |||
9285 | unsigned HalfSize = EltSize / 2; | |||
9286 | if (isSigned) { | |||
9287 | if (!isIntN(HalfSize, C->getSExtValue())) | |||
9288 | return false; | |||
9289 | } else { | |||
9290 | if (!isUIntN(HalfSize, C->getZExtValue())) | |||
9291 | return false; | |||
9292 | } | |||
9293 | continue; | |||
9294 | } | |||
9295 | return false; | |||
9296 | } | |||
9297 | ||||
9298 | return true; | |||
9299 | } | |||
9300 | ||||
9301 | /// isSignExtended - Check if a node is a vector value that is sign-extended | |||
9302 | /// or a constant BUILD_VECTOR with sign-extended elements. | |||
9303 | static bool isSignExtended(SDNode *N, SelectionDAG &DAG) { | |||
9304 | if (N->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N)) | |||
9305 | return true; | |||
9306 | if (isExtendedBUILD_VECTOR(N, DAG, true)) | |||
9307 | return true; | |||
9308 | return false; | |||
9309 | } | |||
9310 | ||||
9311 | /// isZeroExtended - Check if a node is a vector value that is zero-extended (or | |||
9312 | /// any-extended) or a constant BUILD_VECTOR with zero-extended elements. | |||
9313 | static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) { | |||
9314 | if (N->getOpcode() == ISD::ZERO_EXTEND || N->getOpcode() == ISD::ANY_EXTEND || | |||
9315 | ISD::isZEXTLoad(N)) | |||
9316 | return true; | |||
9317 | if (isExtendedBUILD_VECTOR(N, DAG, false)) | |||
9318 | return true; | |||
9319 | return false; | |||
9320 | } | |||
9321 | ||||
9322 | static EVT getExtensionTo64Bits(const EVT &OrigVT) { | |||
9323 | if (OrigVT.getSizeInBits() >= 64) | |||
9324 | return OrigVT; | |||
9325 | ||||
9326 | assert(OrigVT.isSimple() && "Expecting a simple value type"); | |||
9327 | ||||
9328 | MVT::SimpleValueType OrigSimpleTy = OrigVT.getSimpleVT().SimpleTy; | |||
9329 | switch (OrigSimpleTy) { | |||
9330 | default: llvm_unreachable("Unexpected Vector Type"); | |||
9331 | case MVT::v2i8: | |||
9332 | case MVT::v2i16: | |||
9333 | return MVT::v2i32; | |||
9334 | case MVT::v4i8: | |||
9335 | return MVT::v4i16; | |||
9336 | } | |||
9337 | } | |||
9338 | ||||
9339 | /// AddRequiredExtensionForVMULL - Add a sign/zero extension to extend the total | |||
9340 | /// value size to 64 bits. We need a 64-bit D register as an operand to VMULL. | |||
9341 | /// We insert the required extension here to get the vector to fill a D register. | |||
9342 | static SDValue AddRequiredExtensionForVMULL(SDValue N, SelectionDAG &DAG, | |||
9343 | const EVT &OrigTy, | |||
9344 | const EVT &ExtTy, | |||
9345 | unsigned ExtOpcode) { | |||
9346 | // The vector originally had a size of OrigTy. It was then extended to ExtTy. | |||
9347 | // We expect the ExtTy to be 128-bits total. If the OrigTy is less than | |||
9348 | // 64-bits we need to insert a new extension so that it will be 64-bits. | |||
9349 | assert(ExtTy.is128BitVector() && "Unexpected extension size"); | |||
9350 | if (OrigTy.getSizeInBits() >= 64) | |||
9351 | return N; | |||
9352 | ||||
9353 | // Must extend size to at least 64 bits to be used as an operand for VMULL. | |||
9354 | EVT NewVT = getExtensionTo64Bits(OrigTy); | |||
9355 | ||||
9356 | return DAG.getNode(ExtOpcode, SDLoc(N), NewVT, N); | |||
9357 | } | |||
9358 | ||||
9359 | /// SkipLoadExtensionForVMULL - return a load of the original vector size that | |||
9360 | /// does not do any sign/zero extension. If the original vector is less | |||
9361 | /// than 64 bits, an appropriate extension will be added after the load to | |||
9362 | /// reach a total size of 64 bits. We have to add the extension separately | |||
9363 | /// because ARM does not have a sign/zero extending load for vectors. | |||
9364 | static SDValue SkipLoadExtensionForVMULL(LoadSDNode *LD, SelectionDAG& DAG) { | |||
9365 | EVT ExtendedTy = getExtensionTo64Bits(LD->getMemoryVT()); | |||
9366 | ||||
9367 | // The load already has the right type. | |||
9368 | if (ExtendedTy == LD->getMemoryVT()) | |||
9369 | return DAG.getLoad(LD->getMemoryVT(), SDLoc(LD), LD->getChain(), | |||
9370 | LD->getBasePtr(), LD->getPointerInfo(), | |||
9371 | LD->getAlignment(), LD->getMemOperand()->getFlags()); | |||
9372 | ||||
9373 | // We need to create a zextload/sextload. We cannot just create a load | |||
9374 | // followed by a zext/zext node because LowerMUL is also run during normal | |||
9375 | // operation legalization where we can't create illegal types. | |||
9376 | return DAG.getExtLoad(LD->getExtensionType(), SDLoc(LD), ExtendedTy, | |||
9377 | LD->getChain(), LD->getBasePtr(), LD->getPointerInfo(), | |||
9378 | LD->getMemoryVT(), LD->getAlignment(), | |||
9379 | LD->getMemOperand()->getFlags()); | |||
9380 | } | |||
9381 | ||||
9382 | /// SkipExtensionForVMULL - For a node that is a SIGN_EXTEND, ZERO_EXTEND, | |||
9383 | /// ANY_EXTEND, extending load, or BUILD_VECTOR with extended elements, return | |||
9384 | /// the unextended value. The unextended vector should be 64 bits so that it can | |||
9385 | /// be used as an operand to a VMULL instruction. If the original vector size | |||
9386 | /// before extension is less than 64 bits we add an extension to resize | |||
9387 | /// the vector to 64 bits. | |||
9388 | static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG) { | |||
9389 | if (N->getOpcode() == ISD::SIGN_EXTEND || | |||
9390 | N->getOpcode() == ISD::ZERO_EXTEND || N->getOpcode() == ISD::ANY_EXTEND) | |||
9391 | return AddRequiredExtensionForVMULL(N->getOperand(0), DAG, | |||
9392 | N->getOperand(0)->getValueType(0), | |||
9393 | N->getValueType(0), | |||
9394 | N->getOpcode()); | |||
9395 | ||||
9396 | if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { | |||
9397 | assert((ISD::isSEXTLoad(LD) || ISD::isZEXTLoad(LD)) && | |||
9398 | "Expected extending load"); | |||
9399 | ||||
9400 | SDValue newLoad = SkipLoadExtensionForVMULL(LD, DAG); | |||
9401 | DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), newLoad.getValue(1)); | |||
9402 | unsigned Opcode = ISD::isSEXTLoad(LD) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; | |||
9403 | SDValue extLoad = | |||
9404 | DAG.getNode(Opcode, SDLoc(newLoad), LD->getValueType(0), newLoad); | |||
9405 | DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 0), extLoad); | |||
9406 | ||||
9407 | return newLoad; | |||
9408 | } | |||
9409 | ||||
9410 | // Otherwise, the value must be a BUILD_VECTOR. For v2i64, it will | |||
9411 | // have been legalized as a BITCAST from v4i32. | |||
9412 | if (N->getOpcode() == ISD::BITCAST) { | |||
9413 | SDNode *BVN = N->getOperand(0).getNode(); | |||
9414 | assert(BVN->getOpcode() == ISD::BUILD_VECTOR && | |||
9415 | BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR"); | |||
9416 | unsigned LowElt = DAG.getDataLayout().isBigEndian() ? 1 : 0; | |||
9417 | return DAG.getBuildVector( | |||
9418 | MVT::v2i32, SDLoc(N), | |||
9419 | {BVN->getOperand(LowElt), BVN->getOperand(LowElt + 2)}); | |||
9420 | } | |||
9421 | // Construct a new BUILD_VECTOR with elements truncated to half the size. | |||
9422 | assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR"); | |||
9423 | EVT VT = N->getValueType(0); | |||
9424 | unsigned EltSize = VT.getScalarSizeInBits() / 2; | |||
9425 | unsigned NumElts = VT.getVectorNumElements(); | |||
9426 | MVT TruncVT = MVT::getIntegerVT(EltSize); | |||
9427 | SmallVector<SDValue, 8> Ops; | |||
9428 | SDLoc dl(N); | |||
9429 | for (unsigned i = 0; i != NumElts; ++i) { | |||
9430 | ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i)); | |||
9431 | const APInt &CInt = C->getAPIntValue(); | |||
9432 | // Element types smaller than 32 bits are not legal, so use i32 elements. | |||
9433 | // The values are implicitly truncated so sext vs. zext doesn't matter. | |||
9434 | Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), dl, MVT::i32)); | |||
9435 | } | |||
9436 | return DAG.getBuildVector(MVT::getVectorVT(TruncVT, NumElts), dl, Ops); | |||
9437 | } | |||
9438 | ||||
9439 | static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) { | |||
9440 | unsigned Opcode = N->getOpcode(); | |||
9441 | if (Opcode == ISD::ADD || Opcode == ISD::SUB) { | |||
9442 | SDNode *N0 = N->getOperand(0).getNode(); | |||
9443 | SDNode *N1 = N->getOperand(1).getNode(); | |||
9444 | return N0->hasOneUse() && N1->hasOneUse() && | |||
9445 | isSignExtended(N0, DAG) && isSignExtended(N1, DAG); | |||
9446 | } | |||
9447 | return false; | |||
9448 | } | |||
9449 | ||||
9450 | static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) { | |||
9451 | unsigned Opcode = N->getOpcode(); | |||
9452 | if (Opcode == ISD::ADD || Opcode == ISD::SUB) { | |||
9453 | SDNode *N0 = N->getOperand(0).getNode(); | |||
9454 | SDNode *N1 = N->getOperand(1).getNode(); | |||
9455 | return N0->hasOneUse() && N1->hasOneUse() && | |||
9456 | isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG); | |||
9457 | } | |||
9458 | return false; | |||
9459 | } | |||
9460 | ||||
9461 | static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) { | |||
9462 | // Multiplications are only custom-lowered for 128-bit vectors so that | |||
9463 | // VMULL can be detected. Otherwise v2i64 multiplications are not legal. | |||
9464 | EVT VT = Op.getValueType(); | |||
9465 | assert(VT.is128BitVector() && VT.isInteger() && | |||
9466 | "unexpected type for custom-lowering ISD::MUL"); | |||
9467 | SDNode *N0 = Op.getOperand(0).getNode(); | |||
9468 | SDNode *N1 = Op.getOperand(1).getNode(); | |||
9469 | unsigned NewOpc = 0; | |||
9470 | bool isMLA = false; | |||
9471 | bool isN0SExt = isSignExtended(N0, DAG); | |||
9472 | bool isN1SExt = isSignExtended(N1, DAG); | |||
9473 | if (isN0SExt && isN1SExt) | |||
9474 | NewOpc = ARMISD::VMULLs; | |||
9475 | else { | |||
9476 | bool isN0ZExt = isZeroExtended(N0, DAG); | |||
9477 | bool isN1ZExt = isZeroExtended(N1, DAG); | |||
9478 | if (isN0ZExt && isN1ZExt) | |||
9479 | NewOpc = ARMISD::VMULLu; | |||
9480 | else if (isN1SExt || isN1ZExt) { | |||
9481 | // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these | |||
9482 | // into (s/zext A * s/zext C) + (s/zext B * s/zext C) | |||
9483 | if (isN1SExt && isAddSubSExt(N0, DAG)) { | |||
9484 | NewOpc = ARMISD::VMULLs; | |||
9485 | isMLA = true; | |||
9486 | } else if (isN1ZExt && isAddSubZExt(N0, DAG)) { | |||
9487 | NewOpc = ARMISD::VMULLu; | |||
9488 | isMLA = true; | |||
9489 | } else if (isN0ZExt && isAddSubZExt(N1, DAG)) { | |||
9490 | std::swap(N0, N1); | |||
9491 | NewOpc = ARMISD::VMULLu; | |||
9492 | isMLA = true; | |||
9493 | } | |||
9494 | } | |||
9495 | ||||
9496 | if (!NewOpc) { | |||
9497 | if (VT == MVT::v2i64) | |||
9498 | // Fall through to expand this. It is not legal. | |||
9499 | return SDValue(); | |||
9500 | else | |||
9501 | // Other vector multiplications are legal. | |||
9502 | return Op; | |||
9503 | } | |||
9504 | } | |||
9505 | ||||
9506 | // Legalize to a VMULL instruction. | |||
9507 | SDLoc DL(Op); | |||
9508 | SDValue Op0; | |||
9509 | SDValue Op1 = SkipExtensionForVMULL(N1, DAG); | |||
9510 | if (!isMLA) { | |||
9511 | Op0 = SkipExtensionForVMULL(N0, DAG); | |||
9512 | assert(Op0.getValueType().is64BitVector() && | |||
9513 | Op1.getValueType().is64BitVector() && | |||
9514 | "unexpected types for extended operands to VMULL"); | |||
9515 | return DAG.getNode(NewOpc, DL, VT, Op0, Op1); | |||
9516 | } | |||
9517 | ||||
9518 | // Optimizing (zext A + zext B) * C, to (VMULL A, C) + (VMULL B, C) during | |||
9519 | // isel lowering to take advantage of no-stall back to back vmul + vmla. | |||
9520 | // vmull q0, d4, d6 | |||
9521 | // vmlal q0, d5, d6 | |||
9522 | // is faster than | |||
9523 | // vaddl q0, d4, d5 | |||
9524 | // vmovl q1, d6 | |||
9525 | // vmul q0, q0, q1 | |||
9526 | SDValue N00 = SkipExtensionForVMULL(N0->getOperand(0).getNode(), DAG); | |||
9527 | SDValue N01 = SkipExtensionForVMULL(N0->getOperand(1).getNode(), DAG); | |||
9528 | EVT Op1VT = Op1.getValueType(); | |||
9529 | return DAG.getNode(N0->getOpcode(), DL, VT, | |||
9530 | DAG.getNode(NewOpc, DL, VT, | |||
9531 | DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1), | |||
9532 | DAG.getNode(NewOpc, DL, VT, | |||
9533 | DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1)); | |||
9534 | } | |||
9535 | ||||
9536 | static SDValue LowerSDIV_v4i8(SDValue X, SDValue Y, const SDLoc &dl, | |||
9537 | SelectionDAG &DAG) { | |||
9538 | // TODO: Should this propagate fast-math-flags? | |||
9539 | ||||
9540 | // Convert to float | |||
9541 | // float4 xf = vcvt_f32_s32(vmovl_s16(a.lo)); | |||
9542 | // float4 yf = vcvt_f32_s32(vmovl_s16(b.lo)); | |||
9543 | X = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, X); | |||
9544 | Y = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Y); | |||
9545 | X = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, X); | |||
9546 | Y = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, Y); | |||
9547 | // Get reciprocal estimate. | |||
9548 | // float4 recip = vrecpeq_f32(yf); | |||
9549 | Y = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, | |||
9550 | DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32), | |||
9551 | Y); | |||
9552 | // Because char has a smaller range than uchar, we can actually get away | |||
9553 | // without any Newton steps. This requires that we use a weird bias | |||
9554 | // of 0xb000, however (again, this has been exhaustively tested). | |||
9555 | // float4 result = as_float4(as_int4(xf*recip) + 0xb000); | |||
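| // Roughly speaking, adding 0xb000 to the integer representation nudges | |||
| // the product upward in ulp space, compensating for the low reciprocal | |||
| // estimate so the FP_TO_SINT truncation below yields the exact quotient; | |||
| // 8-bit inputs keep the error bounded without any Newton refinement. | |||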
9556 | X = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, X, Y); | |||
9557 | X = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, X); | |||
9558 | Y = DAG.getConstant(0xb000, dl, MVT::v4i32); | |||
9559 | X = DAG.getNode(ISD::ADD, dl, MVT::v4i32, X, Y); | |||
9560 | X = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, X); | |||
9561 | // Convert back to short. | |||
9562 | X = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, X); | |||
9563 | X = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, X); | |||
9564 | return X; | |||
9565 | } | |||
9566 | ||||
9567 | static SDValue LowerSDIV_v4i16(SDValue N0, SDValue N1, const SDLoc &dl, | |||
9568 | SelectionDAG &DAG) { | |||
9569 | // TODO: Should this propagate fast-math-flags? | |||
9570 | ||||
9571 | SDValue N2; | |||
9572 | // Convert to float. | |||
9573 | // float4 yf = vcvt_f32_s32(vmovl_s16(y)); | |||
9574 | // float4 xf = vcvt_f32_s32(vmovl_s16(x)); | |||
9575 | N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N0); | |||
9576 | N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N1); | |||
9577 | N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0); | |||
9578 | N1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1); | |||
9579 | ||||
9580 | // Use reciprocal estimate and one refinement step. | |||
9581 | // float4 recip = vrecpeq_f32(yf); | |||
9582 | // recip *= vrecpsq_f32(yf, recip); | |||
9583 | N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, | |||
9584 | DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32), | |||
9585 | N1); | |||
9586 | N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, | |||
9587 | DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32), | |||
9588 | N1, N2); | |||
9589 | N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2); | |||
9590 | // Because short has a smaller range than ushort, we can actually get away | |||
9591 | // with only a single Newton step. This requires that we use a weird bias | |||
9592 | // of 0x89, however (again, this has been exhaustively tested). | |||
9593 | // float4 result = as_float4(as_int4(xf*recip) + 0x89); | |||
9594 | N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2); | |||
9595 | N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0); | |||
9596 | N1 = DAG.getConstant(0x89, dl, MVT::v4i32); | |||
9597 | N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1); | |||
9598 | N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0); | |||
9599 | // Convert back to integer and return. | |||
9600 | // return vmovn_s32(vcvt_s32_f32(result)); | |||
9601 | N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0); | |||
9602 | N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0); | |||
9603 | return N0; | |||
9604 | } | |||
9605 | ||||
9606 | static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG, | |||
9607 | const ARMSubtarget *ST) { | |||
9608 | EVT VT = Op.getValueType(); | |||
9609 | assert((VT == MVT::v4i16 || VT == MVT::v8i8) && | |||
9610 | "unexpected type for custom-lowering ISD::SDIV"); | |||
9611 | ||||
9612 | SDLoc dl(Op); | |||
9613 | SDValue N0 = Op.getOperand(0); | |||
9614 | SDValue N1 = Op.getOperand(1); | |||
9615 | SDValue N2, N3; | |||
9616 | ||||
9617 | if (VT == MVT::v8i8) { | |||
9618 | N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N0); | |||
9619 | N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N1); | |||
9620 | ||||
9621 | N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, | |||
9622 | DAG.getIntPtrConstant(4, dl)); | |||
9623 | N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, | |||
9624 | DAG.getIntPtrConstant(4, dl)); | |||
9625 | N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, | |||
9626 | DAG.getIntPtrConstant(0, dl)); | |||
9627 | N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, | |||
9628 | DAG.getIntPtrConstant(0, dl)); | |||
9629 | ||||
9630 | N0 = LowerSDIV_v4i8(N0, N1, dl, DAG); // v4i16 | |||
9631 | N2 = LowerSDIV_v4i8(N2, N3, dl, DAG); // v4i16 | |||
9632 | ||||
9633 | N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2); | |||
9634 | N0 = LowerCONCAT_VECTORS(N0, DAG, ST); | |||
9635 | ||||
9636 | N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v8i8, N0); | |||
9637 | return N0; | |||
9638 | } | |||
9639 | return LowerSDIV_v4i16(N0, N1, dl, DAG); | |||
9640 | } | |||
9641 | ||||
9642 | static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG, | |||
9643 | const ARMSubtarget *ST) { | |||
9644 | // TODO: Should this propagate fast-math-flags? | |||
9645 | EVT VT = Op.getValueType(); | |||
9646 | assert((VT == MVT::v4i16 || VT == MVT::v8i8) && | |||
9647 | "unexpected type for custom-lowering ISD::UDIV"); | |||
9648 | ||||
9649 | SDLoc dl(Op); | |||
9650 | SDValue N0 = Op.getOperand(0); | |||
9651 | SDValue N1 = Op.getOperand(1); | |||
9652 | SDValue N2, N3; | |||
9653 | ||||
9654 | if (VT == MVT::v8i8) { | |||
9655 | N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N0); | |||
9656 | N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N1); | |||
9657 | ||||
9658 | N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, | |||
9659 | DAG.getIntPtrConstant(4, dl)); | |||
9660 | N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, | |||
9661 | DAG.getIntPtrConstant(4, dl)); | |||
9662 | N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, | |||
9663 | DAG.getIntPtrConstant(0, dl)); | |||
9664 | N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, | |||
9665 | DAG.getIntPtrConstant(0, dl)); | |||
9666 | ||||
9667 | N0 = LowerSDIV_v4i16(N0, N1, dl, DAG); // v4i16 | |||
9668 | N2 = LowerSDIV_v4i16(N2, N3, dl, DAG); // v4i16 | |||
9669 | ||||
9670 | N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2); | |||
9671 | N0 = LowerCONCAT_VECTORS(N0, DAG, ST); | |||
9672 | ||||
9673 | N0 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v8i8, | |||
9674 | DAG.getConstant(Intrinsic::arm_neon_vqmovnsu, dl, | |||
9675 | MVT::i32), | |||
9676 | N0); | |||
9677 | return N0; | |||
9678 | } | |||
9679 | ||||
9680 | // v4i16 sdiv ... Convert to float. | |||
9681 | // float4 yf = vcvt_f32_s32(vmovl_u16(y)); | |||
9682 | // float4 xf = vcvt_f32_s32(vmovl_u16(x)); | |||
9683 | N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N0); | |||
9684 | N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N1); | |||
9685 | N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0); | |||
9686 | SDValue BN1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1); | |||
9687 | ||||
9688 | // Use reciprocal estimate and two refinement steps. | |||
9689 | // float4 recip = vrecpeq_f32(yf); | |||
9690 | // recip *= vrecpsq_f32(yf, recip); | |||
9691 | // recip *= vrecpsq_f32(yf, recip); | |||
9692 | N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, | |||
9693 | DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32), | |||
9694 | BN1); | |||
9695 | N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, | |||
9696 | DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32), | |||
9697 | BN1, N2); | |||
9698 | N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2); | |||
9699 | N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, | |||
9700 | DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32), | |||
9701 | BN1, N2); | |||
9702 | N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2); | |||
9703 | // Simply multiplying by the reciprocal estimate can leave us a few ulps | |||
9704 | // too low, so we add 2 ulps (exhaustive testing shows that this is enough, | |||
9705 | // and that it will never cause us to return an answer too large). | |||
9706 | // float4 result = as_float4(as_int4(xf*recip) + 2); | |||
9707 | N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2); | |||
9708 | N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0); | |||
9709 | N1 = DAG.getConstant(2, dl, MVT::v4i32); | |||
9710 | N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1); | |||
9711 | N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0); | |||
9712 | // Convert back to integer and return. | |||
9713 | // return vmovn_u32(vcvt_s32_f32(result)); | |||
9714 | N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0); | |||
9715 | N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0); | |||
9716 | return N0; | |||
9717 | } | |||
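| // Editorial sketch (not part of the source): assuming <arm_neon.h>, the | |||
| // v4i16 path above corresponds roughly to this intrinsic sequence, where | |||
| // x and y are hypothetical uint16x4_t inputs: | |||
| //   float32x4_t yf = vcvtq_f32_u32(vmovl_u16(y)); | |||
| //   float32x4_t xf = vcvtq_f32_u32(vmovl_u16(x)); | |||
| //   float32x4_t recip = vrecpeq_f32(yf); | |||
| //   recip = vmulq_f32(vrecpsq_f32(yf, recip), recip); // refinement 1 | |||
| //   recip = vmulq_f32(vrecpsq_f32(yf, recip), recip); // refinement 2 | |||
| //   uint32x4_t q = vreinterpretq_u32_f32(vmulq_f32(xf, recip)); | |||
| //   q = vaddq_u32(q, vdupq_n_u32(2)); // the +2 ulp fixup | |||
| //   uint16x4_t res = vmovn_u32(vcvtq_u32_f32(vreinterpretq_f32_u32(q))); | |||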
9718 | ||||
9719 | static SDValue LowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG) { | |||
9720 | SDNode *N = Op.getNode(); | |||
9721 | EVT VT = N->getValueType(0); | |||
9722 | SDVTList VTs = DAG.getVTList(VT, MVT::i32); | |||
9723 | ||||
9724 | SDValue Carry = Op.getOperand(2); | |||
9725 | ||||
9726 | SDLoc DL(Op); | |||
9727 | ||||
9728 | SDValue Result; | |||
9729 | if (Op.getOpcode() == ISD::ADDCARRY) { | |||
9730 | // This converts the boolean value carry into the carry flag. | |||
9731 | Carry = ConvertBooleanCarryToCarryFlag(Carry, DAG); | |||
9732 | ||||
9733 | // Do the addition proper using the carry flag we wanted. | |||
9734 | Result = DAG.getNode(ARMISD::ADDE, DL, VTs, Op.getOperand(0), | |||
9735 | Op.getOperand(1), Carry); | |||
9736 | ||||
9737 | // Now convert the carry flag into a boolean value. | |||
9738 | Carry = ConvertCarryFlagToBooleanCarry(Result.getValue(1), VT, DAG); | |||
9739 | } else { | |||
9740 | // ARMISD::SUBE expects a carry not a borrow like ISD::SUBCARRY so we | |||
9741 | // have to invert the carry first. | |||
9742 | Carry = DAG.getNode(ISD::SUB, DL, MVT::i32, | |||
9743 | DAG.getConstant(1, DL, MVT::i32), Carry); | |||
9744 | // This converts the boolean value carry into the carry flag. | |||
9745 | Carry = ConvertBooleanCarryToCarryFlag(Carry, DAG); | |||
9746 | ||||
9747 | // Do the subtraction proper using the carry flag we wanted. | |||
9748 | Result = DAG.getNode(ARMISD::SUBE, DL, VTs, Op.getOperand(0), | |||
9749 | Op.getOperand(1), Carry); | |||
9750 | ||||
9751 | // Now convert the carry flag into a boolean value. | |||
9752 | Carry = ConvertCarryFlagToBooleanCarry(Result.getValue(1), VT, DAG); | |||
9753 | // But the carry returned by ARMISD::SUBE is not a borrow as expected | |||
9754 | // by ISD::SUBCARRY, so compute 1 - C. | |||
9755 | Carry = DAG.getNode(ISD::SUB, DL, MVT::i32, | |||
9756 | DAG.getConstant(1, DL, MVT::i32), Carry); | |||
9757 | } | |||
9758 | ||||
9759 | // Return both values. | |||
9760 | return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, Carry); | |||
9761 | } | |||
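| // Editorial note (not part of the source): ISD::SUBCARRY models a borrow | |||
| // bit while ARM's SBC consumes a carry, with C == !borrow. For example, | |||
| // a low-word 0x0 - 0x1 produces borrow-out 1 but leaves the ARM C flag | |||
| // at 0, so both the incoming and outgoing bits are flipped via 1 - C. | |||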
9762 | ||||
9763 | SDValue ARMTargetLowering::LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const { | |||
9764 | assert(Subtarget->isTargetDarwin()); | |||
9765 | ||||
9766 | // For iOS, we want to call an alternative entry point: __sincos_stret, | |||
9767 | // return values are passed via sret. | |||
9768 | SDLoc dl(Op); | |||
9769 | SDValue Arg = Op.getOperand(0); | |||
9770 | EVT ArgVT = Arg.getValueType(); | |||
9771 | Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); | |||
9772 | auto PtrVT = getPointerTy(DAG.getDataLayout()); | |||
9773 | ||||
9774 | MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); | |||
9775 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | |||
9776 | ||||
9777 | // Pair of floats / doubles used to pass the result. | |||
9778 | Type *RetTy = StructType::get(ArgTy, ArgTy); | |||
9779 | auto &DL = DAG.getDataLayout(); | |||
9780 | ||||
9781 | ArgListTy Args; | |||
9782 | bool ShouldUseSRet = Subtarget->isAPCS_ABI(); | |||
9783 | SDValue SRet; | |||
9784 | if (ShouldUseSRet) { | |||
9785 | // Create stack object for sret. | |||
9786 | const uint64_t ByteSize = DL.getTypeAllocSize(RetTy); | |||
9787 | const Align StackAlign = DL.getPrefTypeAlign(RetTy); | |||
9788 | int FrameIdx = MFI.CreateStackObject(ByteSize, StackAlign, false); | |||
9789 | SRet = DAG.getFrameIndex(FrameIdx, TLI.getPointerTy(DL)); | |||
9790 | ||||
9791 | ArgListEntry Entry; | |||
9792 | Entry.Node = SRet; | |||
9793 | Entry.Ty = RetTy->getPointerTo(); | |||
9794 | Entry.IsSExt = false; | |||
9795 | Entry.IsZExt = false; | |||
9796 | Entry.IsSRet = true; | |||
9797 | Args.push_back(Entry); | |||
9798 | RetTy = Type::getVoidTy(*DAG.getContext()); | |||
9799 | } | |||
9800 | ||||
9801 | ArgListEntry Entry; | |||
9802 | Entry.Node = Arg; | |||
9803 | Entry.Ty = ArgTy; | |||
9804 | Entry.IsSExt = false; | |||
9805 | Entry.IsZExt = false; | |||
9806 | Args.push_back(Entry); | |||
9807 | ||||
9808 | RTLIB::Libcall LC = | |||
9809 | (ArgVT == MVT::f64) ? RTLIB::SINCOS_STRET_F64 : RTLIB::SINCOS_STRET_F32; | |||
9810 | const char *LibcallName = getLibcallName(LC); | |||
9811 | CallingConv::ID CC = getLibcallCallingConv(LC); | |||
9812 | SDValue Callee = DAG.getExternalSymbol(LibcallName, getPointerTy(DL)); | |||
9813 | ||||
9814 | TargetLowering::CallLoweringInfo CLI(DAG); | |||
9815 | CLI.setDebugLoc(dl) | |||
9816 | .setChain(DAG.getEntryNode()) | |||
9817 | .setCallee(CC, RetTy, Callee, std::move(Args)) | |||
9818 | .setDiscardResult(ShouldUseSRet); | |||
9819 | std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); | |||
9820 | ||||
9821 | if (!ShouldUseSRet) | |||
9822 | return CallResult.first; | |||
9823 | ||||
9824 | SDValue LoadSin = | |||
9825 | DAG.getLoad(ArgVT, dl, CallResult.second, SRet, MachinePointerInfo()); | |||
9826 | ||||
9827 | // Address of cos field. | |||
9828 | SDValue Add = DAG.getNode(ISD::ADD, dl, PtrVT, SRet, | |||
9829 | DAG.getIntPtrConstant(ArgVT.getStoreSize(), dl)); | |||
9830 | SDValue LoadCos = | |||
9831 | DAG.getLoad(ArgVT, dl, LoadSin.getValue(1), Add, MachinePointerInfo()); | |||
9832 | ||||
9833 | SDVTList Tys = DAG.getVTList(ArgVT, ArgVT); | |||
9834 | return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, | |||
9835 | LoadSin.getValue(0), LoadCos.getValue(0)); | |||
9836 | } | |||
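| // Editorial sketch (an assumption, not part of the source): conceptually | |||
| // the Darwin entry point behaves like | |||
| //   struct sincos_ret { double sn; double cs; }; | |||
| //   struct sincos_ret __sincos_stret(double x); // or the f32 variant | |||
| // and on APCS targets the struct returns via a hidden sret pointer, which | |||
| // is why a stack slot is created and sin/cos are loaded back from it. | |||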
9837 | ||||
9838 | SDValue ARMTargetLowering::LowerWindowsDIVLibCall(SDValue Op, SelectionDAG &DAG, | |||
9839 | bool Signed, | |||
9840 | SDValue &Chain) const { | |||
9841 | EVT VT = Op.getValueType(); | |||
9842 | assert((VT == MVT::i32 || VT == MVT::i64) && | |||
9843 | "unexpected type for custom lowering DIV"); | |||
9844 | SDLoc dl(Op); | |||
9845 | ||||
9846 | const auto &DL = DAG.getDataLayout(); | |||
9847 | const auto &TLI = DAG.getTargetLoweringInfo(); | |||
9848 | ||||
9849 | const char *Name = nullptr; | |||
9850 | if (Signed) | |||
9851 | Name = (VT == MVT::i32) ? "__rt_sdiv" : "__rt_sdiv64"; | |||
9852 | else | |||
9853 | Name = (VT == MVT::i32) ? "__rt_udiv" : "__rt_udiv64"; | |||
9854 | ||||
9855 | SDValue ES = DAG.getExternalSymbol(Name, TLI.getPointerTy(DL)); | |||
9856 | ||||
9857 | ARMTargetLowering::ArgListTy Args; | |||
9858 | ||||
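| // Editorial note (not part of the source): the {1, 0} order below pushes | |||
| // the divisor before the dividend, matching the Windows RT helper | |||
| // signatures, roughly __rt_sdiv(divisor, dividend). | |||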
9859 | for (auto AI : {1, 0}) { | |||
9860 | ArgListEntry Arg; | |||
9861 | Arg.Node = Op.getOperand(AI); | |||
9862 | Arg.Ty = Arg.Node.getValueType().getTypeForEVT(*DAG.getContext()); | |||
9863 | Args.push_back(Arg); | |||
9864 | } | |||
9865 | ||||
9866 | CallLoweringInfo CLI(DAG); | |||
9867 | CLI.setDebugLoc(dl) | |||
9868 | .setChain(Chain) | |||
9869 | .setCallee(CallingConv::ARM_AAPCS_VFP, VT.getTypeForEVT(*DAG.getContext()), | |||
9870 | ES, std::move(Args)); | |||
9871 | ||||
9872 | return LowerCallTo(CLI).first; | |||
9873 | } | |||
9874 | ||||
9875 | // This is a code size optimisation: return the original SDIV node to | |||
9876 | // DAGCombiner when we don't want to expand SDIV into a sequence of | |||
9877 | // instructions, and an empty node otherwise which will cause the | |||
9878 | // SDIV to be expanded in DAGCombine. | |||
9879 | SDValue | |||
9880 | ARMTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor, | |||
9881 | SelectionDAG &DAG, | |||
9882 | SmallVectorImpl<SDNode *> &Created) const { | |||
9883 | // TODO: Support SREM | |||
9884 | if (N->getOpcode() != ISD::SDIV) | |||
9885 | return SDValue(); | |||
9886 | ||||
9887 | const auto &ST = static_cast<const ARMSubtarget&>(DAG.getSubtarget()); | |||
9888 | const bool MinSize = ST.hasMinSize(); | |||
9889 | const bool HasDivide = ST.isThumb() ? ST.hasDivideInThumbMode() | |||
9890 | : ST.hasDivideInARMMode(); | |||
9891 | ||||
9892 | // Don't touch vector types; rewriting this may lead to scalarizing | |||
9893 | // the int divs. | |||
9894 | if (N->getOperand(0).getValueType().isVector()) | |||
9895 | return SDValue(); | |||
9896 | ||||
9897 | // Bail if MinSize is not set, and also for both ARM and Thumb mode we need | |||
9898 | // hwdiv support for this to be really profitable. | |||
9899 | if (!(MinSize && HasDivide)) | |||
9900 | return SDValue(); | |||
9901 | ||||
9902 | // ARM mode is a bit simpler than Thumb: we can handle large power | |||
9903 | // of 2 immediates with 1 mov instruction; no further checks required, | |||
9904 | // just return the sdiv node. | |||
9905 | if (!ST.isThumb()) | |||
9906 | return SDValue(N, 0); | |||
9907 | ||||
9908 | // In Thumb mode, immediates larger than 128 need a wide 4-byte MOV, | |||
9909 | // and thus lose the code size benefits of a MOVS that requires only 2. | |||
9910 | // TargetTransformInfo and 'getIntImmCodeSizeCost' could be helpful here, | |||
9911 | // but as it's doing exactly this, it's not worth the trouble to get TTI. | |||
9912 | if (Divisor.sgt(128)) | |||
9913 | return SDValue(); | |||
9914 | ||||
9915 | return SDValue(N, 0); | |||
9916 | } | |||
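| // Editorial example (not part of the source): under minsize with hwdiv | |||
| // on Thumb, 'sdiv x, 64' keeps the node since MOVS #64 has a 2-byte | |||
| // encoding, while 'sdiv x, 256' falls through to SDValue() so DAGCombine | |||
| // expands it rather than paying for a 4-byte immediate move. | |||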
9917 | ||||
9918 | SDValue ARMTargetLowering::LowerDIV_Windows(SDValue Op, SelectionDAG &DAG, | |||
9919 | bool Signed) const { | |||
9920 | assert(Op.getValueType() == MVT::i32 && | |||
9921 | "unexpected type for custom lowering DIV"); | |||
9922 | SDLoc dl(Op); | |||
9923 | ||||
9924 | SDValue DBZCHK = DAG.getNode(ARMISD::WIN__DBZCHK, dl, MVT::Other, | |||
9925 | DAG.getEntryNode(), Op.getOperand(1)); | |||
9926 | ||||
9927 | return LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK); | |||
9928 | } | |||
9929 | ||||
9930 | static SDValue WinDBZCheckDenominator(SelectionDAG &DAG, SDNode *N, SDValue InChain) { | |||
9931 | SDLoc DL(N); | |||
9932 | SDValue Op = N->getOperand(1); | |||
9933 | if (N->getValueType(0) == MVT::i32) | |||
9934 | return DAG.getNode(ARMISD::WIN__DBZCHK, DL, MVT::Other, InChain, Op); | |||
9935 | SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Op, | |||
9936 | DAG.getConstant(0, DL, MVT::i32)); | |||
9937 | SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Op, | |||
9938 | DAG.getConstant(1, DL, MVT::i32)); | |||
9939 | return DAG.getNode(ARMISD::WIN__DBZCHK, DL, MVT::Other, InChain, | |||
9940 | DAG.getNode(ISD::OR, DL, MVT::i32, Lo, Hi)); | |||
9941 | } | |||
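| // Editorial note (not part of the source): for i64 the halves are OR'ed | |||
| // because Lo | Hi == 0 exactly when the whole 64-bit divisor is zero, so | |||
| // a single 32-bit WIN__DBZCHK covers both words. | |||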
9942 | ||||
9943 | void ARMTargetLowering::ExpandDIV_Windows( | |||
9944 | SDValue Op, SelectionDAG &DAG, bool Signed, | |||
9945 | SmallVectorImpl<SDValue> &Results) const { | |||
9946 | const auto &DL = DAG.getDataLayout(); | |||
9947 | const auto &TLI = DAG.getTargetLoweringInfo(); | |||
9948 | ||||
9949 | assert(Op.getValueType() == MVT::i64 && | |||
9950 | "unexpected type for custom lowering DIV"); | |||
9951 | SDLoc dl(Op); | |||
9952 | ||||
9953 | SDValue DBZCHK = WinDBZCheckDenominator(DAG, Op.getNode(), DAG.getEntryNode()); | |||
9954 | ||||
9955 | SDValue Result = LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK); | |||
9956 | ||||
9957 | SDValue Lower = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Result); | |||
9958 | SDValue Upper = DAG.getNode(ISD::SRL, dl, MVT::i64, Result, | |||
9959 | DAG.getConstant(32, dl, TLI.getPointerTy(DL))); | |||
9960 | Upper = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Upper); | |||
9961 | ||||
9962 | Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lower, Upper)); | |||
9963 | } | |||
9964 | ||||
9965 | static SDValue LowerPredicateLoad(SDValue Op, SelectionDAG &DAG) { | |||
9966 | LoadSDNode *LD = cast<LoadSDNode>(Op.getNode()); | |||
9967 | EVT MemVT = LD->getMemoryVT(); | |||
9968 | assert((MemVT == MVT::v2i1 || MemVT == MVT::v4i1 || MemVT == MVT::v8i1 || | |||
9969 | MemVT == MVT::v16i1) && | |||
9970 | "Expected a predicate type!"); | |||
9971 | assert(MemVT == Op.getValueType()); | |||
9972 | assert(LD->getExtensionType() == ISD::NON_EXTLOAD && | |||
9973 | "Expected a non-extending load"); | |||
9974 | assert(LD->isUnindexed() && "Expected a unindexed load"); | |||
9975 | ||||
9976 | // The basic MVE VLDR on a v2i1/v4i1/v8i1 actually loads the entire 16bit | |||
9977 | // predicate, with the "v4i1" bits spread out over the 16 bits loaded. We | |||
9978 | // need to make sure that 8/4/2 bits are actually loaded into the correct | |||
9979 | // place, which means loading the value and then shuffling the values into | |||
9980 | // the bottom bits of the predicate. | |||
9981 | // Equally, VLDR for an v16i1 will actually load 32bits (so will be incorrect | |||
9982 | // for BE). | |||
9983 | // Speaking of BE, apparently the rest of llvm will assume a reverse order to | |||
9984 | // a natural VMSR(load), so needs to be reversed. | |||
9985 | ||||
9986 | SDLoc dl(Op); | |||
9987 | SDValue Load = DAG.getExtLoad( | |||
9988 | ISD::EXTLOAD, dl, MVT::i32, LD->getChain(), LD->getBasePtr(), | |||
9989 | EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits()), | |||
9990 | LD->getMemOperand()); | |||
9991 | SDValue Val = Load; | |||
9992 | if (DAG.getDataLayout().isBigEndian()) | |||
9993 | Val = DAG.getNode(ISD::SRL, dl, MVT::i32, | |||
9994 | DAG.getNode(ISD::BITREVERSE, dl, MVT::i32, Load), | |||
9995 | DAG.getConstant(32 - MemVT.getSizeInBits(), dl, MVT::i32)); | |||
9996 | SDValue Pred = DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::v16i1, Val); | |||
9997 | if (MemVT != MVT::v16i1) | |||
9998 | Pred = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MemVT, Pred, | |||
9999 | DAG.getConstant(0, dl, MVT::i32)); | |||
10000 | return DAG.getMergeValues({Pred, Load.getValue(1)}, dl); | |||
10001 | } | |||
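| // Editorial example (an assumption, not part of the source): in MVE's | |||
| // VPR.P0 each v4i1 lane occupies four identical bits, so lanes {1,0,1,0} | |||
| // correspond to P0 == 0x0F0F while only a 4-bit value lives in memory; | |||
| // hence the narrow extload plus PREDICATE_CAST sequence above. | |||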
10002 | ||||
10003 | void ARMTargetLowering::LowerLOAD(SDNode *N, SmallVectorImpl<SDValue> &Results, | |||
10004 | SelectionDAG &DAG) const { | |||
10005 | LoadSDNode *LD = cast<LoadSDNode>(N); | |||
10006 | EVT MemVT = LD->getMemoryVT(); | |||
10007 | assert(LD->isUnindexed() && "Loads should be unindexed at this point."); | |||
10008 | ||||
10009 | if (MemVT == MVT::i64 && Subtarget->hasV5TEOps() && | |||
10010 | !Subtarget->isThumb1Only() && LD->isVolatile()) { | |||
10011 | SDLoc dl(N); | |||
10012 | SDValue Result = DAG.getMemIntrinsicNode( | |||
10013 | ARMISD::LDRD, dl, DAG.getVTList({MVT::i32, MVT::i32, MVT::Other}), | |||
10014 | {LD->getChain(), LD->getBasePtr()}, MemVT, LD->getMemOperand()); | |||
10015 | SDValue Lo = Result.getValue(DAG.getDataLayout().isLittleEndian() ? 0 : 1); | |||
10016 | SDValue Hi = Result.getValue(DAG.getDataLayout().isLittleEndian() ? 1 : 0); | |||
10017 | SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); | |||
10018 | Results.append({Pair, Result.getValue(2)}); | |||
10019 | } | |||
10020 | } | |||
10021 | ||||
10022 | static SDValue LowerPredicateStore(SDValue Op, SelectionDAG &DAG) { | |||
10023 | StoreSDNode *ST = cast<StoreSDNode>(Op.getNode()); | |||
10024 | EVT MemVT = ST->getMemoryVT(); | |||
10025 | assert((MemVT == MVT::v2i1 || MemVT == MVT::v4i1 || MemVT == MVT::v8i1 || | |||
10026 | MemVT == MVT::v16i1) && | |||
10027 | "Expected a predicate type!"); | |||
10028 | assert(MemVT == ST->getValue().getValueType()); | |||
10029 | assert(!ST->isTruncatingStore() && "Expected a non-extending store"); | |||
10030 | assert(ST->isUnindexed() && "Expected a unindexed store"); | |||
10031 | ||||
10032 | // Only store the v2i1 or v4i1 or v8i1 worth of bits, via a buildvector with | |||
10033 | // top bits unset and a scalar store. | |||
10034 | SDLoc dl(Op); | |||
10035 | SDValue Build = ST->getValue(); | |||
10036 | if (MemVT != MVT::v16i1) { | |||
10037 | SmallVector<SDValue, 16> Ops; | |||
10038 | for (unsigned I = 0; I < MemVT.getVectorNumElements(); I++) { | |||
10039 | unsigned Elt = DAG.getDataLayout().isBigEndian() | |||
10040 | ? MemVT.getVectorNumElements() - I - 1 | |||
10041 | : I; | |||
10042 | Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, Build, | |||
10043 | DAG.getConstant(Elt, dl, MVT::i32))); | |||
10044 | } | |||
10045 | for (unsigned I = MemVT.getVectorNumElements(); I < 16; I++) | |||
10046 | Ops.push_back(DAG.getUNDEF(MVT::i32)); | |||
10047 | Build = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i1, Ops); | |||
10048 | } | |||
10049 | SDValue GRP = DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::i32, Build); | |||
10050 | if (MemVT == MVT::v16i1 && DAG.getDataLayout().isBigEndian()) | |||
10051 | GRP = DAG.getNode(ISD::SRL, dl, MVT::i32, | |||
10052 | DAG.getNode(ISD::BITREVERSE, dl, MVT::i32, GRP), | |||
10053 | DAG.getConstant(16, dl, MVT::i32)); | |||
10054 | return DAG.getTruncStore( | |||
10055 | ST->getChain(), dl, GRP, ST->getBasePtr(), | |||
10056 | EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits()), | |||
10057 | ST->getMemOperand()); | |||
10058 | } | |||
10059 | ||||
10060 | static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG, | |||
10061 | const ARMSubtarget *Subtarget) { | |||
10062 | StoreSDNode *ST = cast<StoreSDNode>(Op.getNode()); | |||
10063 | EVT MemVT = ST->getMemoryVT(); | |||
10064 | assert(ST->isUnindexed() && "Stores should be unindexed at this point."); | |||
10065 | ||||
10066 | if (MemVT == MVT::i64 && Subtarget->hasV5TEOps() && | |||
10067 | !Subtarget->isThumb1Only() && ST->isVolatile()) { | |||
10068 | SDNode *N = Op.getNode(); | |||
10069 | SDLoc dl(N); | |||
10070 | ||||
10071 | SDValue Lo = DAG.getNode( | |||
10072 | ISD::EXTRACT_ELEMENT, dl, MVT::i32, ST->getValue(), | |||
10073 | DAG.getTargetConstant(DAG.getDataLayout().isLittleEndian() ? 0 : 1, dl, | |||
10074 | MVT::i32)); | |||
10075 | SDValue Hi = DAG.getNode( | |||
10076 | ISD::EXTRACT_ELEMENT, dl, MVT::i32, ST->getValue(), | |||
10077 | DAG.getTargetConstant(DAG.getDataLayout().isLittleEndian() ? 1 : 0, dl, | |||
10078 | MVT::i32)); | |||
10079 | ||||
10080 | return DAG.getMemIntrinsicNode(ARMISD::STRD, dl, DAG.getVTList(MVT::Other), | |||
10081 | {ST->getChain(), Lo, Hi, ST->getBasePtr()}, | |||
10082 | MemVT, ST->getMemOperand()); | |||
10083 | } else if (Subtarget->hasMVEIntegerOps() && | |||
10084 | ((MemVT == MVT::v2i1 || MemVT == MVT::v4i1 || MemVT == MVT::v8i1 || | |||
10085 | MemVT == MVT::v16i1))) { | |||
10086 | return LowerPredicateStore(Op, DAG); | |||
10087 | } | |||
10088 | ||||
10089 | return SDValue(); | |||
10090 | } | |||
10091 | ||||
10092 | static bool isZeroVector(SDValue N) { | |||
10093 | return (ISD::isBuildVectorAllZeros(N.getNode()) || | |||
10094 | (N->getOpcode() == ARMISD::VMOVIMM && | |||
10095 | isNullConstant(N->getOperand(0)))); | |||
10096 | } | |||
10097 | ||||
10098 | static SDValue LowerMLOAD(SDValue Op, SelectionDAG &DAG) { | |||
10099 | MaskedLoadSDNode *N = cast<MaskedLoadSDNode>(Op.getNode()); | |||
10100 | MVT VT = Op.getSimpleValueType(); | |||
10101 | SDValue Mask = N->getMask(); | |||
10102 | SDValue PassThru = N->getPassThru(); | |||
10103 | SDLoc dl(Op); | |||
10104 | ||||
10105 | if (isZeroVector(PassThru)) | |||
10106 | return Op; | |||
10107 | ||||
10108 | // MVE Masked loads use zero as the passthru value. Here we convert undef to | |||
10109 | // zero too, and other values are lowered to a select. | |||
10110 | SDValue ZeroVec = DAG.getNode(ARMISD::VMOVIMM, dl, VT, | |||
10111 | DAG.getTargetConstant(0, dl, MVT::i32)); | |||
10112 | SDValue NewLoad = DAG.getMaskedLoad( | |||
10113 | VT, dl, N->getChain(), N->getBasePtr(), N->getOffset(), Mask, ZeroVec, | |||
10114 | N->getMemoryVT(), N->getMemOperand(), N->getAddressingMode(), | |||
10115 | N->getExtensionType(), N->isExpandingLoad()); | |||
10116 | SDValue Combo = NewLoad; | |||
10117 | bool PassThruIsCastZero = (PassThru.getOpcode() == ISD::BITCAST || | |||
10118 | PassThru.getOpcode() == ARMISD::VECTOR_REG_CAST) && | |||
10119 | isZeroVector(PassThru->getOperand(0)); | |||
10120 | if (!PassThru.isUndef() && !PassThruIsCastZero) | |||
10121 | Combo = DAG.getNode(ISD::VSELECT, dl, VT, Mask, NewLoad, PassThru); | |||
10122 | return DAG.getMergeValues({Combo, NewLoad.getValue(1)}, dl); | |||
10123 | } | |||
10124 | ||||
10125 | static SDValue LowerVecReduce(SDValue Op, SelectionDAG &DAG, | |||
10126 | const ARMSubtarget *ST) { | |||
10127 | if (!ST->hasMVEIntegerOps()) | |||
10128 | return SDValue(); | |||
10129 | ||||
10130 | SDLoc dl(Op); | |||
10131 | unsigned BaseOpcode = 0; | |||
10132 | switch (Op->getOpcode()) { | |||
10133 | default: llvm_unreachable("Expected VECREDUCE opcode"); | |||
10134 | case ISD::VECREDUCE_FADD: BaseOpcode = ISD::FADD; break; | |||
10135 | case ISD::VECREDUCE_FMUL: BaseOpcode = ISD::FMUL; break; | |||
10136 | case ISD::VECREDUCE_MUL: BaseOpcode = ISD::MUL; break; | |||
10137 | case ISD::VECREDUCE_AND: BaseOpcode = ISD::AND; break; | |||
10138 | case ISD::VECREDUCE_OR: BaseOpcode = ISD::OR; break; | |||
10139 | case ISD::VECREDUCE_XOR: BaseOpcode = ISD::XOR; break; | |||
10140 | case ISD::VECREDUCE_FMAX: BaseOpcode = ISD::FMAXNUM; break; | |||
10141 | case ISD::VECREDUCE_FMIN: BaseOpcode = ISD::FMINNUM; break; | |||
10142 | } | |||
10143 | ||||
10144 | SDValue Op0 = Op->getOperand(0); | |||
10145 | EVT VT = Op0.getValueType(); | |||
10146 | EVT EltVT = VT.getVectorElementType(); | |||
10147 | unsigned NumElts = VT.getVectorNumElements(); | |||
10148 | unsigned NumActiveLanes = NumElts; | |||
10149 | ||||
10150 | assert((NumActiveLanes == 16 || NumActiveLanes == 8 || NumActiveLanes == 4 || | |||
10151 | NumActiveLanes == 2) && | |||
10152 | "Only expected a power 2 vector size"); | |||
10153 | ||||
10154 | // Use Mul(X, Rev(X)) until 4 items remain. Going down to 4 vector elements | |||
10155 | // allows us to easily extract vector elements from the lanes. | |||
10156 | while (NumActiveLanes > 4) { | |||
10157 | unsigned RevOpcode = NumActiveLanes == 16 ? ARMISD::VREV16 : ARMISD::VREV32; | |||
10158 | SDValue Rev = DAG.getNode(RevOpcode, dl, VT, Op0); | |||
10159 | Op0 = DAG.getNode(BaseOpcode, dl, VT, Op0, Rev); | |||
10160 | NumActiveLanes /= 2; | |||
10161 | } | |||
10162 | ||||
10163 | SDValue Res; | |||
10164 | if (NumActiveLanes == 4) { | |||
10165 | // The remaining 4 elements are summed sequentially | |||
10166 | SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0, | |||
10167 | DAG.getConstant(0 * NumElts / 4, dl, MVT::i32)); | |||
10168 | SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0, | |||
10169 | DAG.getConstant(1 * NumElts / 4, dl, MVT::i32)); | |||
10170 | SDValue Ext2 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0, | |||
10171 | DAG.getConstant(2 * NumElts / 4, dl, MVT::i32)); | |||
10172 | SDValue Ext3 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0, | |||
10173 | DAG.getConstant(3 * NumElts / 4, dl, MVT::i32)); | |||
10174 | SDValue Res0 = DAG.getNode(BaseOpcode, dl, EltVT, Ext0, Ext1, Op->getFlags()); | |||
10175 | SDValue Res1 = DAG.getNode(BaseOpcode, dl, EltVT, Ext2, Ext3, Op->getFlags()); | |||
10176 | Res = DAG.getNode(BaseOpcode, dl, EltVT, Res0, Res1, Op->getFlags()); | |||
10177 | } else { | |||
10178 | SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0, | |||
10179 | DAG.getConstant(0, dl, MVT::i32)); | |||
10180 | SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0, | |||
10181 | DAG.getConstant(1, dl, MVT::i32)); | |||
10182 | Res = DAG.getNode(BaseOpcode, dl, EltVT, Ext0, Ext1, Op->getFlags()); | |||
10183 | } | |||
10184 | ||||
10185 | // Result type may be wider than element type. | |||
10186 | if (EltVT != Op->getValueType(0)) | |||
10187 | Res = DAG.getNode(ISD::ANY_EXTEND, dl, Op->getValueType(0), Res); | |||
10188 | return Res; | |||
10189 | } | |||
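| // Editorial example (not part of the source): reducing a v8i16 multiply | |||
| // takes one shuffle step, v = mul(v, vrev32(v)), which multiplies each | |||
| // adjacent pair of halfwords; the 4 surviving lanes are then combined | |||
| // with three scalar multiplies via the NumActiveLanes == 4 path above. | |||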
10190 | ||||
10191 | static SDValue LowerVecReduceF(SDValue Op, SelectionDAG &DAG, | |||
10192 | const ARMSubtarget *ST) { | |||
10193 | if (!ST->hasMVEFloatOps()) | |||
10194 | return SDValue(); | |||
10195 | return LowerVecReduce(Op, DAG, ST); | |||
10196 | } | |||
10197 | ||||
10198 | static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) { | |||
10199 | if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getSuccessOrdering())) | |||
10200 | // Acquire/Release load/store is not legal for targets without a dmb or | |||
10201 | // equivalent available. | |||
10202 | return SDValue(); | |||
10203 | ||||
10204 | // Monotonic load/store is legal for all targets. | |||
10205 | return Op; | |||
10206 | } | |||
10207 | ||||
10208 | static void ReplaceREADCYCLECOUNTER(SDNode *N, | |||
10209 | SmallVectorImpl<SDValue> &Results, | |||
10210 | SelectionDAG &DAG, | |||
10211 | const ARMSubtarget *Subtarget) { | |||
10212 | SDLoc DL(N); | |||
10213 | // Under Power Management extensions, the cycle-count is: | |||
10214 | // mrc p15, #0, <Rt>, c9, c13, #0 | |||
10215 | SDValue Ops[] = { N->getOperand(0), // Chain | |||
10216 | DAG.getTargetConstant(Intrinsic::arm_mrc, DL, MVT::i32), | |||
10217 | DAG.getTargetConstant(15, DL, MVT::i32), | |||
10218 | DAG.getTargetConstant(0, DL, MVT::i32), | |||
10219 | DAG.getTargetConstant(9, DL, MVT::i32), | |||
10220 | DAG.getTargetConstant(13, DL, MVT::i32), | |||
10221 | DAG.getTargetConstant(0, DL, MVT::i32) | |||
10222 | }; | |||
10223 | ||||
10224 | SDValue Cycles32 = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, | |||
10225 | DAG.getVTList(MVT::i32, MVT::Other), Ops); | |||
10226 | Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Cycles32, | |||
10227 | DAG.getConstant(0, DL, MVT::i32))); | |||
10228 | Results.push_back(Cycles32.getValue(1)); | |||
10229 | } | |||
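| // Editorial note (not part of the source): the MRC above reads the | |||
| // 32-bit PMCCNTR cycle counter, so the i64 result is formed by pairing | |||
| // it with a constant-zero high word rather than reading a 64-bit counter. | |||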
10230 | ||||
10231 | static SDValue createGPRPairNode(SelectionDAG &DAG, SDValue V) { | |||
10232 | SDLoc dl(V.getNode()); | |||
10233 | SDValue VLo = DAG.getAnyExtOrTrunc(V, dl, MVT::i32); | |||
10234 | SDValue VHi = DAG.getAnyExtOrTrunc( | |||
10235 | DAG.getNode(ISD::SRL, dl, MVT::i64, V, DAG.getConstant(32, dl, MVT::i32)), | |||
10236 | dl, MVT::i32); | |||
10237 | bool isBigEndian = DAG.getDataLayout().isBigEndian(); | |||
10238 | if (isBigEndian) | |||
10239 | std::swap (VLo, VHi); | |||
10240 | SDValue RegClass = | |||
10241 | DAG.getTargetConstant(ARM::GPRPairRegClassID, dl, MVT::i32); | |||
10242 | SDValue SubReg0 = DAG.getTargetConstant(ARM::gsub_0, dl, MVT::i32); | |||
10243 | SDValue SubReg1 = DAG.getTargetConstant(ARM::gsub_1, dl, MVT::i32); | |||
10244 | const SDValue Ops[] = { RegClass, VLo, SubReg0, VHi, SubReg1 }; | |||
10245 | return SDValue( | |||
10246 | DAG.getMachineNode(TargetOpcode::REG_SEQUENCE, dl, MVT::Untyped, Ops), 0); | |||
10247 | } | |||
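| // Editorial note (not part of the source): REG_SEQUENCE packs the two | |||
| // i32 halves into one GPRPair because the LDREXD/STREXD-based | |||
| // CMP_SWAP_64 needs an even/odd register pair; the halves are swapped on | |||
| // big-endian so the pair matches the in-memory word order. | |||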
10248 | ||||
10249 | static void ReplaceCMP_SWAP_64Results(SDNode *N, | |||
10250 | SmallVectorImpl<SDValue> & Results, | |||
10251 | SelectionDAG &DAG) { | |||
10252 | assert(N->getValueType(0) == MVT::i64 && | |||
10253 | "AtomicCmpSwap on types less than 64 should be legal"); | |||
10254 | SDValue Ops[] = {N->getOperand(1), | |||
10255 | createGPRPairNode(DAG, N->getOperand(2)), | |||
10256 | createGPRPairNode(DAG, N->getOperand(3)), | |||
10257 | N->getOperand(0)}; | |||
10258 | SDNode *CmpSwap = DAG.getMachineNode( | |||
10259 | ARM::CMP_SWAP_64, SDLoc(N), | |||
10260 | DAG.getVTList(MVT::Untyped, MVT::i32, MVT::Other), Ops); | |||
10261 | ||||
10262 | MachineMemOperand *MemOp = cast<MemSDNode>(N)->getMemOperand(); | |||
10263 | DAG.setNodeMemRefs(cast<MachineSDNode>(CmpSwap), {MemOp}); | |||
10264 | ||||
10265 | bool isBigEndian = DAG.getDataLayout().isBigEndian(); | |||
10266 | ||||
10267 | SDValue Lo = | |||
10268 | DAG.getTargetExtractSubreg(isBigEndian ? ARM::gsub_1 : ARM::gsub_0, | |||
10269 | SDLoc(N), MVT::i32, SDValue(CmpSwap, 0)); | |||
10270 | SDValue Hi = | |||
10271 | DAG.getTargetExtractSubreg(isBigEndian ? ARM::gsub_0 : ARM::gsub_1, | |||
10272 | SDLoc(N), MVT::i32, SDValue(CmpSwap, 0)); | |||
10273 | Results.push_back(DAG.getNode(ISD::BUILD_PAIR, SDLoc(N), MVT::i64, Lo, Hi)); | |||
10274 | Results.push_back(SDValue(CmpSwap, 2)); | |||
10275 | } | |||
10276 | ||||
10277 | SDValue ARMTargetLowering::LowerFSETCC(SDValue Op, SelectionDAG &DAG) const { | |||
10278 | SDLoc dl(Op); | |||
10279 | EVT VT = Op.getValueType(); | |||
10280 | SDValue Chain = Op.getOperand(0); | |||
10281 | SDValue LHS = Op.getOperand(1); | |||
10282 | SDValue RHS = Op.getOperand(2); | |||
10283 | ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(3))->get(); | |||
10284 | bool IsSignaling = Op.getOpcode() == ISD::STRICT_FSETCCS; | |||
10285 | ||||
10286 | // If we don't have instructions of this float type then soften to a libcall | |||
10287 | // and use SETCC instead. | |||
10288 | if (isUnsupportedFloatingType(LHS.getValueType())) { | |||
10289 | DAG.getTargetLoweringInfo().softenSetCCOperands( | |||
10290 | DAG, LHS.getValueType(), LHS, RHS, CC, dl, LHS, RHS, Chain, IsSignaling); | |||
10291 | if (!RHS.getNode()) { | |||
10292 | RHS = DAG.getConstant(0, dl, LHS.getValueType()); | |||
10293 | CC = ISD::SETNE; | |||
10294 | } | |||
10295 | SDValue Result = DAG.getNode(ISD::SETCC, dl, VT, LHS, RHS, | |||
10296 | DAG.getCondCode(CC)); | |||
10297 | return DAG.getMergeValues({Result, Chain}, dl); | |||
10298 | } | |||
10299 | ||||
10300 | ARMCC::CondCodes CondCode, CondCode2; | |||
10301 | FPCCToARMCC(CC, CondCode, CondCode2); | |||
10302 | ||||
10303 | // FIXME: Chain is not handled correctly here. Currently the FPSCR is implicit | |||
10304 | // in CMPFP and CMPFPE, but instead it should be made explicit by these | |||
10305 | // instructions using a chain instead of glue. This would also fix the problem | |||
10306 | // here (and also in LowerSELECT_CC) where we generate two comparisons when | |||
10307 | // CondCode2 != AL. | |||
10308 | SDValue True = DAG.getConstant(1, dl, VT); | |||
10309 | SDValue False = DAG.getConstant(0, dl, VT); | |||
10310 | SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32); | |||
10311 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); | |||
10312 | SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl, IsSignaling); | |||
10313 | SDValue Result = getCMOV(dl, VT, False, True, ARMcc, CCR, Cmp, DAG); | |||
10314 | if (CondCode2 != ARMCC::AL) { | |||
10315 | ARMcc = DAG.getConstant(CondCode2, dl, MVT::i32); | |||
10316 | Cmp = getVFPCmp(LHS, RHS, DAG, dl, IsSignaling); | |||
10317 | Result = getCMOV(dl, VT, Result, True, ARMcc, CCR, Cmp, DAG); | |||
10318 | } | |||
10319 | return DAG.getMergeValues({Result, Chain}, dl); | |||
10320 | } | |||
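| // Editorial example (an assumption, not part of the source): a condition | |||
| // such as SETONE ("ordered and not equal") has no single ARM condition | |||
| // code and is believed to map to the pair MI/GT, which is why a second | |||
| // getVFPCmp/getCMOV is emitted whenever CondCode2 != ARMCC::AL. | |||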
10321 | ||||
10322 | SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { | |||
10323 | LLVM_DEBUG(dbgs() << "Lowering node: "; Op.dump()); | |||
10324 | switch (Op.getOpcode()) { | |||
10325 | default: llvm_unreachable("Don't know how to custom lower this!"); | |||
10326 | case ISD::WRITE_REGISTER: return LowerWRITE_REGISTER(Op, DAG); | |||
10327 | case ISD::ConstantPool: return LowerConstantPool(Op, DAG); | |||
10328 | case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); | |||
10329 | case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); | |||
10330 | case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); | |||
10331 | case ISD::SELECT: return LowerSELECT(Op, DAG); | |||
10332 | case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); | |||
10333 | case ISD::BRCOND: return LowerBRCOND(Op, DAG); | |||
10334 | case ISD::BR_CC: return LowerBR_CC(Op, DAG); | |||
10335 | case ISD::BR_JT: return LowerBR_JT(Op, DAG); | |||
10336 | case ISD::VASTART: return LowerVASTART(Op, DAG); | |||
10337 | case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG, Subtarget); | |||
10338 | case ISD::PREFETCH: return LowerPREFETCH(Op, DAG, Subtarget); | |||
10339 | case ISD::SINT_TO_FP: | |||
10340 | case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG); | |||
10341 | case ISD::STRICT_FP_TO_SINT: | |||
10342 | case ISD::STRICT_FP_TO_UINT: | |||
10343 | case ISD::FP_TO_SINT: | |||
10344 | case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG); | |||
10345 | case ISD::FP_TO_SINT_SAT: | |||
10346 | case ISD::FP_TO_UINT_SAT: return LowerFP_TO_INT_SAT(Op, DAG, Subtarget); | |||
10347 | case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); | |||
10348 | case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); | |||
10349 | case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); | |||
10350 | case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG); | |||
10351 | case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG); | |||
10352 | case ISD::EH_SJLJ_SETUP_DISPATCH: return LowerEH_SJLJ_SETUP_DISPATCH(Op, DAG); | |||
10353 | case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG, Subtarget); | |||
10354 | case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG, | |||
10355 | Subtarget); | |||
10356 | case ISD::BITCAST: return ExpandBITCAST(Op.getNode(), DAG, Subtarget); | |||
10357 | case ISD::SHL: | |||
10358 | case ISD::SRL: | |||
10359 | case ISD::SRA: return LowerShift(Op.getNode(), DAG, Subtarget); | |||
10360 | case ISD::SREM: return LowerREM(Op.getNode(), DAG); | |||
10361 | case ISD::UREM: return LowerREM(Op.getNode(), DAG); | |||
10362 | case ISD::SHL_PARTS: return LowerShiftLeftParts(Op, DAG); | |||
10363 | case ISD::SRL_PARTS: | |||
10364 | case ISD::SRA_PARTS: return LowerShiftRightParts(Op, DAG); | |||
10365 | case ISD::CTTZ: | |||
10366 | case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(Op.getNode(), DAG, Subtarget); | |||
10367 | case ISD::CTPOP: return LowerCTPOP(Op.getNode(), DAG, Subtarget); | |||
10368 | case ISD::SETCC: return LowerVSETCC(Op, DAG, Subtarget); | |||
10369 | case ISD::SETCCCARRY: return LowerSETCCCARRY(Op, DAG); | |||
10370 | case ISD::ConstantFP: return LowerConstantFP(Op, DAG, Subtarget); | |||
10371 | case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG, Subtarget); | |||
10372 | case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG, Subtarget); | |||
10373 | case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG, Subtarget); | |||
10374 | case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); | |||
10375 | case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG, Subtarget); | |||
10376 | case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG, Subtarget); | |||
10377 | case ISD::TRUNCATE: return LowerTruncate(Op.getNode(), DAG, Subtarget); | |||
10378 | case ISD::SIGN_EXTEND: | |||
10379 | case ISD::ZERO_EXTEND: return LowerVectorExtend(Op.getNode(), DAG, Subtarget); | |||
10380 | case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); | |||
10381 | case ISD::SET_ROUNDING: return LowerSET_ROUNDING(Op, DAG); | |||
10382 | case ISD::MUL: return LowerMUL(Op, DAG); | |||
10383 | case ISD::SDIV: | |||
10384 | if (Subtarget->isTargetWindows() && !Op.getValueType().isVector()) | |||
10385 | return LowerDIV_Windows(Op, DAG, /* Signed */ true); | |||
10386 | return LowerSDIV(Op, DAG, Subtarget); | |||
10387 | case ISD::UDIV: | |||
10388 | if (Subtarget->isTargetWindows() && !Op.getValueType().isVector()) | |||
10389 | return LowerDIV_Windows(Op, DAG, /* Signed */ false); | |||
10390 | return LowerUDIV(Op, DAG, Subtarget); | |||
10391 | case ISD::ADDCARRY: | |||
10392 | case ISD::SUBCARRY: return LowerADDSUBCARRY(Op, DAG); | |||
10393 | case ISD::SADDO: | |||
10394 | case ISD::SSUBO: | |||
10395 | return LowerSignedALUO(Op, DAG); | |||
10396 | case ISD::UADDO: | |||
10397 | case ISD::USUBO: | |||
10398 | return LowerUnsignedALUO(Op, DAG); | |||
10399 | case ISD::SADDSAT: | |||
10400 | case ISD::SSUBSAT: | |||
10401 | case ISD::UADDSAT: | |||
10402 | case ISD::USUBSAT: | |||
10403 | return LowerADDSUBSAT(Op, DAG, Subtarget); | |||
10404 | case ISD::LOAD: | |||
10405 | return LowerPredicateLoad(Op, DAG); | |||
10406 | case ISD::STORE: | |||
10407 | return LowerSTORE(Op, DAG, Subtarget); | |||
10408 | case ISD::MLOAD: | |||
10409 | return LowerMLOAD(Op, DAG); | |||
10410 | case ISD::VECREDUCE_MUL: | |||
10411 | case ISD::VECREDUCE_AND: | |||
10412 | case ISD::VECREDUCE_OR: | |||
10413 | case ISD::VECREDUCE_XOR: | |||
10414 | return LowerVecReduce(Op, DAG, Subtarget); | |||
10415 | case ISD::VECREDUCE_FADD: | |||
10416 | case ISD::VECREDUCE_FMUL: | |||
10417 | case ISD::VECREDUCE_FMIN: | |||
10418 | case ISD::VECREDUCE_FMAX: | |||
10419 | return LowerVecReduceF(Op, DAG, Subtarget); | |||
10420 | case ISD::ATOMIC_LOAD: | |||
10421 | case ISD::ATOMIC_STORE: return LowerAtomicLoadStore(Op, DAG); | |||
10422 | case ISD::FSINCOS: return LowerFSINCOS(Op, DAG); | |||
10423 | case ISD::SDIVREM: | |||
10424 | case ISD::UDIVREM: return LowerDivRem(Op, DAG); | |||
10425 | case ISD::DYNAMIC_STACKALLOC: | |||
10426 | if (Subtarget->isTargetWindows()) | |||
10427 | return LowerDYNAMIC_STACKALLOC(Op, DAG); | |||
10428 | llvm_unreachable("Don't know how to custom lower this!"); | |||
10429 | case ISD::STRICT_FP_ROUND: | |||
10430 | case ISD::FP_ROUND: return LowerFP_ROUND(Op, DAG); | |||
10431 | case ISD::STRICT_FP_EXTEND: | |||
10432 | case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG); | |||
10433 | case ISD::STRICT_FSETCC: | |||
10434 | case ISD::STRICT_FSETCCS: return LowerFSETCC(Op, DAG); | |||
10435 | case ARMISD::WIN__DBZCHK: return SDValue(); | |||
10436 | } | |||
10437 | } | |||
10438 | ||||
10439 | static void ReplaceLongIntrinsic(SDNode *N, SmallVectorImpl<SDValue> &Results, | |||
10440 | SelectionDAG &DAG) { | |||
10441 | unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); | |||
10442 | unsigned Opc = 0; | |||
10443 | if (IntNo == Intrinsic::arm_smlald) | |||
10444 | Opc = ARMISD::SMLALD; | |||
10445 | else if (IntNo == Intrinsic::arm_smlaldx) | |||
10446 | Opc = ARMISD::SMLALDX; | |||
10447 | else if (IntNo == Intrinsic::arm_smlsld) | |||
10448 | Opc = ARMISD::SMLSLD; | |||
10449 | else if (IntNo == Intrinsic::arm_smlsldx) | |||
10450 | Opc = ARMISD::SMLSLDX; | |||
10451 | else | |||
10452 | return; | |||
10453 | ||||
10454 | SDLoc dl(N); | |||
10455 | SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, | |||
10456 | N->getOperand(3), | |||
10457 | DAG.getConstant(0, dl, MVT::i32)); | |||
10458 | SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, | |||
10459 | N->getOperand(3), | |||
10460 | DAG.getConstant(1, dl, MVT::i32)); | |||
10461 | ||||
10462 | SDValue LongMul = DAG.getNode(Opc, dl, | |||
10463 | DAG.getVTList(MVT::i32, MVT::i32), | |||
10464 | N->getOperand(1), N->getOperand(2), | |||
10465 | Lo, Hi); | |||
10466 | Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, | |||
10467 | LongMul.getValue(0), LongMul.getValue(1))); | |||
10468 | } | |||
10469 | ||||
10470 | /// ReplaceNodeResults - Replace the results of node with an illegal result | |||
10471 | /// type with new values built out of custom code. | |||
10472 | void ARMTargetLowering::ReplaceNodeResults(SDNode *N, | |||
10473 | SmallVectorImpl<SDValue> &Results, | |||
10474 | SelectionDAG &DAG) const { | |||
10475 | SDValue Res; | |||
10476 | switch (N->getOpcode()) { | |||
10477 | default: | |||
10478 | llvm_unreachable("Don't know how to custom expand this!"); | |||
10479 | case ISD::READ_REGISTER: | |||
10480 | ExpandREAD_REGISTER(N, Results, DAG); | |||
10481 | break; | |||
10482 | case ISD::BITCAST: | |||
10483 | Res = ExpandBITCAST(N, DAG, Subtarget); | |||
10484 | break; | |||
10485 | case ISD::SRL: | |||
10486 | case ISD::SRA: | |||
10487 | case ISD::SHL: | |||
10488 | Res = Expand64BitShift(N, DAG, Subtarget); | |||
10489 | break; | |||
10490 | case ISD::SREM: | |||
10491 | case ISD::UREM: | |||
10492 | Res = LowerREM(N, DAG); | |||
10493 | break; | |||
10494 | case ISD::SDIVREM: | |||
10495 | case ISD::UDIVREM: | |||
10496 | Res = LowerDivRem(SDValue(N, 0), DAG); | |||
10497 | assert(Res.getNumOperands() == 2 && "DivRem needs two values"); | |||
10498 | Results.push_back(Res.getValue(0)); | |||
10499 | Results.push_back(Res.getValue(1)); | |||
10500 | return; | |||
10501 | case ISD::SADDSAT: | |||
10502 | case ISD::SSUBSAT: | |||
10503 | case ISD::UADDSAT: | |||
10504 | case ISD::USUBSAT: | |||
10505 | Res = LowerADDSUBSAT(SDValue(N, 0), DAG, Subtarget); | |||
10506 | break; | |||
10507 | case ISD::READCYCLECOUNTER: | |||
10508 | ReplaceREADCYCLECOUNTER(N, Results, DAG, Subtarget); | |||
10509 | return; | |||
10510 | case ISD::UDIV: | |||
10511 | case ISD::SDIV: | |||
10512 | assert(Subtarget->isTargetWindows() && "can only expand DIV on Windows"); | |||
10513 | return ExpandDIV_Windows(SDValue(N, 0), DAG, N->getOpcode() == ISD::SDIV, | |||
10514 | Results); | |||
10515 | case ISD::ATOMIC_CMP_SWAP: | |||
10516 | ReplaceCMP_SWAP_64Results(N, Results, DAG); | |||
10517 | return; | |||
10518 | case ISD::INTRINSIC_WO_CHAIN: | |||
10519 | return ReplaceLongIntrinsic(N, Results, DAG); | |||
10520 | case ISD::LOAD: | |||
10521 | LowerLOAD(N, Results, DAG); | |||
10522 | break; | |||
10523 | case ISD::TRUNCATE: | |||
10524 | Res = LowerTruncate(N, DAG, Subtarget); | |||
10525 | break; | |||
10526 | case ISD::SIGN_EXTEND: | |||
10527 | case ISD::ZERO_EXTEND: | |||
10528 | Res = LowerVectorExtend(N, DAG, Subtarget); | |||
10529 | break; | |||
10530 | case ISD::FP_TO_SINT_SAT: | |||
10531 | case ISD::FP_TO_UINT_SAT: | |||
10532 | Res = LowerFP_TO_INT_SAT(SDValue(N, 0), DAG, Subtarget); | |||
10533 | break; | |||
10534 | } | |||
10535 | if (Res.getNode()) | |||
10536 | Results.push_back(Res); | |||
10537 | } | |||
10538 | ||||
10539 | //===----------------------------------------------------------------------===// | |||
10540 | // ARM Scheduler Hooks | |||
10541 | //===----------------------------------------------------------------------===// | |||
10542 | ||||
10543 | /// SetupEntryBlockForSjLj - Insert code into the entry block that creates and | |||
10544 | /// registers the function context. | |||
10545 | void ARMTargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI, | |||
10546 | MachineBasicBlock *MBB, | |||
10547 | MachineBasicBlock *DispatchBB, | |||
10548 | int FI) const { | |||
10549 | assert(!Subtarget->isROPI() && !Subtarget->isRWPI() && | |||
10550 | "ROPI/RWPI not currently supported with SjLj"); | |||
10551 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); | |||
10552 | DebugLoc dl = MI.getDebugLoc(); | |||
10553 | MachineFunction *MF = MBB->getParent(); | |||
10554 | MachineRegisterInfo *MRI = &MF->getRegInfo(); | |||
10555 | MachineConstantPool *MCP = MF->getConstantPool(); | |||
10556 | ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>(); | |||
10557 | const Function &F = MF->getFunction(); | |||
10558 | ||||
10559 | bool isThumb = Subtarget->isThumb(); | |||
10560 | bool isThumb2 = Subtarget->isThumb2(); | |||
10561 | ||||
10562 | unsigned PCLabelId = AFI->createPICLabelUId(); | |||
10563 | unsigned PCAdj = (isThumb || isThumb2) ? 4 : 8; | |||
10564 | ARMConstantPoolValue *CPV = | |||
10565 | ARMConstantPoolMBB::Create(F.getContext(), DispatchBB, PCLabelId, PCAdj); | |||
10566 | unsigned CPI = MCP->getConstantPoolIndex(CPV, Align(4)); | |||
10567 | ||||
10568 | const TargetRegisterClass *TRC = isThumb ? &ARM::tGPRRegClass | |||
10569 | : &ARM::GPRRegClass; | |||
10570 | ||||
10571 | // Grab constant pool and fixed stack memory operands. | |||
10572 | MachineMemOperand *CPMMO = | |||
10573 | MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF), | |||
10574 | MachineMemOperand::MOLoad, 4, Align(4)); | |||
10575 | ||||
10576 | MachineMemOperand *FIMMOSt = | |||
10577 | MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI), | |||
10578 | MachineMemOperand::MOStore, 4, Align(4)); | |||
10579 | ||||
10580 | // Load the address of the dispatch MBB into the jump buffer. | |||
10581 | if (isThumb2) { | |||
10582 | // Incoming value: jbuf | |||
10583 | // ldr.n r5, LCPI1_1 | |||
10584 | // orr r5, r5, #1 | |||
10585 | // add r5, pc | |||
10586 | // str r5, [$jbuf, #+4] ; &jbuf[1] | |||
10587 | Register NewVReg1 = MRI->createVirtualRegister(TRC); | |||
10588 | BuildMI(*MBB, MI, dl, TII->get(ARM::t2LDRpci), NewVReg1) | |||
10589 | .addConstantPoolIndex(CPI) | |||
10590 | .addMemOperand(CPMMO) | |||
10591 | .add(predOps(ARMCC::AL)); | |||
10592 | // Set the low bit because of thumb mode. | |||
10593 | Register NewVReg2 = MRI->createVirtualRegister(TRC); | |||
10594 | BuildMI(*MBB, MI, dl, TII->get(ARM::t2ORRri), NewVReg2) | |||
10595 | .addReg(NewVReg1, RegState::Kill) | |||
10596 | .addImm(0x01) | |||
10597 | .add(predOps(ARMCC::AL)) | |||
10598 | .add(condCodeOp()); | |||
10599 | Register NewVReg3 = MRI->createVirtualRegister(TRC); | |||
10600 | BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg3) | |||
10601 | .addReg(NewVReg2, RegState::Kill) | |||
10602 | .addImm(PCLabelId); | |||
10603 | BuildMI(*MBB, MI, dl, TII->get(ARM::t2STRi12)) | |||
10604 | .addReg(NewVReg3, RegState::Kill) | |||
10605 | .addFrameIndex(FI) | |||
10606 | .addImm(36) // &jbuf[1] :: pc | |||
10607 | .addMemOperand(FIMMOSt) | |||
10608 | .add(predOps(ARMCC::AL)); | |||
10609 | } else if (isThumb) { | |||
10610 | // Incoming value: jbuf | |||
10611 | // ldr.n r1, LCPI1_4 | |||
10612 | // add r1, pc | |||
10613 | // mov r2, #1 | |||
10614 | // orrs r1, r2 | |||
10615 | // add r2, $jbuf, #+4 ; &jbuf[1] | |||
10616 | // str r1, [r2] | |||
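// Thumb1 has no ORR with an immediate operand, so the low bit is set by
// materializing 1 in a register (tMOVi8) and using the flag-setting
// register-register tORR. The slot address is likewise formed with a
// separate tADDframe, since the frame slot's final offset from SP is not
// guaranteed to fit a tSTRi immediate once the frame index is resolved.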
10617 | Register NewVReg1 = MRI->createVirtualRegister(TRC); | |||
10618 | BuildMI(*MBB, MI, dl, TII->get(ARM::tLDRpci), NewVReg1) | |||
10619 | .addConstantPoolIndex(CPI) | |||
10620 | .addMemOperand(CPMMO) | |||
10621 | .add(predOps(ARMCC::AL)); | |||
10622 | Register NewVReg2 = MRI->createVirtualRegister(TRC); | |||
10623 | BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg2) | |||
10624 | .addReg(NewVReg1, RegState::Kill) | |||
10625 | .addImm(PCLabelId); | |||
10626 | // Set the low bit because of thumb mode. | |||
10627 | Register NewVReg3 = MRI->createVirtualRegister(TRC); | |||
10628 | BuildMI(*MBB, MI, dl, TII->get(ARM::tMOVi8), NewVReg3) | |||
10629 | .addReg(ARM::CPSR, RegState::Define) | |||
10630 | .addImm(1) | |||
10631 | .add(predOps(ARMCC::AL)); | |||
10632 | Register NewVReg4 = MRI->createVirtualRegister(TRC); | |||
10633 | BuildMI(*MBB, MI, dl, TII->get(ARM::tORR), NewVReg4) | |||
10634 | .addReg(ARM::CPSR, RegState::Define) | |||
10635 | .addReg(NewVReg2, RegState::Kill) | |||
10636 | .addReg(NewVReg3, RegState::Kill) | |||
10637 | .add(predOps(ARMCC::AL)); | |||
10638 | Register NewVReg5 = MRI->createVirtualRegister(TRC); | |||
10639 | BuildMI(*MBB, MI, dl, TII->get(ARM::tADDframe), NewVReg5) | |||
10640 | .addFrameIndex(FI) | |||
10641 | .addImm(36); // &jbuf[1] :: pc | |||
10642 | BuildMI(*MBB, MI, dl, TII->get(ARM::tSTRi)) | |||
10643 | .addReg(NewVReg4, RegState::Kill) | |||
10644 | .addReg(NewVReg5, RegState::Kill) | |||
10645 | .addImm(0) | |||
10646 | .addMemOperand(FIMMOSt) | |||
10647 | .add(predOps(ARMCC::AL)); | |||
10648 | } else { | |||
10649 | // Incoming value: jbuf | |||
10650 | // ldr r1, LCPI1_1 | |||
10651 | // add r1, pc, r1 | |||
10652 | // str r1, [$jbuf, #+4] ; &jbuf[1] | |||
10653 | Register NewVReg1 = MRI->createVirtualRegister(TRC); | |||
10654 | BuildMI(*MBB, MI, dl, TII->get(ARM::LDRi12), NewVReg1) | |||
10655 | .addConstantPoolIndex(CPI) | |||
10656 | .addImm(0) | |||
10657 | .addMemOperand(CPMMO) | |||
10658 | .add(predOps(ARMCC::AL)); | |||
10659 | Register NewVReg2 = MRI->createVirtualRegister(TRC); | |||
10660 | BuildMI(*MBB, MI, dl, TII->get(ARM::PICADD), NewVReg2) | |||
10661 | .addReg(NewVReg1, RegState::Kill) | |||
10662 | .addImm(PCLabelId) | |||
10663 | .add(predOps(ARMCC::AL)); | |||
10664 | BuildMI(*MBB, MI, dl, TII->get(ARM::STRi12)) | |||
10665 | .addReg(NewVReg2, RegState::Kill) | |||
10666 | .addFrameIndex(FI) | |||
10667 | .addImm(36) // &jbuf[1] :: pc | |||
10668 | .addMemOperand(FIMMOSt) | |||
10669 | .add(predOps(ARMCC::AL)); | |||
10670 | } | |||
10671 | } | |||
10672 | ||||
10673 | void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI, | |||
10674 | MachineBasicBlock *MBB) const { | |||
10675 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); | |||
10676 | DebugLoc dl = MI.getDebugLoc(); | |||
10677 | MachineFunction *MF = MBB->getParent(); | |||
10678 | MachineRegisterInfo *MRI = &MF->getRegInfo(); | |||
10679 | MachineFrameInfo &MFI = MF->getFrameInfo(); | |||
10680 | int FI = MFI.getFunctionContextIndex(); | |||
10681 | ||||
10682 | const TargetRegisterClass *TRC = Subtarget->isThumb() ? &ARM::tGPRRegClass | |||
10683 | : &ARM::GPRnopcRegClass; | |||
10684 | ||||
10685 | // Get a mapping of the call site numbers to all of the landing pads they're | |||
10686 | // associated with. | |||
10687 | DenseMap<unsigned, SmallVector<MachineBasicBlock*, 2>> CallSiteNumToLPad; | |||
10688 | unsigned MaxCSNum = 0; | |||
10689 | for (MachineBasicBlock &BB : *MF) { | |||
10690 | if (!BB.isEHPad()) | |||
10691 | continue; | |||
10692 | ||||
10693 | // FIXME: We should assert that the EH_LABEL is the first MI in the landing | |||
10694 | // pad. | |||
10695 | for (MachineInstr &II : BB) { | |||
10696 | if (!II.isEHLabel()) | |||
10697 | continue; | |||
10698 | ||||
10699 | MCSymbol *Sym = II.getOperand(0).getMCSymbol(); | |||
10700 | if (!MF->hasCallSiteLandingPad(Sym)) continue; | |||
10701 | ||||
10702 | SmallVectorImpl<unsigned> &CallSiteIdxs = MF->getCallSiteLandingPad(Sym); | |||
10703 | for (unsigned Idx : CallSiteIdxs) { | |||
10704 | CallSiteNumToLPad[Idx].push_back(&BB); | |||
10705 | MaxCSNum = std::max(MaxCSNum, Idx); | |||
10706 | } | |||
10707 | break; | |||
10708 | } | |||
10709 | } | |||
10710 | ||||
10711 | // Get an ordered list of the machine basic blocks for the jump table. | |||
10712 | std::vector<MachineBasicBlock*> LPadList; | |||
10713 | SmallPtrSet<MachineBasicBlock*, 32> InvokeBBs; | |||
10714 | LPadList.reserve(CallSiteNumToLPad.size()); | |||
10715 | for (unsigned I = 1; I <= MaxCSNum; ++I) { | |||
10716 | SmallVectorImpl<MachineBasicBlock*> &MBBList = CallSiteNumToLPad[I]; | |||
10717 | for (MachineBasicBlock *MBB : MBBList) { | |||
10718 | LPadList.push_back(MBB); | |||
10719 | InvokeBBs.insert(MBB->pred_begin(), MBB->pred_end()); | |||
10720 | } | |||
10721 | } | |||
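// LPadList is deliberately built in ascending call-site order: at run time
// the SjLj personality records the call-site number of the unwound frame in
// the function context, and the dispatch code below uses that value to index
// this jump table.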
10722 | ||||
10723 | assert(!LPadList.empty() &&
10724 |        "No landing pad destinations for the dispatch jump table!");
10725 | ||||
10726 | // Create the jump table and associated information. | |||
10727 | MachineJumpTableInfo *JTI = | |||
10728 | MF->getOrCreateJumpTableInfo(MachineJumpTableInfo::EK_Inline); | |||
10729 | unsigned MJTI = JTI->createJumpTableIndex(LPadList); | |||
10730 | ||||
10731 | // Create the MBBs for the dispatch code. | |||
10732 | ||||
10733 | // Shove the dispatch's address into the return slot in the function context. | |||
10734 | MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock(); | |||
10735 | DispatchBB->setIsEHPad(); | |||
10736 | ||||
10737 | MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock(); | |||
10738 | unsigned trap_opcode; | |||
10739 | if (Subtarget->isThumb()) | |||
10740 | trap_opcode = ARM::tTRAP; | |||
10741 | else | |||
10742 | trap_opcode = Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP; | |||
10743 | ||||
10744 | BuildMI(TrapBB, dl, TII->get(trap_opcode)); | |||
10745 | DispatchBB->addSuccessor(TrapBB); | |||
10746 | ||||
10747 | MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock(); | |||
10748 | DispatchBB->addSuccessor(DispContBB); | |||
10749 | ||||
10750 | // Insert the new MBBs into the function.
10751 | MF->insert(MF->end(), DispatchBB); | |||
10752 | MF->insert(MF->end(), DispContBB); | |||
10753 | MF->insert(MF->end(), TrapBB); | |||
10754 | ||||
10755 | // Insert code into the entry block that creates and registers the function | |||
10756 | // context. | |||
10757 | SetupEntryBlockForSjLj(MI, MBB, DispatchBB, FI); | |||
10758 | ||||
10759 | MachineMemOperand *FIMMOLd = MF->getMachineMemOperand( | |||
10760 | MachinePointerInfo::getFixedStack(*MF, FI), | |||
10761 | MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile, 4, Align(4)); | |||
10762 | ||||
10763 | MachineInstrBuilder MIB; | |||
10764 | MIB = BuildMI(DispatchBB, dl, TII->get(ARM::Int_eh_sjlj_dispatchsetup)); | |||
10765 | ||||
10766 | const ARMBaseInstrInfo *AII = static_cast<const ARMBaseInstrInfo*>(TII); | |||
10767 | const ARMBaseRegisterInfo &RI = AII->getRegisterInfo(); | |||
10768 | ||||
10769 | // Add a register mask with no preserved registers. This results in all | |||
10770 | // registers being marked as clobbered. This can't work if the dispatch block | |||
10771 | // is in a Thumb1 function and is linked with ARM code which uses the FP | |||
10772 | // registers, as there is no way to preserve the FP registers in Thumb1 mode. | |||
10773 | MIB.addRegMask(RI.getSjLjDispatchPreservedMask(*MF)); | |||
10774 | ||||
10775 | bool IsPositionIndependent = isPositionIndependent(); | |||
10776 | unsigned NumLPads = LPadList.size(); | |||
10777 | if (Subtarget->isThumb2()) { | |||
10778 | Register NewVReg1 = MRI->createVirtualRegister(TRC); | |||
10779 | BuildMI(DispatchBB, dl, TII->get(ARM::t2LDRi12), NewVReg1) | |||
10780 | .addFrameIndex(FI) | |||
10781 | .addImm(4) | |||
10782 | .addMemOperand(FIMMOLd) | |||
10783 | .add(predOps(ARMCC::AL)); | |||
10784 | ||||
10785 | if (NumLPads < 256) { | |||
10786 | BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPri)) | |||
10787 | .addReg(NewVReg1) | |||
10788 | .addImm(LPadList.size()) | |||
10789 | .add(predOps(ARMCC::AL)); | |||
10790 | } else { | |||
10791 | Register VReg1 = MRI->createVirtualRegister(TRC); | |||
10792 | BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVi16), VReg1) | |||
10793 | .addImm(NumLPads & 0xFFFF) | |||
10794 | .add(predOps(ARMCC::AL)); | |||
10795 | ||||
10796 | unsigned VReg2 = VReg1; | |||
10797 | if ((NumLPads & 0xFFFF0000) != 0) { | |||
10798 | VReg2 = MRI->createVirtualRegister(TRC); | |||
10799 | BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVTi16), VReg2) | |||
10800 | .addReg(VReg1) | |||
10801 | .addImm(NumLPads >> 16) | |||
10802 | .add(predOps(ARMCC::AL)); | |||
10803 | } | |||
10804 | ||||
10805 | BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPrr)) | |||
10806 | .addReg(NewVReg1) | |||
10807 | .addReg(VReg2) | |||
10808 | .add(predOps(ARMCC::AL)); | |||
10809 | } | |||
10810 | ||||
10811 | BuildMI(DispatchBB, dl, TII->get(ARM::t2Bcc)) | |||
10812 | .addMBB(TrapBB) | |||
10813 | .addImm(ARMCC::HI) | |||
10814 | .addReg(ARM::CPSR); | |||
10815 | ||||
10816 | Register NewVReg3 = MRI->createVirtualRegister(TRC); | |||
10817 | BuildMI(DispContBB, dl, TII->get(ARM::t2LEApcrelJT), NewVReg3) | |||
10818 | .addJumpTableIndex(MJTI) | |||
10819 | .add(predOps(ARMCC::AL)); | |||
10820 | ||||
10821 | Register NewVReg4 = MRI->createVirtualRegister(TRC); | |||
10822 | BuildMI(DispContBB, dl, TII->get(ARM::t2ADDrs), NewVReg4) | |||
10823 | .addReg(NewVReg3, RegState::Kill) | |||
10824 | .addReg(NewVReg1) | |||
10825 | .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2)) | |||
10826 | .add(predOps(ARMCC::AL)) | |||
10827 | .add(condCodeOp()); | |||
10828 | ||||
10829 | BuildMI(DispContBB, dl, TII->get(ARM::t2BR_JT)) | |||
10830 | .addReg(NewVReg4, RegState::Kill) | |||
10831 | .addReg(NewVReg1) | |||
10832 | .addJumpTableIndex(MJTI); | |||
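// Taken together, the Thumb-2 dispatch above amounts to roughly the
// following (register names illustrative only):
//   ldr  rX, [ctx, #4]       ; reload the call-site index
//   cmp  rX, #NumLPads       ; range check, via movw/movt for large counts
//   bhi  .LTrapBB
//   adr  rY, .LJTI           ; address of the inline jump table
//   add  rY, rY, rX, lsl #2  ; scale the index by the entry size
//   ; t2BR_JT then branches through the selected entry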
10833 | } else if (Subtarget->isThumb()) { | |||
10834 | Register NewVReg1 = MRI->createVirtualRegister(TRC); | |||
10835 | BuildMI(DispatchBB, dl, TII->get(ARM::tLDRspi), NewVReg1) | |||
10836 | .addFrameIndex(FI) | |||
10837 | .addImm(1) | |||
10838 | .addMemOperand(FIMMOLd) | |||
10839 | .add(predOps(ARMCC::AL)); | |||
10840 | ||||
10841 | if (NumLPads < 256) { | |||
10842 | BuildMI(DispatchBB, dl, TII->get(ARM::tCMPi8)) | |||
10843 | .addReg(NewVReg1) | |||
10844 | .addImm(NumLPads) | |||
10845 | .add(predOps(ARMCC::AL)); | |||
10846 | } else { | |||
10847 | MachineConstantPool *ConstantPool = MF->getConstantPool(); | |||
10848 | Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext()); | |||
10849 | const Constant *C = ConstantInt::get(Int32Ty, NumLPads); | |||
10850 | ||||
10851 | // MachineConstantPool wants an explicit alignment. | |||
10852 | Align Alignment = MF->getDataLayout().getPrefTypeAlign(Int32Ty); | |||
10853 | unsigned Idx = ConstantPool->getConstantPoolIndex(C, Alignment); | |||
10854 | ||||
10855 | Register VReg1 = MRI->createVirtualRegister(TRC); | |||
10856 | BuildMI(DispatchBB, dl, TII->get(ARM::tLDRpci)) | |||
10857 | .addReg(VReg1, RegState::Define) | |||
10858 | .addConstantPoolIndex(Idx) | |||
10859 | .add(predOps(ARMCC::AL)); | |||
10860 | BuildMI(DispatchBB, dl, TII->get(ARM::tCMPr)) | |||
10861 | .addReg(NewVReg1) | |||
10862 | .addReg(VReg1) | |||
10863 | .add(predOps(ARMCC::AL)); | |||
10864 | } | |||
10865 | ||||
10866 | BuildMI(DispatchBB, dl, TII->get(ARM::tBcc)) | |||
10867 | .addMBB(TrapBB) | |||
10868 | .addImm(ARMCC::HI) | |||
10869 | .addReg(ARM::CPSR); | |||
10870 | ||||
10871 | Register NewVReg2 = MRI->createVirtualRegister(TRC); | |||
10872 | BuildMI(DispContBB, dl, TII->get(ARM::tLSLri), NewVReg2) | |||
10873 | .addReg(ARM::CPSR, RegState::Define) | |||
10874 | .addReg(NewVReg1) | |||
10875 | .addImm(2) | |||
10876 | .add(predOps(ARMCC::AL)); | |||
10877 | ||||
10878 | Register NewVReg3 = MRI->createVirtualRegister(TRC); | |||
10879 | BuildMI(DispContBB, dl, TII->get(ARM::tLEApcrelJT), NewVReg3) | |||
10880 | .addJumpTableIndex(MJTI) | |||
10881 | .add(predOps(ARMCC::AL)); | |||
10882 | ||||
10883 | Register NewVReg4 = MRI->createVirtualRegister(TRC); | |||
10884 | BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg4) | |||
10885 | .addReg(ARM::CPSR, RegState::Define) | |||
10886 | .addReg(NewVReg2, RegState::Kill) | |||
10887 | .addReg(NewVReg3) | |||
10888 | .add(predOps(ARMCC::AL)); | |||
10889 | ||||
10890 | MachineMemOperand *JTMMOLd = | |||
10891 | MF->getMachineMemOperand(MachinePointerInfo::getJumpTable(*MF), | |||
10892 | MachineMemOperand::MOLoad, 4, Align(4)); | |||
10893 | ||||
10894 | Register NewVReg5 = MRI->createVirtualRegister(TRC); | |||
10895 | BuildMI(DispContBB, dl, TII->get(ARM::tLDRi), NewVReg5) | |||
10896 | .addReg(NewVReg4, RegState::Kill) | |||
10897 | .addImm(0) | |||
10898 | .addMemOperand(JTMMOLd) | |||
10899 | .add(predOps(ARMCC::AL)); | |||
10900 | ||||
10901 | unsigned NewVReg6 = NewVReg5; | |||
10902 | if (IsPositionIndependent) { | |||
10903 | NewVReg6 = MRI->createVirtualRegister(TRC); | |||
10904 | BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg6) | |||
10905 | .addReg(ARM::CPSR, RegState::Define) | |||
10906 | .addReg(NewVReg5, RegState::Kill) | |||
10907 | .addReg(NewVReg3) | |||
10908 | .add(predOps(ARMCC::AL)); | |||
10909 | } | |||
10910 | ||||
10911 | BuildMI(DispContBB, dl, TII->get(ARM::tBR_JTr)) | |||
10912 | .addReg(NewVReg6, RegState::Kill) | |||
10913 | .addJumpTableIndex(MJTI); | |||
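// Thumb1 has no scaled-register addressing, so above the index is shifted
// left by 2 explicitly (tLSLri) and added to the table base before the
// load. Under PIC the loaded entry is an offset relative to the table
// rather than an absolute address, hence the extra tADDrr feeding tBR_JTr.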
10914 | } else { | |||
10915 | Register NewVReg1 = MRI->createVirtualRegister(TRC); | |||
10916 | BuildMI(DispatchBB, dl, TII->get(ARM::LDRi12), NewVReg1) | |||
10917 | .addFrameIndex(FI) | |||
10918 | .addImm(4) | |||
10919 | .addMemOperand(FIMMOLd) | |||
10920 | .add(predOps(ARMCC::AL)); | |||
10921 | ||||
10922 | if (NumLPads < 256) { | |||
10923 | BuildMI(DispatchBB, dl, TII->get(ARM::CMPri)) | |||
10924 | .addReg(NewVReg1) | |||
10925 | .addImm(NumLPads) | |||
10926 | .add(predOps(ARMCC::AL)); | |||
10927 | } else if (Subtarget->hasV6T2Ops() && isUInt<16>(NumLPads)) { | |||
10928 | Register VReg1 = MRI->createVirtualRegister(TRC); | |||
10929 | BuildMI(DispatchBB, dl, TII->get(ARM::MOVi16), VReg1) | |||
10930 | .addImm(NumLPads & 0xFFFF) | |||
10931 | .add(predOps(ARMCC::AL)); | |||
10932 | ||||
10933 | unsigned VReg2 = VReg1; | |||
10934 | if ((NumLPads & 0xFFFF0000) != 0) { | |||
10935 | VReg2 = MRI->createVirtualRegister(TRC); | |||
10936 | BuildMI(DispatchBB, dl, TII->get(ARM::MOVTi16), VReg2) | |||
10937 | .addReg(VReg1) | |||
10938 | .addImm(NumLPads >> 16) | |||
10939 | .add(predOps(ARMCC::AL)); | |||
10940 | } | |||
10941 | ||||
10942 | BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr)) | |||
10943 | .addReg(NewVReg1) | |||
10944 | .addReg(VReg2) | |||
10945 | .add(predOps(ARMCC::AL)); | |||
10946 | } else { | |||
10947 | MachineConstantPool *ConstantPool = MF->getConstantPool(); | |||
10948 | Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext()); | |||
10949 | const Constant *C = ConstantInt::get(Int32Ty, NumLPads); | |||
10950 | ||||
10951 | // MachineConstantPool wants an explicit alignment. | |||
10952 | Align Alignment = MF->getDataLayout().getPrefTypeAlign(Int32Ty); | |||
10953 | unsigned Idx = ConstantPool->getConstantPoolIndex(C, Alignment); | |||
10954 | ||||
10955 | Register VReg1 = MRI->createVirtualRegister(TRC); | |||
10956 | BuildMI(DispatchBB, dl, TII->get(ARM::LDRcp)) | |||
10957 | .addReg(VReg1, RegState::Define) | |||
10958 | .addConstantPoolIndex(Idx) | |||
10959 | .addImm(0) | |||
10960 | .add(predOps(ARMCC::AL)); | |||
10961 | BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr)) | |||
10962 | .addReg(NewVReg1) | |||
10963 | .addReg(VReg1, RegState::Kill) | |||
10964 | .add(predOps(ARMCC::AL)); | |||
10965 | } | |||
10966 | ||||
10967 | BuildMI(DispatchBB, dl, TII->get(ARM::Bcc)) | |||
10968 | .addMBB(TrapBB) | |||
10969 | .addImm(ARMCC::HI) | |||
10970 | .addReg(ARM::CPSR); | |||
10971 | ||||
10972 | Register NewVReg3 = MRI->createVirtualRegister(TRC); | |||
10973 | BuildMI(DispContBB, dl, TII->get(ARM::MOVsi), NewVReg3) | |||
10974 | .addReg(NewVReg1) | |||
10975 | .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2)) | |||
10976 | .add(predOps(ARMCC::AL)) | |||
10977 | .add(condCodeOp()); | |||
10978 | Register NewVReg4 = MRI->createVirtualRegister(TRC); | |||
10979 | BuildMI(DispContBB, dl, TII->get(ARM::LEApcrelJT), NewVReg4) | |||
10980 | .addJumpTableIndex(MJTI) | |||
10981 | .add(predOps(ARMCC::AL)); | |||
10982 | ||||
10983 | MachineMemOperand *JTMMOLd = | |||
10984 | MF->getMachineMemOperand(MachinePointerInfo::getJumpTable(*MF), | |||
10985 | MachineMemOperand::MOLoad, 4, Align(4)); | |||
10986 | Register NewVReg5 = MRI->createVirtualRegister(TRC); | |||
10987 | BuildMI(DispContBB, dl, TII->get(ARM::LDRrs), NewVReg5) | |||
10988 | .addReg(NewVReg3, RegState::Kill) | |||
10989 | .addReg(NewVReg4) | |||
10990 | .addImm(0) | |||
10991 | .addMemOperand(JTMMOLd) | |||
10992 | .add(predOps(ARMCC::AL)); | |||
10993 | ||||
10994 | if (IsPositionIndependent) { | |||
10995 | BuildMI(DispContBB, dl, TII->get(ARM::BR_JTadd)) | |||
10996 | .addReg(NewVReg5, RegState::Kill) | |||
10997 | .addReg(NewVReg4) | |||
10998 | .addJumpTableIndex(MJTI); | |||
10999 | } else { | |||
11000 | BuildMI(DispContBB, dl, TII->get(ARM::BR_JTr)) | |||
11001 | .addReg(NewVReg5, RegState::Kill) | |||
11002 | .addJumpTableIndex(MJTI); | |||
11003 | } | |||
11004 | } | |||
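// The ARM path makes the same PIC distinction: under PIC the jump-table
// entries are table-relative offsets, so BR_JTadd adds the loaded entry to
// the table base held in NewVReg4, while in the absolute case BR_JTr jumps
// to the loaded value directly.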
11005 | ||||
11006 | // Add the jump table entries as successors to the MBB. | |||
11007 | SmallPtrSet<MachineBasicBlock*, 8> SeenMBBs; | |||
11008 | for (MachineBasicBlock *CurMBB : LPadList) { | |||
11009 | if (SeenMBBs.insert(CurMBB).second) | |||
11010 | DispContBB->addSuccessor(CurMBB); | |||
11011 | } | |||
11012 | ||||
11013 | // N.B. the order the invoke BBs are processed in doesn't matter here. | |||
11014 | const MCPhysReg *SavedRegs = RI.getCalleeSavedRegs(MF); | |||
11015 | SmallVector<MachineBasicBlock*, 64> MBBLPads; | |||
11016 | for (MachineBasicBlock *BB : InvokeBBs) { | |||
11017 | ||||
11018 | // Remove the landing pad successor from the invoke block and replace it | |||
11019 | // with the new dispatch block. | |||
11020 | SmallVector<MachineBasicBlock*, 4> Successors(BB->successors()); | |||
11021 | while (!Successors.empty()) { | |||
11022 | MachineBasicBlock *SMBB = Successors.pop_back_val(); | |||
11023 | if (SMBB->isEHPad()) { | |||
11024 | BB->removeSuccessor(SMBB); | |||
11025 | MBBLPads.push_back(SMBB); | |||
11026 | } | |||
11027 | } | |||
11028 | ||||
11029 | BB->addSuccessor(DispatchBB, BranchProbability::getZero()); | |||
11030 | BB->normalizeSuccProbs(); | |||
11031 | ||||
11032 | // Find the invoke call and mark all of the callee-saved registers as | |||
11033 | // 'implicit defined' so that they're spilled. This prevents code from | |||
11034 | // moving instructions to before the EH block, where they will never be | |||
11035 | // executed. | |||
11036 | for (MachineBasicBlock::reverse_iterator | |||
11037 | II = BB->rbegin(), IE = BB->rend(); II != IE; ++II) { | |||
11038 | if (!II->isCall()) continue; | |||
11039 | ||||
11040 | DenseMap<unsigned, bool> DefRegs; | |||
11041 | for (MachineInstr::mop_iterator | |||
11042 | OI = II->operands_begin(), OE = II->operands_end(); | |||
11043 | OI != OE; ++OI) { | |||
11044 | if (!OI->isReg()) continue; | |||
11045 | DefRegs[OI->getReg()] = true; | |||
11046 | } | |||
11047 | ||||
11048 | MachineInstrBuilder MIB(*MF, &*II); | |||
11049 | ||||
11050 | for (unsigned i = 0; SavedRegs[i] != 0; ++i) { | |||
11051 | unsigned Reg = SavedRegs[i]; | |||
11052 | if (Subtarget->isThumb2() && | |||
11053 | !ARM::tGPRRegClass.contains(Reg) && | |||
11054 | !ARM::hGPRRegClass.contains(Reg)) | |||
11055 | continue; | |||
11056 | if (Subtarget->isThumb1Only() && !ARM::tGPRRegClass.contains(Reg)) | |||
11057 | continue; | |||
11058 | if (!Subtarget->isThumb() && !ARM::GPRRegClass.contains(Reg)) | |||
11059 | continue; | |||
11060 | if (!DefRegs[Reg]) | |||
11061 | MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead); | |||
11062 | } | |||
11063 | ||||
11064 | break; | |||
11065 | } | |||
11066 | } | |||
11067 | ||||
11068 | // Mark all former landing pads as non-landing pads. The dispatch is the only | |||
11069 | // landing pad now. | |||
11070 | for (MachineBasicBlock *MBBLPad : MBBLPads) | |||
11071 | MBBLPad->setIsEHPad(false); | |||
11072 | ||||
11073 | // The instruction is gone now. | |||
11074 | MI.eraseFromParent(); | |||
11075 | } | |||
11076 | ||||
11077 | static | |||
11078 | MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) { | |||
11079 | for (MachineBasicBlock *S : MBB->successors()) | |||
11080 | if (S != Succ) | |||
11081 | return S; | |||
11082 | llvm_unreachable("Expecting a BB with two successors!")::llvm::llvm_unreachable_internal("Expecting a BB with two successors!" , "llvm/lib/Target/ARM/ARMISelLowering.cpp", 11082); | |||
11083 | } | |||
11084 | ||||
11085 | /// Return the load opcode for a given load size. If the load size is >= 8, a
11086 | /// NEON opcode is returned.
11087 | static unsigned getLdOpcode(unsigned LdSize, bool IsThumb1, bool IsThumb2) { | |||
11088 | if (LdSize >= 8) | |||
11089 | return LdSize == 16 ? ARM::VLD1q32wb_fixed | |||
11090 | : LdSize == 8 ? ARM::VLD1d32wb_fixed : 0; | |||
11091 | if (IsThumb1) | |||
11092 | return LdSize == 4 ? ARM::tLDRi | |||
11093 | : LdSize == 2 ? ARM::tLDRHi | |||
11094 | : LdSize == 1 ? ARM::tLDRBi : 0; | |||
11095 | if (IsThumb2) | |||
11096 | return LdSize == 4 ? ARM::t2LDR_POST | |||
11097 | : LdSize == 2 ? ARM::t2LDRH_POST | |||
11098 | : LdSize == 1 ? ARM::t2LDRB_POST : 0; | |||
11099 | return LdSize == 4 ? ARM::LDR_POST_IMM | |||
11100 | : LdSize == 2 ? ARM::LDRH_POST | |||
11101 | : LdSize == 1 ? ARM::LDRB_POST_IMM : 0; | |||
11102 | } | |||
11103 | ||||
11104 | /// Return the store opcode for a given store size. If the store size is >= 8,
11105 | /// a NEON opcode is returned.
11106 | static unsigned getStOpcode(unsigned StSize, bool IsThumb1, bool IsThumb2) { | |||
11107 | if (StSize >= 8) | |||
11108 | return StSize == 16 ? ARM::VST1q32wb_fixed | |||
11109 | : StSize == 8 ? ARM::VST1d32wb_fixed : 0; | |||
11110 | if (IsThumb1) | |||
11111 | return StSize == 4 ? ARM::tSTRi | |||
11112 | : StSize == 2 ? ARM::tSTRHi | |||
11113 | : StSize == 1 ? ARM::tSTRBi : 0; | |||
11114 | if (IsThumb2) | |||
11115 | return StSize == 4 ? ARM::t2STR_POST | |||
11116 | : StSize == 2 ? ARM::t2STRH_POST | |||
11117 | : StSize == 1 ? ARM::t2STRB_POST : 0; | |||
11118 | return StSize == 4 ? ARM::STR_POST_IMM | |||
11119 | : StSize == 2 ? ARM::STRH_POST | |||
11120 | : StSize == 1 ? ARM::STRB_POST_IMM : 0; | |||
11121 | } | |||
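// The two helpers above encode the same size -> opcode table:
//   16 bytes: VLD1q32wb_fixed / VST1q32wb_fixed (NEON q-register,
//             post-increment writeback)
//    8 bytes: VLD1d32wb_fixed / VST1d32wb_fixed (NEON d-register)
//    4/2/1 bytes: the post-indexed LDR/STR variant of the current
//             instruction set; Thumb1 is the exception and pairs a plain
//             load/store with an explicit tADDi8 pointer update (see
//             emitPostLd/emitPostSt below).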
11122 | ||||
11123 | /// Emit a post-increment load operation with given size. The instructions | |||
11124 | /// will be added to BB at Pos. | |||
11125 | static void emitPostLd(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos, | |||
11126 | const TargetInstrInfo *TII, const DebugLoc &dl, | |||
11127 | unsigned LdSize, unsigned Data, unsigned AddrIn, | |||
11128 | unsigned AddrOut, bool IsThumb1, bool IsThumb2) { | |||
11129 | unsigned LdOpc = getLdOpcode(LdSize, IsThumb1, IsThumb2); | |||
11130 | assert(LdOpc != 0 && "Should have a load opcode");
11131 | if (LdSize >= 8) { | |||
11132 | BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data) | |||
11133 | .addReg(AddrOut, RegState::Define) | |||
11134 | .addReg(AddrIn) | |||
11135 | .addImm(0) | |||
11136 | .add(predOps(ARMCC::AL)); | |||
11137 | } else if (IsThumb1) { | |||
11138 | // load + update AddrIn | |||
11139 | BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data) | |||
11140 | .addReg(AddrIn) | |||
11141 | .addImm(0) | |||
11142 | .add(predOps(ARMCC::AL)); | |||
11143 | BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut) | |||
11144 | .add(t1CondCodeOp()) | |||
11145 | .addReg(AddrIn) | |||
11146 | .addImm(LdSize) | |||
11147 | .add(predOps(ARMCC::AL)); | |||
11148 | } else if (IsThumb2) { | |||
11149 | BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data) | |||
11150 | .addReg(AddrOut, RegState::Define) | |||
11151 | .addReg(AddrIn) | |||
11152 | .addImm(LdSize) | |||
11153 | .add(predOps(ARMCC::AL)); | |||
11154 | } else { // arm | |||
11155 | BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data) | |||
11156 | .addReg(AddrOut, RegState::Define) | |||
11157 | .addReg(AddrIn) | |||
11158 | .addReg(0) | |||
11159 | .addImm(LdSize) | |||
11160 | .add(predOps(ARMCC::AL)); | |||
11161 | } | |||
11162 | } | |||
11163 | ||||
11164 | /// Emit a post-increment store operation with given size. The instructions | |||
11165 | /// will be added to BB at Pos. | |||
11166 | static void emitPostSt(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos, | |||
11167 | const TargetInstrInfo *TII, const DebugLoc &dl, | |||
11168 | unsigned StSize, unsigned Data, unsigned AddrIn, | |||
11169 | unsigned AddrOut, bool IsThumb1, bool IsThumb2) { | |||
11170 | unsigned StOpc = getStOpcode(StSize, IsThumb1, IsThumb2); | |||
11171 | assert(StOpc != 0 && "Should have a store opcode");
11172 | if (StSize >= 8) { | |||
11173 | BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut) | |||
11174 | .addReg(AddrIn) | |||
11175 | .addImm(0) | |||
11176 | .addReg(Data) | |||
11177 | .add(predOps(ARMCC::AL)); | |||
11178 | } else if (IsThumb1) { | |||
11179 | // store + update AddrIn | |||
11180 | BuildMI(*BB, Pos, dl, TII->get(StOpc)) | |||
11181 | .addReg(Data) | |||
11182 | .addReg(AddrIn) | |||
11183 | .addImm(0) | |||
11184 | .add(predOps(ARMCC::AL)); | |||
11185 | BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut) | |||
11186 | .add(t1CondCodeOp()) | |||
11187 | .addReg(AddrIn) | |||
11188 | .addImm(StSize) | |||
11189 | .add(predOps(ARMCC::AL)); | |||
11190 | } else if (IsThumb2) { | |||
11191 | BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut) | |||
11192 | .addReg(Data) | |||
11193 | .addReg(AddrIn) | |||
11194 | .addImm(StSize) | |||
11195 | .add(predOps(ARMCC::AL)); | |||
11196 | } else { // arm | |||
11197 | BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut) | |||
11198 | .addReg(Data) | |||
11199 | .addReg(AddrIn) | |||
11200 | .addReg(0) | |||
11201 | .addImm(StSize) | |||
11202 | .add(predOps(ARMCC::AL)); | |||
11203 | } | |||
11204 | } | |||
11205 | ||||
11206 | MachineBasicBlock * | |||
11207 | ARMTargetLowering::EmitStructByval(MachineInstr &MI, | |||
11208 | MachineBasicBlock *BB) const { | |||
11209 | // This pseudo instruction has 4 operands: dst, src, size, alignment.
11210 | // We expand it to a loop if size > Subtarget->getMaxInlineSizeThreshold();
11211 | // otherwise we generate unrolled scalar copies.
11212 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); | |||
11213 | const BasicBlock *LLVM_BB = BB->getBasicBlock(); | |||
11214 | MachineFunction::iterator It = ++BB->getIterator(); | |||
11215 | ||||
11216 | Register dest = MI.getOperand(0).getReg(); | |||
11217 | Register src = MI.getOperand(1).getReg(); | |||
11218 | unsigned SizeVal = MI.getOperand(2).getImm(); | |||
11219 | unsigned Alignment = MI.getOperand(3).getImm(); | |||
11220 | DebugLoc dl = MI.getDebugLoc(); | |||
11221 | ||||
11222 | MachineFunction *MF = BB->getParent(); | |||
11223 | MachineRegisterInfo &MRI = MF->getRegInfo(); | |||
11224 | unsigned UnitSize = 0; | |||
11225 | const TargetRegisterClass *TRC = nullptr; | |||
11226 | const TargetRegisterClass *VecTRC = nullptr; | |||
11227 | ||||
11228 | bool IsThumb1 = Subtarget->isThumb1Only(); | |||
11229 | bool IsThumb2 = Subtarget->isThumb2(); | |||
11230 | bool IsThumb = Subtarget->isThumb(); | |||
11231 | ||||
11232 | if (Alignment & 1) { | |||
11233 | UnitSize = 1; | |||
11234 | } else if (Alignment & 2) { | |||
11235 | UnitSize = 2; | |||
11236 | } else { | |||
11237 | // Check whether we can use NEON instructions. | |||
11238 | if (!MF->getFunction().hasFnAttribute(Attribute::NoImplicitFloat) && | |||
11239 | Subtarget->hasNEON()) { | |||
11240 | if ((Alignment % 16 == 0) && SizeVal >= 16) | |||
11241 | UnitSize = 16; | |||
11242 | else if ((Alignment % 8 == 0) && SizeVal >= 8) | |||
11243 | UnitSize = 8; | |||
11244 | } | |||
11245 | // Can't use NEON instructions. | |||
11246 | if (UnitSize == 0) | |||
11247 | UnitSize = 4; | |||
11248 | } | |||
11249 | ||||
11250 | // Select the correct opcode and register class for unit size load/store | |||
11251 | bool IsNeon = UnitSize >= 8; | |||
11252 | TRC = IsThumb ? &ARM::tGPRRegClass : &ARM::GPRRegClass; | |||
11253 | if (IsNeon) | |||
11254 | VecTRC = UnitSize == 16 ? &ARM::DPairRegClass | |||
11255 | : UnitSize == 8 ? &ARM::DPRRegClass | |||
11256 | : nullptr; | |||
11257 | ||||
11258 | unsigned BytesLeft = SizeVal % UnitSize; | |||
11259 | unsigned LoopSize = SizeVal - BytesLeft; | |||
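// For example, copying 35 bytes with 16-byte-aligned pointers on a NEON
// target gives UnitSize = 16, LoopSize = 32 and BytesLeft = 3: two
// q-register copies followed by three byte copies.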
11260 | ||||
11261 | if (SizeVal <= Subtarget->getMaxInlineSizeThreshold()) { | |||
11262 | // Use LDR and STR to copy. | |||
11263 | // [scratch, srcOut] = LDR_POST(srcIn, UnitSize) | |||
11264 | // [destOut] = STR_POST(scratch, destIn, UnitSize) | |||
11265 | unsigned srcIn = src; | |||
11266 | unsigned destIn = dest; | |||
11267 | for (unsigned i = 0; i < LoopSize; i+=UnitSize) { | |||
11268 | Register srcOut = MRI.createVirtualRegister(TRC); | |||
11269 | Register destOut = MRI.createVirtualRegister(TRC); | |||
11270 | Register scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC); | |||
11271 | emitPostLd(BB, MI, TII, dl, UnitSize, scratch, srcIn, srcOut, | |||
11272 | IsThumb1, IsThumb2); | |||
11273 | emitPostSt(BB, MI, TII, dl, UnitSize, scratch, destIn, destOut, | |||
11274 | IsThumb1, IsThumb2); | |||
11275 | srcIn = srcOut; | |||
11276 | destIn = destOut; | |||
11277 | } | |||
11278 | ||||
11279 | // Handle the leftover bytes with LDRB and STRB. | |||
11280 | // [scratch, srcOut] = LDRB_POST(srcIn, 1) | |||
11281 | // [destOut] = STRB_POST(scratch, destIn, 1) | |||
11282 | for (unsigned i = 0; i < BytesLeft; i++) { | |||
11283 | Register srcOut = MRI.createVirtualRegister(TRC); | |||
11284 | Register destOut = MRI.createVirtualRegister(TRC); | |||
11285 | Register scratch = MRI.createVirtualRegister(TRC); | |||
11286 | emitPostLd(BB, MI, TII, dl, 1, scratch, srcIn, srcOut, | |||
11287 | IsThumb1, IsThumb2); | |||
11288 | emitPostSt(BB, MI, TII, dl, 1, scratch, destIn, destOut, | |||
11289 | IsThumb1, IsThumb2); | |||
11290 | srcIn = srcOut; | |||
11291 | destIn = destOut; | |||
11292 | } | |||
11293 | MI.eraseFromParent(); // The instruction is gone now. | |||
11294 | return BB; | |||
11295 | } | |||
11296 | ||||
11297 | // Expand the pseudo op to a loop. | |||
11298 | // thisMBB: | |||
11299 | // ... | |||
11300 | // movw varEnd, # --> if the target has movw/movt
11301 | // movt varEnd, #
11302 | // ldrcp varEnd, idx --> otherwise, via the constant pool
11303 | // fallthrough --> loopMBB | |||
11304 | // loopMBB: | |||
11305 | // PHI varPhi, varEnd, varLoop | |||
11306 | // PHI srcPhi, src, srcLoop | |||
11307 | // PHI destPhi, dst, destLoop | |||
11308 | // [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize) | |||
11309 | // [destLoop] = STR_POST(scratch, destPhi, UnitSize) | |||
11310 | // subs varLoop, varPhi, #UnitSize | |||
11311 | // bne loopMBB | |||
11312 | // fallthrough --> exitMBB | |||
11313 | // exitMBB: | |||
11314 | // epilogue to handle left-over bytes | |||
11315 | // [scratch, srcOut] = LDRB_POST(srcLoop, 1) | |||
11316 | // [destOut] = STRB_POST(scratch, destLoop, 1) | |||
11317 | MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); | |||
11318 | MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); | |||
11319 | MF->insert(It, loopMBB); | |||
11320 | MF->insert(It, exitMBB); | |||
11321 | ||||
11322 | // Transfer the remainder of BB and its successor edges to exitMBB. | |||
11323 | exitMBB->splice(exitMBB->begin(), BB, | |||
11324 | std::next(MachineBasicBlock::iterator(MI)), BB->end()); | |||
11325 | exitMBB->transferSuccessorsAndUpdatePHIs(BB); | |||
11326 | ||||
11327 | // Load an immediate to varEnd. | |||
11328 | Register varEnd = MRI.createVirtualRegister(TRC); | |||
11329 | if (Subtarget->useMovt()) { | |||
11330 | unsigned Vtmp = varEnd; | |||
11331 | if ((LoopSize & 0xFFFF0000) != 0) | |||
11332 | Vtmp = MRI.createVirtualRegister(TRC); | |||
11333 | BuildMI(BB, dl, TII->get(IsThumb ? ARM::t2MOVi16 : ARM::MOVi16), Vtmp) | |||
11334 | .addImm(LoopSize & 0xFFFF) | |||
11335 | .add(predOps(ARMCC::AL)); | |||
11336 | ||||
11337 | if ((LoopSize & 0xFFFF0000) != 0) | |||
11338 | BuildMI(BB, dl, TII->get(IsThumb ? ARM::t2MOVTi16 : ARM::MOVTi16), varEnd) | |||
11339 | .addReg(Vtmp) | |||
11340 | .addImm(LoopSize >> 16) | |||
11341 | .add(predOps(ARMCC::AL)); | |||
11342 | } else { | |||
11343 | MachineConstantPool *ConstantPool = MF->getConstantPool(); | |||
11344 | Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext()); | |||
11345 | const Constant *C = ConstantInt::get(Int32Ty, LoopSize); | |||
11346 | ||||
11347 | // MachineConstantPool wants an explicit alignment. | |||
11348 | Align Alignment = MF->getDataLayout().getPrefTypeAlign(Int32Ty); | |||
11349 | unsigned Idx = ConstantPool->getConstantPoolIndex(C, Alignment); | |||
11350 | MachineMemOperand *CPMMO = | |||
11351 | MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF), | |||
11352 | MachineMemOperand::MOLoad, 4, Align(4)); | |||
11353 | ||||
11354 | if (IsThumb) | |||
11355 | BuildMI(*BB, MI, dl, TII->get(ARM::tLDRpci)) | |||
11356 | .addReg(varEnd, RegState::Define) | |||
11357 | .addConstantPoolIndex(Idx) | |||
11358 | .add(predOps(ARMCC::AL)) | |||
11359 | .addMemOperand(CPMMO); | |||
11360 | else | |||
11361 | BuildMI(*BB, MI, dl, TII->get(ARM::LDRcp)) | |||
11362 | .addReg(varEnd, RegState::Define) | |||
11363 | .addConstantPoolIndex(Idx) | |||
11364 | .addImm(0) | |||
11365 | .add(predOps(ARMCC::AL)) | |||
11366 | .addMemOperand(CPMMO); | |||
11367 | } | |||
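// Either way varEnd now holds LoopSize: movw/movt builds the constant in
// two 16-bit halves (the movt is skipped when the high half is zero), while
// targets without movt load it from the constant pool instead.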
11368 | BB->addSuccessor(loopMBB); | |||
11369 | ||||
11370 | // Generate the loop body: | |||
11371 | // varPhi = PHI(varLoop, varEnd) | |||
11372 | // srcPhi = PHI(srcLoop, src) | |||
11373 | // destPhi = PHI(destLoop, dst) | |||
11374 | MachineBasicBlock *entryBB = BB; | |||
11375 | BB = loopMBB; | |||
11376 | Register varLoop = MRI.createVirtualRegister(TRC); | |||
11377 | Register varPhi = MRI.createVirtualRegister(TRC); | |||
11378 | Register srcLoop = MRI.createVirtualRegister(TRC); | |||
11379 | Register srcPhi = MRI.createVirtualRegister(TRC); | |||
11380 | Register destLoop = MRI.createVirtualRegister(TRC); | |||
11381 | Register destPhi = MRI.createVirtualRegister(TRC); | |||
11382 | ||||
11383 | BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), varPhi) | |||
11384 | .addReg(varLoop).addMBB(loopMBB) | |||
11385 | .addReg(varEnd).addMBB(entryBB); | |||
11386 | BuildMI(BB, dl, TII->get(ARM::PHI), srcPhi) | |||
11387 | .addReg(srcLoop).addMBB(loopMBB) | |||
11388 | .addReg(src).addMBB(entryBB); | |||
11389 | BuildMI(BB, dl, TII->get(ARM::PHI), destPhi) | |||
11390 | .addReg(destLoop).addMBB(loopMBB) | |||
11391 | .addReg(dest).addMBB(entryBB); | |||
11392 | ||||
11393 | // [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize) | |||
11394 | // [destLoop] = STR_POST(scratch, destPhi, UnitSize)
11395 | Register scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC); | |||
11396 | emitPostLd(BB, BB->end(), TII, dl, UnitSize, scratch, srcPhi, srcLoop, | |||
11397 | IsThumb1, IsThumb2); | |||
11398 | emitPostSt(BB, BB->end(), TII, dl, UnitSize, scratch, destPhi, destLoop, | |||
11399 | IsThumb1, IsThumb2); | |||
11400 | ||||
11401 | // Decrement loop variable by UnitSize. | |||
11402 | if (IsThumb1) { | |||
11403 | BuildMI(*BB, BB->end(), dl, TII->get(ARM::tSUBi8), varLoop) | |||
11404 | .add(t1CondCodeOp()) | |||
11405 | .addReg(varPhi) | |||
11406 | .addImm(UnitSize) | |||
11407 | .add(predOps(ARMCC::AL)); | |||
11408 | } else { | |||
11409 | MachineInstrBuilder MIB = | |||
11410 | BuildMI(*BB, BB->end(), dl, | |||
11411 | TII->get(IsThumb2 ? ARM::t2SUBri : ARM::SUBri), varLoop); | |||
11412 | MIB.addReg(varPhi) | |||
11413 | .addImm(UnitSize) | |||
11414 | .add(predOps(ARMCC::AL)) | |||
11415 | .add(condCodeOp()); | |||
11416 | MIB->getOperand(5).setReg(ARM::CPSR); | |||
11417 | MIB->getOperand(5).setIsDef(true); | |||
11418 | } | |||
11419 | BuildMI(*BB, BB->end(), dl, | |||
11420 | TII->get(IsThumb1 ? ARM::tBcc : IsThumb2 ? ARM::t2Bcc : ARM::Bcc)) | |||
11421 | .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); | |||
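// For UnitSize = 4 in Thumb-2 the loop body built above is essentially
// (illustrative):
//   .Lloop:
//     ldr  r3, [r1], #4
//     str  r3, [r2], #4
//     subs r0, r0, #4
//     bne  .Lloop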
11422 | ||||
11423 | // loopMBB can loop back to loopMBB or fall through to exitMBB. | |||
11424 | BB->addSuccessor(loopMBB); | |||
11425 | BB->addSuccessor(exitMBB); | |||
11426 | ||||
11427 | // Add epilogue to handle BytesLeft. | |||
11428 | BB = exitMBB; | |||
11429 | auto StartOfExit = exitMBB->begin(); | |||
11430 | ||||
11431 | // [scratch, srcOut] = LDRB_POST(srcLoop, 1) | |||
11432 | // [destOut] = STRB_POST(scratch, destLoop, 1) | |||
11433 | unsigned srcIn = srcLoop; | |||
11434 | unsigned destIn = destLoop; | |||
11435 | for (unsigned i = 0; i < BytesLeft; i++) { | |||
11436 | Register srcOut = MRI.createVirtualRegister(TRC); | |||
11437 | Register destOut = MRI.createVirtualRegister(TRC); | |||
11438 | Register scratch = MRI.createVirtualRegister(TRC); | |||
11439 | emitPostLd(BB, StartOfExit, TII, dl, 1, scratch, srcIn, srcOut, | |||
11440 | IsThumb1, IsThumb2); | |||
11441 | emitPostSt(BB, StartOfExit, TII, dl, 1, scratch, destIn, destOut, | |||
11442 | IsThumb1, IsThumb2); | |||
11443 | srcIn = srcOut; | |||
11444 | destIn = destOut; | |||
11445 | } | |||
11446 | ||||
11447 | MI.eraseFromParent(); // The instruction is gone now. | |||
11448 | return BB; | |||
11449 | } | |||
11450 | ||||
11451 | MachineBasicBlock * | |||
11452 | ARMTargetLowering::EmitLowered__chkstk(MachineInstr &MI, | |||
11453 | MachineBasicBlock *MBB) const { | |||
11454 | const TargetMachine &TM = getTargetMachine(); | |||
11455 | const TargetInstrInfo &TII = *Subtarget->getInstrInfo(); | |||
11456 | DebugLoc DL = MI.getDebugLoc(); | |||
11457 | ||||
11458 | assert(Subtarget->isTargetWindows() &&
11459 |        "__chkstk is only supported on Windows");
11460 | assert(Subtarget->isThumb2() && "Windows on ARM requires Thumb-2 mode");
11461 | ||||
11462 | // __chkstk takes the number of words to allocate on the stack in R4, and | |||
11463 | // returns the stack adjustment in number of bytes in R4. This will not | |||
11464 | // clobber any other registers (other than the obvious lr).
11465 | // | |||
11466 | // Although, technically, IP should be considered a register which may be | |||
11467 | // clobbered, the call itself will not touch it. Windows on ARM is a pure | |||
11468 | // thumb-2 environment, so there is no interworking required. As a result, we | |||
11469 | // do not expect a veneer to be emitted by the linker, clobbering IP. | |||
11470 | // | |||
11471 | // Each module receives its own copy of __chkstk, so no import thunk is | |||
11472 | // required, again, ensuring that IP is not clobbered. | |||
11473 | // | |||
11474 | // Finally, although some linkers may theoretically provide a trampoline for | |||
11475 | // out of range calls (which is quite common due to a 32M range limitation of | |||
11476 | // branches for Thumb), we can generate the long-call version via | |||
11477 | // -mcmodel=large, alleviating the need for the trampoline which may clobber | |||
11478 | // IP. | |||
11479 | ||||
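// Net effect (illustrative; R4 is expected to already hold the number of
// words to allocate by the time this pseudo is reached):
//   bl  __chkstk        ; probes the stack, returns the byte count in R4
//   sub sp, sp, r4      ; the t2SUBrr emitted at the end of this function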
11480 | switch (TM.getCodeModel()) { | |||
11481 | case CodeModel::Tiny: | |||
11482 | llvm_unreachable("Tiny code model not available on ARM.")::llvm::llvm_unreachable_internal("Tiny code model not available on ARM." , "llvm/lib/Target/ARM/ARMISelLowering.cpp", 11482); | |||
11483 | case CodeModel::Small: | |||
11484 | case CodeModel::Medium: | |||
11485 | case CodeModel::Kernel: | |||
11486 | BuildMI(*MBB, MI, DL, TII.get(ARM::tBL)) | |||
11487 | .add(predOps(ARMCC::AL)) | |||
11488 | .addExternalSymbol("__chkstk") | |||
11489 | .addReg(ARM::R4, RegState::Implicit | RegState::Kill) | |||
11490 | .addReg(ARM::R4, RegState::Implicit | RegState::Define) | |||
11491 | .addReg(ARM::R12, | |||
11492 | RegState::Implicit | RegState::Define | RegState::Dead) | |||
11493 | .addReg(ARM::CPSR, | |||
11494 | RegState::Implicit | RegState::Define | RegState::Dead); | |||
11495 | break; | |||
11496 | case CodeModel::Large: { | |||
11497 | MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); | |||
11498 | Register Reg = MRI.createVirtualRegister(&ARM::rGPRRegClass); | |||
11499 | ||||
11500 | BuildMI(*MBB, MI, DL, TII.get(ARM::t2MOVi32imm), Reg) | |||
11501 | .addExternalSymbol("__chkstk"); | |||
11502 | BuildMI(*MBB, MI, DL, TII.get(gettBLXrOpcode(*MBB->getParent()))) | |||
11503 | .add(predOps(ARMCC::AL)) | |||
11504 | .addReg(Reg, RegState::Kill) | |||
11505 | .addReg(ARM::R4, RegState::Implicit | RegState::Kill) | |||
11506 | .addReg(ARM::R4, RegState::Implicit | RegState::Define) | |||
11507 | .addReg(ARM::R12, | |||
11508 | RegState::Implicit | RegState::Define | RegState::Dead) | |||
11509 | .addReg(ARM::CPSR, | |||
11510 | RegState::Implicit | RegState::Define | RegState::Dead); | |||
11511 | break; | |||
11512 | } | |||
11513 | } | |||
11514 | ||||
11515 | BuildMI(*MBB, MI, DL, TII.get(ARM::t2SUBrr), ARM::SP) | |||
11516 | .addReg(ARM::SP, RegState::Kill) | |||
11517 | .addReg(ARM::R4, RegState::Kill) | |||
11518 | .setMIFlags(MachineInstr::FrameSetup) | |||
11519 | .add(predOps(ARMCC::AL)) | |||
11520 | .add(condCodeOp()); | |||
11521 | ||||
11522 | MI.eraseFromParent(); | |||
11523 | return MBB; | |||
11524 | } | |||
11525 | ||||
11526 | MachineBasicBlock * | |||
11527 | ARMTargetLowering::EmitLowered__dbzchk(MachineInstr &MI, | |||
11528 | MachineBasicBlock *MBB) const { | |||
11529 | DebugLoc DL = MI.getDebugLoc(); | |||
11530 | MachineFunction *MF = MBB->getParent(); | |||
11531 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); | |||
11532 | ||||
11533 | MachineBasicBlock *ContBB = MF->CreateMachineBasicBlock(); | |||
11534 | MF->insert(++MBB->getIterator(), ContBB); | |||
11535 | ContBB->splice(ContBB->begin(), MBB, | |||
11536 | std::next(MachineBasicBlock::iterator(MI)), MBB->end()); | |||
11537 | ContBB->transferSuccessorsAndUpdatePHIs(MBB); | |||
11538 | MBB->addSuccessor(ContBB); | |||
11539 | ||||
11540 | MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock(); | |||
11541 | BuildMI(TrapBB, DL, TII->get(ARM::t__brkdiv0)); | |||
11542 | MF->push_back(TrapBB); | |||
11543 | MBB->addSuccessor(TrapBB); | |||
11544 | ||||
11545 | BuildMI(*MBB, MI, DL, TII->get(ARM::tCMPi8)) | |||
11546 | .addReg(MI.getOperand(0).getReg()) | |||
11547 | .addImm(0) | |||
11548 | .add(predOps(ARMCC::AL)); | |||
11549 | BuildMI(*MBB, MI, DL, TII->get(ARM::t2Bcc)) | |||
11550 | .addMBB(TrapBB) | |||
11551 | .addImm(ARMCC::EQ) | |||
11552 | .addReg(ARM::CPSR); | |||
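// In effect: cmp rN, #0 / beq .LTrapBB, where the trap block executes the
// Windows __brkdiv0 sequence; a non-zero divisor falls through to ContBB.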
11553 | ||||
11554 | MI.eraseFromParent(); | |||
11555 | return ContBB; | |||
11556 | } | |||
11557 | ||||
11558 | // The CPSR operand of SelectItr might be missing a kill marker | |||
11559 | // because there were multiple uses of CPSR, and ISel didn't know | |||
11560 | // which to mark. Figure out whether SelectItr should have had a | |||
11561 | // kill marker, and set it if it should. Returns the correct kill | |||
11562 | // marker value. | |||
11563 | static bool checkAndUpdateCPSRKill(MachineBasicBlock::iterator SelectItr, | |||
11564 | MachineBasicBlock* BB, | |||
11565 | const TargetRegisterInfo* TRI) { | |||
11566 | // Scan forward through BB for a use/def of CPSR. | |||
11567 | MachineBasicBlock::iterator miI(std::next(SelectItr)); | |||
11568 | for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) { | |||
11569 | const MachineInstr& mi = *miI; | |||
11570 | if (mi.readsRegister(ARM::CPSR)) | |||
11571 | return false; | |||
11572 | if (mi.definesRegister(ARM::CPSR)) | |||
11573 | break; // Should have kill-flag - update below. | |||
11574 | } | |||
11575 | ||||
11576 | // If we hit the end of the block, check whether CPSR is live into a | |||
11577 | // successor. | |||
11578 | if (miI == BB->end()) { | |||
11579 | for (MachineBasicBlock *Succ : BB->successors()) | |||
11580 | if (Succ->isLiveIn(ARM::CPSR)) | |||
11581 | return false; | |||
11582 | } | |||
11583 | ||||
11584 | // We found a def, or hit the end of the basic block and CPSR wasn't live | |||
11585 | // out. SelectMI should have a kill flag on CPSR. | |||
11586 | SelectItr->addRegisterKilled(ARM::CPSR, TRI); | |||
11587 | return true; | |||
11588 | } | |||
11589 | ||||
11590 | /// Adds logic in the loop entry MBB to calculate the loop iteration count
11591 | /// and adds t2WhileLoopSetup and t2WhileLoopStart to generate a WLS loop.
11592 | static Register genTPEntry(MachineBasicBlock *TpEntry, | |||
11593 | MachineBasicBlock *TpLoopBody, | |||
11594 | MachineBasicBlock *TpExit, Register OpSizeReg, | |||
11595 | const TargetInstrInfo *TII, DebugLoc Dl, | |||
11596 | MachineRegisterInfo &MRI) { | |||
11597 | // Calculates loop iteration count = ceil(n/16) = (n + 15) >> 4. | |||
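// (For example, n = 33 gives (33 + 15) >> 4 = 3 iterations; the final,
// partial iteration is handled by predication in the loop body.)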
11598 | Register AddDestReg = MRI.createVirtualRegister(&ARM::rGPRRegClass); | |||
11599 | BuildMI(TpEntry, Dl, TII->get(ARM::t2ADDri), AddDestReg) | |||
11600 | .addUse(OpSizeReg) | |||
11601 | .addImm(15) | |||
11602 | .add(predOps(ARMCC::AL)) | |||
11603 | .addReg(0); | |||
11604 | ||||
11605 | Register LsrDestReg = MRI.createVirtualRegister(&ARM::rGPRRegClass); | |||
11606 | BuildMI(TpEntry, Dl, TII->get(ARM::t2LSRri), LsrDestReg) | |||
11607 | .addUse(AddDestReg, RegState::Kill) | |||
11608 | .addImm(4) | |||
11609 | .add(predOps(ARMCC::AL)) | |||
11610 | .addReg(0); | |||
11611 | ||||
11612 | Register TotalIterationsReg = MRI.createVirtualRegister(&ARM::GPRlrRegClass); | |||
11613 | BuildMI(TpEntry, Dl, TII->get(ARM::t2WhileLoopSetup), TotalIterationsReg) | |||
11614 | .addUse(LsrDestReg, RegState::Kill); | |||
11615 | ||||
11616 | BuildMI(TpEntry, Dl, TII->get(ARM::t2WhileLoopStart)) | |||
11617 | .addUse(TotalIterationsReg) | |||
11618 | .addMBB(TpExit); | |||
11619 | ||||
11620 | BuildMI(TpEntry, Dl, TII->get(ARM::t2B)) | |||
11621 | .addMBB(TpLoopBody) | |||
11622 | .add(predOps(ARMCC::AL)); | |||
11623 | ||||
11624 | return TotalIterationsReg; | |||
11625 | } | |||
11626 | ||||
11627 | /// Adds logic in the loopBody MBB to generate MVE_VCTP, t2DoLoopDec and | |||
11628 | /// t2DoLoopEnd. These are used by later passes to generate tail predicated | |||
11629 | /// loops. | |||
11630 | static void genTPLoopBody(MachineBasicBlock *TpLoopBody, | |||
11631 | MachineBasicBlock *TpEntry, MachineBasicBlock *TpExit, | |||
11632 | const TargetInstrInfo *TII, DebugLoc Dl, | |||
11633 | MachineRegisterInfo &MRI, Register OpSrcReg, | |||
11634 | Register OpDestReg, Register ElementCountReg, | |||
11635 | Register TotalIterationsReg, bool IsMemcpy) { | |||
11636 | // First insert 4 PHI nodes for: Current pointer to Src (if memcpy), Dest | |||
11637 | // array, loop iteration counter, predication counter. | |||
11638 | ||||
11639 | Register SrcPhiReg, CurrSrcReg; | |||
11640 | if (IsMemcpy) { | |||
11641 | // Current position in the src array | |||
11642 | SrcPhiReg = MRI.createVirtualRegister(&ARM::rGPRRegClass); | |||
11643 | CurrSrcReg = MRI.createVirtualRegister(&ARM::rGPRRegClass); | |||
11644 | BuildMI(TpLoopBody, Dl, TII->get(ARM::PHI), SrcPhiReg) | |||
11645 | .addUse(OpSrcReg) | |||
11646 | .addMBB(TpEntry) | |||
11647 | .addUse(CurrSrcReg) | |||
11648 | .addMBB(TpLoopBody); | |||
11649 | } | |||
11650 | ||||
11651 | // Current position in the dest array | |||
11652 | Register DestPhiReg = MRI.createVirtualRegister(&ARM::rGPRRegClass); | |||
11653 | Register CurrDestReg = MRI.createVirtualRegister(&ARM::rGPRRegClass); | |||
11654 | BuildMI(TpLoopBody, Dl, TII->get(ARM::PHI), DestPhiReg) | |||
11655 | .addUse(OpDestReg) | |||
11656 | .addMBB(TpEntry) | |||
11657 | .addUse(CurrDestReg) | |||
11658 | .addMBB(TpLoopBody); | |||
11659 | ||||
11660 | // Current loop counter | |||
11661 | Register LoopCounterPhiReg = MRI.createVirtualRegister(&ARM::GPRlrRegClass); | |||
11662 | Register RemainingLoopIterationsReg = | |||
11663 | MRI.createVirtualRegister(&ARM::GPRlrRegClass); | |||
11664 | BuildMI(TpLoopBody, Dl, TII->get(ARM::PHI), LoopCounterPhiReg) | |||
11665 | .addUse(TotalIterationsReg) | |||
11666 | .addMBB(TpEntry) | |||
11667 | .addUse(RemainingLoopIterationsReg) | |||
11668 | .addMBB(TpLoopBody); | |||
11669 | ||||
11670 | // Predication counter | |||
11671 | Register PredCounterPhiReg = MRI.createVirtualRegister(&ARM::rGPRRegClass); | |||
11672 | Register RemainingElementsReg = MRI.createVirtualRegister(&ARM::rGPRRegClass); | |||
11673 | BuildMI(TpLoopBody, Dl, TII->get(ARM::PHI), PredCounterPhiReg) | |||
11674 | .addUse(ElementCountReg) | |||
11675 | .addMBB(TpEntry) | |||
11676 | .addUse(RemainingElementsReg) | |||
11677 | .addMBB(TpLoopBody); | |||
11678 | ||||
11679 | // Pass predication counter to VCTP | |||
11680 | Register VccrReg = MRI.createVirtualRegister(&ARM::VCCRRegClass); | |||
11681 | BuildMI(TpLoopBody, Dl, TII->get(ARM::MVE_VCTP8), VccrReg) | |||
11682 | .addUse(PredCounterPhiReg) | |||
11683 | .addImm(ARMVCC::None) | |||
11684 | .addReg(0) | |||
11685 | .addReg(0); | |||
11686 | ||||
11687 | BuildMI(TpLoopBody, Dl, TII->get(ARM::t2SUBri), RemainingElementsReg) | |||
11688 | .addUse(PredCounterPhiReg) | |||
11689 | .addImm(16) | |||
11690 | .add(predOps(ARMCC::AL)) | |||
11691 | .addReg(0); | |||
11692 | ||||
11693 | // VLDRB (only if memcpy) and VSTRB instructions, predicated using VPR | |||
11694 | Register SrcValueReg; | |||
11695 | if (IsMemcpy) { | |||
11696 | SrcValueReg = MRI.createVirtualRegister(&ARM::MQPRRegClass); | |||
11697 | BuildMI(TpLoopBody, Dl, TII->get(ARM::MVE_VLDRBU8_post)) | |||
11698 | .addDef(CurrSrcReg) | |||
11699 | .addDef(SrcValueReg) | |||
11700 | .addReg(SrcPhiReg) | |||
11701 | .addImm(16) | |||
11702 | .addImm(ARMVCC::Then) | |||
11703 | .addUse(VccrReg) | |||
11704 | .addReg(0); | |||
11705 | } else | |||
11706 | SrcValueReg = OpSrcReg; | |||
11707 | ||||
11708 | BuildMI(TpLoopBody, Dl, TII->get(ARM::MVE_VSTRBU8_post)) | |||
11709 | .addDef(CurrDestReg) | |||
11710 | .addUse(SrcValueReg) | |||
11711 | .addReg(DestPhiReg) | |||
11712 | .addImm(16) | |||
11713 | .addImm(ARMVCC::Then) | |||
11714 | .addUse(VccrReg) | |||
11715 | .addReg(0); | |||
11716 | ||||
11717 | // Add the pseudoInstrs for decrementing the loop counter and marking the | |||
11718 | // end:t2DoLoopDec and t2DoLoopEnd | |||
11719 | BuildMI(TpLoopBody, Dl, TII->get(ARM::t2LoopDec), RemainingLoopIterationsReg) | |||
11720 | .addUse(LoopCounterPhiReg) | |||
11721 | .addImm(1); | |||
11722 | ||||
11723 | BuildMI(TpLoopBody, Dl, TII->get(ARM::t2LoopEnd)) | |||
11724 | .addUse(RemainingLoopIterationsReg) | |||
11725 | .addMBB(TpLoopBody); | |||
11726 | ||||
11727 | BuildMI(TpLoopBody, Dl, TII->get(ARM::t2B)) | |||
11728 | .addMBB(TpExit) | |||
11729 | .add(predOps(ARMCC::AL)); | |||
11730 | } | |||
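// After the later hardware-loop passes run, the blocks built above are
// expected to collapse into a tail-predicated loop of roughly this shape
// (illustrative):
//   wlstp.8 lr, rCount, .Lexit
// .Lbody:
//   vldrb.u8 q0, [rSrc], #16   ; memcpy only
//   vstrb.u8 q0, [rDst], #16
//   letp    lr, .Lbody
// .Lexit: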
11731 | ||||
11732 | MachineBasicBlock * | |||
11733 | ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, | |||
11734 | MachineBasicBlock *BB) const { | |||
11735 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); | |||
11736 | DebugLoc dl = MI.getDebugLoc(); | |||
11737 | bool isThumb2 = Subtarget->isThumb2(); | |||
11738 | switch (MI.getOpcode()) { | |||
11739 | default: { | |||
11740 | MI.print(errs()); | |||
11741 | llvm_unreachable("Unexpected instr type to insert")::llvm::llvm_unreachable_internal("Unexpected instr type to insert" , "llvm/lib/Target/ARM/ARMISelLowering.cpp", 11741); | |||
11742 | } | |||
11743 | ||||
11744 | // Thumb1 post-indexed loads are really just single-register LDMs. | |||
11745 | case ARM::tLDR_postidx: { | |||
11746 | MachineOperand Def(MI.getOperand(1)); | |||
11747 | BuildMI(*BB, MI, dl, TII->get(ARM::tLDMIA_UPD)) | |||
11748 | .add(Def) // Rn_wb | |||
11749 | .add(MI.getOperand(2)) // Rn | |||
11750 | .add(MI.getOperand(3)) // PredImm | |||
11751 | .add(MI.getOperand(4)) // PredReg | |||
11752 | .add(MI.getOperand(0)) // Rt | |||
11753 | .cloneMemRefs(MI); | |||
11754 | MI.eraseFromParent(); | |||
11755 | return BB; | |||
11756 | } | |||
11757 | ||||
11758 | case ARM::MVE_MEMCPYLOOPINST: | |||
11759 | case ARM::MVE_MEMSETLOOPINST: { | |||
11760 | ||||
11761 | // The transformation below expands the MVE_MEMCPYLOOPINST /
11762 | // MVE_MEMSETLOOPINST pseudo into a tail-predicated (TP) loop. It adds the
11763 | // instructions to calculate the iteration count = ceil(size_in_bytes / 16)
11764 | // in the TP entry block and the relevant instructions in the TP loop body
11765 | // for generation of a WLSTP loop.
11766 | ||||
11767 | // Below is relevant portion of the CFG after the transformation. | |||
11768 | // The Machine Basic Blocks are shown along with branch conditions (in | |||
11769 | // brackets). Note that TP entry/exit MBBs depict the entry/exit of this | |||
11770 | // portion of the CFG and may not necessarily be the entry/exit of the | |||
11771 | // function. | |||
11772 | ||||
11773 | // (Relevant) CFG after transformation: | |||
11774 | // TP entry MBB | |||
11775 | // | | |||
11776 | // |-----------------| | |||
11777 | // (n <= 0) (n > 0) | |||
11778 | // | | | |||
11779 | // | TP loop Body MBB<--| | |||
11780 | // | | | | |||
11781 | // \ |___________| | |||
11782 | // \ / | |||
11783 | // TP exit MBB | |||
11784 | ||||
11785 | MachineFunction *MF = BB->getParent(); | |||
11786 | MachineFunctionProperties &Properties = MF->getProperties(); | |||
11787 | MachineRegisterInfo &MRI = MF->getRegInfo(); | |||
11788 | ||||
11789 | Register OpDestReg = MI.getOperand(0).getReg(); | |||
11790 | Register OpSrcReg = MI.getOperand(1).getReg(); | |||
11791 | Register OpSizeReg = MI.getOperand(2).getReg(); | |||
11792 | ||||
11793 | // Allocate the required MBBs and add to parent function. | |||
11794 | MachineBasicBlock *TpEntry = BB; | |||
11795 | MachineBasicBlock *TpLoopBody = MF->CreateMachineBasicBlock(); | |||
11796 | MachineBasicBlock *TpExit; | |||
11797 | ||||
11798 | MF->push_back(TpLoopBody); | |||
11799 | ||||
11800 | // If any instructions are present in the current block after | |||
11801 | // MVE_MEMCPYLOOPINST or MVE_MEMSETLOOPINST, split the current block and | |||
11802 | // move the instructions into the newly created exit block. If there are no | |||
11803 | // instructions, add an explicit branch to the FallThrough block and then
11804 | // split. | |||
11805 | // | |||
11806 | // The split is required for two reasons: | |||
11807 |     //  1) A terminator (t2WhileLoopStart) will be placed at that site.
11808 | // 2) Since a TPLoopBody will be added later, any phis in successive blocks | |||
11809 | // need to be updated. splitAt() already handles this. | |||
11810 | TpExit = BB->splitAt(MI, false); | |||
11811 | if (TpExit == BB) { | |||
11812 |       assert(BB->canFallThrough() && "Exit Block must be Fallthrough of the "
11813 |                                      "block containing memcpy/memset Pseudo");
11814 | TpExit = BB->getFallThrough(); | |||
11815 | BuildMI(BB, dl, TII->get(ARM::t2B)) | |||
11816 | .addMBB(TpExit) | |||
11817 | .add(predOps(ARMCC::AL)); | |||
11818 | TpExit = BB->splitAt(MI, false); | |||
11819 | } | |||
11820 | ||||
11821 | // Add logic for iteration count | |||
11822 | Register TotalIterationsReg = | |||
11823 | genTPEntry(TpEntry, TpLoopBody, TpExit, OpSizeReg, TII, dl, MRI); | |||
11824 | ||||
11825 |     // Add the vectorized (and predicated) load/store instructions.
11826 | bool IsMemcpy = MI.getOpcode() == ARM::MVE_MEMCPYLOOPINST; | |||
11827 | genTPLoopBody(TpLoopBody, TpEntry, TpExit, TII, dl, MRI, OpSrcReg, | |||
11828 | OpDestReg, OpSizeReg, TotalIterationsReg, IsMemcpy); | |||
11829 | ||||
11830 | // Required to avoid conflict with the MachineVerifier during testing. | |||
11831 | Properties.reset(MachineFunctionProperties::Property::NoPHIs); | |||
11832 | ||||
11833 | // Connect the blocks | |||
11834 | TpEntry->addSuccessor(TpLoopBody); | |||
11835 | TpLoopBody->addSuccessor(TpLoopBody); | |||
11836 | TpLoopBody->addSuccessor(TpExit); | |||
11837 | ||||
11838 | // Reorder for a more natural layout | |||
11839 | TpLoopBody->moveAfter(TpEntry); | |||
11840 | TpExit->moveAfter(TpLoopBody); | |||
11841 | ||||
11842 |     // Finally, remove the memcpy pseudo instruction.
11843 | MI.eraseFromParent(); | |||
11844 | ||||
11845 | // Return the exit block as it may contain other instructions requiring a | |||
11846 | // custom inserter | |||
11847 | return TpExit; | |||
11848 | } | |||
11849 | ||||
11850 | // The Thumb2 pre-indexed stores have the same MI operands, they just | |||
11851 | // define them differently in the .td files from the isel patterns, so | |||
11852 | // they need pseudos. | |||
11853 | case ARM::t2STR_preidx: | |||
11854 | MI.setDesc(TII->get(ARM::t2STR_PRE)); | |||
11855 | return BB; | |||
11856 | case ARM::t2STRB_preidx: | |||
11857 | MI.setDesc(TII->get(ARM::t2STRB_PRE)); | |||
11858 | return BB; | |||
11859 | case ARM::t2STRH_preidx: | |||
11860 | MI.setDesc(TII->get(ARM::t2STRH_PRE)); | |||
11861 | return BB; | |||
11862 | ||||
11863 | case ARM::STRi_preidx: | |||
11864 | case ARM::STRBi_preidx: { | |||
11865 | unsigned NewOpc = MI.getOpcode() == ARM::STRi_preidx ? ARM::STR_PRE_IMM | |||
11866 | : ARM::STRB_PRE_IMM; | |||
11867 | // Decode the offset. | |||
11868 | unsigned Offset = MI.getOperand(4).getImm(); | |||
11869 | bool isSub = ARM_AM::getAM2Op(Offset) == ARM_AM::sub; | |||
11870 | Offset = ARM_AM::getAM2Offset(Offset); | |||
11871 | if (isSub) | |||
11872 | Offset = -Offset; | |||
11873 | ||||
11874 | MachineMemOperand *MMO = *MI.memoperands_begin(); | |||
11875 | BuildMI(*BB, MI, dl, TII->get(NewOpc)) | |||
11876 | .add(MI.getOperand(0)) // Rn_wb | |||
11877 | .add(MI.getOperand(1)) // Rt | |||
11878 | .add(MI.getOperand(2)) // Rn | |||
11879 | .addImm(Offset) // offset (skip GPR==zero_reg) | |||
11880 | .add(MI.getOperand(5)) // pred | |||
11881 | .add(MI.getOperand(6)) | |||
11882 | .addMemOperand(MMO); | |||
11883 | MI.eraseFromParent(); | |||
11884 | return BB; | |||
11885 | } | |||
11886 | case ARM::STRr_preidx: | |||
11887 | case ARM::STRBr_preidx: | |||
11888 | case ARM::STRH_preidx: { | |||
11889 | unsigned NewOpc; | |||
11890 | switch (MI.getOpcode()) { | |||
11891 |     default: llvm_unreachable("unexpected opcode!");
11892 | case ARM::STRr_preidx: NewOpc = ARM::STR_PRE_REG; break; | |||
11893 | case ARM::STRBr_preidx: NewOpc = ARM::STRB_PRE_REG; break; | |||
11894 | case ARM::STRH_preidx: NewOpc = ARM::STRH_PRE; break; | |||
11895 | } | |||
11896 | MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(NewOpc)); | |||
11897 | for (const MachineOperand &MO : MI.operands()) | |||
11898 | MIB.add(MO); | |||
11899 | MI.eraseFromParent(); | |||
11900 | return BB; | |||
11901 | } | |||
11902 | ||||
11903 | case ARM::tMOVCCr_pseudo: { | |||
11904 | // To "insert" a SELECT_CC instruction, we actually have to insert the | |||
11905 | // diamond control-flow pattern. The incoming instruction knows the | |||
11906 | // destination vreg to set, the condition code register to branch on, the | |||
11907 | // true/false values to select between, and a branch opcode to use. | |||
11908 | const BasicBlock *LLVM_BB = BB->getBasicBlock(); | |||
11909 | MachineFunction::iterator It = ++BB->getIterator(); | |||
11910 | ||||
11911 | // thisMBB: | |||
11912 | // ... | |||
11913 | // TrueVal = ... | |||
11914 | // cmpTY ccX, r1, r2 | |||
11915 | // bCC copy1MBB | |||
11916 | // fallthrough --> copy0MBB | |||
11917 | MachineBasicBlock *thisMBB = BB; | |||
11918 | MachineFunction *F = BB->getParent(); | |||
11919 | MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); | |||
11920 | MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); | |||
11921 | F->insert(It, copy0MBB); | |||
11922 | F->insert(It, sinkMBB); | |||
11923 | ||||
11924 | // Check whether CPSR is live past the tMOVCCr_pseudo. | |||
11925 | const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo(); | |||
11926 | if (!MI.killsRegister(ARM::CPSR) && | |||
11927 | !checkAndUpdateCPSRKill(MI, thisMBB, TRI)) { | |||
11928 | copy0MBB->addLiveIn(ARM::CPSR); | |||
11929 | sinkMBB->addLiveIn(ARM::CPSR); | |||
11930 | } | |||
11931 | ||||
11932 | // Transfer the remainder of BB and its successor edges to sinkMBB. | |||
11933 | sinkMBB->splice(sinkMBB->begin(), BB, | |||
11934 | std::next(MachineBasicBlock::iterator(MI)), BB->end()); | |||
11935 | sinkMBB->transferSuccessorsAndUpdatePHIs(BB); | |||
11936 | ||||
11937 | BB->addSuccessor(copy0MBB); | |||
11938 | BB->addSuccessor(sinkMBB); | |||
11939 | ||||
11940 | BuildMI(BB, dl, TII->get(ARM::tBcc)) | |||
11941 | .addMBB(sinkMBB) | |||
11942 | .addImm(MI.getOperand(3).getImm()) | |||
11943 | .addReg(MI.getOperand(4).getReg()); | |||
11944 | ||||
11945 | // copy0MBB: | |||
11946 | // %FalseValue = ... | |||
11947 | // # fallthrough to sinkMBB | |||
11948 | BB = copy0MBB; | |||
11949 | ||||
11950 | // Update machine-CFG edges | |||
11951 | BB->addSuccessor(sinkMBB); | |||
11952 | ||||
11953 | // sinkMBB: | |||
11954 | // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] | |||
11955 | // ... | |||
11956 | BB = sinkMBB; | |||
11957 | BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), MI.getOperand(0).getReg()) | |||
11958 | .addReg(MI.getOperand(1).getReg()) | |||
11959 | .addMBB(copy0MBB) | |||
11960 | .addReg(MI.getOperand(2).getReg()) | |||
11961 | .addMBB(thisMBB); | |||
11962 | ||||
11963 | MI.eraseFromParent(); // The pseudo instruction is gone now. | |||
11964 | return BB; | |||
11965 | } | |||
11966 | ||||
11967 | case ARM::BCCi64: | |||
11968 | case ARM::BCCZi64: { | |||
11969 | // If there is an unconditional branch to the other successor, remove it. | |||
11970 | BB->erase(std::next(MachineBasicBlock::iterator(MI)), BB->end()); | |||
11971 | ||||
11972 | // Compare both parts that make up the double comparison separately for | |||
11973 | // equality. | |||
11974 | bool RHSisZero = MI.getOpcode() == ARM::BCCZi64; | |||
11975 | ||||
11976 | Register LHS1 = MI.getOperand(1).getReg(); | |||
11977 | Register LHS2 = MI.getOperand(2).getReg(); | |||
11978 | if (RHSisZero) { | |||
11979 | BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) | |||
11980 | .addReg(LHS1) | |||
11981 | .addImm(0) | |||
11982 | .add(predOps(ARMCC::AL)); | |||
11983 | BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) | |||
11984 | .addReg(LHS2).addImm(0) | |||
11985 | .addImm(ARMCC::EQ).addReg(ARM::CPSR); | |||
11986 | } else { | |||
11987 | Register RHS1 = MI.getOperand(3).getReg(); | |||
11988 | Register RHS2 = MI.getOperand(4).getReg(); | |||
11989 | BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) | |||
11990 | .addReg(LHS1) | |||
11991 | .addReg(RHS1) | |||
11992 | .add(predOps(ARMCC::AL)); | |||
11993 | BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) | |||
11994 | .addReg(LHS2).addReg(RHS2) | |||
11995 | .addImm(ARMCC::EQ).addReg(ARM::CPSR); | |||
11996 | } | |||
11997 | ||||
11998 | MachineBasicBlock *destMBB = MI.getOperand(RHSisZero ? 3 : 5).getMBB(); | |||
11999 | MachineBasicBlock *exitMBB = OtherSucc(BB, destMBB); | |||
12000 | if (MI.getOperand(0).getImm() == ARMCC::NE) | |||
12001 | std::swap(destMBB, exitMBB); | |||
12002 | ||||
12003 | BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) | |||
12004 | .addMBB(destMBB).addImm(ARMCC::EQ).addReg(ARM::CPSR); | |||
12005 | if (isThumb2) | |||
12006 | BuildMI(BB, dl, TII->get(ARM::t2B)) | |||
12007 | .addMBB(exitMBB) | |||
12008 | .add(predOps(ARMCC::AL)); | |||
12009 | else | |||
12010 |       BuildMI(BB, dl, TII->get(ARM::B)).addMBB(exitMBB);
12011 | ||||
12012 | MI.eraseFromParent(); // The pseudo instruction is gone now. | |||
12013 | return BB; | |||
12014 | } | |||
12015 | ||||
12016 | case ARM::Int_eh_sjlj_setjmp: | |||
12017 | case ARM::Int_eh_sjlj_setjmp_nofp: | |||
12018 | case ARM::tInt_eh_sjlj_setjmp: | |||
12019 | case ARM::t2Int_eh_sjlj_setjmp: | |||
12020 | case ARM::t2Int_eh_sjlj_setjmp_nofp: | |||
12021 | return BB; | |||
12022 | ||||
12023 | case ARM::Int_eh_sjlj_setup_dispatch: | |||
12024 | EmitSjLjDispatchBlock(MI, BB); | |||
12025 | return BB; | |||
12026 | ||||
12027 | case ARM::ABS: | |||
12028 | case ARM::t2ABS: { | |||
12029 | // To insert an ABS instruction, we have to insert the | |||
12030 | // diamond control-flow pattern. The incoming instruction knows the | |||
12031 | // source vreg to test against 0, the destination vreg to set, | |||
12032 | // the condition code register to branch on, the | |||
12033 | // true/false values to select between, and a branch opcode to use. | |||
12034 | // It transforms | |||
12035 | // V1 = ABS V0 | |||
12036 | // into | |||
12037 | // V2 = MOVS V0 | |||
12038 | // BCC (branch to SinkBB if V0 >= 0) | |||
12039 | // RSBBB: V3 = RSBri V2, 0 (compute ABS if V2 < 0) | |||
12040 | // SinkBB: V1 = PHI(V2, V3) | |||
12041 | const BasicBlock *LLVM_BB = BB->getBasicBlock(); | |||
12042 | MachineFunction::iterator BBI = ++BB->getIterator(); | |||
12043 | MachineFunction *Fn = BB->getParent(); | |||
12044 | MachineBasicBlock *RSBBB = Fn->CreateMachineBasicBlock(LLVM_BB); | |||
12045 | MachineBasicBlock *SinkBB = Fn->CreateMachineBasicBlock(LLVM_BB); | |||
12046 | Fn->insert(BBI, RSBBB); | |||
12047 | Fn->insert(BBI, SinkBB); | |||
12048 | ||||
12049 | Register ABSSrcReg = MI.getOperand(1).getReg(); | |||
12050 | Register ABSDstReg = MI.getOperand(0).getReg(); | |||
12051 |     bool ABSSrcKill = MI.getOperand(1).isKill();
12052 | bool isThumb2 = Subtarget->isThumb2(); | |||
12053 | MachineRegisterInfo &MRI = Fn->getRegInfo(); | |||
12054 |     // In Thumb mode, S must not be specified if the source register is SP or
12055 |     // PC, or if the destination register is SP, so restrict the register class.
12056 | Register NewRsbDstReg = MRI.createVirtualRegister( | |||
12057 | isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass); | |||
12058 | ||||
12059 | // Transfer the remainder of BB and its successor edges to sinkMBB. | |||
12060 | SinkBB->splice(SinkBB->begin(), BB, | |||
12061 | std::next(MachineBasicBlock::iterator(MI)), BB->end()); | |||
12062 | SinkBB->transferSuccessorsAndUpdatePHIs(BB); | |||
12063 | ||||
12064 | BB->addSuccessor(RSBBB); | |||
12065 | BB->addSuccessor(SinkBB); | |||
12066 | ||||
12067 |     // fall through to SinkBB
12068 | RSBBB->addSuccessor(SinkBB); | |||
12069 | ||||
12070 | // insert a cmp at the end of BB | |||
12071 | BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) | |||
12072 | .addReg(ABSSrcReg) | |||
12073 | .addImm(0) | |||
12074 | .add(predOps(ARMCC::AL)); | |||
12075 | ||||
12076 | // insert a bcc with opposite CC to ARMCC::MI at the end of BB | |||
12077 | BuildMI(BB, dl, | |||
12078 | TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)).addMBB(SinkBB) | |||
12079 | .addImm(ARMCC::getOppositeCondition(ARMCC::MI)).addReg(ARM::CPSR); | |||
12080 | ||||
12081 | // insert rsbri in RSBBB | |||
12082 | // Note: BCC and rsbri will be converted into predicated rsbmi | |||
12083 |     // by the if-conversion pass.
12084 | BuildMI(*RSBBB, RSBBB->begin(), dl, | |||
12085 | TII->get(isThumb2 ? ARM::t2RSBri : ARM::RSBri), NewRsbDstReg) | |||
12086 |         .addReg(ABSSrcReg, ABSSrcKill ? RegState::Kill : 0)
12087 | .addImm(0) | |||
12088 | .add(predOps(ARMCC::AL)) | |||
12089 | .add(condCodeOp()); | |||
12090 | ||||
12091 | // insert PHI in SinkBB, | |||
12092 | // reuse ABSDstReg to not change uses of ABS instruction | |||
12093 | BuildMI(*SinkBB, SinkBB->begin(), dl, | |||
12094 | TII->get(ARM::PHI), ABSDstReg) | |||
12095 | .addReg(NewRsbDstReg).addMBB(RSBBB) | |||
12096 | .addReg(ABSSrcReg).addMBB(BB); | |||
12097 | ||||
12098 | // remove ABS instruction | |||
12099 | MI.eraseFromParent(); | |||
12100 | ||||
12101 | // return last added BB | |||
12102 | return SinkBB; | |||
12103 | } | |||
12104 | case ARM::COPY_STRUCT_BYVAL_I32: | |||
12105 | ++NumLoopByVals; | |||
12106 | return EmitStructByval(MI, BB); | |||
12107 | case ARM::WIN__CHKSTK: | |||
12108 | return EmitLowered__chkstk(MI, BB); | |||
12109 | case ARM::WIN__DBZCHK: | |||
12110 | return EmitLowered__dbzchk(MI, BB); | |||
12111 | } | |||
12112 | } | |||
12113 | ||||
12114 | /// Attaches vregs to MEMCPY that it will use as scratch registers | |||
12115 | /// when it is expanded into LDM/STM. This is done as a post-isel lowering | |||
12116 | /// instead of as a custom inserter because we need the use list from the SDNode. | |||
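/// For example, a MEMCPY whose scratch-register count (operand 4) is 4 gains
/// four dead vreg defs that the later LDM/STM expansion can use as transfer
/// registers.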
12117 | static void attachMEMCPYScratchRegs(const ARMSubtarget *Subtarget, | |||
12118 | MachineInstr &MI, const SDNode *Node) { | |||
12119 | bool isThumb1 = Subtarget->isThumb1Only(); | |||
12120 | ||||
12121 | DebugLoc DL = MI.getDebugLoc(); | |||
12122 | MachineFunction *MF = MI.getParent()->getParent(); | |||
12123 | MachineRegisterInfo &MRI = MF->getRegInfo(); | |||
12124 | MachineInstrBuilder MIB(*MF, MI); | |||
12125 | ||||
12126 |   // If the new dst/src is unused, mark it as dead.
12127 | if (!Node->hasAnyUseOfValue(0)) { | |||
12128 | MI.getOperand(0).setIsDead(true); | |||
12129 | } | |||
12130 | if (!Node->hasAnyUseOfValue(1)) { | |||
12131 | MI.getOperand(1).setIsDead(true); | |||
12132 | } | |||
12133 | ||||
12134 | // The MEMCPY both defines and kills the scratch registers. | |||
12135 | for (unsigned I = 0; I != MI.getOperand(4).getImm(); ++I) { | |||
12136 | Register TmpReg = MRI.createVirtualRegister(isThumb1 ? &ARM::tGPRRegClass | |||
12137 | : &ARM::GPRRegClass); | |||
12138 | MIB.addReg(TmpReg, RegState::Define|RegState::Dead); | |||
12139 | } | |||
12140 | } | |||
12141 | ||||
12142 | void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI, | |||
12143 | SDNode *Node) const { | |||
12144 | if (MI.getOpcode() == ARM::MEMCPY) { | |||
12145 | attachMEMCPYScratchRegs(Subtarget, MI, Node); | |||
12146 | return; | |||
12147 | } | |||
12148 | ||||
12149 | const MCInstrDesc *MCID = &MI.getDesc(); | |||
12150 | // Adjust potentially 's' setting instructions after isel, i.e. ADC, SBC, RSB, | |||
12151 | // RSC. Coming out of isel, they have an implicit CPSR def, but the optional | |||
12152 | // operand is still set to noreg. If needed, set the optional operand's | |||
12153 | // register to CPSR, and remove the redundant implicit def. | |||
12154 | // | |||
12155 | // e.g. ADCS (..., implicit-def CPSR) -> ADC (... opt:def CPSR). | |||
12156 | ||||
12157 | // Rename pseudo opcodes. | |||
12158 | unsigned NewOpc = convertAddSubFlagsOpcode(MI.getOpcode()); | |||
12159 | unsigned ccOutIdx; | |||
12160 | if (NewOpc) { | |||
12161 | const ARMBaseInstrInfo *TII = Subtarget->getInstrInfo(); | |||
12162 | MCID = &TII->get(NewOpc); | |||
12163 | ||||
12164 |     assert(MCID->getNumOperands() ==
12165 |                MI.getDesc().getNumOperands() + 5 - MI.getDesc().getSize() &&
12166 |            "converted opcode should be the same except for cc_out"
12167 |            " (and, on Thumb1, pred)");
12168 | ||||
12169 | MI.setDesc(*MCID); | |||
12170 | ||||
12171 | // Add the optional cc_out operand | |||
12172 | MI.addOperand(MachineOperand::CreateReg(0, /*isDef=*/true)); | |||
12173 | ||||
12174 | // On Thumb1, move all input operands to the end, then add the predicate | |||
12175 | if (Subtarget->isThumb1Only()) { | |||
12176 | for (unsigned c = MCID->getNumOperands() - 4; c--;) { | |||
12177 | MI.addOperand(MI.getOperand(1)); | |||
12178 | MI.removeOperand(1); | |||
12179 | } | |||
12180 | ||||
12181 | // Restore the ties | |||
12182 | for (unsigned i = MI.getNumOperands(); i--;) { | |||
12183 | const MachineOperand& op = MI.getOperand(i); | |||
12184 | if (op.isReg() && op.isUse()) { | |||
12185 | int DefIdx = MCID->getOperandConstraint(i, MCOI::TIED_TO); | |||
12186 | if (DefIdx != -1) | |||
12187 | MI.tieOperands(DefIdx, i); | |||
12188 | } | |||
12189 | } | |||
12190 | ||||
12191 | MI.addOperand(MachineOperand::CreateImm(ARMCC::AL)); | |||
12192 | MI.addOperand(MachineOperand::CreateReg(0, /*isDef=*/false)); | |||
12193 | ccOutIdx = 1; | |||
12194 | } else | |||
12195 | ccOutIdx = MCID->getNumOperands() - 1; | |||
12196 | } else | |||
12197 | ccOutIdx = MCID->getNumOperands() - 1; | |||
12198 | ||||
12199 | // Any ARM instruction that sets the 's' bit should specify an optional | |||
12200 | // "cc_out" operand in the last operand position. | |||
12201 | if (!MI.hasOptionalDef() || !MCID->OpInfo[ccOutIdx].isOptionalDef()) { | |||
12202 |     assert(!NewOpc && "Optional cc_out operand required");
12203 | return; | |||
12204 | } | |||
12205 | // Look for an implicit def of CPSR added by MachineInstr ctor. Remove it | |||
12206 | // since we already have an optional CPSR def. | |||
12207 | bool definesCPSR = false; | |||
12208 | bool deadCPSR = false; | |||
12209 | for (unsigned i = MCID->getNumOperands(), e = MI.getNumOperands(); i != e; | |||
12210 | ++i) { | |||
12211 | const MachineOperand &MO = MI.getOperand(i); | |||
12212 | if (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR) { | |||
12213 | definesCPSR = true; | |||
12214 | if (MO.isDead()) | |||
12215 | deadCPSR = true; | |||
12216 | MI.removeOperand(i); | |||
12217 | break; | |||
12218 | } | |||
12219 | } | |||
12220 | if (!definesCPSR) { | |||
12221 |     assert(!NewOpc && "Optional cc_out operand required");
12222 | return; | |||
12223 | } | |||
12224 |   assert(deadCPSR == !Node->hasAnyUseOfValue(1) && "inconsistent dead flag");
12225 | if (deadCPSR) { | |||
12226 |     assert(!MI.getOperand(ccOutIdx).getReg() &&
12227 |            "expect uninitialized optional cc_out operand");
12228 | // Thumb1 instructions must have the S bit even if the CPSR is dead. | |||
12229 | if (!Subtarget->isThumb1Only()) | |||
12230 | return; | |||
12231 | } | |||
12232 | ||||
12233 | // If this instruction was defined with an optional CPSR def and its dag node | |||
12234 | // had a live implicit CPSR def, then activate the optional CPSR def. | |||
12235 | MachineOperand &MO = MI.getOperand(ccOutIdx); | |||
12236 | MO.setReg(ARM::CPSR); | |||
12237 | MO.setIsDef(true); | |||
12238 | } | |||
12239 | ||||
12240 | //===----------------------------------------------------------------------===// | |||
12241 | // ARM Optimization Hooks | |||
12242 | //===----------------------------------------------------------------------===// | |||
12243 | ||||
12244 | // Helper function that checks if N is a null or all ones constant. | |||
12245 | static inline bool isZeroOrAllOnes(SDValue N, bool AllOnes) { | |||
12246 | return AllOnes ? isAllOnesConstant(N) : isNullConstant(N); | |||
12247 | } | |||
12248 | ||||
12249 | // Return true if N is conditionally 0 or all ones. | |||
12250 | // Detects these expressions where cc is an i1 value: | |||
12251 | // | |||
12252 | // (select cc 0, y) [AllOnes=0] | |||
12253 | // (select cc y, 0) [AllOnes=0] | |||
12254 | // (zext cc) [AllOnes=0] | |||
12255 | // (sext cc) [AllOnes=0/1] | |||
12256 | // (select cc -1, y) [AllOnes=1] | |||
12257 | // (select cc y, -1) [AllOnes=1] | |||
12258 | // | |||
12259 | // Invert is set when N is the null/all ones constant when CC is false. | |||
12260 | // OtherOp is set to the alternative value of N. | |||
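//
// For example, with AllOnes=0 and N = (select cc, y, 0), the null constant is
// selected when cc is false, so Invert is set and OtherOp is y.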
12261 | static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes, | |||
12262 | SDValue &CC, bool &Invert, | |||
12263 | SDValue &OtherOp, | |||
12264 | SelectionDAG &DAG) { | |||
12265 | switch (N->getOpcode()) { | |||
12266 | default: return false; | |||
12267 | case ISD::SELECT: { | |||
12268 | CC = N->getOperand(0); | |||
12269 | SDValue N1 = N->getOperand(1); | |||
12270 | SDValue N2 = N->getOperand(2); | |||
12271 | if (isZeroOrAllOnes(N1, AllOnes)) { | |||
12272 | Invert = false; | |||
12273 | OtherOp = N2; | |||
12274 | return true; | |||
12275 | } | |||
12276 | if (isZeroOrAllOnes(N2, AllOnes)) { | |||
12277 | Invert = true; | |||
12278 | OtherOp = N1; | |||
12279 | return true; | |||
12280 | } | |||
12281 | return false; | |||
12282 | } | |||
12283 | case ISD::ZERO_EXTEND: | |||
12284 | // (zext cc) can never be the all ones value. | |||
12285 | if (AllOnes) | |||
12286 | return false; | |||
12287 |     LLVM_FALLTHROUGH;
12288 | case ISD::SIGN_EXTEND: { | |||
12289 | SDLoc dl(N); | |||
12290 | EVT VT = N->getValueType(0); | |||
12291 | CC = N->getOperand(0); | |||
12292 | if (CC.getValueType() != MVT::i1 || CC.getOpcode() != ISD::SETCC) | |||
12293 | return false; | |||
12294 | Invert = !AllOnes; | |||
12295 | if (AllOnes) | |||
12296 | // When looking for an AllOnes constant, N is an sext, and the 'other' | |||
12297 | // value is 0. | |||
12298 | OtherOp = DAG.getConstant(0, dl, VT); | |||
12299 | else if (N->getOpcode() == ISD::ZERO_EXTEND) | |||
12300 | // When looking for a 0 constant, N can be zext or sext. | |||
12301 | OtherOp = DAG.getConstant(1, dl, VT); | |||
12302 | else | |||
12303 | OtherOp = DAG.getAllOnesConstant(dl, VT); | |||
12304 | return true; | |||
12305 | } | |||
12306 | } | |||
12307 | } | |||
12308 | ||||
12309 | // Combine a constant select operand into its use: | |||
12310 | // | |||
12311 | // (add (select cc, 0, c), x) -> (select cc, x, (add, x, c)) | |||
12312 | // (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c)) | |||
12313 | // (and (select cc, -1, c), x) -> (select cc, x, (and, x, c)) [AllOnes=1] | |||
12314 | // (or (select cc, 0, c), x) -> (select cc, x, (or, x, c)) | |||
12315 | // (xor (select cc, 0, c), x) -> (select cc, x, (xor, x, c)) | |||
12316 | // | |||
12317 | // The transform is rejected if the select doesn't have a constant operand that | |||
12318 | // is null, or all ones when AllOnes is set. | |||
12319 | // | |||
12320 | // Also recognize sext/zext from i1: | |||
12321 | // | |||
12322 | // (add (zext cc), x) -> (select cc (add x, 1), x) | |||
12323 | // (add (sext cc), x) -> (select cc (add x, -1), x) | |||
12324 | // | |||
12325 | // These transformations eventually create predicated instructions. | |||
12326 | // | |||
12327 | // @param N The node to transform. | |||
12328 | // @param Slct The N operand that is a select. | |||
12329 | // @param OtherOp The other N operand (x above). | |||
12330 | // @param DCI Context. | |||
12331 | // @param AllOnes Require the select constant to be all ones instead of null. | |||
12332 | // @returns The new node, or SDValue() on failure. | |||
12333 | static | |||
12334 | SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp, | |||
12335 | TargetLowering::DAGCombinerInfo &DCI, | |||
12336 | bool AllOnes = false) { | |||
12337 | SelectionDAG &DAG = DCI.DAG; | |||
12338 | EVT VT = N->getValueType(0); | |||
12339 | SDValue NonConstantVal; | |||
12340 | SDValue CCOp; | |||
12341 | bool SwapSelectOps; | |||
12342 | if (!isConditionalZeroOrAllOnes(Slct.getNode(), AllOnes, CCOp, SwapSelectOps, | |||
12343 | NonConstantVal, DAG)) | |||
12344 | return SDValue(); | |||
12345 | ||||
12346 |   // Slct is now known to be the desired identity constant when CC is true.
12347 | SDValue TrueVal = OtherOp; | |||
12348 | SDValue FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT, | |||
12349 | OtherOp, NonConstantVal); | |||
12350 | // Unless SwapSelectOps says CC should be false. | |||
12351 | if (SwapSelectOps) | |||
12352 | std::swap(TrueVal, FalseVal); | |||
12353 | ||||
12354 | return DAG.getNode(ISD::SELECT, SDLoc(N), VT, | |||
12355 | CCOp, TrueVal, FalseVal); | |||
12356 | } | |||
12357 | ||||
12358 | // Attempt combineSelectAndUse on each operand of a commutative operator N. | |||
12359 | static | |||
12360 | SDValue combineSelectAndUseCommutative(SDNode *N, bool AllOnes, | |||
12361 | TargetLowering::DAGCombinerInfo &DCI) { | |||
12362 | SDValue N0 = N->getOperand(0); | |||
12363 | SDValue N1 = N->getOperand(1); | |||
12364 | if (N0.getNode()->hasOneUse()) | |||
12365 | if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI, AllOnes)) | |||
12366 | return Result; | |||
12367 | if (N1.getNode()->hasOneUse()) | |||
12368 | if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI, AllOnes)) | |||
12369 | return Result; | |||
12370 | return SDValue(); | |||
12371 | } | |||
12372 | ||||
12373 | static bool IsVUZPShuffleNode(SDNode *N) { | |||
12374 | // VUZP shuffle node. | |||
12375 | if (N->getOpcode() == ARMISD::VUZP) | |||
12376 | return true; | |||
12377 | ||||
12378 | // "VUZP" on i32 is an alias for VTRN. | |||
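  // (With only two lanes the two permutations coincide: both produce
  // {a0,b0} and {a1,b1} from inputs {a0,a1} and {b0,b1}.)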
12379 | if (N->getOpcode() == ARMISD::VTRN && N->getValueType(0) == MVT::v2i32) | |||
12380 | return true; | |||
12381 | ||||
12382 | return false; | |||
12383 | } | |||
12384 | ||||
12385 | static SDValue AddCombineToVPADD(SDNode *N, SDValue N0, SDValue N1, | |||
12386 | TargetLowering::DAGCombinerInfo &DCI, | |||
12387 | const ARMSubtarget *Subtarget) { | |||
12388 | // Look for ADD(VUZP.0, VUZP.1). | |||
12389 | if (!IsVUZPShuffleNode(N0.getNode()) || N0.getNode() != N1.getNode() || | |||
12390 | N0 == N1) | |||
12391 | return SDValue(); | |||
12392 | ||||
12393 | // Make sure the ADD is a 64-bit add; there is no 128-bit VPADD. | |||
12394 | if (!N->getValueType(0).is64BitVector()) | |||
12395 | return SDValue(); | |||
12396 | ||||
12397 | // Generate vpadd. | |||
12398 | SelectionDAG &DAG = DCI.DAG; | |||
12399 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | |||
12400 | SDLoc dl(N); | |||
12401 | SDNode *Unzip = N0.getNode(); | |||
12402 | EVT VT = N->getValueType(0); | |||
12403 | ||||
12404 | SmallVector<SDValue, 8> Ops; | |||
12405 | Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpadd, dl, | |||
12406 | TLI.getPointerTy(DAG.getDataLayout()))); | |||
12407 | Ops.push_back(Unzip->getOperand(0)); | |||
12408 | Ops.push_back(Unzip->getOperand(1)); | |||
12409 | ||||
12410 | return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, Ops); | |||
12411 | } | |||
12412 | ||||
12413 | static SDValue AddCombineVUZPToVPADDL(SDNode *N, SDValue N0, SDValue N1, | |||
12414 | TargetLowering::DAGCombinerInfo &DCI, | |||
12415 | const ARMSubtarget *Subtarget) { | |||
12416 | // Check for two extended operands. | |||
12417 | if (!(N0.getOpcode() == ISD::SIGN_EXTEND && | |||
12418 | N1.getOpcode() == ISD::SIGN_EXTEND) && | |||
12419 | !(N0.getOpcode() == ISD::ZERO_EXTEND && | |||
12420 | N1.getOpcode() == ISD::ZERO_EXTEND)) | |||
12421 | return SDValue(); | |||
12422 | ||||
12423 | SDValue N00 = N0.getOperand(0); | |||
12424 | SDValue N10 = N1.getOperand(0); | |||
12425 | ||||
12426 | // Look for ADD(SEXT(VUZP.0), SEXT(VUZP.1)) | |||
12427 | if (!IsVUZPShuffleNode(N00.getNode()) || N00.getNode() != N10.getNode() || | |||
12428 | N00 == N10) | |||
12429 | return SDValue(); | |||
12430 | ||||
12431 | // We only recognize Q register paddl here; this can't be reached until | |||
12432 | // after type legalization. | |||
12433 | if (!N00.getValueType().is64BitVector() || | |||
12434 | !N0.getValueType().is128BitVector()) | |||
12435 | return SDValue(); | |||
12436 | ||||
12437 | // Generate vpaddl. | |||
12438 | SelectionDAG &DAG = DCI.DAG; | |||
12439 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | |||
12440 | SDLoc dl(N); | |||
12441 | EVT VT = N->getValueType(0); | |||
12442 | ||||
12443 | SmallVector<SDValue, 8> Ops; | |||
12444 | // Form vpaddl.sN or vpaddl.uN depending on the kind of extension. | |||
12445 | unsigned Opcode; | |||
12446 | if (N0.getOpcode() == ISD::SIGN_EXTEND) | |||
12447 | Opcode = Intrinsic::arm_neon_vpaddls; | |||
12448 | else | |||
12449 | Opcode = Intrinsic::arm_neon_vpaddlu; | |||
12450 | Ops.push_back(DAG.getConstant(Opcode, dl, | |||
12451 | TLI.getPointerTy(DAG.getDataLayout()))); | |||
12452 | EVT ElemTy = N00.getValueType().getVectorElementType(); | |||
12453 | unsigned NumElts = VT.getVectorNumElements(); | |||
12454 | EVT ConcatVT = EVT::getVectorVT(*DAG.getContext(), ElemTy, NumElts * 2); | |||
12455 | SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), ConcatVT, | |||
12456 | N00.getOperand(0), N00.getOperand(1)); | |||
12457 | Ops.push_back(Concat); | |||
12458 | ||||
12459 | return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, Ops); | |||
12460 | } | |||
12461 | ||||
12462 | // FIXME: This function shouldn't be necessary; if we lower BUILD_VECTOR in | |||
12463 | // an appropriate manner, we end up with ADD(VUZP(ZEXT(N))), which is | |||
12464 | // much easier to match. | |||
12465 | static SDValue | |||
12466 | AddCombineBUILD_VECTORToVPADDL(SDNode *N, SDValue N0, SDValue N1, | |||
12467 | TargetLowering::DAGCombinerInfo &DCI, | |||
12468 | const ARMSubtarget *Subtarget) { | |||
12469 |   // Only perform the optimization after legalization and if NEON is
12470 |   // available. We also expect both operands to be BUILD_VECTORs.
12471 | if (DCI.isBeforeLegalize() || !Subtarget->hasNEON() | |||
12472 | || N0.getOpcode() != ISD::BUILD_VECTOR | |||
12473 | || N1.getOpcode() != ISD::BUILD_VECTOR) | |||
12474 | return SDValue(); | |||
12475 | ||||
12476 | // Check output type since VPADDL operand elements can only be 8, 16, or 32. | |||
12477 | EVT VT = N->getValueType(0); | |||
12478 | if (!VT.isInteger() || VT.getVectorElementType() == MVT::i64) | |||
12479 | return SDValue(); | |||
12480 | ||||
12481 | // Check that the vector operands are of the right form. | |||
12482 |   // N0 and N1 are BUILD_VECTOR nodes with N EXTRACT_VECTOR operands, where
12483 |   // N is the size of the formed vector.
12484 |   // Each EXTRACT_VECTOR should have the same input vector and an odd or even
12485 |   // index such that we have a pairwise add pattern.
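  //
  // For example, for a pairwise add over a four-element input vector v, the
  // expected form is:
  //   N0 = (build_vector (extract_elt v, 0), (extract_elt v, 2))
  //   N1 = (build_vector (extract_elt v, 1), (extract_elt v, 3))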
12486 | ||||
12487 | // Grab the vector that all EXTRACT_VECTOR nodes should be referencing. | |||
12488 | if (N0->getOperand(0)->getOpcode() != ISD::EXTRACT_VECTOR_ELT) | |||
12489 | return SDValue(); | |||
12490 | SDValue Vec = N0->getOperand(0)->getOperand(0); | |||
12491 | SDNode *V = Vec.getNode(); | |||
12492 | unsigned nextIndex = 0; | |||
12493 | ||||
12494 |   // For each operand of the two BUILD_VECTORs feeding the ADD, check that it
12495 |   // is an EXTRACT_VECTOR with the same input vector and the appropriate
12496 |   // (even/odd) index.
12497 | for (unsigned i = 0, e = N0->getNumOperands(); i != e; ++i) { | |||
12498 | if (N0->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT | |||
12499 | && N1->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT) { | |||
12500 | ||||
12501 | SDValue ExtVec0 = N0->getOperand(i); | |||
12502 | SDValue ExtVec1 = N1->getOperand(i); | |||
12503 | ||||
12504 |       // First operand is the vector; verify it's the same.
12505 | if (V != ExtVec0->getOperand(0).getNode() || | |||
12506 | V != ExtVec1->getOperand(0).getNode()) | |||
12507 | return SDValue(); | |||
12508 | ||||
12509 |       // Second is the constant; verify it's correct.
12510 | ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(ExtVec0->getOperand(1)); | |||
12511 | ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(ExtVec1->getOperand(1)); | |||
12512 | ||||
12513 | // For the constant, we want to see all the even or all the odd. | |||
12514 | if (!C0 || !C1 || C0->getZExtValue() != nextIndex | |||
12515 | || C1->getZExtValue() != nextIndex+1) | |||
12516 | return SDValue(); | |||
12517 | ||||
12518 | // Increment index. | |||
12519 | nextIndex+=2; | |||
12520 | } else | |||
12521 | return SDValue(); | |||
12522 | } | |||
12523 | ||||
12524 | // Don't generate vpaddl+vmovn; we'll match it to vpadd later. Also make sure | |||
12525 | // we're using the entire input vector, otherwise there's a size/legality | |||
12526 | // mismatch somewhere. | |||
12527 | if (nextIndex != Vec.getValueType().getVectorNumElements() || | |||
12528 | Vec.getValueType().getVectorElementType() == VT.getVectorElementType()) | |||
12529 | return SDValue(); | |||
12530 | ||||
12531 | // Create VPADDL node. | |||
12532 | SelectionDAG &DAG = DCI.DAG; | |||
12533 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | |||
12534 | ||||
12535 | SDLoc dl(N); | |||
12536 | ||||
12537 | // Build operand list. | |||
12538 | SmallVector<SDValue, 8> Ops; | |||
12539 | Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddls, dl, | |||
12540 | TLI.getPointerTy(DAG.getDataLayout()))); | |||
12541 | ||||
12542 | // Input is the vector. | |||
12543 | Ops.push_back(Vec); | |||
12544 | ||||
12545 | // Get widened type and narrowed type. | |||
12546 | MVT widenType; | |||
12547 | unsigned numElem = VT.getVectorNumElements(); | |||
12548 | ||||
12549 | EVT inputLaneType = Vec.getValueType().getVectorElementType(); | |||
12550 | switch (inputLaneType.getSimpleVT().SimpleTy) { | |||
12551 | case MVT::i8: widenType = MVT::getVectorVT(MVT::i16, numElem); break; | |||
12552 | case MVT::i16: widenType = MVT::getVectorVT(MVT::i32, numElem); break; | |||
12553 | case MVT::i32: widenType = MVT::getVectorVT(MVT::i64, numElem); break; | |||
12554 | default: | |||
12555 |     llvm_unreachable("Invalid vector element type for padd optimization.");
12556 | } | |||
12557 | ||||
12558 | SDValue tmp = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, widenType, Ops); | |||
12559 | unsigned ExtOp = VT.bitsGT(tmp.getValueType()) ? ISD::ANY_EXTEND : ISD::TRUNCATE; | |||
12560 | return DAG.getNode(ExtOp, dl, VT, tmp); | |||
12561 | } | |||
12562 | ||||
12563 | static SDValue findMUL_LOHI(SDValue V) { | |||
12564 | if (V->getOpcode() == ISD::UMUL_LOHI || | |||
12565 | V->getOpcode() == ISD::SMUL_LOHI) | |||
12566 | return V; | |||
12567 | return SDValue(); | |||
12568 | } | |||
12569 | ||||
12570 | static SDValue AddCombineTo64BitSMLAL16(SDNode *AddcNode, SDNode *AddeNode, | |||
12571 | TargetLowering::DAGCombinerInfo &DCI, | |||
12572 | const ARMSubtarget *Subtarget) { | |||
12573 | if (!Subtarget->hasBaseDSP()) | |||
12574 | return SDValue(); | |||
12575 | ||||
12576 |   // SMLALBB, SMLALBT, SMLALTB, SMLALTT multiply two 16-bit values and
12577 |   // accumulate the product into a 64-bit value. The 16-bit values will
12578 |   // be sign extended somehow or SRA'd into 32-bit values:
12579 |   // (addc (mul 16bit, 16bit), lo) glued to (adde (sra (mul ...), 31), hi)
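  // (The B/T suffixes pick the bottom or top 16 bits of each multiply
  // operand; SMLALTB, for instance, multiplies the top half of the first
  // operand by the bottom half of the second.)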
12580 | SDValue Mul = AddcNode->getOperand(0); | |||
12581 | SDValue Lo = AddcNode->getOperand(1); | |||
12582 | if (Mul.getOpcode() != ISD::MUL) { | |||
12583 | Lo = AddcNode->getOperand(0); | |||
12584 | Mul = AddcNode->getOperand(1); | |||
12585 | if (Mul.getOpcode() != ISD::MUL) | |||
12586 | return SDValue(); | |||
12587 | } | |||
12588 | ||||
12589 | SDValue SRA = AddeNode->getOperand(0); | |||
12590 | SDValue Hi = AddeNode->getOperand(1); | |||
12591 | if (SRA.getOpcode() != ISD::SRA) { | |||
12592 | SRA = AddeNode->getOperand(1); | |||
12593 | Hi = AddeNode->getOperand(0); | |||
12594 | if (SRA.getOpcode() != ISD::SRA) | |||
12595 | return SDValue(); | |||
12596 | } | |||
12597 | if (auto Const = dyn_cast<ConstantSDNode>(SRA.getOperand(1))) { | |||
12598 | if (Const->getZExtValue() != 31) | |||
12599 | return SDValue(); | |||
12600 | } else | |||
12601 | return SDValue(); | |||
12602 | ||||
12603 | if (SRA.getOperand(0) != Mul) | |||
12604 | return SDValue(); | |||
12605 | ||||
12606 | SelectionDAG &DAG = DCI.DAG; | |||
12607 | SDLoc dl(AddcNode); | |||
12608 | unsigned Opcode = 0; | |||
12609 | SDValue Op0; | |||
12610 | SDValue Op1; | |||
12611 | ||||
12612 | if (isS16(Mul.getOperand(0), DAG) && isS16(Mul.getOperand(1), DAG)) { | |||
12613 | Opcode = ARMISD::SMLALBB; | |||
12614 | Op0 = Mul.getOperand(0); | |||
12615 | Op1 = Mul.getOperand(1); | |||
12616 | } else if (isS16(Mul.getOperand(0), DAG) && isSRA16(Mul.getOperand(1))) { | |||
12617 | Opcode = ARMISD::SMLALBT; | |||
12618 | Op0 = Mul.getOperand(0); | |||
12619 | Op1 = Mul.getOperand(1).getOperand(0); | |||
12620 | } else if (isSRA16(Mul.getOperand(0)) && isS16(Mul.getOperand(1), DAG)) { | |||
12621 | Opcode = ARMISD::SMLALTB; | |||
12622 | Op0 = Mul.getOperand(0).getOperand(0); | |||
12623 | Op1 = Mul.getOperand(1); | |||
12624 | } else if (isSRA16(Mul.getOperand(0)) && isSRA16(Mul.getOperand(1))) { | |||
12625 | Opcode = ARMISD::SMLALTT; | |||
12626 | Op0 = Mul->getOperand(0).getOperand(0); | |||
12627 | Op1 = Mul->getOperand(1).getOperand(0); | |||
12628 | } | |||
12629 | ||||
12630 | if (!Op0 || !Op1) | |||
12631 | return SDValue(); | |||
12632 | ||||
12633 | SDValue SMLAL = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32), | |||
12634 | Op0, Op1, Lo, Hi); | |||
12635 |   // Replace the ADD nodes' uses with the MLAL node's values.
12636 | SDValue HiMLALResult(SMLAL.getNode(), 1); | |||
12637 | SDValue LoMLALResult(SMLAL.getNode(), 0); | |||
12638 | ||||
12639 | DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), LoMLALResult); | |||
12640 | DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), HiMLALResult); | |||
12641 | ||||
12642 | // Return original node to notify the driver to stop replacing. | |||
12643 | SDValue resNode(AddcNode, 0); | |||
12644 | return resNode; | |||
12645 | } | |||
12646 | ||||
12647 | static SDValue AddCombineTo64bitMLAL(SDNode *AddeSubeNode, | |||
12648 | TargetLowering::DAGCombinerInfo &DCI, | |||
12649 | const ARMSubtarget *Subtarget) { | |||
12650 | // Look for multiply add opportunities. | |||
12651 |   // The pattern is an ISD::UMUL_LOHI followed by two add nodes, where
12652 |   // each add node consumes a value from ISD::UMUL_LOHI and there is
12653 | // a glue link from the first add to the second add. | |||
12654 | // If we find this pattern, we can replace the U/SMUL_LOHI, ADDC, and ADDE by | |||
12655 | // a S/UMLAL instruction. | |||
12656 | // UMUL_LOHI | |||
12657 | // / :lo \ :hi | |||
12658 | // V \ [no multiline comment] | |||
12659 | // loAdd -> ADDC | | |||
12660 | // \ :carry / | |||
12661 | // V V | |||
12662 | // ADDE <- hiAdd | |||
12663 | // | |||
12664 | // In the special case where only the higher part of a signed result is used | |||
12665 | // and the add to the low part of the result of ISD::UMUL_LOHI adds or subtracts | |||
12666 | // a constant with the exact value of 0x80000000, we recognize we are dealing | |||
12667 | // with a "rounded multiply and add" (or subtract) and transform it into | |||
12668 |   // either an ARMISD::SMMLAR or an ARMISD::SMMLSR, respectively.
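  //
  // On success, the whole pattern collapses to a single two-result node,
  // schematically {lo, hi} = (U/SMLAL a, b, loAdd, hiAdd), i.e. the 64-bit
  // product a*b plus the 64-bit addend hiAdd:loAdd.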
12669 | ||||
12670 |   assert((AddeSubeNode->getOpcode() == ARMISD::ADDE ||
12671 |           AddeSubeNode->getOpcode() == ARMISD::SUBE) &&
12672 |          "Expect an ADDE or SUBE");
12673 | ||||
12674 |   assert(AddeSubeNode->getNumOperands() == 3 &&
12675 |          AddeSubeNode->getOperand(2).getValueType() == MVT::i32 &&
12676 |          "ADDE node has the wrong inputs");
12677 | ||||
12678 | // Check that we are chained to the right ADDC or SUBC node. | |||
12679 | SDNode *AddcSubcNode = AddeSubeNode->getOperand(2).getNode(); | |||
12680 | if ((AddeSubeNode->getOpcode() == ARMISD::ADDE && | |||
12681 | AddcSubcNode->getOpcode() != ARMISD::ADDC) || | |||
12682 | (AddeSubeNode->getOpcode() == ARMISD::SUBE && | |||
12683 | AddcSubcNode->getOpcode() != ARMISD::SUBC)) | |||
12684 | return SDValue(); | |||
12685 | ||||
12686 | SDValue AddcSubcOp0 = AddcSubcNode->getOperand(0); | |||
12687 | SDValue AddcSubcOp1 = AddcSubcNode->getOperand(1); | |||
12688 | ||||
12689 | // Check if the two operands are from the same mul_lohi node. | |||
12690 | if (AddcSubcOp0.getNode() == AddcSubcOp1.getNode()) | |||
12691 | return SDValue(); | |||
12692 | ||||
12693 |   assert(AddcSubcNode->getNumValues() == 2 &&
12694 |          AddcSubcNode->getValueType(0) == MVT::i32 &&
12695 |          "Expect ADDC with two result values. First: i32");
12696 | ||||
12697 | // Check that the ADDC adds the low result of the S/UMUL_LOHI. If not, it | |||
12698 |   // may be an SMLAL which multiplies two 16-bit values.
12699 | if (AddeSubeNode->getOpcode() == ARMISD::ADDE && | |||
12700 | AddcSubcOp0->getOpcode() != ISD::UMUL_LOHI && | |||
12701 | AddcSubcOp0->getOpcode() != ISD::SMUL_LOHI && | |||
12702 | AddcSubcOp1->getOpcode() != ISD::UMUL_LOHI && | |||
12703 | AddcSubcOp1->getOpcode() != ISD::SMUL_LOHI) | |||
12704 | return AddCombineTo64BitSMLAL16(AddcSubcNode, AddeSubeNode, DCI, Subtarget); | |||
12705 | ||||
12706 | // Check for the triangle shape. | |||
12707 | SDValue AddeSubeOp0 = AddeSubeNode->getOperand(0); | |||
12708 | SDValue AddeSubeOp1 = AddeSubeNode->getOperand(1); | |||
12709 | ||||
12710 | // Make sure that the ADDE/SUBE operands are not coming from the same node. | |||
12711 | if (AddeSubeOp0.getNode() == AddeSubeOp1.getNode()) | |||
12712 | return SDValue(); | |||
12713 | ||||
12714 | // Find the MUL_LOHI node walking up ADDE/SUBE's operands. | |||
12715 | bool IsLeftOperandMUL = false; | |||
12716 | SDValue MULOp = findMUL_LOHI(AddeSubeOp0); | |||
12717 | if (MULOp == SDValue()) | |||
12718 | MULOp = findMUL_LOHI(AddeSubeOp1); | |||
12719 | else | |||
12720 | IsLeftOperandMUL = true; | |||
12721 | if (MULOp == SDValue()) | |||
12722 | return SDValue(); | |||
12723 | ||||
12724 | // Figure out the right opcode. | |||
12725 | unsigned Opc = MULOp->getOpcode(); | |||
12726 | unsigned FinalOpc = (Opc == ISD::SMUL_LOHI) ? ARMISD::SMLAL : ARMISD::UMLAL; | |||
12727 | ||||
12728 | // Figure out the high and low input values to the MLAL node. | |||
12729 | SDValue *HiAddSub = nullptr; | |||
12730 | SDValue *LoMul = nullptr; | |||
12731 | SDValue *LowAddSub = nullptr; | |||
12732 | ||||
12733 | // Ensure that ADDE/SUBE is from high result of ISD::xMUL_LOHI. | |||
12734 | if ((AddeSubeOp0 != MULOp.getValue(1)) && (AddeSubeOp1 != MULOp.getValue(1))) | |||
12735 | return SDValue(); | |||
12736 | ||||
12737 | if (IsLeftOperandMUL) | |||
12738 | HiAddSub = &AddeSubeOp1; | |||
12739 | else | |||
12740 | HiAddSub = &AddeSubeOp0; | |||
12741 | ||||
12742 |   // Ensure that LoMul and LowAddSub are taken from the correct ISD::SMUL_LOHI node
12743 | // whose low result is fed to the ADDC/SUBC we are checking. | |||
12744 | ||||
12745 | if (AddcSubcOp0 == MULOp.getValue(0)) { | |||
12746 | LoMul = &AddcSubcOp0; | |||
12747 | LowAddSub = &AddcSubcOp1; | |||
12748 | } | |||
12749 | if (AddcSubcOp1 == MULOp.getValue(0)) { | |||
12750 | LoMul = &AddcSubcOp1; | |||
12751 | LowAddSub = &AddcSubcOp0; | |||
12752 | } | |||
12753 | ||||
12754 | if (!LoMul) | |||
12755 | return SDValue(); | |||
12756 | ||||
12757 | // If HiAddSub is the same node as ADDC/SUBC or is a predecessor of ADDC/SUBC | |||
12758 | // the replacement below will create a cycle. | |||
12759 | if (AddcSubcNode == HiAddSub->getNode() || | |||
12760 | AddcSubcNode->isPredecessorOf(HiAddSub->getNode())) | |||
12761 | return SDValue(); | |||
12762 | ||||
12763 | // Create the merged node. | |||
12764 | SelectionDAG &DAG = DCI.DAG; | |||
12765 | ||||
12766 | // Start building operand list. | |||
12767 | SmallVector<SDValue, 8> Ops; | |||
12768 | Ops.push_back(LoMul->getOperand(0)); | |||
12769 | Ops.push_back(LoMul->getOperand(1)); | |||
12770 | ||||
12771 | // Check whether we can use SMMLAR, SMMLSR or SMMULR instead. For this to be | |||
12772 | // the case, we must be doing signed multiplication and only use the higher | |||
12773 |   // part of the result of the MLAL; furthermore, the LowAddSub must be a
12774 |   // constant addition or subtraction with the exact value 0x80000000.
12775 | if (Subtarget->hasV6Ops() && Subtarget->hasDSP() && Subtarget->useMulOps() && | |||
12776 | FinalOpc == ARMISD::SMLAL && !AddeSubeNode->hasAnyUseOfValue(1) && | |||
12777 | LowAddSub->getNode()->getOpcode() == ISD::Constant && | |||
12778 | static_cast<ConstantSDNode *>(LowAddSub->getNode())->getZExtValue() == | |||
12779 | 0x80000000) { | |||
12780 | Ops.push_back(*HiAddSub); | |||
12781 | if (AddcSubcNode->getOpcode() == ARMISD::SUBC) { | |||
12782 | FinalOpc = ARMISD::SMMLSR; | |||
12783 | } else { | |||
12784 | FinalOpc = ARMISD::SMMLAR; | |||
12785 | } | |||
12786 | SDValue NewNode = DAG.getNode(FinalOpc, SDLoc(AddcSubcNode), MVT::i32, Ops); | |||
12787 | DAG.ReplaceAllUsesOfValueWith(SDValue(AddeSubeNode, 0), NewNode); | |||
12788 | ||||
12789 | return SDValue(AddeSubeNode, 0); | |||
12790 | } else if (AddcSubcNode->getOpcode() == ARMISD::SUBC) | |||
12791 | // SMMLS is generated during instruction selection and the rest of this | |||
12792 |     // function cannot handle the case where AddcSubcNode is a SUBC.
12793 | return SDValue(); | |||
12794 | ||||
12795 | // Finish building the operand list for {U/S}MLAL | |||
12796 | Ops.push_back(*LowAddSub); | |||
12797 | Ops.push_back(*HiAddSub); | |||
12798 | ||||
12799 | SDValue MLALNode = DAG.getNode(FinalOpc, SDLoc(AddcSubcNode), | |||
12800 | DAG.getVTList(MVT::i32, MVT::i32), Ops); | |||
12801 | ||||
12802 |   // Replace the ADD nodes' uses with the MLAL node's values.
12803 | SDValue HiMLALResult(MLALNode.getNode(), 1); | |||
12804 | DAG.ReplaceAllUsesOfValueWith(SDValue(AddeSubeNode, 0), HiMLALResult); | |||
12805 | ||||
12806 | SDValue LoMLALResult(MLALNode.getNode(), 0); | |||
12807 | DAG.ReplaceAllUsesOfValueWith(SDValue(AddcSubcNode, 0), LoMLALResult); | |||
12808 | ||||
12809 | // Return original node to notify the driver to stop replacing. | |||
12810 | return SDValue(AddeSubeNode, 0); | |||
12811 | } | |||
12812 | ||||
12813 | static SDValue AddCombineTo64bitUMAAL(SDNode *AddeNode, | |||
12814 | TargetLowering::DAGCombinerInfo &DCI, | |||
12815 | const ARMSubtarget *Subtarget) { | |||
12816 | // UMAAL is similar to UMLAL except that it adds two unsigned values. | |||
12817 | // While trying to combine for the other MLAL nodes, first search for the | |||
12818 | // chance to use UMAAL. Check if Addc uses a node which has already | |||
12819 | // been combined into a UMLAL. The other pattern is UMLAL using Addc/Adde | |||
12820 | // as the addend, and it's handled in PerformUMLALCombine. | |||
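  //
  // UMAAL computes hi:lo = a * b + c + d, which can never overflow 64 bits:
  // (2^32 - 1)^2 + 2*(2^32 - 1) == 2^64 - 1.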
12821 | ||||
12822 | if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP()) | |||
12823 | return AddCombineTo64bitMLAL(AddeNode, DCI, Subtarget); | |||
12824 | ||||
12825 | // Check that we have a glued ADDC node. | |||
12826 | SDNode* AddcNode = AddeNode->getOperand(2).getNode(); | |||
12827 | if (AddcNode->getOpcode() != ARMISD::ADDC) | |||
12828 | return SDValue(); | |||
12829 | ||||
12830 |   // Find the previously combined UMLAL, or quit if it doesn't exist.
12831 | SDNode *UmlalNode = nullptr; | |||
12832 | SDValue AddHi; | |||
12833 | if (AddcNode->getOperand(0).getOpcode() == ARMISD::UMLAL) { | |||
12834 | UmlalNode = AddcNode->getOperand(0).getNode(); | |||
12835 | AddHi = AddcNode->getOperand(1); | |||
12836 | } else if (AddcNode->getOperand(1).getOpcode() == ARMISD::UMLAL) { | |||
12837 | UmlalNode = AddcNode->getOperand(1).getNode(); | |||
12838 | AddHi = AddcNode->getOperand(0); | |||
12839 | } else { | |||
12840 | return AddCombineTo64bitMLAL(AddeNode, DCI, Subtarget); | |||
12841 | } | |||
12842 | ||||
12843 | // The ADDC should be glued to an ADDE node, which uses the same UMLAL as | |||
12844 |   // the ADDC, as well as zero.
12845 | if (!isNullConstant(UmlalNode->getOperand(3))) | |||
12846 | return SDValue(); | |||
12847 | ||||
12848 | if ((isNullConstant(AddeNode->getOperand(0)) && | |||
12849 | AddeNode->getOperand(1).getNode() == UmlalNode) || | |||
12850 | (AddeNode->getOperand(0).getNode() == UmlalNode && | |||
12851 | isNullConstant(AddeNode->getOperand(1)))) { | |||
12852 | SelectionDAG &DAG = DCI.DAG; | |||
12853 | SDValue Ops[] = { UmlalNode->getOperand(0), UmlalNode->getOperand(1), | |||
12854 | UmlalNode->getOperand(2), AddHi }; | |||
12855 | SDValue UMAAL = DAG.getNode(ARMISD::UMAAL, SDLoc(AddcNode), | |||
12856 | DAG.getVTList(MVT::i32, MVT::i32), Ops); | |||
12857 | ||||
12858 |     // Replace the ADD nodes' uses with the UMAAL node's values.
12859 | DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), SDValue(UMAAL.getNode(), 1)); | |||
12860 | DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), SDValue(UMAAL.getNode(), 0)); | |||
12861 | ||||
12862 | // Return original node to notify the driver to stop replacing. | |||
12863 | return SDValue(AddeNode, 0); | |||
12864 | } | |||
12865 | return SDValue(); | |||
12866 | } | |||
12867 | ||||
12868 | static SDValue PerformUMLALCombine(SDNode *N, SelectionDAG &DAG, | |||
12869 | const ARMSubtarget *Subtarget) { | |||
12870 | if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP()) | |||
12871 | return SDValue(); | |||
12872 | ||||
12873 | // Check that we have a pair of ADDC and ADDE as operands. | |||
12874 | // Both addends of the ADDE must be zero. | |||
12875 | SDNode* AddcNode = N->getOperand(2).getNode(); | |||
12876 | SDNode* AddeNode = N->getOperand(3).getNode(); | |||
12877 | if ((AddcNode->getOpcode() == ARMISD::ADDC) && | |||
12878 | (AddeNode->getOpcode() == ARMISD::ADDE) && | |||
12879 | isNullConstant(AddeNode->getOperand(0)) && | |||
12880 | isNullConstant(AddeNode->getOperand(1)) && | |||
12881 | (AddeNode->getOperand(2).getNode() == AddcNode)) | |||
12882 | return DAG.getNode(ARMISD::UMAAL, SDLoc(N), | |||
12883 | DAG.getVTList(MVT::i32, MVT::i32), | |||
12884 | {N->getOperand(0), N->getOperand(1), | |||
12885 | AddcNode->getOperand(0), AddcNode->getOperand(1)}); | |||
12886 | else | |||
12887 | return SDValue(); | |||
12888 | } | |||
12889 | ||||
12890 | static SDValue PerformAddcSubcCombine(SDNode *N, | |||
12891 | TargetLowering::DAGCombinerInfo &DCI, | |||
12892 | const ARMSubtarget *Subtarget) { | |||
12893 | SelectionDAG &DAG(DCI.DAG); | |||
12894 | ||||
12895 | if (N->getOpcode() == ARMISD::SUBC && N->hasAnyUseOfValue(1)) { | |||
12896 | // (SUBC (ADDE 0, 0, C), 1) -> C | |||
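| // ADDE(0, 0, C) materializes the incoming carry C as 0 or 1; subtracting 1 | |||
| // then borrows exactly when C was clear, so the SUBC's carry-out is C again | |||
| // and can be forwarded directly. | |||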
12897 | SDValue LHS = N->getOperand(0); | |||
12898 | SDValue RHS = N->getOperand(1); | |||
12899 | if (LHS->getOpcode() == ARMISD::ADDE && | |||
12900 | isNullConstant(LHS->getOperand(0)) && | |||
12901 | isNullConstant(LHS->getOperand(1)) && isOneConstant(RHS)) { | |||
12902 | return DCI.CombineTo(N, SDValue(N, 0), LHS->getOperand(2)); | |||
12903 | } | |||
12904 | } | |||
12905 | ||||
12906 | if (Subtarget->isThumb1Only()) { | |||
12907 | SDValue RHS = N->getOperand(1); | |||
12908 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) { | |||
12909 | int32_t imm = C->getSExtValue(); | |||
12910 | if (imm < 0 && imm > std::numeric_limits<int>::min()) { | |||
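| // A minimal sketch of the rewrite: ADDC(x, -42) becomes SUBC(x, 42), since | |||
| // Thumb1 cannot encode negative immediates but can subtract a small | |||
| // positive one directly. | |||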
12911 | SDLoc DL(N); | |||
12912 | RHS = DAG.getConstant(-imm, DL, MVT::i32); | |||
12913 | unsigned Opcode = (N->getOpcode() == ARMISD::ADDC) ? ARMISD::SUBC | |||
12914 | : ARMISD::ADDC; | |||
12915 | return DAG.getNode(Opcode, DL, N->getVTList(), N->getOperand(0), RHS); | |||
12916 | } | |||
12917 | } | |||
12918 | } | |||
12919 | ||||
12920 | return SDValue(); | |||
12921 | } | |||
12922 | ||||
12923 | static SDValue PerformAddeSubeCombine(SDNode *N, | |||
12924 | TargetLowering::DAGCombinerInfo &DCI, | |||
12925 | const ARMSubtarget *Subtarget) { | |||
12926 | if (Subtarget->isThumb1Only()) { | |||
12927 | SelectionDAG &DAG = DCI.DAG; | |||
12928 | SDValue RHS = N->getOperand(1); | |||
12929 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) { | |||
12930 | int64_t imm = C->getSExtValue(); | |||
12931 | if (imm < 0) { | |||
12932 | SDLoc DL(N); | |||
12933 | ||||
12934 | // The with-carry-in form matches bitwise not instead of the negation. | |||
12935 | // Effectively, the inverse interpretation of the carry flag already | |||
12936 | // accounts for part of the negation. | |||
12937 | RHS = DAG.getConstant(~imm, DL, MVT::i32); | |||
12938 | ||||
12939 | unsigned Opcode = (N->getOpcode() == ARMISD::ADDE) ? ARMISD::SUBE | |||
12940 | : ARMISD::ADDE; | |||
12941 | return DAG.getNode(Opcode, DL, N->getVTList(), | |||
12942 | N->getOperand(0), RHS, N->getOperand(2)); | |||
12943 | } | |||
12944 | } | |||
12945 | } else if (N->getOperand(1)->getOpcode() == ISD::SMUL_LOHI) { | |||
12946 | return AddCombineTo64bitMLAL(N, DCI, Subtarget); | |||
12947 | } | |||
12948 | return SDValue(); | |||
12949 | } | |||
12950 | ||||
12951 | static SDValue PerformSELECTCombine(SDNode *N, | |||
12952 | TargetLowering::DAGCombinerInfo &DCI, | |||
12953 | const ARMSubtarget *Subtarget) { | |||
12954 | if (!Subtarget->hasMVEIntegerOps()) | |||
12955 | return SDValue(); | |||
12956 | ||||
12957 | SDLoc dl(N); | |||
12958 | SDValue SetCC; | |||
12959 | SDValue LHS; | |||
12960 | SDValue RHS; | |||
12961 | ISD::CondCode CC; | |||
12962 | SDValue TrueVal; | |||
12963 | SDValue FalseVal; | |||
12964 | ||||
12965 | if (N->getOpcode() == ISD::SELECT && | |||
12966 | N->getOperand(0)->getOpcode() == ISD::SETCC) { | |||
12967 | SetCC = N->getOperand(0); | |||
12968 | LHS = SetCC->getOperand(0); | |||
12969 | RHS = SetCC->getOperand(1); | |||
12970 | CC = cast<CondCodeSDNode>(SetCC->getOperand(2))->get(); | |||
12971 | TrueVal = N->getOperand(1); | |||
12972 | FalseVal = N->getOperand(2); | |||
12973 | } else if (N->getOpcode() == ISD::SELECT_CC) { | |||
12974 | LHS = N->getOperand(0); | |||
12975 | RHS = N->getOperand(1); | |||
12976 | CC = cast<CondCodeSDNode>(N->getOperand(4))->get(); | |||
12977 | TrueVal = N->getOperand(2); | |||
12978 | FalseVal = N->getOperand(3); | |||
12979 | } else { | |||
12980 | return SDValue(); | |||
12981 | } | |||
12982 | ||||
12983 | unsigned int Opcode = 0; | |||
12984 | if ((TrueVal->getOpcode() == ISD::VECREDUCE_UMIN || | |||
12985 | FalseVal->getOpcode() == ISD::VECREDUCE_UMIN) && | |||
12986 | (CC == ISD::SETULT || CC == ISD::SETUGT)) { | |||
12987 | Opcode = ARMISD::VMINVu; | |||
12988 | if (CC == ISD::SETUGT) | |||
12989 | std::swap(TrueVal, FalseVal); | |||
12990 | } else if ((TrueVal->getOpcode() == ISD::VECREDUCE_SMIN || | |||
12991 | FalseVal->getOpcode() == ISD::VECREDUCE_SMIN) && | |||
12992 | (CC == ISD::SETLT || CC == ISD::SETGT)) { | |||
12993 | Opcode = ARMISD::VMINVs; | |||
12994 | if (CC == ISD::SETGT) | |||
12995 | std::swap(TrueVal, FalseVal); | |||
12996 | } else if ((TrueVal->getOpcode() == ISD::VECREDUCE_UMAX || | |||
12997 | FalseVal->getOpcode() == ISD::VECREDUCE_UMAX) && | |||
12998 | (CC == ISD::SETUGT || CC == ISD::SETULT)) { | |||
12999 | Opcode = ARMISD::VMAXVu; | |||
13000 | if (CC == ISD::SETULT) | |||
13001 | std::swap(TrueVal, FalseVal); | |||
13002 | } else if ((TrueVal->getOpcode() == ISD::VECREDUCE_SMAX || | |||
13003 | FalseVal->getOpcode() == ISD::VECREDUCE_SMAX) && | |||
13004 | (CC == ISD::SETGT || CC == ISD::SETLT)) { | |||
13005 | Opcode = ARMISD::VMAXVs; | |||
13006 | if (CC == ISD::SETLT) | |||
13007 | std::swap(TrueVal, FalseVal); | |||
13008 | } else | |||
13009 | return SDValue(); | |||
13010 | ||||
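| // At this point, e.g. select(setcc(x, vecreduce_umin(v), setult), x, | |||
| // vecreduce_umin(v)) has been mapped to VMINVu, which computes | |||
| // min(x, min-across-lanes(v)) in a single instruction. | |||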
13011 | // Normalise to the right hand side being the vector reduction | |||
13012 | switch (TrueVal->getOpcode()) { | |||
13013 | case ISD::VECREDUCE_UMIN: | |||
13014 | case ISD::VECREDUCE_SMIN: | |||
13015 | case ISD::VECREDUCE_UMAX: | |||
13016 | case ISD::VECREDUCE_SMAX: | |||
13017 | std::swap(LHS, RHS); | |||
13018 | std::swap(TrueVal, FalseVal); | |||
13019 | break; | |||
13020 | } | |||
13021 | ||||
13022 | EVT VectorType = FalseVal->getOperand(0).getValueType(); | |||
13023 | ||||
13024 | if (VectorType != MVT::v16i8 && VectorType != MVT::v8i16 && | |||
13025 | VectorType != MVT::v4i32) | |||
13026 | return SDValue(); | |||
13027 | ||||
13028 | EVT VectorScalarType = VectorType.getVectorElementType(); | |||
13029 | ||||
13030 | // The values being selected must also be the ones being compared | |||
13031 | if (TrueVal != LHS || FalseVal != RHS) | |||
13032 | return SDValue(); | |||
13033 | ||||
13034 | EVT LeftType = LHS->getValueType(0); | |||
13035 | EVT RightType = RHS->getValueType(0); | |||
13036 | ||||
13037 | // The types must match the reduced type too | |||
13038 | if (LeftType != VectorScalarType || RightType != VectorScalarType) | |||
13039 | return SDValue(); | |||
13040 | ||||
13041 | // Legalise the scalar to an i32 | |||
13042 | if (VectorScalarType != MVT::i32) | |||
13043 | LHS = DCI.DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, LHS); | |||
13044 | ||||
13045 | // Generate the reduction as an i32 for legalisation purposes | |||
13046 | auto Reduction = | |||
13047 | DCI.DAG.getNode(Opcode, dl, MVT::i32, LHS, RHS->getOperand(0)); | |||
13048 | ||||
13049 | // The result isn't actually an i32 so truncate it back to its original type | |||
13050 | if (VectorScalarType != MVT::i32) | |||
13051 | Reduction = DCI.DAG.getNode(ISD::TRUNCATE, dl, VectorScalarType, Reduction); | |||
13052 | ||||
13053 | return Reduction; | |||
13054 | } | |||
13055 | ||||
13056 | // A special combine for the vqdmulh family of instructions. This is one of | |||
13057 | // the potential set of patterns that could match this instruction. The base | |||
13058 | // pattern you would expect is min(max(ashr(mul(mul(sext(x), 2), sext(y)), 16))). | |||
13059 | // This matches the different min(max(ashr(mul(mul(sext(x), sext(y)), 2), 16))), | |||
13060 | // which llvm will have optimized to min(ashr(mul(sext(x), sext(y)), 15)) as | |||
13061 | // the max is unnecessary. | |||
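| // For example, for i16 elements the clamped pattern | |||
| //   smin(sra(mul(sext(x), sext(y)), 15), 32767) | |||
| // computes the same lane-wise value as the saturating VQDMULH(x, y), which | |||
| // produces sat((2 * x * y) >> 16). | |||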
13062 | static SDValue PerformVQDMULHCombine(SDNode *N, SelectionDAG &DAG) { | |||
13063 | EVT VT = N->getValueType(0); | |||
13064 | SDValue Shft; | |||
13065 | ConstantSDNode *Clamp; | |||
13066 | ||||
13067 | if (!VT.isVector() || VT.getScalarSizeInBits() > 64) | |||
13068 | return SDValue(); | |||
13069 | ||||
13070 | if (N->getOpcode() == ISD::SMIN) { | |||
13071 | Shft = N->getOperand(0); | |||
13072 | Clamp = isConstOrConstSplat(N->getOperand(1)); | |||
13073 | } else if (N->getOpcode() == ISD::VSELECT) { | |||
13074 | // Detect a SMIN, which for an i64 node will be a vselect/setcc, not a smin. | |||
13075 | SDValue Cmp = N->getOperand(0); | |||
13076 | if (Cmp.getOpcode() != ISD::SETCC || | |||
13077 | cast<CondCodeSDNode>(Cmp.getOperand(2))->get() != ISD::SETLT || | |||
13078 | Cmp.getOperand(0) != N->getOperand(1) || | |||
13079 | Cmp.getOperand(1) != N->getOperand(2)) | |||
13080 | return SDValue(); | |||
13081 | Shft = N->getOperand(1); | |||
13082 | Clamp = isConstOrConstSplat(N->getOperand(2)); | |||
13083 | } else | |||
13084 | return SDValue(); | |||
13085 | ||||
13086 | if (!Clamp) | |||
13087 | return SDValue(); | |||
13088 | ||||
13089 | MVT ScalarType; | |||
13090 | int ShftAmt = 0; | |||
13091 | switch (Clamp->getSExtValue()) { | |||
13092 | case (1 << 7) - 1: | |||
13093 | ScalarType = MVT::i8; | |||
13094 | ShftAmt = 7; | |||
13095 | break; | |||
13096 | case (1 << 15) - 1: | |||
13097 | ScalarType = MVT::i16; | |||
13098 | ShftAmt = 15; | |||
13099 | break; | |||
13100 | case (1ULL << 31) - 1: | |||
13101 | ScalarType = MVT::i32; | |||
13102 | ShftAmt = 31; | |||
13103 | break; | |||
13104 | default: | |||
13105 | return SDValue(); | |||
13106 | } | |||
13107 | ||||
13108 | if (Shft.getOpcode() != ISD::SRA) | |||
13109 | return SDValue(); | |||
13110 | ConstantSDNode *N1 = isConstOrConstSplat(Shft.getOperand(1)); | |||
13111 | if (!N1 || N1->getSExtValue() != ShftAmt) | |||
13112 | return SDValue(); | |||
13113 | ||||
13114 | SDValue Mul = Shft.getOperand(0); | |||
13115 | if (Mul.getOpcode() != ISD::MUL) | |||
13116 | return SDValue(); | |||
13117 | ||||
13118 | SDValue Ext0 = Mul.getOperand(0); | |||
13119 | SDValue Ext1 = Mul.getOperand(1); | |||
13120 | if (Ext0.getOpcode() != ISD::SIGN_EXTEND || | |||
13121 | Ext1.getOpcode() != ISD::SIGN_EXTEND) | |||
13122 | return SDValue(); | |||
13123 | EVT VecVT = Ext0.getOperand(0).getValueType(); | |||
13124 | if (!VecVT.isPow2VectorType() || VecVT.getVectorNumElements() == 1) | |||
13125 | return SDValue(); | |||
13126 | if (Ext1.getOperand(0).getValueType() != VecVT || | |||
13127 | VecVT.getScalarType() != ScalarType || | |||
13128 | VT.getScalarSizeInBits() < ScalarType.getScalarSizeInBits() * 2) | |||
13129 | return SDValue(); | |||
13130 | ||||
13131 | SDLoc DL(Mul); | |||
13132 | unsigned LegalLanes = 128 / (ShftAmt + 1); | |||
13133 | EVT LegalVecVT = MVT::getVectorVT(ScalarType, LegalLanes); | |||
13134 | // For types smaller than legal vectors, extend to a legal type and only use | |||
13135 | // the needed lanes. | |||
13136 | if (VecVT.getSizeInBits() < 128) { | |||
13137 | EVT ExtVecVT = | |||
13138 | MVT::getVectorVT(MVT::getIntegerVT(128 / VecVT.getVectorNumElements()), | |||
13139 | VecVT.getVectorNumElements()); | |||
13140 | SDValue Inp0 = | |||
13141 | DAG.getNode(ISD::ANY_EXTEND, DL, ExtVecVT, Ext0.getOperand(0)); | |||
13142 | SDValue Inp1 = | |||
13143 | DAG.getNode(ISD::ANY_EXTEND, DL, ExtVecVT, Ext1.getOperand(0)); | |||
13144 | Inp0 = DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, LegalVecVT, Inp0); | |||
13145 | Inp1 = DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, LegalVecVT, Inp1); | |||
13146 | SDValue VQDMULH = DAG.getNode(ARMISD::VQDMULH, DL, LegalVecVT, Inp0, Inp1); | |||
13147 | SDValue Trunc = DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, ExtVecVT, VQDMULH); | |||
13148 | Trunc = DAG.getNode(ISD::TRUNCATE, DL, VecVT, Trunc); | |||
13149 | return DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Trunc); | |||
13150 | } | |||
13151 | ||||
13152 | // For larger types, split into legal sized chunks. | |||
13153 | assert(VecVT.getSizeInBits() % 128 == 0 && "Expected a power2 type"); | |||
13154 | unsigned NumParts = VecVT.getSizeInBits() / 128; | |||
13155 | SmallVector<SDValue> Parts; | |||
13156 | for (unsigned I = 0; I < NumParts; ++I) { | |||
13157 | SDValue Inp0 = | |||
13158 | DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, LegalVecVT, Ext0.getOperand(0), | |||
13159 | DAG.getVectorIdxConstant(I * LegalLanes, DL)); | |||
13160 | SDValue Inp1 = | |||
13161 | DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, LegalVecVT, Ext1.getOperand(0), | |||
13162 | DAG.getVectorIdxConstant(I * LegalLanes, DL)); | |||
13163 | SDValue VQDMULH = DAG.getNode(ARMISD::VQDMULH, DL, LegalVecVT, Inp0, Inp1); | |||
13164 | Parts.push_back(VQDMULH); | |||
13165 | } | |||
13166 | return DAG.getNode(ISD::SIGN_EXTEND, DL, VT, | |||
13167 | DAG.getNode(ISD::CONCAT_VECTORS, DL, VecVT, Parts)); | |||
13168 | } | |||
13169 | ||||
13170 | static SDValue PerformVSELECTCombine(SDNode *N, | |||
13171 | TargetLowering::DAGCombinerInfo &DCI, | |||
13172 | const ARMSubtarget *Subtarget) { | |||
13173 | if (!Subtarget->hasMVEIntegerOps()) | |||
13174 | return SDValue(); | |||
13175 | ||||
13176 | if (SDValue V = PerformVQDMULHCombine(N, DCI.DAG)) | |||
13177 | return V; | |||
13178 | ||||
13179 | // Transforms vselect(not(cond), lhs, rhs) into vselect(cond, rhs, lhs). | |||
13180 | // | |||
13181 | // We need to re-implement this optimization here as the implementation in the | |||
13182 | // Target-Independent DAGCombiner does not handle the kind of constant we make | |||
13183 | // (it calls isConstOrConstSplat with AllowTruncation set to false - and for | |||
13184 | // good reason, allowing truncation there would break other targets). | |||
13185 | // | |||
13186 | // Currently, this is only done for MVE, as it's the only target that benefits | |||
13187 | // from this transformation (e.g. VPNOT+VPSEL becomes a single VPSEL). | |||
13188 | if (N->getOperand(0).getOpcode() != ISD::XOR) | |||
13189 | return SDValue(); | |||
13190 | SDValue XOR = N->getOperand(0); | |||
13191 | ||||
13192 | // Check if the XOR's RHS is either a 1, or a BUILD_VECTOR of 1s. | |||
13193 | // It is important to check with truncation allowed as the BUILD_VECTORs we | |||
13194 | // generate in those situations will truncate their operands. | |||
13195 | ConstantSDNode *Const = | |||
13196 | isConstOrConstSplat(XOR->getOperand(1), /*AllowUndefs*/ false, | |||
13197 | /*AllowTruncation*/ true); | |||
13198 | if (!Const || !Const->isOne()) | |||
13199 | return SDValue(); | |||
13200 | ||||
13201 | // Rewrite into vselect(cond, rhs, lhs). | |||
13202 | SDValue Cond = XOR->getOperand(0); | |||
13203 | SDValue LHS = N->getOperand(1); | |||
13204 | SDValue RHS = N->getOperand(2); | |||
13205 | EVT Type = N->getValueType(0); | |||
13206 | return DCI.DAG.getNode(ISD::VSELECT, SDLoc(N), Type, Cond, RHS, LHS); | |||
13207 | } | |||
13208 | ||||
13209 | // Convert vsetcc([0,1,2,..], splat(n), ult) -> vctp n | |||
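| // For example, for v4i1, setcc(build_vector(0,1,2,3), splat(2), setult) | |||
| // yields the lanes (1,1,0,0), exactly the predicate VCTP32 produces for an | |||
| // element count of 2. | |||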
13210 | static SDValue PerformVSetCCToVCTPCombine(SDNode *N, | |||
13211 | TargetLowering::DAGCombinerInfo &DCI, | |||
13212 | const ARMSubtarget *Subtarget) { | |||
13213 | SDValue Op0 = N->getOperand(0); | |||
13214 | SDValue Op1 = N->getOperand(1); | |||
13215 | ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get(); | |||
13216 | EVT VT = N->getValueType(0); | |||
13217 | ||||
13218 | if (!Subtarget->hasMVEIntegerOps() || | |||
13219 | !DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT)) | |||
13220 | return SDValue(); | |||
13221 | ||||
13222 | if (CC == ISD::SETUGE) { | |||
13223 | std::swap(Op0, Op1); | |||
13224 | CC = ISD::SETULT; | |||
13225 | } | |||
13226 | ||||
13227 | if (CC != ISD::SETULT || VT.getScalarSizeInBits() != 1 || | |||
13228 | Op0.getOpcode() != ISD::BUILD_VECTOR) | |||
13229 | return SDValue(); | |||
13230 | ||||
13231 | // Check first operand is BuildVector of 0,1,2,... | |||
13232 | for (unsigned I = 0; I < VT.getVectorNumElements(); I++) { | |||
13233 | if (!Op0.getOperand(I).isUndef() && | |||
13234 | !(isa<ConstantSDNode>(Op0.getOperand(I)) && | |||
13235 | Op0.getConstantOperandVal(I) == I)) | |||
13236 | return SDValue(); | |||
13237 | } | |||
13238 | ||||
13239 | // The second operand must be a splat of some scalar value, Op1S. | |||
13240 | SDValue Op1S = DCI.DAG.getSplatValue(Op1); | |||
13241 | if (!Op1S) | |||
13242 | return SDValue(); | |||
13243 | ||||
13244 | unsigned Opc; | |||
13245 | switch (VT.getVectorNumElements()) { | |||
13246 | case 2: | |||
13247 | Opc = Intrinsic::arm_mve_vctp64; | |||
13248 | break; | |||
13249 | case 4: | |||
13250 | Opc = Intrinsic::arm_mve_vctp32; | |||
13251 | break; | |||
13252 | case 8: | |||
13253 | Opc = Intrinsic::arm_mve_vctp16; | |||
13254 | break; | |||
13255 | case 16: | |||
13256 | Opc = Intrinsic::arm_mve_vctp8; | |||
13257 | break; | |||
13258 | default: | |||
13259 | return SDValue(); | |||
13260 | } | |||
13261 | ||||
13262 | SDLoc DL(N); | |||
13263 | return DCI.DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, | |||
13264 | DCI.DAG.getConstant(Opc, DL, MVT::i32), | |||
13265 | DCI.DAG.getZExtOrTrunc(Op1S, DL, MVT::i32)); | |||
13266 | } | |||
13267 | ||||
13268 | static SDValue PerformABSCombine(SDNode *N, | |||
13269 | TargetLowering::DAGCombinerInfo &DCI, | |||
13270 | const ARMSubtarget *Subtarget) { | |||
13271 | SelectionDAG &DAG = DCI.DAG; | |||
13272 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | |||
13273 | ||||
13274 | if (TLI.isOperationLegal(N->getOpcode(), N->getValueType(0))) | |||
13275 | return SDValue(); | |||
13276 | ||||
13277 | return TLI.expandABS(N, DAG); | |||
13278 | } | |||
13279 | ||||
13280 | /// PerformADDECombine - Target-specific dag combine transform from | |||
13281 | /// ARMISD::ADDC, ARMISD::ADDE, and ISD::MUL_LOHI to MLAL or | |||
13282 | /// ARMISD::ADDC, ARMISD::ADDE and ARMISD::UMLAL to ARMISD::UMAAL | |||
13283 | static SDValue PerformADDECombine(SDNode *N, | |||
13284 | TargetLowering::DAGCombinerInfo &DCI, | |||
13285 | const ARMSubtarget *Subtarget) { | |||
13286 | // Only ARM and Thumb2 support UMLAL/SMLAL. | |||
13287 | if (Subtarget->isThumb1Only()) | |||
13288 | return PerformAddeSubeCombine(N, DCI, Subtarget); | |||
13289 | ||||
13290 | // Only perform the checks after legalize when the pattern is available. | |||
13291 | if (DCI.isBeforeLegalize()) return SDValue(); | |||
13292 | ||||
13293 | return AddCombineTo64bitUMAAL(N, DCI, Subtarget); | |||
13294 | } | |||
13295 | ||||
13296 | /// PerformADDCombineWithOperands - Try DAG combinations for an ADD with | |||
13297 | /// operands N0 and N1. This is a helper for PerformADDCombine that is | |||
13298 | /// called with the default operands, and if that fails, with commuted | |||
13299 | /// operands. | |||
13300 | static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, | |||
13301 | TargetLowering::DAGCombinerInfo &DCI, | |||
13302 | const ARMSubtarget *Subtarget){ | |||
13303 | // Attempt to create vpadd for this add. | |||
13304 | if (SDValue Result = AddCombineToVPADD(N, N0, N1, DCI, Subtarget)) | |||
13305 | return Result; | |||
13306 | ||||
13307 | // Attempt to create vpaddl for this add. | |||
13308 | if (SDValue Result = AddCombineVUZPToVPADDL(N, N0, N1, DCI, Subtarget)) | |||
13309 | return Result; | |||
13310 | if (SDValue Result = AddCombineBUILD_VECTORToVPADDL(N, N0, N1, DCI, | |||
13311 | Subtarget)) | |||
13312 | return Result; | |||
13313 | ||||
13314 | // fold (add (select cc, 0, c), x) -> (select cc, x, (add x, c)) | |||
13315 | if (N0.getNode()->hasOneUse()) | |||
13316 | if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI)) | |||
13317 | return Result; | |||
13318 | return SDValue(); | |||
13319 | } | |||
13320 | ||||
13321 | static SDValue TryDistributionADDVecReduce(SDNode *N, SelectionDAG &DAG) { | |||
13322 | EVT VT = N->getValueType(0); | |||
13323 | SDValue N0 = N->getOperand(0); | |||
13324 | SDValue N1 = N->getOperand(1); | |||
13325 | SDLoc dl(N); | |||
13326 | ||||
13327 | auto IsVecReduce = [](SDValue Op) { | |||
13328 | switch (Op.getOpcode()) { | |||
13329 | case ISD::VECREDUCE_ADD: | |||
13330 | case ARMISD::VADDVs: | |||
13331 | case ARMISD::VADDVu: | |||
13332 | case ARMISD::VMLAVs: | |||
13333 | case ARMISD::VMLAVu: | |||
13334 | return true; | |||
13335 | } | |||
13336 | return false; | |||
13337 | }; | |||
13338 | ||||
13339 | auto DistributeAddAddVecReduce = [&](SDValue N0, SDValue N1) { | |||
13340 | // Distribute add(X, add(vecreduce(Y), vecreduce(Z))) -> | |||
13341 | // add(add(X, vecreduce(Y)), vecreduce(Z)) | |||
13342 | // to make better use of vaddva style instructions. | |||
13343 | if (VT == MVT::i32 && N1.getOpcode() == ISD::ADD && !IsVecReduce(N0) && | |||
13344 | IsVecReduce(N1.getOperand(0)) && IsVecReduce(N1.getOperand(1)) && | |||
13345 | !isa<ConstantSDNode>(N0)) { | |||
13346 | SDValue Add0 = DAG.getNode(ISD::ADD, dl, VT, N0, N1.getOperand(0)); | |||
13347 | return DAG.getNode(ISD::ADD, dl, VT, Add0, N1.getOperand(1)); | |||
13348 | } | |||
13349 | // And turn add(add(A, reduce(B)), add(C, reduce(D))) -> | |||
13350 | // add(add(add(A, C), reduce(B)), reduce(D)) | |||
13351 | if (VT == MVT::i32 && N0.getOpcode() == ISD::ADD && | |||
13352 | N1.getOpcode() == ISD::ADD) { | |||
13353 | unsigned N0RedOp = 0; | |||
13354 | if (!IsVecReduce(N0.getOperand(N0RedOp))) { | |||
13355 | N0RedOp = 1; | |||
13356 | if (!IsVecReduce(N0.getOperand(N0RedOp))) | |||
13357 | return SDValue(); | |||
13358 | } | |||
13359 | ||||
13360 | unsigned N1RedOp = 0; | |||
13361 | if (!IsVecReduce(N1.getOperand(N1RedOp))) | |||
13362 | N1RedOp = 1; | |||
13363 | if (!IsVecReduce(N1.getOperand(N1RedOp))) | |||
13364 | return SDValue(); | |||
13365 | ||||
13366 | SDValue Add0 = DAG.getNode(ISD::ADD, dl, VT, N0.getOperand(1 - N0RedOp), | |||
13367 | N1.getOperand(1 - N1RedOp)); | |||
13368 | SDValue Add1 = | |||
13369 | DAG.getNode(ISD::ADD, dl, VT, Add0, N0.getOperand(N0RedOp)); | |||
13370 | return DAG.getNode(ISD::ADD, dl, VT, Add1, N1.getOperand(N1RedOp)); | |||
13371 | } | |||
13372 | return SDValue(); | |||
13373 | }; | |||
13374 | if (SDValue R = DistributeAddAddVecReduce(N0, N1)) | |||
13375 | return R; | |||
13376 | if (SDValue R = DistributeAddAddVecReduce(N1, N0)) | |||
13377 | return R; | |||
13378 | ||||
13379 | // Distribute add(vecreduce(load(Y)), vecreduce(load(Z))) | |||
13380 | // Or add(add(X, vecreduce(load(Y))), vecreduce(load(Z))) | |||
13381 | // by ascending load offsets. This can help cores prefetch if the order of | |||
13382 | // loads is more predictable. | |||
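| // For example, add(add(X, vecreduce(ld [p+16])), vecreduce(ld [p])) becomes | |||
| // add(add(X, vecreduce(ld [p])), vecreduce(ld [p+16])), so the loads end up | |||
| // in ascending address order. | |||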
13383 | auto DistributeVecReduceLoad = [&](SDValue N0, SDValue N1, bool IsForward) { | |||
13384 | // Check if two reductions are known to load data where one is before/after | |||
13385 | // the other. Return negative if N0 loads data before N1, positive if N1 | |||
13386 | // loads before N0, and 0 if nothing is known. | |||
13387 | auto IsKnownOrderedLoad = [&](SDValue N0, SDValue N1) { | |||
13388 | // Look through to the first operand of a MUL, for the VMLA case. | |||
13389 | // Currently only looks at the first operand, in the hope they are equal. | |||
13390 | if (N0.getOpcode() == ISD::MUL) | |||
13391 | N0 = N0.getOperand(0); | |||
13392 | if (N1.getOpcode() == ISD::MUL) | |||
13393 | N1 = N1.getOperand(0); | |||
13394 | ||||
13395 | // Return true if the two operands are loads to the same object and the | |||
13396 | // offset of the first is known to be less than the offset of the second. | |||
13397 | LoadSDNode *Load0 = dyn_cast<LoadSDNode>(N0); | |||
13398 | LoadSDNode *Load1 = dyn_cast<LoadSDNode>(N1); | |||
13399 | if (!Load0 || !Load1 || Load0->getChain() != Load1->getChain() || | |||
13400 | !Load0->isSimple() || !Load1->isSimple() || Load0->isIndexed() || | |||
13401 | Load1->isIndexed()) | |||
13402 | return 0; | |||
13403 | ||||
13404 | auto BaseLocDecomp0 = BaseIndexOffset::match(Load0, DAG); | |||
13405 | auto BaseLocDecomp1 = BaseIndexOffset::match(Load1, DAG); | |||
13406 | ||||
13407 | if (!BaseLocDecomp0.getBase() || | |||
13408 | BaseLocDecomp0.getBase() != BaseLocDecomp1.getBase() || | |||
13409 | !BaseLocDecomp0.hasValidOffset() || !BaseLocDecomp1.hasValidOffset()) | |||
13410 | return 0; | |||
13411 | if (BaseLocDecomp0.getOffset() < BaseLocDecomp1.getOffset()) | |||
13412 | return -1; | |||
13413 | if (BaseLocDecomp0.getOffset() > BaseLocDecomp1.getOffset()) | |||
13414 | return 1; | |||
13415 | return 0; | |||
13416 | }; | |||
13417 | ||||
13418 | SDValue X; | |||
13419 | if (N0.getOpcode() == ISD::ADD) { | |||
13420 | if (IsVecReduce(N0.getOperand(0)) && IsVecReduce(N0.getOperand(1))) { | |||
13421 | int IsBefore = IsKnownOrderedLoad(N0.getOperand(0).getOperand(0), | |||
13422 | N0.getOperand(1).getOperand(0)); | |||
13423 | if (IsBefore < 0) { | |||
13424 | X = N0.getOperand(0); | |||
13425 | N0 = N0.getOperand(1); | |||
13426 | } else if (IsBefore > 0) { | |||
13427 | X = N0.getOperand(1); | |||
13428 | N0 = N0.getOperand(0); | |||
13429 | } else | |||
13430 | return SDValue(); | |||
13431 | } else if (IsVecReduce(N0.getOperand(0))) { | |||
13432 | X = N0.getOperand(1); | |||
13433 | N0 = N0.getOperand(0); | |||
13434 | } else if (IsVecReduce(N0.getOperand(1))) { | |||
13435 | X = N0.getOperand(0); | |||
13436 | N0 = N0.getOperand(1); | |||
13437 | } else | |||
13438 | return SDValue(); | |||
13439 | } else if (IsForward && IsVecReduce(N0) && IsVecReduce(N1) && | |||
13440 | IsKnownOrderedLoad(N0.getOperand(0), N1.getOperand(0)) < 0) { | |||
13441 | // Note this is backwards from what you might expect. We create | |||
13442 | // add(reduce(load + 16), reduce(load + 0)) so that the | |||
13443 | // add(reduce(load+16), X) is combined into VADDVA(X, load+16), leaving | |||
13444 | // the X as VADDV(load + 0). | |||
13445 | return DAG.getNode(ISD::ADD, dl, VT, N1, N0); | |||
13446 | } else | |||
13447 | return SDValue(); | |||
13448 | ||||
13449 | if (!IsVecReduce(N0) || !IsVecReduce(N1)) | |||
13450 | return SDValue(); | |||
13451 | ||||
13452 | if (IsKnownOrderedLoad(N1.getOperand(0), N0.getOperand(0)) >= 0) | |||
13453 | return SDValue(); | |||
13454 | ||||
13455 | // Switch from add(add(X, N0), N1) to add(add(X, N1), N0) | |||
13456 | SDValue Add0 = DAG.getNode(ISD::ADD, dl, VT, X, N1); | |||
13457 | return DAG.getNode(ISD::ADD, dl, VT, Add0, N0); | |||
13458 | }; | |||
13459 | if (SDValue R = DistributeVecReduceLoad(N0, N1, true)) | |||
13460 | return R; | |||
13461 | if (SDValue R = DistributeVecReduceLoad(N1, N0, false)) | |||
13462 | return R; | |||
13463 | return SDValue(); | |||
13464 | } | |||
13465 | ||||
13466 | static SDValue PerformADDVecReduce(SDNode *N, SelectionDAG &DAG, | |||
13467 | const ARMSubtarget *Subtarget) { | |||
13468 | if (!Subtarget->hasMVEIntegerOps()) | |||
13469 | return SDValue(); | |||
13470 | ||||
13471 | if (SDValue R = TryDistributionADDVecReduce(N, DAG)) | |||
13472 | return R; | |||
13473 | ||||
13474 | EVT VT = N->getValueType(0); | |||
13475 | SDValue N0 = N->getOperand(0); | |||
13476 | SDValue N1 = N->getOperand(1); | |||
13477 | SDLoc dl(N); | |||
13478 | ||||
13479 | if (VT != MVT::i64) | |||
13480 | return SDValue(); | |||
13481 | ||||
13482 | // We are looking for an i64 add of a VADDLVx. Due to these being i64's, this | |||
13483 | // will look like: | |||
13484 | // t1: i32,i32 = ARMISD::VADDLVs x | |||
13485 | // t2: i64 = build_pair t1, t1:1 | |||
13486 | // t3: i64 = add t2, y | |||
13487 | // Otherwise we try to push the add up above VADDLVAx, to potentially allow | |||
13488 | // the add to be simplified separately. | |||
13489 | // We also need to check for sext / zext and commutative adds. | |||
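| // For example, add(y, build_pair(VADDLV(v))) can be rebuilt as | |||
| // VADDLVA(lo(y), hi(y), v), folding the scalar i64 add into the | |||
| // accumulating form of the reduction. | |||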
13490 | auto MakeVecReduce = [&](unsigned Opcode, unsigned OpcodeA, SDValue NA, | |||
13491 | SDValue NB) { | |||
13492 | if (NB->getOpcode() != ISD::BUILD_PAIR) | |||
13493 | return SDValue(); | |||
13494 | SDValue VecRed = NB->getOperand(0); | |||
13495 | if ((VecRed->getOpcode() != Opcode && VecRed->getOpcode() != OpcodeA) || | |||
13496 | VecRed.getResNo() != 0 || | |||
13497 | NB->getOperand(1) != SDValue(VecRed.getNode(), 1)) | |||
13498 | return SDValue(); | |||
13499 | ||||
13500 | if (VecRed->getOpcode() == OpcodeA) { | |||
13501 | // add(NA, VADDLVA(Inp), Y) -> VADDLVA(add(NA, Inp), Y) | |||
13502 | SDValue Inp = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, | |||
13503 | VecRed.getOperand(0), VecRed.getOperand(1)); | |||
13504 | NA = DAG.getNode(ISD::ADD, dl, MVT::i64, Inp, NA); | |||
13505 | } | |||
13506 | ||||
13507 | SmallVector<SDValue, 4> Ops; | |||
13508 | Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, NA, | |||
13509 | DAG.getConstant(0, dl, MVT::i32))); | |||
13510 | Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, NA, | |||
13511 | DAG.getConstant(1, dl, MVT::i32))); | |||
13512 | unsigned S = VecRed->getOpcode() == OpcodeA ? 2 : 0; | |||
13513 | for (unsigned I = S, E = VecRed.getNumOperands(); I < E; I++) | |||
13514 | Ops.push_back(VecRed->getOperand(I)); | |||
13515 | SDValue Red = | |||
13516 | DAG.getNode(OpcodeA, dl, DAG.getVTList({MVT::i32, MVT::i32}), Ops); | |||
13517 | return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Red, | |||
13518 | SDValue(Red.getNode(), 1)); | |||
13519 | }; | |||
13520 | ||||
13521 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVs, ARMISD::VADDLVAs, N0, N1)) | |||
13522 | return M; | |||
13523 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVu, ARMISD::VADDLVAu, N0, N1)) | |||
13524 | return M; | |||
13525 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVs, ARMISD::VADDLVAs, N1, N0)) | |||
13526 | return M; | |||
13527 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVu, ARMISD::VADDLVAu, N1, N0)) | |||
13528 | return M; | |||
13529 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVps, ARMISD::VADDLVAps, N0, N1)) | |||
13530 | return M; | |||
13531 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVpu, ARMISD::VADDLVApu, N0, N1)) | |||
13532 | return M; | |||
13533 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVps, ARMISD::VADDLVAps, N1, N0)) | |||
13534 | return M; | |||
13535 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVpu, ARMISD::VADDLVApu, N1, N0)) | |||
13536 | return M; | |||
13537 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVs, ARMISD::VMLALVAs, N0, N1)) | |||
13538 | return M; | |||
13539 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVu, ARMISD::VMLALVAu, N0, N1)) | |||
13540 | return M; | |||
13541 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVs, ARMISD::VMLALVAs, N1, N0)) | |||
13542 | return M; | |||
13543 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVu, ARMISD::VMLALVAu, N1, N0)) | |||
13544 | return M; | |||
13545 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVps, ARMISD::VMLALVAps, N0, N1)) | |||
13546 | return M; | |||
13547 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVpu, ARMISD::VMLALVApu, N0, N1)) | |||
13548 | return M; | |||
13549 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVps, ARMISD::VMLALVAps, N1, N0)) | |||
13550 | return M; | |||
13551 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVpu, ARMISD::VMLALVApu, N1, N0)) | |||
13552 | return M; | |||
13553 | return SDValue(); | |||
13554 | } | |||
13555 | ||||
13556 | bool | |||
13557 | ARMTargetLowering::isDesirableToCommuteWithShift(const SDNode *N, | |||
13558 | CombineLevel Level) const { | |||
13559 | if (Level == BeforeLegalizeTypes) | |||
13560 | return true; | |||
13561 | ||||
13562 | if (N->getOpcode() != ISD::SHL) | |||
13563 | return true; | |||
13564 | ||||
13565 | if (Subtarget->isThumb1Only()) { | |||
13566 | // Avoid making expensive immediates by commuting shifts. (This logic | |||
13567 | // only applies to Thumb1 because ARM and Thumb2 immediates can be shifted | |||
13568 | // for free.) | |||
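| // For example, commuting (add x, 200) << 4 would require materializing | |||
| // 200 << 4 = 3200, which Thumb1 cannot encode as an immediate, whereas 200 | |||
| // fits in 8 bits, so we refuse the commute below in that case. | |||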
13569 | if (N->getOpcode() != ISD::SHL) | |||
13570 | return true; | |||
13571 | SDValue N1 = N->getOperand(0); | |||
13572 | if (N1->getOpcode() != ISD::ADD && N1->getOpcode() != ISD::AND && | |||
13573 | N1->getOpcode() != ISD::OR && N1->getOpcode() != ISD::XOR) | |||
13574 | return true; | |||
13575 | if (auto *Const = dyn_cast<ConstantSDNode>(N1->getOperand(1))) { | |||
13576 | if (Const->getAPIntValue().ult(256)) | |||
13577 | return false; | |||
13578 | if (N1->getOpcode() == ISD::ADD && Const->getAPIntValue().slt(0) && | |||
13579 | Const->getAPIntValue().sgt(-256)) | |||
13580 | return false; | |||
13581 | } | |||
13582 | return true; | |||
13583 | } | |||
13584 | ||||
13585 | // Turn off commute-with-shift transform after legalization, so it doesn't | |||
13586 | // conflict with PerformSHLSimplify. (We could try to detect when | |||
13587 | // PerformSHLSimplify would trigger more precisely, but it isn't | |||
13588 | // really necessary.) | |||
13589 | return false; | |||
13590 | } | |||
13591 | ||||
13592 | bool ARMTargetLowering::shouldFoldConstantShiftPairToMask( | |||
13593 | const SDNode *N, CombineLevel Level) const { | |||
13594 | if (!Subtarget->isThumb1Only()) | |||
13595 | return true; | |||
13596 | ||||
13597 | if (Level == BeforeLegalizeTypes) | |||
13598 | return true; | |||
13599 | ||||
13600 | return false; | |||
13601 | } | |||
13602 | ||||
13603 | bool ARMTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const { | |||
13604 | if (!Subtarget->hasNEON()) { | |||
13605 | if (Subtarget->isThumb1Only()) | |||
13606 | return VT.getScalarSizeInBits() <= 32; | |||
13607 | return true; | |||
13608 | } | |||
13609 | return VT.isScalarInteger(); | |||
13610 | } | |||
13611 | ||||
13612 | bool ARMTargetLowering::shouldConvertFpToSat(unsigned Op, EVT FPVT, | |||
13613 | EVT VT) const { | |||
13614 | if (!isOperationLegalOrCustom(Op, VT) || !FPVT.isSimple()) | |||
13615 | return false; | |||
13616 | ||||
13617 | switch (FPVT.getSimpleVT().SimpleTy) { | |||
13618 | case MVT::f16: | |||
13619 | return Subtarget->hasVFP2Base(); | |||
13620 | case MVT::f32: | |||
13621 | return Subtarget->hasVFP2Base(); | |||
13622 | case MVT::f64: | |||
13623 | return Subtarget->hasFP64(); | |||
13624 | case MVT::v4f32: | |||
13625 | case MVT::v8f16: | |||
13626 | return Subtarget->hasMVEFloatOps(); | |||
13627 | default: | |||
13628 | return false; | |||
13629 | } | |||
13630 | } | |||
13631 | ||||
13632 | static SDValue PerformSHLSimplify(SDNode *N, | |||
13633 | TargetLowering::DAGCombinerInfo &DCI, | |||
13634 | const ARMSubtarget *ST) { | |||
13635 | // Allow the generic combiner to identify potential bswaps. | |||
13636 | if (DCI.isBeforeLegalize()) | |||
13637 | return SDValue(); | |||
13638 | ||||
13639 | // DAG combiner will fold: | |||
13640 | // (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2) | |||
13641 | // (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2) | |||
13642 | // Other code patterns that can also be modified have the following form: | |||
13643 | // b + ((a << 1) | 510) | |||
13644 | // b + ((a << 1) & 510) | |||
13645 | // b + ((a << 1) ^ 510) | |||
13646 | // b + ((a << 1) + 510) | |||
13647 | ||||
13648 | // Many instructions can perform the shift for free, but that requires both | |||
13649 | // operands to be registers. If c1 << c2 is too large, a mov immediate | |||
13650 | // instruction will be needed. So, unfold back to the original pattern if: | |||
13651 | // - c1 and c2 are small enough that they don't require mov imms. | |||
13652 | // - the user(s) of the node can perform a shl. | |||
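| // For example, (or (shl a, 1), 510) is rebuilt as (shl (or a, 255), 1): | |||
| // 255 (== 510 >> 1) is a cheap immediate, and a user such as an add can | |||
| // then fold the outer shl in as a free shifted operand. | |||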
13653 | ||||
13654 | // No shifted operands for 16-bit instructions. | |||
13655 | if (ST->isThumb() && ST->isThumb1Only()) | |||
13656 | return SDValue(); | |||
13657 | ||||
13658 | // Check that all the users could perform the shl themselves. | |||
13659 | for (auto U : N->uses()) { | |||
13660 | switch(U->getOpcode()) { | |||
13661 | default: | |||
13662 | return SDValue(); | |||
13663 | case ISD::SUB: | |||
13664 | case ISD::ADD: | |||
13665 | case ISD::AND: | |||
13666 | case ISD::OR: | |||
13667 | case ISD::XOR: | |||
13668 | case ISD::SETCC: | |||
13669 | case ARMISD::CMP: | |||
13670 | // Check that the user isn't already using a constant because there | |||
13671 | // aren't any instructions that support an immediate operand and a | |||
13672 | // shifted operand. | |||
13673 | if (isa<ConstantSDNode>(U->getOperand(0)) || | |||
13674 | isa<ConstantSDNode>(U->getOperand(1))) | |||
13675 | return SDValue(); | |||
13676 | ||||
13677 | // Check that it's not already using a shift. | |||
13678 | if (U->getOperand(0).getOpcode() == ISD::SHL || | |||
13679 | U->getOperand(1).getOpcode() == ISD::SHL) | |||
13680 | return SDValue(); | |||
13681 | break; | |||
13682 | } | |||
13683 | } | |||
13684 | ||||
13685 | if (N->getOpcode() != ISD::ADD && N->getOpcode() != ISD::OR && | |||
13686 | N->getOpcode() != ISD::XOR && N->getOpcode() != ISD::AND) | |||
13687 | return SDValue(); | |||
13688 | ||||
13689 | if (N->getOperand(0).getOpcode() != ISD::SHL) | |||
13690 | return SDValue(); | |||
13691 | ||||
13692 | SDValue SHL = N->getOperand(0); | |||
13693 | ||||
13694 | auto *C1ShlC2 = dyn_cast<ConstantSDNode>(N->getOperand(1)); | |||
13695 | auto *C2 = dyn_cast<ConstantSDNode>(SHL.getOperand(1)); | |||
13696 | if (!C1ShlC2 || !C2) | |||
13697 | return SDValue(); | |||
13698 | ||||
13699 | APInt C2Int = C2->getAPIntValue(); | |||
13700 | APInt C1Int = C1ShlC2->getAPIntValue(); | |||
13701 | ||||
13702 | // Check that performing a lshr will not lose any information. | |||
13703 | APInt Mask = APInt::getHighBitsSet(C2Int.getBitWidth(), | |||
13704 | C2Int.getBitWidth() - C2->getZExtValue()); | |||
13705 | if ((C1Int & Mask) != C1Int) | |||
13706 | return SDValue(); | |||
13707 | ||||
13708 | // Shift the first constant. | |||
13709 | C1Int.lshrInPlace(C2Int); | |||
13710 | ||||
13711 | // The immediates are encoded as an 8-bit value that can be rotated. | |||
13712 | auto LargeImm = [](const APInt &Imm) { | |||
13713 | unsigned Zeros = Imm.countLeadingZeros() + Imm.countTrailingZeros(); | |||
13714 | return Imm.getBitWidth() - Zeros > 8; | |||
13715 | }; | |||
13716 | ||||
13717 | if (LargeImm(C1Int) || LargeImm(C2Int)) | |||
13718 | return SDValue(); | |||
13719 | ||||
13720 | SelectionDAG &DAG = DCI.DAG; | |||
13721 | SDLoc dl(N); | |||
13722 | SDValue X = SHL.getOperand(0); | |||
13723 | SDValue BinOp = DAG.getNode(N->getOpcode(), dl, MVT::i32, X, | |||
13724 | DAG.getConstant(C1Int, dl, MVT::i32)); | |||
13725 | // Shift left to compensate for the lshr of C1Int. | |||
13726 | SDValue Res = DAG.getNode(ISD::SHL, dl, MVT::i32, BinOp, SHL.getOperand(1)); | |||
13727 | ||||
13728 | LLVM_DEBUG(dbgs() << "Simplify shl use:\n"; SHL.getOperand(0).dump(); | |||
13729 | SHL.dump(); N->dump()); | |||
13730 | LLVM_DEBUG(dbgs() << "Into:\n"; X.dump(); BinOp.dump(); Res.dump()); | |||
13731 | return Res; | |||
13732 | } | |||
13733 | ||||
13734 | ||||
13735 | /// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD. | |||
13736 | /// | |||
13737 | static SDValue PerformADDCombine(SDNode *N, | |||
13738 | TargetLowering::DAGCombinerInfo &DCI, | |||
13739 | const ARMSubtarget *Subtarget) { | |||
13740 | SDValue N0 = N->getOperand(0); | |||
13741 | SDValue N1 = N->getOperand(1); | |||
13742 | ||||
13743 | // Only works one way, because it needs an immediate operand. | |||
13744 | if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget)) | |||
13745 | return Result; | |||
13746 | ||||
13747 | if (SDValue Result = PerformADDVecReduce(N, DCI.DAG, Subtarget)) | |||
13748 | return Result; | |||
13749 | ||||
13750 | // First try with the default operand order. | |||
13751 | if (SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget)) | |||
13752 | return Result; | |||
13753 | ||||
13754 | // If that didn't work, try again with the operands commuted. | |||
13755 | return PerformADDCombineWithOperands(N, N1, N0, DCI, Subtarget); | |||
13756 | } | |||
13757 | ||||
13758 | // Combine (sub 0, (csinc X, Y, CC)) -> (csinv -X, Y, CC) | |||
13759 | // providing -X is as cheap as X (currently, just a constant). | |||
13760 | static SDValue PerformSubCSINCCombine(SDNode *N, SelectionDAG &DAG) { | |||
13761 | if (N->getValueType(0) != MVT::i32 || !isNullConstant(N->getOperand(0))) | |||
13762 | return SDValue(); | |||
13763 | SDValue CSINC = N->getOperand(1); | |||
13764 | if (CSINC.getOpcode() != ARMISD::CSINC || !CSINC.hasOneUse()) | |||
13765 | return SDValue(); | |||
13766 | ||||
13767 | ConstantSDNode *X = dyn_cast<ConstantSDNode>(CSINC.getOperand(0)); | |||
13768 | if (!X) | |||
13769 | return SDValue(); | |||
13770 | ||||
13771 | return DAG.getNode(ARMISD::CSINV, SDLoc(N), MVT::i32, | |||
13772 | DAG.getNode(ISD::SUB, SDLoc(N), MVT::i32, N->getOperand(0), | |||
13773 | CSINC.getOperand(0)), | |||
13774 | CSINC.getOperand(1), CSINC.getOperand(2), | |||
13775 | CSINC.getOperand(3)); | |||
13776 | } | |||
13777 | ||||
13778 | /// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB. | |||
13779 | /// | |||
13780 | static SDValue PerformSUBCombine(SDNode *N, | |||
13781 | TargetLowering::DAGCombinerInfo &DCI, | |||
13782 | const ARMSubtarget *Subtarget) { | |||
13783 | SDValue N0 = N->getOperand(0); | |||
13784 | SDValue N1 = N->getOperand(1); | |||
13785 | ||||
13786 | // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub x, c)) | |||
13787 | if (N1.getNode()->hasOneUse()) | |||
13788 | if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI)) | |||
13789 | return Result; | |||
13790 | ||||
13791 | if (SDValue R = PerformSubCSINCCombine(N, DCI.DAG)) | |||
13792 | return R; | |||
13793 | ||||
13794 | if (!Subtarget->hasMVEIntegerOps() || !N->getValueType(0).isVector()) | |||
13795 | return SDValue(); | |||
13796 | ||||
13797 | // Fold (sub (ARMvmovImm 0), (ARMvdup x)) -> (ARMvdup (sub 0, x)) | |||
13798 | // so that we can readily pattern match more mve instructions which can use | |||
13799 | // a scalar operand. | |||
13800 | SDValue VDup = N->getOperand(1); | |||
13801 | if (VDup->getOpcode() != ARMISD::VDUP) | |||
13802 | return SDValue(); | |||
13803 | ||||
13804 | SDValue VMov = N->getOperand(0); | |||
13805 | if (VMov->getOpcode() == ISD::BITCAST) | |||
13806 | VMov = VMov->getOperand(0); | |||
13807 | ||||
13808 | if (VMov->getOpcode() != ARMISD::VMOVIMM || !isZeroVector(VMov)) | |||
13809 | return SDValue(); | |||
13810 | ||||
13811 | SDLoc dl(N); | |||
13812 | SDValue Negate = DCI.DAG.getNode(ISD::SUB, dl, MVT::i32, | |||
13813 | DCI.DAG.getConstant(0, dl, MVT::i32), | |||
13814 | VDup->getOperand(0)); | |||
13815 | return DCI.DAG.getNode(ARMISD::VDUP, dl, N->getValueType(0), Negate); | |||
13816 | } | |||
13817 | ||||
13818 | /// PerformVMULCombine | |||
13819 | /// Distribute (A + B) * C to (A * C) + (B * C) to take advantage of the | |||
13820 | /// special multiplier accumulator forwarding. | |||
13821 | /// vmul d3, d0, d2 | |||
13822 | /// vmla d3, d1, d2 | |||
13823 | /// is faster than | |||
13824 | /// vadd d3, d0, d1 | |||
13825 | /// vmul d3, d3, d2 | |||
13826 | // However, for (A + B) * (A + B), | |||
13827 | // vadd d2, d0, d1 | |||
13828 | // vmul d3, d0, d2 | |||
13829 | // vmla d3, d1, d2 | |||
13830 | // is slower than | |||
13831 | // vadd d2, d0, d1 | |||
13832 | // vmul d3, d2, d2 | |||
13833 | static SDValue PerformVMULCombine(SDNode *N, | |||
13834 | TargetLowering::DAGCombinerInfo &DCI, | |||
13835 | const ARMSubtarget *Subtarget) { | |||
13836 | if (!Subtarget->hasVMLxForwarding()) | |||
13837 | return SDValue(); | |||
13838 | ||||
13839 | SelectionDAG &DAG = DCI.DAG; | |||
13840 | SDValue N0 = N->getOperand(0); | |||
13841 | SDValue N1 = N->getOperand(1); | |||
13842 | unsigned Opcode = N0.getOpcode(); | |||
13843 | if (Opcode != ISD::ADD && Opcode != ISD::SUB && | |||
13844 | Opcode != ISD::FADD && Opcode != ISD::FSUB) { | |||
13845 | Opcode = N1.getOpcode(); | |||
13846 | if (Opcode != ISD::ADD && Opcode != ISD::SUB && | |||
13847 | Opcode != ISD::FADD && Opcode != ISD::FSUB) | |||
13848 | return SDValue(); | |||
13849 | std::swap(N0, N1); | |||
13850 | } | |||
13851 | ||||
13852 | if (N0 == N1) | |||
13853 | return SDValue(); | |||
13854 | ||||
13855 | EVT VT = N->getValueType(0); | |||
13856 | SDLoc DL(N); | |||
13857 | SDValue N00 = N0->getOperand(0); | |||
13858 | SDValue N01 = N0->getOperand(1); | |||
13859 | return DAG.getNode(Opcode, DL, VT, | |||
13860 | DAG.getNode(ISD::MUL, DL, VT, N00, N1), | |||
13861 | DAG.getNode(ISD::MUL, DL, VT, N01, N1)); | |||
13862 | } | |||
13863 | ||||
13864 | static SDValue PerformMVEVMULLCombine(SDNode *N, SelectionDAG &DAG, | |||
13865 | const ARMSubtarget *Subtarget) { | |||
13866 | EVT VT = N->getValueType(0); | |||
13867 | if (VT != MVT::v2i64) | |||
13868 | return SDValue(); | |||
13869 | ||||
13870 | SDValue N0 = N->getOperand(0); | |||
13871 | SDValue N1 = N->getOperand(1); | |||
13872 | ||||
13873 | auto IsSignExt = [&](SDValue Op) { | |||
13874 | if (Op->getOpcode() != ISD::SIGN_EXTEND_INREG) | |||
13875 | return SDValue(); | |||
13876 | EVT VT = cast<VTSDNode>(Op->getOperand(1))->getVT(); | |||
13877 | if (VT.getScalarSizeInBits() == 32) | |||
13878 | return Op->getOperand(0); | |||
13879 | return SDValue(); | |||
13880 | }; | |||
13881 | auto IsZeroExt = [&](SDValue Op) { | |||
13882 | // Zero extends are a little more awkward. At the point we are matching | |||
13883 | // this, we are looking for an AND with a (-1, 0, -1, 0) buildvector mask. | |||
13884 | // That might be before or after a bitcast depending on how the and is | |||
13885 | // placed. Because this has to look through bitcasts, it is currently only | |||
13886 | // supported on LE. | |||
13887 | if (!Subtarget->isLittle()) | |||
13888 | return SDValue(); | |||
13889 | ||||
13890 | SDValue And = Op; | |||
13891 | if (And->getOpcode() == ISD::BITCAST) | |||
13892 | And = And->getOperand(0); | |||
13893 | if (And->getOpcode() != ISD::AND) | |||
13894 | return SDValue(); | |||
13895 | SDValue Mask = And->getOperand(1); | |||
13896 | if (Mask->getOpcode() == ISD::BITCAST) | |||
13897 | Mask = Mask->getOperand(0); | |||
13898 | ||||
13899 | if (Mask->getOpcode() != ISD::BUILD_VECTOR || | |||
13900 | Mask.getValueType() != MVT::v4i32) | |||
13901 | return SDValue(); | |||
13902 | if (isAllOnesConstant(Mask->getOperand(0)) && | |||
13903 | isNullConstant(Mask->getOperand(1)) && | |||
13904 | isAllOnesConstant(Mask->getOperand(2)) && | |||
13905 | isNullConstant(Mask->getOperand(3))) | |||
13906 | return And->getOperand(0); | |||
13907 | return SDValue(); | |||
13908 | }; | |||
13909 | ||||
13910 | SDLoc dl(N); | |||
13911 | if (SDValue Op0 = IsSignExt(N0)) { | |||
13912 | if (SDValue Op1 = IsSignExt(N1)) { | |||
13913 | SDValue New0a = DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, MVT::v4i32, Op0); | |||
13914 | SDValue New1a = DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, MVT::v4i32, Op1); | |||
13915 | return DAG.getNode(ARMISD::VMULLs, dl, VT, New0a, New1a); | |||
13916 | } | |||
13917 | } | |||
13918 | if (SDValue Op0 = IsZeroExt(N0)) { | |||
13919 | if (SDValue Op1 = IsZeroExt(N1)) { | |||
13920 | SDValue New0a = DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, MVT::v4i32, Op0); | |||
13921 | SDValue New1a = DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, MVT::v4i32, Op1); | |||
13922 | return DAG.getNode(ARMISD::VMULLu, dl, VT, New0a, New1a); | |||
13923 | } | |||
13924 | } | |||
13925 | ||||
13926 | return SDValue(); | |||
13927 | } | |||
13928 | ||||
13929 | static SDValue PerformMULCombine(SDNode *N, | |||
13930 | TargetLowering::DAGCombinerInfo &DCI, | |||
13931 | const ARMSubtarget *Subtarget) { | |||
13932 | SelectionDAG &DAG = DCI.DAG; | |||
13933 | ||||
13934 | EVT VT = N->getValueType(0); | |||
13935 | if (Subtarget->hasMVEIntegerOps() && VT == MVT::v2i64) | |||
13936 | return PerformMVEVMULLCombine(N, DAG, Subtarget); | |||
13937 | ||||
13938 | if (Subtarget->isThumb1Only()) | |||
13939 | return SDValue(); | |||
13940 | ||||
13941 | if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) | |||
13942 | return SDValue(); | |||
13943 | ||||
13944 | if (VT.is64BitVector() || VT.is128BitVector()) | |||
13945 | return PerformVMULCombine(N, DCI, Subtarget); | |||
13946 | if (VT != MVT::i32) | |||
13947 | return SDValue(); | |||
13948 | ||||
13949 | ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1)); | |||
13950 | if (!C) | |||
13951 | return SDValue(); | |||
13952 | ||||
13953 | int64_t MulAmt = C->getSExtValue(); | |||
13954 | unsigned ShiftAmt = countTrailingZeros<uint64_t>(MulAmt); | |||
13955 | ||||
13956 | ShiftAmt = ShiftAmt & (32 - 1); | |||
13957 | SDValue V = N->getOperand(0); | |||
13958 | SDLoc DL(N); | |||
13959 | ||||
13960 | SDValue Res; | |||
13961 | MulAmt >>= ShiftAmt; | |||
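| // For example, mul x, 20: ShiftAmt = 2 strips the trailing zeros (20 == 5 | |||
| // << 2), the mul by 5 becomes (add (shl x, 2), x) below, and the result is | |||
| // finally shifted left by 2 again. | |||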
13962 | ||||
13963 | if (MulAmt >= 0) { | |||
13964 | if (isPowerOf2_32(MulAmt - 1)) { | |||
13965 | // (mul x, 2^N + 1) => (add (shl x, N), x) | |||
13966 | Res = DAG.getNode(ISD::ADD, DL, VT, | |||
13967 | V, | |||
13968 | DAG.getNode(ISD::SHL, DL, VT, | |||
13969 | V, | |||
13970 | DAG.getConstant(Log2_32(MulAmt - 1), DL, | |||
13971 | MVT::i32))); | |||
13972 | } else if (isPowerOf2_32(MulAmt + 1)) { | |||
13973 | // (mul x, 2^N - 1) => (sub (shl x, N), x) | |||
13974 | Res = DAG.getNode(ISD::SUB, DL, VT, | |||
13975 | DAG.getNode(ISD::SHL, DL, VT, | |||
13976 | V, | |||
13977 | DAG.getConstant(Log2_32(MulAmt + 1), DL, | |||
13978 | MVT::i32)), | |||
13979 | V); | |||
13980 | } else | |||
13981 | return SDValue(); | |||
13982 | } else { | |||
13983 | uint64_t MulAmtAbs = -MulAmt; | |||
13984 | if (isPowerOf2_32(MulAmtAbs + 1)) { | |||
13985 | // (mul x, -(2^N - 1)) => (sub x, (shl x, N)) | |||
13986 | Res = DAG.getNode(ISD::SUB, DL, VT, | |||
13987 | V, | |||
13988 | DAG.getNode(ISD::SHL, DL, VT, | |||
13989 | V, | |||
13990 | DAG.getConstant(Log2_32(MulAmtAbs + 1), DL, | |||
13991 | MVT::i32))); | |||
13992 | } else if (isPowerOf2_32(MulAmtAbs - 1)) { | |||
13993 | // (mul x, -(2^N + 1)) => - (add (shl x, N), x) | |||
13994 | Res = DAG.getNode(ISD::ADD, DL, VT, | |||
13995 | V, | |||
13996 | DAG.getNode(ISD::SHL, DL, VT, | |||
13997 | V, | |||
13998 | DAG.getConstant(Log2_32(MulAmtAbs - 1), DL, | |||
13999 | MVT::i32))); | |||
14000 | Res = DAG.getNode(ISD::SUB, DL, VT, | |||
14001 | DAG.getConstant(0, DL, MVT::i32), Res); | |||
14002 | } else | |||
14003 | return SDValue(); | |||
14004 | } | |||
14005 | ||||
14006 | if (ShiftAmt != 0) | |||
14007 | Res = DAG.getNode(ISD::SHL, DL, VT, | |||
14008 | Res, DAG.getConstant(ShiftAmt, DL, MVT::i32)); | |||
14009 | ||||
14010 | // Do not add new nodes to DAG combiner worklist. | |||
14011 | DCI.CombineTo(N, Res, false); | |||
14012 | return SDValue(); | |||
14013 | } | |||
14014 | ||||
14015 | static SDValue CombineANDShift(SDNode *N, | |||
14016 | TargetLowering::DAGCombinerInfo &DCI, | |||
14017 | const ARMSubtarget *Subtarget) { | |||
14018 | // Allow DAGCombine to pattern-match before we touch the canonical form. | |||
14019 | if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) | |||
14020 | return SDValue(); | |||
14021 | ||||
14022 | if (N->getValueType(0) != MVT::i32) | |||
14023 | return SDValue(); | |||
14024 | ||||
14025 | ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1)); | |||
14026 | if (!N1C) | |||
14027 | return SDValue(); | |||
14028 | ||||
14029 | uint32_t C1 = (uint32_t)N1C->getZExtValue(); | |||
14030 | // Don't transform uxtb/uxth. | |||
14031 | if (C1 == 255 || C1 == 65535) | |||
14032 | return SDValue(); | |||
14033 | ||||
14034 | SDNode *N0 = N->getOperand(0).getNode(); | |||
14035 | if (!N0->hasOneUse()) | |||
14036 | return SDValue(); | |||
14037 | ||||
14038 | if (N0->getOpcode() != ISD::SHL && N0->getOpcode() != ISD::SRL) | |||
14039 | return SDValue(); | |||
14040 | ||||
14041 | bool LeftShift = N0->getOpcode() == ISD::SHL; | |||
14042 | ||||
14043 | ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0->getOperand(1)); | |||
14044 | if (!N01C) | |||
14045 | return SDValue(); | |||
14046 | ||||
14047 | uint32_t C2 = (uint32_t)N01C->getZExtValue(); | |||
14048 | if (!C2 || C2 >= 32) | |||
14049 | return SDValue(); | |||
14050 | ||||
14051 | // Clear irrelevant bits in the mask. | |||
14052 | if (LeftShift) | |||
14053 | C1 &= (-1U << C2); | |||
14054 | else | |||
14055 | C1 &= (-1U >> C2); | |||
14056 | ||||
14057 | SelectionDAG &DAG = DCI.DAG; | |||
14058 | SDLoc DL(N); | |||
14059 | ||||
14060 | // We have a pattern of the form "(and (shl x, c2) c1)" or | |||
14061 | // "(and (srl x, c2) c1)", where c1 is a shifted mask. Try to | |||
14062 | // transform to a pair of shifts, to save materializing c1. | |||
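| // For example, (and (srl x, 3), 0x1FFF) can be built as | |||
| // (srl (shl x, 16), 19): the mask has 19 leading zeros, so shifting left by | |||
| // 19 - 3 and back right by 19 clears the same bits without materializing | |||
| // 0x1FFF. | |||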
14063 | ||||
14064 | // First pattern: right shift, then mask off leading bits. | |||
14065 | // FIXME: Use demanded bits? | |||
14066 | if (!LeftShift && isMask_32(C1)) { | |||
14067 | uint32_t C3 = countLeadingZeros(C1); | |||
14068 | if (C2 < C3) { | |||
14069 | SDValue SHL = DAG.getNode(ISD::SHL, DL, MVT::i32, N0->getOperand(0), | |||
14070 | DAG.getConstant(C3 - C2, DL, MVT::i32)); | |||
14071 | return DAG.getNode(ISD::SRL, DL, MVT::i32, SHL, | |||
14072 | DAG.getConstant(C3, DL, MVT::i32)); | |||
14073 | } | |||
14074 | } | |||
14075 | ||||
14076 | // First pattern, reversed: left shift, then mask off trailing bits. | |||
14077 | if (LeftShift && isMask_32(~C1)) { | |||
14078 | uint32_t C3 = countTrailingZeros(C1); | |||
14079 | if (C2 < C3) { | |||
14080 | SDValue SHL = DAG.getNode(ISD::SRL, DL, MVT::i32, N0->getOperand(0), | |||
14081 | DAG.getConstant(C3 - C2, DL, MVT::i32)); | |||
14082 | return DAG.getNode(ISD::SHL, DL, MVT::i32, SHL, | |||
14083 | DAG.getConstant(C3, DL, MVT::i32)); | |||
14084 | } | |||
14085 | } | |||
14086 | ||||
14087 | // Second pattern: left shift, then mask off leading bits. | |||
14088 | // FIXME: Use demanded bits? | |||
14089 | if (LeftShift && isShiftedMask_32(C1)) { | |||
14090 | uint32_t Trailing = countTrailingZeros(C1); | |||
14091 | uint32_t C3 = countLeadingZeros(C1); | |||
14092 | if (Trailing == C2 && C2 + C3 < 32) { | |||
14093 | SDValue SHL = DAG.getNode(ISD::SHL, DL, MVT::i32, N0->getOperand(0), | |||
14094 | DAG.getConstant(C2 + C3, DL, MVT::i32)); | |||
14095 | return DAG.getNode(ISD::SRL, DL, MVT::i32, SHL, | |||
14096 | DAG.getConstant(C3, DL, MVT::i32)); | |||
14097 | } | |||
14098 | } | |||
14099 | ||||
14100 | // Second pattern, reversed: right shift, then mask off trailing bits. | |||
14101 | // FIXME: Handle other patterns of known/demanded bits. | |||
14102 | if (!LeftShift && isShiftedMask_32(C1)) { | |||
14103 | uint32_t Leading = countLeadingZeros(C1); | |||
14104 | uint32_t C3 = countTrailingZeros(C1); | |||
14105 | if (Leading == C2 && C2 + C3 < 32) { | |||
14106 | SDValue SHL = DAG.getNode(ISD::SRL, DL, MVT::i32, N0->getOperand(0), | |||
14107 | DAG.getConstant(C2 + C3, DL, MVT::i32)); | |||
14108 | return DAG.getNode(ISD::SHL, DL, MVT::i32, SHL, | |||
14109 | DAG.getConstant(C3, DL, MVT::i32)); | |||
14110 | } | |||
14111 | } | |||
14112 | ||||
14113 | // FIXME: Transform "(and (shl x, c2) c1)" -> | |||
14114 | // "(shl (and x, c1>>c2), c2)" if "c1 >> c2" is a cheaper immediate than | |||
14115 | // c1. | |||
14116 | return SDValue(); | |||
14117 | } | |||
14118 | ||||
14119 | static SDValue PerformANDCombine(SDNode *N, | |||
14120 | TargetLowering::DAGCombinerInfo &DCI, | |||
14121 | const ARMSubtarget *Subtarget) { | |||
14122 | // Attempt to use immediate-form VBIC | |||
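| // For example, and(v, splat(0xFFFFFF00)) can become VBICIMM(v, 0xFF), as | |||
| // the inverted mask 0xFF is representable as a VMOV-style modified | |||
| // immediate. | |||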
14123 | BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1)); | |||
14124 | SDLoc dl(N); | |||
14125 | EVT VT = N->getValueType(0); | |||
14126 | SelectionDAG &DAG = DCI.DAG; | |||
14127 | ||||
14128 | if (!DAG.getTargetLoweringInfo().isTypeLegal(VT) || VT == MVT::v2i1 || | |||
14129 | VT == MVT::v4i1 || VT == MVT::v8i1 || VT == MVT::v16i1) | |||
14130 | return SDValue(); | |||
14131 | ||||
14132 | APInt SplatBits, SplatUndef; | |||
14133 | unsigned SplatBitSize; | |||
14134 | bool HasAnyUndefs; | |||
14135 | if (BVN && (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) && | |||
14136 | BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { | |||
14137 | if (SplatBitSize == 8 || SplatBitSize == 16 || SplatBitSize == 32 || | |||
14138 | SplatBitSize == 64) { | |||
14139 | EVT VbicVT; | |||
14140 | SDValue Val = isVMOVModifiedImm((~SplatBits).getZExtValue(), | |||
14141 | SplatUndef.getZExtValue(), SplatBitSize, | |||
14142 | DAG, dl, VbicVT, VT, OtherModImm); | |||
14143 | if (Val.getNode()) { | |||
14144 | SDValue Input = | |||
14145 | DAG.getNode(ISD::BITCAST, dl, VbicVT, N->getOperand(0)); | |||
14146 | SDValue Vbic = DAG.getNode(ARMISD::VBICIMM, dl, VbicVT, Input, Val); | |||
14147 | return DAG.getNode(ISD::BITCAST, dl, VT, Vbic); | |||
14148 | } | |||
14149 | } | |||
14150 | } | |||
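// Illustrative example (a sketch, not from the original source): for a v4i32
// AND with the splatted constant 0xFFFFFF00, ~SplatBits = 0x000000FF is
// encodable as a VMOV-style modified immediate, so the AND becomes
// (VBICIMM x, 0x000000FF), clearing the low byte of each lane without
// materializing the mask in a register.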
14151 | ||||
14152 | if (!Subtarget->isThumb1Only()) { | |||
14153 | // fold (and (select cc, -1, c), x) -> (select cc, x, (and, x, c)) | |||
14154 | if (SDValue Result = combineSelectAndUseCommutative(N, true, DCI)) | |||
14155 | return Result; | |||
14156 | ||||
14157 | if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget)) | |||
14158 | return Result; | |||
14159 | } | |||
14160 | ||||
14161 | if (Subtarget->isThumb1Only()) | |||
14162 | if (SDValue Result = CombineANDShift(N, DCI, Subtarget)) | |||
14163 | return Result; | |||
14164 | ||||
14165 | return SDValue(); | |||
14166 | } | |||
14167 | ||||
14168 | // Try combining OR nodes to SMULWB, SMULWT. | |||
14169 | static SDValue PerformORCombineToSMULWBT(SDNode *OR, | |||
14170 | TargetLowering::DAGCombinerInfo &DCI, | |||
14171 | const ARMSubtarget *Subtarget) { | |||
14172 | if (!Subtarget->hasV6Ops() || | |||
14173 | (Subtarget->isThumb() && | |||
14174 | (!Subtarget->hasThumb2() || !Subtarget->hasDSP()))) | |||
14175 | return SDValue(); | |||
14176 | ||||
14177 | SDValue SRL = OR->getOperand(0); | |||
14178 | SDValue SHL = OR->getOperand(1); | |||
14179 | ||||
14180 | if (SRL.getOpcode() != ISD::SRL || SHL.getOpcode() != ISD::SHL) { | |||
14181 | SRL = OR->getOperand(1); | |||
14182 | SHL = OR->getOperand(0); | |||
14183 | } | |||
14184 | if (!isSRL16(SRL) || !isSHL16(SHL)) | |||
14185 | return SDValue(); | |||
14186 | ||||
14187 | // The first operands to the shifts need to be the two results from the | |||
14188 | // same smul_lohi node. | |||
14189 | if ((SRL.getOperand(0).getNode() != SHL.getOperand(0).getNode()) || | |||
14190 | SRL.getOperand(0).getOpcode() != ISD::SMUL_LOHI) | |||
14191 | return SDValue(); | |||
14192 | ||||
14193 | SDNode *SMULLOHI = SRL.getOperand(0).getNode(); | |||
14194 | if (SRL.getOperand(0) != SDValue(SMULLOHI, 0) || | |||
14195 | SHL.getOperand(0) != SDValue(SMULLOHI, 1)) | |||
14196 | return SDValue(); | |||
14197 | ||||
14198 | // Now we have: | |||
14199 | // (or (srl (smul_lohi ?, ?), 16), (shl (smul_lohi ?, ?), 16)) | |||
14200 | // For SMULW[B|T] smul_lohi will take a 32-bit and a 16-bit argument. | |||
14201 | // For SMULWB the 16-bit value will be sign extended somehow. | |||
14202 | // For SMULWT only the SRA is required. | |||
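// Note (explanatory, not from the original source): (or (srl lo, 16),
// (shl hi, 16)) reconstructs bits [47:16] of the 64-bit smul_lohi product,
// which is exactly the value SMULW[B|T] produces for a 32x16-bit multiply.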
14203 | // Check both sides of SMUL_LOHI | |||
14204 | SDValue OpS16 = SMULLOHI->getOperand(0); | |||
14205 | SDValue OpS32 = SMULLOHI->getOperand(1); | |||
14206 | ||||
14207 | SelectionDAG &DAG = DCI.DAG; | |||
14208 | if (!isS16(OpS16, DAG) && !isSRA16(OpS16)) { | |||
14209 | OpS16 = OpS32; | |||
14210 | OpS32 = SMULLOHI->getOperand(0); | |||
14211 | } | |||
14212 | ||||
14213 | SDLoc dl(OR); | |||
14214 | unsigned Opcode = 0; | |||
14215 | if (isS16(OpS16, DAG)) | |||
14216 | Opcode = ARMISD::SMULWB; | |||
14217 | else if (isSRA16(OpS16)) { | |||
14218 | Opcode = ARMISD::SMULWT; | |||
14219 | OpS16 = OpS16->getOperand(0); | |||
14220 | } | |||
14221 | else | |||
14222 | return SDValue(); | |||
14223 | ||||
14224 | SDValue Res = DAG.getNode(Opcode, dl, MVT::i32, OpS32, OpS16); | |||
14225 | DAG.ReplaceAllUsesOfValueWith(SDValue(OR, 0), Res); | |||
14226 | return SDValue(OR, 0); | |||
14227 | } | |||
14228 | ||||
14229 | static SDValue PerformORCombineToBFI(SDNode *N, | |||
14230 | TargetLowering::DAGCombinerInfo &DCI, | |||
14231 | const ARMSubtarget *Subtarget) { | |||
14232 | // BFI is only available on V6T2+ | |||
14233 | if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops()) | |||
14234 | return SDValue(); | |||
14235 | ||||
14236 | EVT VT = N->getValueType(0); | |||
14237 | SDValue N0 = N->getOperand(0); | |||
14238 | SDValue N1 = N->getOperand(1); | |||
14239 | SelectionDAG &DAG = DCI.DAG; | |||
14240 | SDLoc DL(N); | |||
14241 | // 1) or (and A, mask), val => ARMbfi A, val, mask | |||
14242 | // iff (val & mask) == val | |||
14243 | // | |||
14244 | // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask | |||
14245 | // 2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2) | |||
14246 | // && mask == ~mask2 | |||
14247 | // 2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2) | |||
14248 | // && ~mask == mask2 | |||
14249 | // (i.e., copy a bitfield value into another bitfield of the same width) | |||
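// Illustrative example for case (1) (not from the original source): with
// mask = 0xFFFF00FF and val = 0x00004400, (val & ~mask) == val holds and
// ~mask = 0x0000FF00 is a contiguous bitfield, so the OR becomes
// (ARMbfi A, 0x44, 0xFFFF00FF), inserting the 8-bit value 0x44 at bit 8
// of A.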
14250 | ||||
14251 | if (VT != MVT::i32) | |||
14252 | return SDValue(); | |||
14253 | ||||
14254 | SDValue N00 = N0.getOperand(0); | |||
14255 | ||||
14256 | // The value and the mask need to be constants so we can verify this is | |||
14257 | // actually a bitfield set. If the mask is 0xffff, we can do better | |||
14258 | // via a movt instruction, so don't use BFI in that case. | |||
14259 | SDValue MaskOp = N0.getOperand(1); | |||
14260 | ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(MaskOp); | |||
14261 | if (!MaskC) | |||
14262 | return SDValue(); | |||
14263 | unsigned Mask = MaskC->getZExtValue(); | |||
14264 | if (Mask == 0xffff) | |||
14265 | return SDValue(); | |||
14266 | SDValue Res; | |||
14267 | // Case (1): or (and A, mask), val => ARMbfi A, val, mask | |||
14268 | ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); | |||
14269 | if (N1C) { | |||
14270 | unsigned Val = N1C->getZExtValue(); | |||
14271 | if ((Val & ~Mask) != Val) | |||
14272 | return SDValue(); | |||
14273 | ||||
14274 | if (ARM::isBitFieldInvertedMask(Mask)) { | |||
14275 | Val >>= countTrailingZeros(~Mask); | |||
14276 | ||||
14277 | Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, | |||
14278 | DAG.getConstant(Val, DL, MVT::i32), | |||
14279 | DAG.getConstant(Mask, DL, MVT::i32)); | |||
14280 | ||||
14281 | DCI.CombineTo(N, Res, false); | |||
14282 | // Return value from the original node to inform the combiner that N is | |||
14283 | // now dead. | |||
14284 | return SDValue(N, 0); | |||
14285 | } | |||
14286 | } else if (N1.getOpcode() == ISD::AND) { | |||
14287 | // case (2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask | |||
14288 | ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); | |||
14289 | if (!N11C) | |||
14290 | return SDValue(); | |||
14291 | unsigned Mask2 = N11C->getZExtValue(); | |||
14292 | ||||
14293 | // Mask and ~Mask2 (or the reverse) must be equivalent for the BFI | |||
14294 | // pattern to match as is. | |||
14295 | if (ARM::isBitFieldInvertedMask(Mask) && | |||
14296 | (Mask == ~Mask2)) { | |||
14297 | // The pack halfword instruction works better for masks that fit it, | |||
14298 | // so use that when it's available. | |||
14299 | if (Subtarget->hasDSP() && | |||
14300 | (Mask == 0xffff || Mask == 0xffff0000)) | |||
14301 | return SDValue(); | |||
14302 | // 2a | |||
14303 | unsigned amt = countTrailingZeros(Mask2); | |||
14304 | Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0), | |||
14305 | DAG.getConstant(amt, DL, MVT::i32)); | |||
14306 | Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, Res, | |||
14307 | DAG.getConstant(Mask, DL, MVT::i32)); | |||
14308 | DCI.CombineTo(N, Res, false); | |||
14309 | // Return value from the original node to inform the combiner that N is | |||
14310 | // now dead. | |||
14311 | return SDValue(N, 0); | |||
14312 | } else if (ARM::isBitFieldInvertedMask(~Mask) && | |||
14313 | (~Mask == Mask2)) { | |||
14314 | // The pack halfword instruction works better for masks that fit it, | |||
14315 | // so use that when it's available. | |||
14316 | if (Subtarget->hasDSP() && | |||
14317 | (Mask2 == 0xffff || Mask2 == 0xffff0000)) | |||
14318 | return SDValue(); | |||
14319 | // 2b | |||
14320 | unsigned lsb = countTrailingZeros(Mask); | |||
14321 | Res = DAG.getNode(ISD::SRL, DL, VT, N00, | |||
14322 | DAG.getConstant(lsb, DL, MVT::i32)); | |||
14323 | Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res, | |||
14324 | DAG.getConstant(Mask2, DL, MVT::i32)); | |||
14325 | DCI.CombineTo(N, Res, false); | |||
14326 | // Return value from the original node to inform the combiner that N is | |||
14327 | // now dead. | |||
14328 | return SDValue(N, 0); | |||
14329 | } | |||
14330 | } | |||
14331 | ||||
14332 | if (DAG.MaskedValueIsZero(N1, MaskC->getAPIntValue()) && | |||
14333 | N00.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N00.getOperand(1)) && | |||
14334 | ARM::isBitFieldInvertedMask(~Mask)) { | |||
14335 | // Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask | |||
14336 | // where lsb(mask) == #shamt and masked bits of B are known zero. | |||
14337 | SDValue ShAmt = N00.getOperand(1); | |||
14338 | unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue(); | |||
14339 | unsigned LSB = countTrailingZeros(Mask); | |||
14340 | if (ShAmtC != LSB) | |||
14341 | return SDValue(); | |||
14342 | ||||
14343 | Res = DAG.getNode(ARMISD::BFI, DL, VT, N1, N00.getOperand(0), | |||
14344 | DAG.getConstant(~Mask, DL, MVT::i32)); | |||
14345 | ||||
14346 | DCI.CombineTo(N, Res, false); | |||
14347 | // Return value from the original node to inform the combiner that N is | |||
14348 | // now dead. | |||
14349 | return SDValue(N, 0); | |||
14350 | } | |||
14351 | ||||
14352 | return SDValue(); | |||
14353 | } | |||
14354 | ||||
14355 | static bool isValidMVECond(unsigned CC, bool IsFloat) { | |||
14356 | switch (CC) { | |||
14357 | case ARMCC::EQ: | |||
14358 | case ARMCC::NE: | |||
14359 | case ARMCC::LE: | |||
14360 | case ARMCC::GT: | |||
14361 | case ARMCC::GE: | |||
14362 | case ARMCC::LT: | |||
14363 | return true; | |||
14364 | case ARMCC::HS: | |||
14365 | case ARMCC::HI: | |||
14366 | return !IsFloat; | |||
14367 | default: | |||
14368 | return false; | |||
14369 | } | |||
14370 | } | |||
14371 | ||||
14372 | static ARMCC::CondCodes getVCMPCondCode(SDValue N) { | |||
14373 | if (N->getOpcode() == ARMISD::VCMP) | |||
14374 | return (ARMCC::CondCodes)N->getConstantOperandVal(2); | |||
14375 | else if (N->getOpcode() == ARMISD::VCMPZ) | |||
14376 | return (ARMCC::CondCodes)N->getConstantOperandVal(1); | |||
14377 | else | |||
14378 | llvm_unreachable("Not a VCMP/VCMPZ!"); | |||
14379 | } | |||
14380 | ||||
14381 | static bool CanInvertMVEVCMP(SDValue N) { | |||
14382 | ARMCC::CondCodes CC = ARMCC::getOppositeCondition(getVCMPCondCode(N)); | |||
14383 | return isValidMVECond(CC, N->getOperand(0).getValueType().isFloatingPoint()); | |||
14384 | } | |||
14385 | ||||
14386 | static SDValue PerformORCombine_i1(SDNode *N, SelectionDAG &DAG, | |||
14387 | const ARMSubtarget *Subtarget) { | |||
14388 | // Try to invert "or A, B" -> "and ~A, ~B", as the "and" is easier to chain | |||
14389 | // together with predicates. | |||
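// Illustrative example (not from the original source):
// (or (vcmp a, b, eq), (vcmp c, d, gt))
// -> (xor (and (vcmp a, b, ne), (vcmp c, d, le)), splat(-1))
// where the trailing NOT can often be folded into a VPNOT or an else
// predicate later on.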
14390 | EVT VT = N->getValueType(0); | |||
14391 | SDLoc DL(N); | |||
14392 | SDValue N0 = N->getOperand(0); | |||
14393 | SDValue N1 = N->getOperand(1); | |||
14394 | ||||
14395 | auto IsFreelyInvertable = [&](SDValue V) { | |||
14396 | if (V->getOpcode() == ARMISD::VCMP || V->getOpcode() == ARMISD::VCMPZ) | |||
14397 | return CanInvertMVEVCMP(V); | |||
14398 | return false; | |||
14399 | }; | |||
14400 | ||||
14401 | // At least one operand must be freely invertible. | |||
14402 | if (!(IsFreelyInvertable(N0) || IsFreelyInvertable(N1))) | |||
14403 | return SDValue(); | |||
14404 | ||||
14405 | SDValue NewN0 = DAG.getLogicalNOT(DL, N0, VT); | |||
14406 | SDValue NewN1 = DAG.getLogicalNOT(DL, N1, VT); | |||
14407 | SDValue And = DAG.getNode(ISD::AND, DL, VT, NewN0, NewN1); | |||
14408 | return DAG.getLogicalNOT(DL, And, VT); | |||
14409 | } | |||
14410 | ||||
14411 | /// PerformORCombine - Target-specific dag combine xforms for ISD::OR | |||
14412 | static SDValue PerformORCombine(SDNode *N, | |||
14413 | TargetLowering::DAGCombinerInfo &DCI, | |||
14414 | const ARMSubtarget *Subtarget) { | |||
14415 | // Attempt to use immediate-form VORR | |||
14416 | BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1)); | |||
14417 | SDLoc dl(N); | |||
14418 | EVT VT = N->getValueType(0); | |||
14419 | SelectionDAG &DAG = DCI.DAG; | |||
14420 | ||||
14421 | if (!DAG.getTargetLoweringInfo().isTypeLegal(VT)) | |||
14422 | return SDValue(); | |||
14423 | ||||
14424 | if (Subtarget->hasMVEIntegerOps() && (VT == MVT::v2i1 || VT == MVT::v4i1 || | |||
14425 | VT == MVT::v8i1 || VT == MVT::v16i1)) | |||
14426 | return PerformORCombine_i1(N, DAG, Subtarget); | |||
14427 | ||||
14428 | APInt SplatBits, SplatUndef; | |||
14429 | unsigned SplatBitSize; | |||
14430 | bool HasAnyUndefs; | |||
14431 | if (BVN && (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) && | |||
14432 | BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { | |||
14433 | if (SplatBitSize == 8 || SplatBitSize == 16 || SplatBitSize == 32 || | |||
14434 | SplatBitSize == 64) { | |||
14435 | EVT VorrVT; | |||
14436 | SDValue Val = | |||
14437 | isVMOVModifiedImm(SplatBits.getZExtValue(), SplatUndef.getZExtValue(), | |||
14438 | SplatBitSize, DAG, dl, VorrVT, VT, OtherModImm); | |||
14439 | if (Val.getNode()) { | |||
14440 | SDValue Input = | |||
14441 | DAG.getNode(ISD::BITCAST, dl, VorrVT, N->getOperand(0)); | |||
14442 | SDValue Vorr = DAG.getNode(ARMISD::VORRIMM, dl, VorrVT, Input, Val); | |||
14443 | return DAG.getNode(ISD::BITCAST, dl, VT, Vorr); | |||
14444 | } | |||
14445 | } | |||
14446 | } | |||
14447 | ||||
14448 | if (!Subtarget->isThumb1Only()) { | |||
14449 | // fold (or (select cc, 0, c), x) -> (select cc, x, (or, x, c)) | |||
14450 | if (SDValue Result = combineSelectAndUseCommutative(N, false, DCI)) | |||
14451 | return Result; | |||
14452 | if (SDValue Result = PerformORCombineToSMULWBT(N, DCI, Subtarget)) | |||
14453 | return Result; | |||
14454 | } | |||
14455 | ||||
14456 | SDValue N0 = N->getOperand(0); | |||
14457 | SDValue N1 = N->getOperand(1); | |||
14458 | ||||
14459 | // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant. | |||
14460 | if (Subtarget->hasNEON() && N1.getOpcode() == ISD::AND && VT.isVector() && | |||
14461 | DAG.getTargetLoweringInfo().isTypeLegal(VT)) { | |||
14462 | ||||
14463 | // The code below optimizes (or (and X, Y), Z). | |||
14464 | // The AND operand needs to have a single user to make these optimizations | |||
14465 | // profitable. | |||
14466 | if (N0.getOpcode() != ISD::AND || !N0.hasOneUse()) | |||
14467 | return SDValue(); | |||
14468 | ||||
14469 | APInt SplatUndef; | |||
14470 | unsigned SplatBitSize; | |||
14471 | bool HasAnyUndefs; | |||
14472 | ||||
14473 | APInt SplatBits0, SplatBits1; | |||
14474 | BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1)); | |||
14475 | BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1)); | |||
14476 | // Ensure that the second operand of both ands are constants | |||
14477 | if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize, | |||
14478 | HasAnyUndefs) && !HasAnyUndefs) { | |||
14479 | if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize, | |||
14480 | HasAnyUndefs) && !HasAnyUndefs) { | |||
14481 | // Ensure that the bit width of the constants are the same and that | |||
14482 | // the splat arguments are logical inverses as per the pattern we | |||
14483 | // are trying to simplify. | |||
14484 | if (SplatBits0.getBitWidth() == SplatBits1.getBitWidth() && | |||
14485 | SplatBits0 == ~SplatBits1) { | |||
14486 | // Canonicalize the vector type to make instruction selection | |||
14487 | // simpler. | |||
14488 | EVT CanonicalVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32; | |||
14489 | SDValue Result = DAG.getNode(ARMISD::VBSP, dl, CanonicalVT, | |||
14490 | N0->getOperand(1), | |||
14491 | N0->getOperand(0), | |||
14492 | N1->getOperand(0)); | |||
14493 | return DAG.getNode(ISD::BITCAST, dl, VT, Result); | |||
14494 | } | |||
14495 | } | |||
14496 | } | |||
14497 | } | |||
14498 | ||||
14499 | // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when | |||
14500 | // reasonable. | |||
14501 | if (N0.getOpcode() == ISD::AND && N0.hasOneUse()) { | |||
14502 | if (SDValue Res = PerformORCombineToBFI(N, DCI, Subtarget)) | |||
14503 | return Res; | |||
14504 | } | |||
14505 | ||||
14506 | if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget)) | |||
14507 | return Result; | |||
14508 | ||||
14509 | return SDValue(); | |||
14510 | } | |||
14511 | ||||
14512 | static SDValue PerformXORCombine(SDNode *N, | |||
14513 | TargetLowering::DAGCombinerInfo &DCI, | |||
14514 | const ARMSubtarget *Subtarget) { | |||
14515 | EVT VT = N->getValueType(0); | |||
14516 | SelectionDAG &DAG = DCI.DAG; | |||
14517 | ||||
14518 | if (!DAG.getTargetLoweringInfo().isTypeLegal(VT)) | |||
14519 | return SDValue(); | |||
14520 | ||||
14521 | if (!Subtarget->isThumb1Only()) { | |||
14522 | // fold (xor (select cc, 0, c), x) -> (select cc, x, (xor, x, c)) | |||
14523 | if (SDValue Result = combineSelectAndUseCommutative(N, false, DCI)) | |||
14524 | return Result; | |||
14525 | ||||
14526 | if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget)) | |||
14527 | return Result; | |||
14528 | } | |||
14529 | ||||
14530 | if (Subtarget->hasMVEIntegerOps()) { | |||
14531 | // fold (xor(vcmp/z, 1)) into a vcmp with the opposite condition. | |||
14532 | SDValue N0 = N->getOperand(0); | |||
14533 | SDValue N1 = N->getOperand(1); | |||
14534 | const TargetLowering *TLI = Subtarget->getTargetLowering(); | |||
14535 | if (TLI->isConstTrueVal(N1) && | |||
14536 | (N0->getOpcode() == ARMISD::VCMP || N0->getOpcode() == ARMISD::VCMPZ)) { | |||
14537 | if (CanInvertMVEVCMP(N0)) { | |||
14538 | SDLoc DL(N0); | |||
14539 | ARMCC::CondCodes CC = ARMCC::getOppositeCondition(getVCMPCondCode(N0)); | |||
14540 | ||||
14541 | SmallVector<SDValue, 4> Ops; | |||
14542 | Ops.push_back(N0->getOperand(0)); | |||
14543 | if (N0->getOpcode() == ARMISD::VCMP) | |||
14544 | Ops.push_back(N0->getOperand(1)); | |||
14545 | Ops.push_back(DAG.getConstant(CC, DL, MVT::i32)); | |||
14546 | return DAG.getNode(N0->getOpcode(), DL, N0->getValueType(0), Ops); | |||
14547 | } | |||
14548 | } | |||
14549 | } | |||
14550 | ||||
14551 | return SDValue(); | |||
14552 | } | |||
14553 | ||||
14554 | // ParseBFI - given a BFI instruction in N, extract the "from" value (Rn) and return it, | |||
14555 | // and fill in FromMask and ToMask with (consecutive) bits in "from" to be extracted and | |||
14556 | // their position in "to" (Rd). | |||
14557 | static SDValue ParseBFI(SDNode *N, APInt &ToMask, APInt &FromMask) { | |||
14558 | assert(N->getOpcode() == ARMISD::BFI); | |||
14559 | ||||
14560 | SDValue From = N->getOperand(1); | |||
14561 | ToMask = ~cast<ConstantSDNode>(N->getOperand(2))->getAPIntValue(); | |||
14562 | FromMask = APInt::getLowBitsSet(ToMask.getBitWidth(), ToMask.countPopulation()); | |||
14563 | ||||
14564 | // If the Base came from a SHR #C, we can deduce that it is really testing bit | |||
14565 | // #C in the base of the SHR. | |||
14566 | if (From->getOpcode() == ISD::SRL && | |||
14567 | isa<ConstantSDNode>(From->getOperand(1))) { | |||
14568 | APInt Shift = cast<ConstantSDNode>(From->getOperand(1))->getAPIntValue(); | |||
14569 | assert(Shift.getLimitedValue() < 32 && "Shift too large!"); | |||
14570 | FromMask <<= Shift.getLimitedValue(31); | |||
14571 | From = From->getOperand(0); | |||
14572 | } | |||
14573 | ||||
14574 | return From; | |||
14575 | } | |||
14576 | ||||
14577 | // If A and B contain one contiguous set of bits, does A | B == A . B? | |||
14578 | // | |||
14579 | // Neither A nor B may be zero. | |||
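// For example (illustrative): A = 0b111000 and B = 0b000111 concatenate
// properly: LastActiveBitInA = 3, FirstActiveBitInB = 2 and 3 - 1 == 2, so
// A | B = 0b111111 is again one contiguous set of bits.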
14580 | static bool BitsProperlyConcatenate(const APInt &A, const APInt &B) { | |||
14581 | unsigned LastActiveBitInA = A.countTrailingZeros(); | |||
14582 | unsigned FirstActiveBitInB = B.getBitWidth() - B.countLeadingZeros() - 1; | |||
14583 | return LastActiveBitInA - 1 == FirstActiveBitInB; | |||
14584 | } | |||
14585 | ||||
14586 | static SDValue FindBFIToCombineWith(SDNode *N) { | |||
14587 | // We have a BFI in N. Find a BFI it can combine with, if one exists. | |||
14588 | APInt ToMask, FromMask; | |||
14589 | SDValue From = ParseBFI(N, ToMask, FromMask); | |||
14590 | SDValue To = N->getOperand(0); | |||
14591 | ||||
14592 | SDValue V = To; | |||
14593 | if (V.getOpcode() != ARMISD::BFI) | |||
14594 | return SDValue(); | |||
14595 | ||||
14596 | APInt NewToMask, NewFromMask; | |||
14597 | SDValue NewFrom = ParseBFI(V.getNode(), NewToMask, NewFromMask); | |||
14598 | if (NewFrom != From) | |||
14599 | return SDValue(); | |||
14600 | ||||
14601 | // Do the written bits conflict with any we've seen so far? | |||
14602 | if ((NewToMask & ToMask).getBoolValue()) | |||
14603 | // Conflicting bits. | |||
14604 | return SDValue(); | |||
14605 | ||||
14606 | // Are the new bits contiguous when combined with the old bits? | |||
14607 | if (BitsProperlyConcatenate(ToMask, NewToMask) && | |||
14608 | BitsProperlyConcatenate(FromMask, NewFromMask)) | |||
14609 | return V; | |||
14610 | if (BitsProperlyConcatenate(NewToMask, ToMask) && | |||
14611 | BitsProperlyConcatenate(NewFromMask, FromMask)) | |||
14612 | return V; | |||
14613 | ||||
14614 | return SDValue(); | |||
14615 | } | |||
14616 | ||||
14617 | static SDValue PerformBFICombine(SDNode *N, SelectionDAG &DAG) { | |||
14618 | SDValue N0 = N->getOperand(0); | |||
14619 | SDValue N1 = N->getOperand(1); | |||
14620 | ||||
14621 | if (N1.getOpcode() == ISD::AND) { | |||
14622 | // (bfi A, (and B, Mask1), Mask2) -> (bfi A, B, Mask2) iff | |||
14623 | // the bits being cleared by the AND are not demanded by the BFI. | |||
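// Illustrative example (not from the original source): in
// (bfi A, (and B, 0xFF), 0xFFFFFF0F) the BFI only demands bits [3:0] of B
// (LSB = 4, Width = 4, Mask = 0xF), and the AND with 0xFF keeps all of
// them, so it simplifies to (bfi A, B, 0xFFFFFF0F).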
14624 | ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); | |||
14625 | if (!N11C) | |||
14626 | return SDValue(); | |||
14627 | unsigned InvMask = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue(); | |||
14628 | unsigned LSB = countTrailingZeros(~InvMask); | |||
14629 | unsigned Width = (32 - countLeadingZeros(~InvMask)) - LSB; | |||
14630 | assert(Width < | |||
14631 | static_cast<unsigned>(std::numeric_limits<unsigned>::digits) && | |||
14632 | "undefined behavior"); | |||
14633 | unsigned Mask = (1u << Width) - 1; | |||
14634 | unsigned Mask2 = N11C->getZExtValue(); | |||
14635 | if ((Mask & (~Mask2)) == 0) | |||
14636 | return DAG.getNode(ARMISD::BFI, SDLoc(N), N->getValueType(0), | |||
14637 | N->getOperand(0), N1.getOperand(0), N->getOperand(2)); | |||
14638 | return SDValue(); | |||
14639 | } | |||
14640 | ||||
14641 | // Look for another BFI to combine with. | |||
14642 | if (SDValue CombineBFI = FindBFIToCombineWith(N)) { | |||
14643 | // We've found a BFI. | |||
14644 | APInt ToMask1, FromMask1; | |||
14645 | SDValue From1 = ParseBFI(N, ToMask1, FromMask1); | |||
14646 | ||||
14647 | APInt ToMask2, FromMask2; | |||
14648 | SDValue From2 = ParseBFI(CombineBFI.getNode(), ToMask2, FromMask2); | |||
14649 | assert(From1 == From2); | |||
14650 | (void)From2; | |||
14651 | ||||
14652 | // Create a new BFI, combining the two together. | |||
14653 | APInt NewFromMask = FromMask1 | FromMask2; | |||
14654 | APInt NewToMask = ToMask1 | ToMask2; | |||
14655 | ||||
14656 | EVT VT = N->getValueType(0); | |||
14657 | SDLoc dl(N); | |||
14658 | ||||
14659 | if (NewFromMask[0] == 0) | |||
14660 | From1 = DAG.getNode( | |||
14661 | ISD::SRL, dl, VT, From1, | |||
14662 | DAG.getConstant(NewFromMask.countTrailingZeros(), dl, VT)); | |||
14663 | return DAG.getNode(ARMISD::BFI, dl, VT, CombineBFI.getOperand(0), From1, | |||
14664 | DAG.getConstant(~NewToMask, dl, VT)); | |||
14665 | } | |||
14666 | ||||
14667 | // Reassociate BFI(BFI (A, B, M1), C, M2) to BFI(BFI (A, C, M2), B, M1) so | |||
14668 | // that lower bit insertions are performed first, provided that M1 and M2 | |||
14669 | // do not overlap. This can allow multiple BFI instructions to be combined | |||
14670 | // together by the other folds above. | |||
14671 | if (N->getOperand(0).getOpcode() == ARMISD::BFI) { | |||
14672 | APInt ToMask1 = ~N->getConstantOperandAPInt(2); | |||
14673 | APInt ToMask2 = ~N0.getConstantOperandAPInt(2); | |||
14674 | ||||
14675 | if (!N0.hasOneUse() || (ToMask1 & ToMask2) != 0 || | |||
14676 | ToMask1.countLeadingZeros() < ToMask2.countLeadingZeros()) | |||
14677 | return SDValue(); | |||
14678 | ||||
14679 | EVT VT = N->getValueType(0); | |||
14680 | SDLoc dl(N); | |||
14681 | SDValue BFI1 = DAG.getNode(ARMISD::BFI, dl, VT, N0.getOperand(0), | |||
14682 | N->getOperand(1), N->getOperand(2)); | |||
14683 | return DAG.getNode(ARMISD::BFI, dl, VT, BFI1, N0.getOperand(1), | |||
14684 | N0.getOperand(2)); | |||
14685 | } | |||
14686 | ||||
14687 | return SDValue(); | |||
14688 | } | |||
14689 | ||||
14690 | // Check that N is CMPZ(CSINC(0, 0, CC, X)) | |||
14691 | // or CMPZ(CMOV(1, 0, CC, $cpsr, X)), | |||
14692 | // and return X if valid. | |||
14693 | static SDValue IsCMPZCSINC(SDNode *Cmp, ARMCC::CondCodes &CC) { | |||
14694 | if (Cmp->getOpcode() != ARMISD::CMPZ || !isNullConstant(Cmp->getOperand(1))) | |||
14695 | return SDValue(); | |||
14696 | SDValue CSInc = Cmp->getOperand(0); | |||
14697 | ||||
14698 | // Ignore any `And 1` nodes that may not yet have been removed. We are | |||
14699 | // looking for a value that produces 1/0, so these have no effect on the | |||
14700 | // code. | |||
14701 | while (CSInc.getOpcode() == ISD::AND && | |||
14702 | isa<ConstantSDNode>(CSInc.getOperand(1)) && | |||
14703 | CSInc.getConstantOperandVal(1) == 1 && CSInc->hasOneUse()) | |||
14704 | CSInc = CSInc.getOperand(0); | |||
14705 | ||||
14706 | if (CSInc.getOpcode() == ARMISD::CSINC && | |||
14707 | isNullConstant(CSInc.getOperand(0)) && | |||
14708 | isNullConstant(CSInc.getOperand(1)) && CSInc->hasOneUse()) { | |||
14709 | CC = (ARMCC::CondCodes)CSInc.getConstantOperandVal(2); | |||
14710 | return CSInc.getOperand(3); | |||
14711 | } | |||
14712 | if (CSInc.getOpcode() == ARMISD::CMOV && isOneConstant(CSInc.getOperand(0)) && | |||
14713 | isNullConstant(CSInc.getOperand(1)) && CSInc->hasOneUse()) { | |||
14714 | CC = (ARMCC::CondCodes)CSInc.getConstantOperandVal(2); | |||
14715 | return CSInc.getOperand(4); | |||
14716 | } | |||
14717 | if (CSInc.getOpcode() == ARMISD::CMOV && isOneConstant(CSInc.getOperand(1)) && | |||
14718 | isNullConstant(CSInc.getOperand(0)) && CSInc->hasOneUse()) { | |||
14719 | CC = ARMCC::getOppositeCondition( | |||
14720 | (ARMCC::CondCodes)CSInc.getConstantOperandVal(2)); | |||
14721 | return CSInc.getOperand(4); | |||
14722 | } | |||
14723 | return SDValue(); | |||
14724 | } | |||
14725 | ||||
14726 | static SDValue PerformCMPZCombine(SDNode *N, SelectionDAG &DAG) { | |||
14727 | // Given CMPZ(CSINC(C, 0, 0, EQ), 0), we can just use C directly. As in | |||
14728 | // t92: glue = ARMISD::CMPZ t74, 0 | |||
14729 | // t93: i32 = ARMISD::CSINC 0, 0, 1, t92 | |||
14730 | // t96: glue = ARMISD::CMPZ t93, 0 | |||
14731 | // t114: i32 = ARMISD::CSINV 0, 0, 0, t96 | |||
14732 | ARMCC::CondCodes Cond; | |||
14733 | if (SDValue C = IsCMPZCSINC(N, Cond)) | |||
14734 | if (Cond == ARMCC::EQ) | |||
14735 | return C; | |||
14736 | return SDValue(); | |||
14737 | } | |||
14738 | ||||
14739 | static SDValue PerformCSETCombine(SDNode *N, SelectionDAG &DAG) { | |||
14740 | // Fold away an unnecessary CMPZ/CSINC | |||
14741 | // CSXYZ A, B, C1 (CMPZ (CSINC 0, 0, C2, D), 0) -> | |||
14742 | // if C1==EQ -> CSXYZ A, B, C2, D | |||
14743 | // if C1==NE -> CSXYZ A, B, NOT(C2), D | |||
14744 | ARMCC::CondCodes Cond; | |||
14745 | if (SDValue C = IsCMPZCSINC(N->getOperand(3).getNode(), Cond)) { | |||
14746 | if (N->getConstantOperandVal(2) == ARMCC::EQ) | |||
14747 | return DAG.getNode(N->getOpcode(), SDLoc(N), MVT::i32, N->getOperand(0), | |||
14748 | N->getOperand(1), | |||
14749 | DAG.getConstant(Cond, SDLoc(N), MVT::i32), C); | |||
14750 | if (N->getConstantOperandVal(2) == ARMCC::NE) | |||
14751 | return DAG.getNode( | |||
14752 | N->getOpcode(), SDLoc(N), MVT::i32, N->getOperand(0), | |||
14753 | N->getOperand(1), | |||
14754 | DAG.getConstant(ARMCC::getOppositeCondition(Cond), SDLoc(N), MVT::i32), C); | |||
14755 | } | |||
14756 | return SDValue(); | |||
14757 | } | |||
14758 | ||||
14759 | /// PerformVMOVRRDCombine - Target-specific dag combine xforms for | |||
14760 | /// ARMISD::VMOVRRD. | |||
14761 | static SDValue PerformVMOVRRDCombine(SDNode *N, | |||
14762 | TargetLowering::DAGCombinerInfo &DCI, | |||
14763 | const ARMSubtarget *Subtarget) { | |||
14764 | // vmovrrd(vmovdrr x, y) -> x,y | |||
14765 | SDValue InDouble = N->getOperand(0); | |||
14766 | if (InDouble.getOpcode() == ARMISD::VMOVDRR && Subtarget->hasFP64()) | |||
14767 | return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1)); | |||
14768 | ||||
14769 | // vmovrrd(load f64) -> (load i32), (load i32) | |||
14770 | SDNode *InNode = InDouble.getNode(); | |||
14771 | if (ISD::isNormalLoad(InNode) && InNode->hasOneUse() && | |||
14772 | InNode->getValueType(0) == MVT::f64 && | |||
14773 | InNode->getOperand(1).getOpcode() == ISD::FrameIndex && | |||
14774 | !cast<LoadSDNode>(InNode)->isVolatile()) { | |||
14775 | // TODO: Should this be done for non-FrameIndex operands? | |||
14776 | LoadSDNode *LD = cast<LoadSDNode>(InNode); | |||
14777 | ||||
14778 | SelectionDAG &DAG = DCI.DAG; | |||
14779 | SDLoc DL(LD); | |||
14780 | SDValue BasePtr = LD->getBasePtr(); | |||
14781 | SDValue NewLD1 = | |||
14782 | DAG.getLoad(MVT::i32, DL, LD->getChain(), BasePtr, LD->getPointerInfo(), | |||
14783 | LD->getAlignment(), LD->getMemOperand()->getFlags()); | |||
14784 | ||||
14785 | SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr, | |||
14786 | DAG.getConstant(4, DL, MVT::i32)); | |||
14787 | ||||
14788 | SDValue NewLD2 = DAG.getLoad(MVT::i32, DL, LD->getChain(), OffsetPtr, | |||
14789 | LD->getPointerInfo().getWithOffset(4), | |||
14790 | std::min(4U, LD->getAlignment()), | |||
14791 | LD->getMemOperand()->getFlags()); | |||
14792 | ||||
14793 | DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLD2.getValue(1)); | |||
14794 | if (DCI.DAG.getDataLayout().isBigEndian()) | |||
14795 | std::swap(NewLD1, NewLD2); | |||
14796 | SDValue Result = DCI.CombineTo(N, NewLD1, NewLD2); | |||
14797 | return Result; | |||
14798 | } | |||
14799 | ||||
14800 | // VMOVRRD(extract(..(build_vector(a, b, c, d)))) -> a,b or c,d | |||
14801 | // VMOVRRD(extract(insert_vector(insert_vector(.., a, l1), b, l2))) -> a,b | |||
14802 | if (InDouble.getOpcode() == ISD::EXTRACT_VECTOR_ELT && | |||
14803 | isa<ConstantSDNode>(InDouble.getOperand(1))) { | |||
14804 | SDValue BV = InDouble.getOperand(0); | |||
14805 | // Look up through any nop bitcasts and vector_reg_casts. Bitcasts may | |||
14806 | // change lane order under big endian. | |||
14807 | bool BVSwap = BV.getOpcode() == ISD::BITCAST; | |||
14808 | while ( | |||
14809 | (BV.getOpcode() == ISD::BITCAST || | |||
14810 | BV.getOpcode() == ARMISD::VECTOR_REG_CAST) && | |||
14811 | (BV.getValueType() == MVT::v2f64 || BV.getValueType() == MVT::v2i64)) { | |||
14812 | BVSwap = BV.getOpcode() == ISD::BITCAST; | |||
14813 | BV = BV.getOperand(0); | |||
14814 | } | |||
14815 | if (BV.getValueType() != MVT::v4i32) | |||
14816 | return SDValue(); | |||
14817 | ||||
14818 | // Handle buildvectors, pulling out the correct lane depending on | |||
14819 | // endianness. | |||
14820 | unsigned Offset = InDouble.getConstantOperandVal(1) == 1 ? 2 : 0; | |||
14821 | if (BV.getOpcode() == ISD::BUILD_VECTOR) { | |||
14822 | SDValue Op0 = BV.getOperand(Offset); | |||
14823 | SDValue Op1 = BV.getOperand(Offset + 1); | |||
14824 | if (!Subtarget->isLittle() && BVSwap) | |||
14825 | std::swap(Op0, Op1); | |||
14826 | ||||
14827 | return DCI.DAG.getMergeValues({Op0, Op1}, SDLoc(N)); | |||
14828 | } | |||
14829 | ||||
14830 | // A chain of insert_vectors, grabbing the correct value of the chain of | |||
14831 | // inserts. | |||
14832 | SDValue Op0, Op1; | |||
14833 | while (BV.getOpcode() == ISD::INSERT_VECTOR_ELT) { | |||
14834 | if (isa<ConstantSDNode>(BV.getOperand(2))) { | |||
14835 | if (BV.getConstantOperandVal(2) == Offset) | |||
14836 | Op0 = BV.getOperand(1); | |||
14837 | if (BV.getConstantOperandVal(2) == Offset + 1) | |||
14838 | Op1 = BV.getOperand(1); | |||
14839 | } | |||
14840 | BV = BV.getOperand(0); | |||
14841 | } | |||
14842 | if (!Subtarget->isLittle() && BVSwap) | |||
14843 | std::swap(Op0, Op1); | |||
14844 | if (Op0 && Op1) | |||
14845 | return DCI.DAG.getMergeValues({Op0, Op1}, SDLoc(N)); | |||
14846 | } | |||
14847 | ||||
14848 | return SDValue(); | |||
14849 | } | |||
14850 | ||||
14851 | /// PerformVMOVDRRCombine - Target-specific dag combine xforms for | |||
14852 | /// ARMISD::VMOVDRR. This is also used for BUILD_VECTORs with 2 operands. | |||
14853 | static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) { | |||
14854 | // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X) | |||
14855 | SDValue Op0 = N->getOperand(0); | |||
14856 | SDValue Op1 = N->getOperand(1); | |||
14857 | if (Op0.getOpcode() == ISD::BITCAST) | |||
14858 | Op0 = Op0.getOperand(0); | |||
14859 | if (Op1.getOpcode() == ISD::BITCAST) | |||
14860 | Op1 = Op1.getOperand(0); | |||
14861 | if (Op0.getOpcode() == ARMISD::VMOVRRD && | |||
14862 | Op0.getNode() == Op1.getNode() && | |||
14863 | Op0.getResNo() == 0 && Op1.getResNo() == 1) | |||
14864 | return DAG.getNode(ISD::BITCAST, SDLoc(N), | |||
14865 | N->getValueType(0), Op0.getOperand(0)); | |||
14866 | return SDValue(); | |||
14867 | } | |||
14868 | ||||
14869 | static SDValue PerformVMOVhrCombine(SDNode *N, | |||
14870 | TargetLowering::DAGCombinerInfo &DCI) { | |||
14871 | SDValue Op0 = N->getOperand(0); | |||
14872 | ||||
14873 | // VMOVhr (VMOVrh (X)) -> X | |||
14874 | if (Op0->getOpcode() == ARMISD::VMOVrh) | |||
14875 | return Op0->getOperand(0); | |||
14876 | ||||
14877 | // FullFP16: half values are passed in S-registers, and we don't | |||
14878 | // need any of the bitcasts and moves: | |||
14879 | // | |||
14880 | // t2: f32,ch = CopyFromReg t0, Register:f32 %0 | |||
14881 | // t5: i32 = bitcast t2 | |||
14882 | // t18: f16 = ARMISD::VMOVhr t5 | |||
14883 | if (Op0->getOpcode() == ISD::BITCAST) { | |||
14884 | SDValue Copy = Op0->getOperand(0); | |||
14885 | if (Copy.getValueType() == MVT::f32 && | |||
14886 | Copy->getOpcode() == ISD::CopyFromReg) { | |||
14887 | SDValue Ops[] = {Copy->getOperand(0), Copy->getOperand(1)}; | |||
14888 | SDValue NewCopy = | |||
14889 | DCI.DAG.getNode(ISD::CopyFromReg, SDLoc(N), N->getValueType(0), Ops); | |||
14890 | return NewCopy; | |||
14891 | } | |||
14892 | } | |||
14893 | ||||
14894 | // fold (VMOVhr (load x)) -> (load (f16*)x) | |||
14895 | if (LoadSDNode *LN0 = dyn_cast<LoadSDNode>(Op0)) { | |||
14896 | if (LN0->hasOneUse() && LN0->isUnindexed() && | |||
14897 | LN0->getMemoryVT() == MVT::i16) { | |||
14898 | SDValue Load = | |||
14899 | DCI.DAG.getLoad(N->getValueType(0), SDLoc(N), LN0->getChain(), | |||
14900 | LN0->getBasePtr(), LN0->getMemOperand()); | |||
14901 | DCI.DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Load.getValue(0)); | |||
14902 | DCI.DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), Load.getValue(1)); | |||
14903 | return Load; | |||
14904 | } | |||
14905 | } | |||
14906 | ||||
14907 | // Only the bottom 16 bits of the source register are used. | |||
14908 | APInt DemandedMask = APInt::getLowBitsSet(32, 16); | |||
14909 | const TargetLowering &TLI = DCI.DAG.getTargetLoweringInfo(); | |||
14910 | if (TLI.SimplifyDemandedBits(Op0, DemandedMask, DCI)) | |||
14911 | return SDValue(N, 0); | |||
14912 | ||||
14913 | return SDValue(); | |||
14914 | } | |||
14915 | ||||
14916 | static SDValue PerformVMOVrhCombine(SDNode *N, SelectionDAG &DAG) { | |||
14917 | SDValue N0 = N->getOperand(0); | |||
14918 | EVT VT = N->getValueType(0); | |||
14919 | ||||
14920 | // fold (VMOVrh (fpconst x)) -> const x | |||
14921 | if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N0)) { | |||
14922 | APFloat V = C->getValueAPF(); | |||
14923 | return DAG.getConstant(V.bitcastToAPInt().getZExtValue(), SDLoc(N), VT); | |||
14924 | } | |||
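// For example (illustrative): (VMOVrh (fpconst f16 1.0)) becomes the i32
// constant 0x3C00, the IEEE-754 half-precision bit pattern of 1.0.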
14925 | ||||
14926 | // fold (VMOVrh (load x)) -> (zextload (i16*)x) | |||
14927 | if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse()) { | |||
14928 | LoadSDNode *LN0 = cast<LoadSDNode>(N0); | |||
14929 | ||||
14930 | SDValue Load = | |||
14931 | DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N), VT, LN0->getChain(), | |||
14932 | LN0->getBasePtr(), MVT::i16, LN0->getMemOperand()); | |||
14933 | DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Load.getValue(0)); | |||
14934 | DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Load.getValue(1)); | |||
14935 | return Load; | |||
14936 | } | |||
14937 | ||||
14938 | // Fold VMOVrh(extract(x, n)) -> vgetlaneu(x, n) | |||
14939 | if (N0->getOpcode() == ISD::EXTRACT_VECTOR_ELT && | |||
14940 | isa<ConstantSDNode>(N0->getOperand(1))) | |||
14941 | return DAG.getNode(ARMISD::VGETLANEu, SDLoc(N), VT, N0->getOperand(0), | |||
14942 | N0->getOperand(1)); | |||
14943 | ||||
14944 | return SDValue(); | |||
14945 | } | |||
14946 | ||||
14947 | /// hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node | |||
14948 | /// are normal, non-volatile loads. If so, it is profitable to bitcast an | |||
14949 | /// i64 vector to have f64 elements, since the value can then be loaded | |||
14950 | /// directly into a VFP register. | |||
14951 | static bool hasNormalLoadOperand(SDNode *N) { | |||
14952 | unsigned NumElts = N->getValueType(0).getVectorNumElements(); | |||
14953 | for (unsigned i = 0; i < NumElts; ++i) { | |||
14954 | SDNode *Elt = N->getOperand(i).getNode(); | |||
14955 | if (ISD::isNormalLoad(Elt) && !cast<LoadSDNode>(Elt)->isVolatile()) | |||
14956 | return true; | |||
14957 | } | |||
14958 | return false; | |||
14959 | } | |||
14960 | ||||
14961 | /// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for | |||
14962 | /// ISD::BUILD_VECTOR. | |||
14963 | static SDValue PerformBUILD_VECTORCombine(SDNode *N, | |||
14964 | TargetLowering::DAGCombinerInfo &DCI, | |||
14965 | const ARMSubtarget *Subtarget) { | |||
14966 | // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X): | |||
14967 | // VMOVRRD is introduced when legalizing i64 types. It forces the i64 value | |||
14968 | // into a pair of GPRs, which is fine when the value is used as a scalar, | |||
14969 | // but if the i64 value is converted to a vector, we need to undo the VMOVRRD. | |||
14970 | SelectionDAG &DAG = DCI.DAG; | |||
14971 | if (N->getNumOperands() == 2) | |||
14972 | if (SDValue RV = PerformVMOVDRRCombine(N, DAG)) | |||
14973 | return RV; | |||
14974 | ||||
14975 | // Load i64 elements as f64 values so that type legalization does not split | |||
14976 | // them up into i32 values. | |||
14977 | EVT VT = N->getValueType(0); | |||
14978 | if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N)) | |||
14979 | return SDValue(); | |||
14980 | SDLoc dl(N); | |||
14981 | SmallVector<SDValue, 8> Ops; | |||
14982 | unsigned NumElts = VT.getVectorNumElements(); | |||
14983 | for (unsigned i = 0; i < NumElts; ++i) { | |||
14984 | SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(i)); | |||
14985 | Ops.push_back(V); | |||
14986 | // Make the DAGCombiner fold the bitcast. | |||
14987 | DCI.AddToWorklist(V.getNode()); | |||
14988 | } | |||
14989 | EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, NumElts); | |||
14990 | SDValue BV = DAG.getBuildVector(FloatVT, dl, Ops); | |||
14991 | return DAG.getNode(ISD::BITCAST, dl, VT, BV); | |||
14992 | } | |||
14993 | ||||
14994 | /// Target-specific dag combine xforms for ARMISD::BUILD_VECTOR. | |||
14995 | static SDValue | |||
14996 | PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { | |||
14997 | // ARMISD::BUILD_VECTOR is introduced when legalizing ISD::BUILD_VECTOR. | |||
14998 | // At that time, we may have inserted bitcasts from integer to float. | |||
14999 | // If these bitcasts have survived DAGCombine, change the lowering of this | |||
15000 | // BUILD_VECTOR into something more vector friendly, i.e., one that does not | |||
15001 | // force the use of floating point types. | |||
15002 | ||||
15003 | // Make sure we can change the type of the vector. | |||
15004 | // This is possible iff: | |||
15005 | // 1. The vector is only used in a bitcast to an integer type. I.e., | |||
15006 | // 1.1. Vector is used only once. | |||
15007 | // 1.2. Use is a bit convert to an integer type. | |||
15008 | // 2. The size of its operands are 32-bits (64-bits are not legal). | |||
15009 | EVT VT = N->getValueType(0); | |||
15010 | EVT EltVT = VT.getVectorElementType(); | |||
15011 | ||||
15012 | // Check 1.1. and 2. | |||
15013 | if (EltVT.getSizeInBits() != 32 || !N->hasOneUse()) | |||
15014 | return SDValue(); | |||
15015 | ||||
15016 | // By construction, the input type must be float. | |||
15017 | assert(EltVT == MVT::f32 && "Unexpected type!"); | |||
15018 | ||||
15019 | // Check 1.2. | |||
15020 | SDNode *Use = *N->use_begin(); | |||
15021 | if (Use->getOpcode() != ISD::BITCAST || | |||
15022 | Use->getValueType(0).isFloatingPoint()) | |||
15023 | return SDValue(); | |||
15024 | ||||
15025 | // Check profitability. | |||
15026 | // Model is, if more than half of the relevant operands are bitcast from | |||
15027 | // i32, turn the build_vector into a sequence of insert_vector_elt. | |||
15028 | // Relevant operands are everything that is not statically | |||
15029 | // (i.e., at compile time) bitcasted. | |||
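// Worked example (illustrative): in a 4-element build_vector with three
// operands bitcast from i32 and one constant operand, NumOfRelevantElts is 3
// and NumOfBitCastedElts is 3 > 3 / 2, so the rewrite below is considered
// profitable.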
15030 | unsigned NumOfBitCastedElts = 0; | |||
15031 | unsigned NumElts = VT.getVectorNumElements(); | |||
15032 | unsigned NumOfRelevantElts = NumElts; | |||
15033 | for (unsigned Idx = 0; Idx < NumElts; ++Idx) { | |||
15034 | SDValue Elt = N->getOperand(Idx); | |||
15035 | if (Elt->getOpcode() == ISD::BITCAST) { | |||
15036 | // Assume only bitcasts to i32 will go away. | |||
15037 | if (Elt->getOperand(0).getValueType() == MVT::i32) | |||
15038 | ++NumOfBitCastedElts; | |||
15039 | } else if (Elt.isUndef() || isa<ConstantSDNode>(Elt)) | |||
15040 | // Constants are statically cast, thus do not count them as | |||
15041 | // relevant operands. | |||
15042 | --NumOfRelevantElts; | |||
15043 | } | |||
15044 | ||||
15045 | // Check if more than half of the elements require a non-free bitcast. | |||
15046 | if (NumOfBitCastedElts <= NumOfRelevantElts / 2) | |||
15047 | return SDValue(); | |||
15048 | ||||
15049 | SelectionDAG &DAG = DCI.DAG; | |||
15050 | // Create the new vector type. | |||
15051 | EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts); | |||
15052 | // Check if the type is legal. | |||
15053 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | |||
15054 | if (!TLI.isTypeLegal(VecVT)) | |||
15055 | return SDValue(); | |||
15056 | ||||
15057 | // Combine: | |||
15058 | // ARMISD::BUILD_VECTOR E1, E2, ..., EN. | |||
15059 | // => BITCAST INSERT_VECTOR_ELT | |||
15060 | // (INSERT_VECTOR_ELT (...), (BITCAST EN-1), N-1), | |||
15061 | // (BITCAST EN), N. | |||
15062 | SDValue Vec = DAG.getUNDEF(VecVT); | |||
15063 | SDLoc dl(N); | |||
15064 | for (unsigned Idx = 0; Idx < NumElts; ++Idx) { | |||
15065 | SDValue V = N->getOperand(Idx); | |||
15066 | if (V.isUndef()) | |||
15067 | continue; | |||
15068 | if (V.getOpcode() == ISD::BITCAST && | |||
15069 | V->getOperand(0).getValueType() == MVT::i32) | |||
15070 | // Fold obvious case. | |||
15071 | V = V.getOperand(0); | |||
15072 | else { | |||
15073 | V = DAG.getNode(ISD::BITCAST, SDLoc(V), MVT::i32, V); | |||
15074 | // Make the DAGCombiner fold the bitcasts. | |||
15075 | DCI.AddToWorklist(V.getNode()); | |||
15076 | } | |||
15077 | SDValue LaneIdx = DAG.getConstant(Idx, dl, MVT::i32); | |||
15078 | Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecVT, Vec, V, LaneIdx); | |||
15079 | } | |||
15080 | Vec = DAG.getNode(ISD::BITCAST, dl, VT, Vec); | |||
15081 | // Make the DAGCombiner fold the bitcasts. | |||
15082 | DCI.AddToWorklist(Vec.getNode()); | |||
15083 | return Vec; | |||
15084 | } | |||
15085 | ||||
15086 | static SDValue | |||
15087 | PerformPREDICATE_CASTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { | |||
15088 | EVT VT = N->getValueType(0); | |||
15089 | SDValue Op = N->getOperand(0); | |||
15090 | SDLoc dl(N); | |||
15091 | ||||
15092 | // PREDICATE_CAST(PREDICATE_CAST(x)) == PREDICATE_CAST(x) | |||
15093 | if (Op->getOpcode() == ARMISD::PREDICATE_CAST) { | |||
15094 | // If the value types are the same, we can remove the cast entirely. | |||
15095 | if (Op->getOperand(0).getValueType() == VT) | |||
15096 | return Op->getOperand(0); | |||
15097 | return DCI.DAG.getNode(ARMISD::PREDICATE_CAST, dl, VT, Op->getOperand(0)); | |||
15098 | } | |||
15099 | ||||
15100 | // Turn pred_cast(xor x, -1) into xor(pred_cast x, -1), in order to produce | |||
15101 | // more VPNOT which might get folded as else predicates. | |||
15102 | if (Op.getValueType() == MVT::i32 && isBitwiseNot(Op)) { | |||
15103 | SDValue X = | |||
15104 | DCI.DAG.getNode(ARMISD::PREDICATE_CAST, dl, VT, Op->getOperand(0)); | |||
15105 | SDValue C = DCI.DAG.getNode(ARMISD::PREDICATE_CAST, dl, VT, | |||
15106 | DCI.DAG.getConstant(65535, dl, MVT::i32)); | |||
15107 | return DCI.DAG.getNode(ISD::XOR, dl, VT, X, C); | |||
15108 | } | |||
15109 | ||||
15110 | // Only the bottom 16 bits of the source register are used. | |||
15111 | if (Op.getValueType() == MVT::i32) { | |||
15112 | APInt DemandedMask = APInt::getLowBitsSet(32, 16); | |||
15113 | const TargetLowering &TLI = DCI.DAG.getTargetLoweringInfo(); | |||
15114 | if (TLI.SimplifyDemandedBits(Op, DemandedMask, DCI)) | |||
15115 | return SDValue(N, 0); | |||
15116 | } | |||
15117 | return SDValue(); | |||
15118 | } | |||
15119 | ||||
15120 | static SDValue PerformVECTOR_REG_CASTCombine(SDNode *N, SelectionDAG &DAG, | |||
15121 | const ARMSubtarget *ST) { | |||
15122 | EVT VT = N->getValueType(0); | |||
15123 | SDValue Op = N->getOperand(0); | |||
15124 | SDLoc dl(N); | |||
15125 | ||||
15126 | // Under little endian, a VECTOR_REG_CAST is equivalent to a BITCAST | |||
15127 | if (ST->isLittle()) | |||
15128 | return DAG.getNode(ISD::BITCAST, dl, VT, Op); | |||
15129 | ||||
15130 | // VECTOR_REG_CAST undef -> undef | |||
15131 | if (Op.isUndef()) | |||
15132 | return DAG.getUNDEF(VT); | |||
15133 | ||||
15134 | // VECTOR_REG_CAST(VECTOR_REG_CAST(x)) == VECTOR_REG_CAST(x) | |||
15135 | if (Op->getOpcode() == ARMISD::VECTOR_REG_CAST) { | |||
15136 | // If the value types are the same, we can remove the cast entirely. | |||
15137 | if (Op->getOperand(0).getValueType() == VT) | |||
15138 | return Op->getOperand(0); | |||
15139 | return DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, VT, Op->getOperand(0)); | |||
15140 | } | |||
15141 | ||||
15142 | return SDValue(); | |||
15143 | } | |||
15144 | ||||
15145 | static SDValue PerformVCMPCombine(SDNode *N, SelectionDAG &DAG, | |||
15146 | const ARMSubtarget *Subtarget) { | |||
15147 | if (!Subtarget->hasMVEIntegerOps()) | |||
15148 | return SDValue(); | |||
15149 | ||||
15150 | EVT VT = N->getValueType(0); | |||
15151 | SDValue Op0 = N->getOperand(0); | |||
15152 | SDValue Op1 = N->getOperand(1); | |||
15153 | ARMCC::CondCodes Cond = | |||
15154 | (ARMCC::CondCodes)cast<ConstantSDNode>(N->getOperand(2))->getZExtValue(); | |||
15155 | SDLoc dl(N); | |||
15156 | ||||
15157 | // vcmp X, 0, cc -> vcmpz X, cc | |||
15158 | if (isZeroVector(Op1)) | |||
15159 | return DAG.getNode(ARMISD::VCMPZ, dl, VT, Op0, N->getOperand(2)); | |||
15160 | ||||
15161 | unsigned SwappedCond = getSwappedCondition(Cond); | |||
15162 | if (isValidMVECond(SwappedCond, VT.isFloatingPoint())) { | |||
15163 | // vcmp 0, X, cc -> vcmpz X, reversed(cc) | |||
15164 | if (isZeroVector(Op0)) | |||
15165 | return DAG.getNode(ARMISD::VCMPZ, dl, VT, Op1, | |||
15166 | DAG.getConstant(SwappedCond, dl, MVT::i32)); | |||
15167 | // vcmp vdup(Y), X, cc -> vcmp X, vdup(Y), reversed(cc) | |||
15168 | if (Op0->getOpcode() == ARMISD::VDUP && Op1->getOpcode() != ARMISD::VDUP) | |||
15169 | return DAG.getNode(ARMISD::VCMP, dl, VT, Op1, Op0, | |||
15170 | DAG.getConstant(SwappedCond, dl, MVT::i32)); | |||
15171 | } | |||
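// Illustrative example (not from the original source):
// (vcmp (vdup 0), X, gt) -> (vcmpz X, lt)
// since "0 > X" is the same test as "X < 0".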
15172 | ||||
15173 | return SDValue(); | |||
15174 | } | |||
15175 | ||||
15176 | /// PerformInsertEltCombine - Target-specific dag combine xforms for | |||
15177 | /// ISD::INSERT_VECTOR_ELT. | |||
15178 | static SDValue PerformInsertEltCombine(SDNode *N, | |||
15179 | TargetLowering::DAGCombinerInfo &DCI) { | |||
15180 | // Bitcast an i64 load inserted into a vector to f64. | |||
15181 | // Otherwise, the i64 value will be legalized to a pair of i32 values. | |||
15182 | EVT VT = N->getValueType(0); | |||
15183 | SDNode *Elt = N->getOperand(1).getNode(); | |||
15184 | if (VT.getVectorElementType() != MVT::i64 || | |||
15185 | !ISD::isNormalLoad(Elt) || cast<LoadSDNode>(Elt)->isVolatile()) | |||
15186 | return SDValue(); | |||
15187 | ||||
15188 | SelectionDAG &DAG = DCI.DAG; | |||
15189 | SDLoc dl(N); | |||
15190 | EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, | |||
15191 | VT.getVectorNumElements()); | |||
15192 | SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, N->getOperand(0)); | |||
15193 | SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(1)); | |||
15194 | // Make the DAGCombiner fold the bitcasts. | |||
15195 | DCI.AddToWorklist(Vec.getNode()); | |||
15196 | DCI.AddToWorklist(V.getNode()); | |||
15197 | SDValue InsElt = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, FloatVT, | |||
15198 | Vec, V, N->getOperand(2)); | |||
15199 | return DAG.getNode(ISD::BITCAST, dl, VT, InsElt); | |||
15200 | } | |||
15201 | ||||
15202 | // Convert a pair of extracts from the same base vector to a VMOVRRD. Either | |||
15203 | // directly or bitcast to an integer if the original is a float vector. | |||
15204 | // extract(x, n); extract(x, n+1) -> VMOVRRD(extract v2f64 x, n/2) | |||
15205 | // bitcast(extract(x, n)); bitcast(extract(x, n+1)) -> VMOVRRD(extract x, n/2) | |||
15206 | static SDValue | |||
15207 | PerformExtractEltToVMOVRRD(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { | |||
15208 | EVT VT = N->getValueType(0); | |||
15209 | SDLoc dl(N); | |||
15210 | ||||
15211 | if (!DCI.isAfterLegalizeDAG() || VT != MVT::i32 || | |||
15212 | !DCI.DAG.getTargetLoweringInfo().isTypeLegal(MVT::f64)) | |||
15213 | return SDValue(); | |||
15214 | ||||
15215 | SDValue Ext = SDValue(N, 0); | |||
15216 | if (Ext.getOpcode() == ISD::BITCAST && | |||
15217 | Ext.getOperand(0).getValueType() == MVT::f32) | |||
15218 | Ext = Ext.getOperand(0); | |||
15219 | if (Ext.getOpcode() != ISD::EXTRACT_VECTOR_ELT || | |||
15220 | !isa<ConstantSDNode>(Ext.getOperand(1)) || | |||
15221 | Ext.getConstantOperandVal(1) % 2 != 0) | |||
15222 | return SDValue(); | |||
15223 | if (Ext->use_size() == 1 && | |||
15224 | (Ext->use_begin()->getOpcode() == ISD::SINT_TO_FP || | |||
15225 | Ext->use_begin()->getOpcode() == ISD::UINT_TO_FP)) | |||
15226 | return SDValue(); | |||
15227 | ||||
15228 | SDValue Op0 = Ext.getOperand(0); | |||
15229 | EVT VecVT = Op0.getValueType(); | |||
15230 | unsigned ResNo = Op0.getResNo(); | |||
15231 | unsigned Lane = Ext.getConstantOperandVal(1); | |||
15232 | if (VecVT.getVectorNumElements() != 4) | |||
15233 | return SDValue(); | |||
15234 | ||||
15235 | // Find another extract, of Lane + 1 | |||
15236 | auto OtherIt = find_if(Op0->uses(), [&](SDNode *V) { | |||
15237 | return V->getOpcode() == ISD::EXTRACT_VECTOR_ELT && | |||
15238 | isa<ConstantSDNode>(V->getOperand(1)) && | |||
15239 | V->getConstantOperandVal(1) == Lane + 1 && | |||
15240 | V->getOperand(0).getResNo() == ResNo; | |||
15241 | }); | |||
15242 | if (OtherIt == Op0->uses().end()) | |||
15243 | return SDValue(); | |||
15244 | ||||
15245 | // For float extracts, we need to be converting to a i32 for both vector | |||
15246 | // lanes. | |||
15247 | SDValue OtherExt(*OtherIt, 0); | |||
15248 | if (OtherExt.getValueType() != MVT::i32) { | |||
15249 | if (OtherExt->use_size() != 1 || | |||
15250 | OtherExt->use_begin()->getOpcode() != ISD::BITCAST || | |||
15251 | OtherExt->use_begin()->getValueType(0) != MVT::i32) | |||
15252 | return SDValue(); | |||
15253 | OtherExt = SDValue(*OtherExt->use_begin(), 0); | |||
15254 | } | |||
15255 | ||||
15256 | // Convert the type to a f64 and extract with a VMOVRRD. | |||
15257 | SDValue F64 = DCI.DAG.getNode( | |||
15258 | ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, | |||
15259 | DCI.DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, MVT::v2f64, Op0), | |||
15260 | DCI.DAG.getConstant(Ext.getConstantOperandVal(1) / 2, dl, MVT::i32)); | |||
15261 | SDValue VMOVRRD = | |||
15262 | DCI.DAG.getNode(ARMISD::VMOVRRD, dl, {MVT::i32, MVT::i32}, F64); | |||
15263 | ||||
15264 | DCI.CombineTo(OtherExt.getNode(), SDValue(VMOVRRD.getNode(), 1)); | |||
15265 | return VMOVRRD; | |||
15266 | } | |||
15267 | ||||
15268 | static SDValue PerformExtractEltCombine(SDNode *N, | |||
15269 | TargetLowering::DAGCombinerInfo &DCI, | |||
15270 | const ARMSubtarget *ST) { | |||
15271 | SDValue Op0 = N->getOperand(0); | |||
15272 | EVT VT = N->getValueType(0); | |||
15273 | SDLoc dl(N); | |||
15274 | ||||
15275 | // extract (vdup x) -> x | |||
15276 | if (Op0->getOpcode() == ARMISD::VDUP) { | |||
15277 | SDValue X = Op0->getOperand(0); | |||
15278 | if (VT == MVT::f16 && X.getValueType() == MVT::i32) | |||
15279 | return DCI.DAG.getNode(ARMISD::VMOVhr, dl, VT, X); | |||
15280 | if (VT == MVT::i32 && X.getValueType() == MVT::f16) | |||
15281 | return DCI.DAG.getNode(ARMISD::VMOVrh, dl, VT, X); | |||
15282 | if (VT == MVT::f32 && X.getValueType() == MVT::i32) | |||
15283 | return DCI.DAG.getNode(ISD::BITCAST, dl, VT, X); | |||
15284 | ||||
15285 | while (X.getValueType() != VT && X->getOpcode() == ISD::BITCAST) | |||
15286 | X = X->getOperand(0); | |||
15287 | if (X.getValueType() == VT) | |||
15288 | return X; | |||
15289 | } | |||
15290 | ||||
15291 | // extract ARM_BUILD_VECTOR -> x | |||
15292 | if (Op0->getOpcode() == ARMISD::BUILD_VECTOR && | |||
15293 | isa<ConstantSDNode>(N->getOperand(1)) && | |||
15294 | N->getConstantOperandVal(1) < Op0.getNumOperands()) { | |||
15295 | return Op0.getOperand(N->getConstantOperandVal(1)); | |||
15296 | } | |||
15297 | ||||
15298 | // extract(bitcast(BUILD_VECTOR(VMOVDRR(a, b), ..))) -> a or b | |||
15299 | if (Op0.getValueType() == MVT::v4i32 && | |||
15300 | isa<ConstantSDNode>(N->getOperand(1)) && | |||
15301 | Op0.getOpcode() == ISD::BITCAST && | |||
15302 | Op0.getOperand(0).getOpcode() == ISD::BUILD_VECTOR && | |||
15303 | Op0.getOperand(0).getValueType() == MVT::v2f64) { | |||
15304 | SDValue BV = Op0.getOperand(0); | |||
15305 | unsigned Offset = N->getConstantOperandVal(1); | |||
15306 | SDValue MOV = BV.getOperand(Offset < 2 ? 0 : 1); | |||
15307 | if (MOV.getOpcode() == ARMISD::VMOVDRR) | |||
15308 | return MOV.getOperand(ST->isLittle() ? Offset % 2 : 1 - Offset % 2); | |||
15309 | } | |||
15310 | ||||
15311 | // extract x, n; extract x, n+1 -> VMOVRRD x | |||
15312 | if (SDValue R = PerformExtractEltToVMOVRRD(N, DCI)) | |||
15313 | return R; | |||
15314 | ||||
15315 | // extract (MVETrunc(x)) -> extract x | |||
15316 | if (Op0->getOpcode() == ARMISD::MVETRUNC) { | |||
15317 | unsigned Idx = N->getConstantOperandVal(1); | |||
15318 | unsigned Vec = | |||
15319 | Idx / Op0->getOperand(0).getValueType().getVectorNumElements(); | |||
15320 | unsigned SubIdx = | |||
15321 | Idx % Op0->getOperand(0).getValueType().getVectorNumElements(); | |||
15322 | return DCI.DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Op0.getOperand(Vec), | |||
15323 | DCI.DAG.getConstant(SubIdx, dl, MVT::i32)); | |||
15324 | } | |||
15325 | ||||
15326 | return SDValue(); | |||
15327 | } | |||
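| ||||
| // As a concrete instance of the MVETRUNC fold above: with | |||
| //   t: v8i16 = MVETRUNC v4i32:a, v4i32:b | |||
| // an extract of lane 5 computes Vec = 5 / 4 = 1 and SubIdx = 5 % 4 = 1, | |||
| // so it becomes extract_vector_elt b, 1 on the untruncated operand. | |||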
15328 | ||||
15329 | static SDValue PerformSignExtendInregCombine(SDNode *N, SelectionDAG &DAG) { | |||
15330 | SDValue Op = N->getOperand(0); | |||
15331 | EVT VT = N->getValueType(0); | |||
15332 | ||||
15333 | // sext_inreg(VGETLANEu) -> VGETLANEs | |||
15334 | if (Op.getOpcode() == ARMISD::VGETLANEu && | |||
15335 | cast<VTSDNode>(N->getOperand(1))->getVT() == | |||
15336 | Op.getOperand(0).getValueType().getScalarType()) | |||
15337 | return DAG.getNode(ARMISD::VGETLANEs, SDLoc(N), VT, Op.getOperand(0), | |||
15338 | Op.getOperand(1)); | |||
15339 | ||||
15340 | return SDValue(); | |||
15341 | } | |||
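| ||||
| // E.g. sext_inreg(VGETLANEu v8i16:x, lane), i16 re-reads the same 16-bit | |||
| // lane but wants the sign bits, so a single sign-extending VGETLANEs of x | |||
| // at the same lane replaces the zero-extend-then-sext_inreg pair. | |||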
15342 | ||||
15343 | // When lowering complex nodes that we recognize, like VQDMULH and MULH, we | |||
15344 | // can end up with shuffle(binop(shuffle, shuffle)), which can be simplified | |||
15345 | // to the binop as the shuffles cancel out. | |||
15346 | static SDValue FlattenVectorShuffle(ShuffleVectorSDNode *N, SelectionDAG &DAG) { | |||
15347 | EVT VT = N->getValueType(0); | |||
15348 | if (!N->getOperand(1).isUndef() || N->getOperand(0).getValueType() != VT) | |||
15349 | return SDValue(); | |||
15350 | SDValue Op = N->getOperand(0); | |||
15351 | ||||
15352 | // Looking for binary operators that will have been folded from | |||
15353 | // truncates/extends. | |||
15354 | switch (Op.getOpcode()) { | |||
15355 | case ARMISD::VQDMULH: | |||
15356 | case ISD::MULHS: | |||
15357 | case ISD::MULHU: | |||
15358 | case ISD::ABDS: | |||
15359 | case ISD::ABDU: | |||
15360 | case ISD::AVGFLOORS: | |||
15361 | case ISD::AVGFLOORU: | |||
15362 | case ISD::AVGCEILS: | |||
15363 | case ISD::AVGCEILU: | |||
15364 | break; | |||
15365 | default: | |||
15366 | return SDValue(); | |||
15367 | } | |||
15368 | ||||
15369 | ShuffleVectorSDNode *Op0 = dyn_cast<ShuffleVectorSDNode>(Op.getOperand(0)); | |||
15370 | ShuffleVectorSDNode *Op1 = dyn_cast<ShuffleVectorSDNode>(Op.getOperand(1)); | |||
15371 | if (!Op0 || !Op1 || !Op0->getOperand(1).isUndef() || | |||
15372 | !Op1->getOperand(1).isUndef() || Op0->getMask() != Op1->getMask() || | |||
15373 | Op0->getOperand(0).getValueType() != VT) | |||
15374 | return SDValue(); | |||
15375 | ||||
15376 | // Check the mask turns into an identity shuffle. | |||
15377 | ArrayRef<int> NMask = N->getMask(); | |||
15378 | ArrayRef<int> OpMask = Op0->getMask(); | |||
15379 | for (int i = 0, e = NMask.size(); i != e; i++) { | |||
15380 | if (NMask[i] > 0 && OpMask[NMask[i]] > 0 && OpMask[NMask[i]] != i) | |||
15381 | return SDValue(); | |||
15382 | } | |||
15383 | ||||
15384 | return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(), | |||
15385 | Op0->getOperand(0), Op1->getOperand(0)); | |||
15386 | } | |||
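| ||||
| // Worked example: if both inner shuffles apply the same mask M to a and b, | |||
| //   shuffle(VQDMULH(shuffle(a, M), shuffle(b, M)), N) | |||
| // folds to VQDMULH(a, b) whenever composing N with M gives the identity, | |||
| // i.e. OpMask[NMask[i]] == i for each lane the masks define (checked above). | |||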
15387 | ||||
15388 | static SDValue | |||
15389 | PerformInsertSubvectorCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { | |||
15390 | SDValue Vec = N->getOperand(0); | |||
15391 | SDValue SubVec = N->getOperand(1); | |||
15392 | uint64_t IdxVal = N->getConstantOperandVal(2); | |||
15393 | EVT VecVT = Vec.getValueType(); | |||
15394 | EVT SubVT = SubVec.getValueType(); | |||
15395 | ||||
15396 | // Only do this for legal fixed vector types. | |||
15397 | if (!VecVT.isFixedLengthVector() || | |||
15398 | !DCI.DAG.getTargetLoweringInfo().isTypeLegal(VecVT) || | |||
15399 | !DCI.DAG.getTargetLoweringInfo().isTypeLegal(SubVT)) | |||
15400 | return SDValue(); | |||
15401 | ||||
15402 | // Ignore widening patterns. | |||
15403 | if (IdxVal == 0 && Vec.isUndef()) | |||
15404 | return SDValue(); | |||
15405 | ||||
15406 | // Subvector must be half the width, and the insertion must be "aligned". | |||
15407 | unsigned NumSubElts = SubVT.getVectorNumElements(); | |||
15408 | if ((SubVT.getSizeInBits() * 2) != VecVT.getSizeInBits() || | |||
15409 | (IdxVal != 0 && IdxVal != NumSubElts)) | |||
15410 | return SDValue(); | |||
15411 | ||||
15412 | // Fold insert_subvector -> concat_vectors | |||
15413 | // insert_subvector(Vec,Sub,lo) -> concat_vectors(Sub,extract(Vec,hi)) | |||
15414 | // insert_subvector(Vec,Sub,hi) -> concat_vectors(extract(Vec,lo),Sub) | |||
15415 | SDLoc DL(N); | |||
15416 | SDValue Lo, Hi; | |||
15417 | if (IdxVal == 0) { | |||
15418 | Lo = SubVec; | |||
15419 | Hi = DCI.DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, Vec, | |||
15420 | DCI.DAG.getVectorIdxConstant(NumSubElts, DL)); | |||
15421 | } else { | |||
15422 | Lo = DCI.DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, Vec, | |||
15423 | DCI.DAG.getVectorIdxConstant(0, DL)); | |||
15424 | Hi = SubVec; | |||
15425 | } | |||
15426 | return DCI.DAG.getNode(ISD::CONCAT_VECTORS, DL, VecVT, Lo, Hi); | |||
15427 | } | |||
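| ||||
| // For instance, with a v8i16 Vec and a v4i16 SubVec: | |||
| //   insert_subvector(Vec, Sub, 0) -> concat(Sub, extract_subvector(Vec, 4)) | |||
| //   insert_subvector(Vec, Sub, 4) -> concat(extract_subvector(Vec, 0), Sub) | |||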
15428 | ||||
15429 | // shuffle(MVETrunc(x, y)) -> VMOVN(x, y) | |||
15430 | static SDValue PerformShuffleVMOVNCombine(ShuffleVectorSDNode *N, | |||
15431 | SelectionDAG &DAG) { | |||
15432 | SDValue Trunc = N->getOperand(0); | |||
15433 | EVT VT = Trunc.getValueType(); | |||
15434 | if (Trunc.getOpcode() != ARMISD::MVETRUNC || !N->getOperand(1).isUndef()) | |||
15435 | return SDValue(); | |||
15436 | ||||
15437 | SDLoc DL(Trunc); | |||
15438 | if (isVMOVNTruncMask(N->getMask(), VT, false)) | |||
15439 | return DAG.getNode( | |||
15440 | ARMISD::VMOVN, DL, VT, | |||
15441 | DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, VT, Trunc.getOperand(0)), | |||
15442 | DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, VT, Trunc.getOperand(1)), | |||
15443 | DAG.getConstant(1, DL, MVT::i32)); | |||
15444 | else if (isVMOVNTruncMask(N->getMask(), VT, true)) | |||
15445 | return DAG.getNode( | |||
15446 | ARMISD::VMOVN, DL, VT, | |||
15447 | DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, VT, Trunc.getOperand(1)), | |||
15448 | DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, VT, Trunc.getOperand(0)), | |||
15449 | DAG.getConstant(1, DL, MVT::i32)); | |||
15450 | return SDValue(); | |||
15451 | } | |||
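| ||||
| // E.g. for v8i16, a mask of <0,4,1,5,2,6,3,7> over MVETRUNC(x, y) | |||
| // interleaves the two truncated halves (x's lanes even, y's lanes odd), | |||
| // which is what a single ARMISD::VMOVN of the reg-cast operands produces; | |||
| // the reversed mask form <4,0,5,1,...> is handled by swapping the operands. | |||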
15452 | ||||
15453 | /// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for | |||
15454 | /// ISD::VECTOR_SHUFFLE. | |||
15455 | static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) { | |||
15456 | if (SDValue R = FlattenVectorShuffle(cast<ShuffleVectorSDNode>(N), DAG)) | |||
15457 | return R; | |||
15458 | if (SDValue R = PerformShuffleVMOVNCombine(cast<ShuffleVectorSDNode>(N), DAG)) | |||
15459 | return R; | |||
15460 | ||||
15461 | // The LLVM shufflevector instruction does not require the shuffle mask | |||
15462 | // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does | |||
15463 | // have that requirement. When translating to ISD::VECTOR_SHUFFLE, if the | |||
15464 | // operands do not match the mask length, they are extended by concatenating | |||
15465 | // them with undef vectors. That is probably the right thing for other | |||
15466 | // targets, but for NEON it is better to concatenate two double-register | |||
15467 | // size vector operands into a single quad-register size vector. Do that | |||
15468 | // transformation here: | |||
15469 | // shuffle(concat(v1, undef), concat(v2, undef)) -> | |||
15470 | // shuffle(concat(v1, v2), undef) | |||
15471 | SDValue Op0 = N->getOperand(0); | |||
15472 | SDValue Op1 = N->getOperand(1); | |||
15473 | if (Op0.getOpcode() != ISD::CONCAT_VECTORS || | |||
15474 | Op1.getOpcode() != ISD::CONCAT_VECTORS || | |||
15475 | Op0.getNumOperands() != 2 || | |||
15476 | Op1.getNumOperands() != 2) | |||
15477 | return SDValue(); | |||
15478 | SDValue Concat0Op1 = Op0.getOperand(1); | |||
15479 | SDValue Concat1Op1 = Op1.getOperand(1); | |||
15480 | if (!Concat0Op1.isUndef() || !Concat1Op1.isUndef()) | |||
15481 | return SDValue(); | |||
15482 | // Skip the transformation if any of the types are illegal. | |||
15483 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | |||
15484 | EVT VT = N->getValueType(0); | |||
15485 | if (!TLI.isTypeLegal(VT) || | |||
15486 | !TLI.isTypeLegal(Concat0Op1.getValueType()) || | |||
15487 | !TLI.isTypeLegal(Concat1Op1.getValueType())) | |||
15488 | return SDValue(); | |||
15489 | ||||
15490 | SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, | |||
15491 | Op0.getOperand(0), Op1.getOperand(0)); | |||
15492 | // Translate the shuffle mask. | |||
15493 | SmallVector<int, 16> NewMask; | |||
15494 | unsigned NumElts = VT.getVectorNumElements(); | |||
15495 | unsigned HalfElts = NumElts/2; | |||
15496 | ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N); | |||
15497 | for (unsigned n = 0; n < NumElts; ++n) { | |||
15498 | int MaskElt = SVN->getMaskElt(n); | |||
15499 | int NewElt = -1; | |||
15500 | if (MaskElt < (int)HalfElts) | |||
15501 | NewElt = MaskElt; | |||
15502 | else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts)) | |||
15503 | NewElt = HalfElts + MaskElt - NumElts; | |||
15504 | NewMask.push_back(NewElt); | |||
15505 | } | |||
15506 | return DAG.getVectorShuffle(VT, SDLoc(N), NewConcat, | |||
15507 | DAG.getUNDEF(VT), NewMask); | |||
15508 | } | |||
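| ||||
| // Mask translation example with NumElts = 4, HalfElts = 2: | |||
| //   shuffle(concat(v1, undef), concat(v2, undef), <0,1,4,5>) | |||
| // becomes | |||
| //   shuffle(concat(v1, v2), undef, <0,1,2,3>) | |||
| // since indices that referred to the second concat (>= NumElts) are | |||
| // remapped into the upper half of the single new concat. | |||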
15509 | ||||
15510 | /// Load/store instruction that can be merged with a base address | |||
15511 | /// update. | |||
15512 | struct BaseUpdateTarget { | |||
15513 | SDNode *N; | |||
15514 | bool isIntrinsic; | |||
15515 | bool isStore; | |||
15516 | unsigned AddrOpIdx; | |||
15517 | }; | |||
15518 | ||||
15519 | struct BaseUpdateUser { | |||
15520 | /// Instruction that updates a pointer | |||
15521 | SDNode *N; | |||
15522 | /// Pointer increment operand | |||
15523 | SDValue Inc; | |||
15524 | /// Pointer increment value if it is a constant, or 0 otherwise | |||
15525 | unsigned ConstInc; | |||
15526 | }; | |||
15527 | ||||
15528 | static bool TryCombineBaseUpdate(struct BaseUpdateTarget &Target, | |||
15529 | struct BaseUpdateUser &User, | |||
15530 | bool SimpleConstIncOnly, | |||
15531 | TargetLowering::DAGCombinerInfo &DCI) { | |||
15532 | SelectionDAG &DAG = DCI.DAG; | |||
15533 | SDNode *N = Target.N; | |||
15534 | MemSDNode *MemN = cast<MemSDNode>(N); | |||
15535 | SDLoc dl(N); | |||
15536 | ||||
15537 | // Find the new opcode for the updating load/store. | |||
15538 | bool isLoadOp = true; | |||
15539 | bool isLaneOp = false; | |||
15540 | // Workaround for vst1x and vld1x intrinsics, which do not have alignment | |||
15541 | // as an operand. | |||
15542 | bool hasAlignment = true; | |||
15543 | unsigned NewOpc = 0; | |||
15544 | unsigned NumVecs = 0; | |||
15545 | if (Target.isIntrinsic) { | |||
15546 | unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); | |||
15547 | switch (IntNo) { | |||
15548 | default: | |||
15549 | llvm_unreachable("unexpected intrinsic for Neon base update"); | |||
15550 | case Intrinsic::arm_neon_vld1: | |||
15551 | NewOpc = ARMISD::VLD1_UPD; | |||
15552 | NumVecs = 1; | |||
15553 | break; | |||
15554 | case Intrinsic::arm_neon_vld2: | |||
15555 | NewOpc = ARMISD::VLD2_UPD; | |||
15556 | NumVecs = 2; | |||
15557 | break; | |||
15558 | case Intrinsic::arm_neon_vld3: | |||
15559 | NewOpc = ARMISD::VLD3_UPD; | |||
15560 | NumVecs = 3; | |||
15561 | break; | |||
15562 | case Intrinsic::arm_neon_vld4: | |||
15563 | NewOpc = ARMISD::VLD4_UPD; | |||
15564 | NumVecs = 4; | |||
15565 | break; | |||
15566 | case Intrinsic::arm_neon_vld1x2: | |||
15567 | NewOpc = ARMISD::VLD1x2_UPD; | |||
15568 | NumVecs = 2; | |||
15569 | hasAlignment = false; | |||
15570 | break; | |||
15571 | case Intrinsic::arm_neon_vld1x3: | |||
15572 | NewOpc = ARMISD::VLD1x3_UPD; | |||
15573 | NumVecs = 3; | |||
15574 | hasAlignment = false; | |||
15575 | break; | |||
15576 | case Intrinsic::arm_neon_vld1x4: | |||
15577 | NewOpc = ARMISD::VLD1x4_UPD; | |||
15578 | NumVecs = 4; | |||
15579 | hasAlignment = false; | |||
15580 | break; | |||
15581 | case Intrinsic::arm_neon_vld2dup: | |||
15582 | NewOpc = ARMISD::VLD2DUP_UPD; | |||
15583 | NumVecs = 2; | |||
15584 | break; | |||
15585 | case Intrinsic::arm_neon_vld3dup: | |||
15586 | NewOpc = ARMISD::VLD3DUP_UPD; | |||
15587 | NumVecs = 3; | |||
15588 | break; | |||
15589 | case Intrinsic::arm_neon_vld4dup: | |||
15590 | NewOpc = ARMISD::VLD4DUP_UPD; | |||
15591 | NumVecs = 4; | |||
15592 | break; | |||
15593 | case Intrinsic::arm_neon_vld2lane: | |||
15594 | NewOpc = ARMISD::VLD2LN_UPD; | |||
15595 | NumVecs = 2; | |||
15596 | isLaneOp = true; | |||
15597 | break; | |||
15598 | case Intrinsic::arm_neon_vld3lane: | |||
15599 | NewOpc = ARMISD::VLD3LN_UPD; | |||
15600 | NumVecs = 3; | |||
15601 | isLaneOp = true; | |||
15602 | break; | |||
15603 | case Intrinsic::arm_neon_vld4lane: | |||
15604 | NewOpc = ARMISD::VLD4LN_UPD; | |||
15605 | NumVecs = 4; | |||
15606 | isLaneOp = true; | |||
15607 | break; | |||
15608 | case Intrinsic::arm_neon_vst1: | |||
15609 | NewOpc = ARMISD::VST1_UPD; | |||
15610 | NumVecs = 1; | |||
15611 | isLoadOp = false; | |||
15612 | break; | |||
15613 | case Intrinsic::arm_neon_vst2: | |||
15614 | NewOpc = ARMISD::VST2_UPD; | |||
15615 | NumVecs = 2; | |||
15616 | isLoadOp = false; | |||
15617 | break; | |||
15618 | case Intrinsic::arm_neon_vst3: | |||
15619 | NewOpc = ARMISD::VST3_UPD; | |||
15620 | NumVecs = 3; | |||
15621 | isLoadOp = false; | |||
15622 | break; | |||
15623 | case Intrinsic::arm_neon_vst4: | |||
15624 | NewOpc = ARMISD::VST4_UPD; | |||
15625 | NumVecs = 4; | |||
15626 | isLoadOp = false; | |||
15627 | break; | |||
15628 | case Intrinsic::arm_neon_vst2lane: | |||
15629 | NewOpc = ARMISD::VST2LN_UPD; | |||
15630 | NumVecs = 2; | |||
15631 | isLoadOp = false; | |||
15632 | isLaneOp = true; | |||
15633 | break; | |||
15634 | case Intrinsic::arm_neon_vst3lane: | |||
15635 | NewOpc = ARMISD::VST3LN_UPD; | |||
15636 | NumVecs = 3; | |||
15637 | isLoadOp = false; | |||
15638 | isLaneOp = true; | |||
15639 | break; | |||
15640 | case Intrinsic::arm_neon_vst4lane: | |||
15641 | NewOpc = ARMISD::VST4LN_UPD; | |||
15642 | NumVecs = 4; | |||
15643 | isLoadOp = false; | |||
15644 | isLaneOp = true; | |||
15645 | break; | |||
15646 | case Intrinsic::arm_neon_vst1x2: | |||
15647 | NewOpc = ARMISD::VST1x2_UPD; | |||
15648 | NumVecs = 2; | |||
15649 | isLoadOp = false; | |||
15650 | hasAlignment = false; | |||
15651 | break; | |||
15652 | case Intrinsic::arm_neon_vst1x3: | |||
15653 | NewOpc = ARMISD::VST1x3_UPD; | |||
15654 | NumVecs = 3; | |||
15655 | isLoadOp = false; | |||
15656 | hasAlignment = false; | |||
15657 | break; | |||
15658 | case Intrinsic::arm_neon_vst1x4: | |||
15659 | NewOpc = ARMISD::VST1x4_UPD; | |||
15660 | NumVecs = 4; | |||
15661 | isLoadOp = false; | |||
15662 | hasAlignment = false; | |||
15663 | break; | |||
15664 | } | |||
15665 | } else { | |||
15666 | isLaneOp = true; | |||
15667 | switch (N->getOpcode()) { | |||
15668 | default: | |||
15669 | llvm_unreachable("unexpected opcode for Neon base update"); | |||
15670 | case ARMISD::VLD1DUP: | |||
15671 | NewOpc = ARMISD::VLD1DUP_UPD; | |||
15672 | NumVecs = 1; | |||
15673 | break; | |||
15674 | case ARMISD::VLD2DUP: | |||
15675 | NewOpc = ARMISD::VLD2DUP_UPD; | |||
15676 | NumVecs = 2; | |||
15677 | break; | |||
15678 | case ARMISD::VLD3DUP: | |||
15679 | NewOpc = ARMISD::VLD3DUP_UPD; | |||
15680 | NumVecs = 3; | |||
15681 | break; | |||
15682 | case ARMISD::VLD4DUP: | |||
15683 | NewOpc = ARMISD::VLD4DUP_UPD; | |||
15684 | NumVecs = 4; | |||
15685 | break; | |||
15686 | case ISD::LOAD: | |||
15687 | NewOpc = ARMISD::VLD1_UPD; | |||
15688 | NumVecs = 1; | |||
15689 | isLaneOp = false; | |||
15690 | break; | |||
15691 | case ISD::STORE: | |||
15692 | NewOpc = ARMISD::VST1_UPD; | |||
15693 | NumVecs = 1; | |||
15694 | isLaneOp = false; | |||
15695 | isLoadOp = false; | |||
15696 | break; | |||
15697 | } | |||
15698 | } | |||
15699 | ||||
15700 | // Find the size of memory referenced by the load/store. | |||
15701 | EVT VecTy; | |||
15702 | if (isLoadOp) { | |||
15703 | VecTy = N->getValueType(0); | |||
15704 | } else if (Target.isIntrinsic) { | |||
15705 | VecTy = N->getOperand(Target.AddrOpIdx + 1).getValueType(); | |||
15706 | } else { | |||
15707 | assert(Target.isStore && | |||
15708 | "Node has to be a load, a store, or an intrinsic!"); | |||
15709 | VecTy = N->getOperand(1).getValueType(); | |||
15710 | } | |||
15711 | ||||
15712 | bool isVLDDUPOp = | |||
15713 | NewOpc == ARMISD::VLD1DUP_UPD || NewOpc == ARMISD::VLD2DUP_UPD || | |||
15714 | NewOpc == ARMISD::VLD3DUP_UPD || NewOpc == ARMISD::VLD4DUP_UPD; | |||
15715 | ||||
15716 | unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8; | |||
15717 | if (isLaneOp || isVLDDUPOp) | |||
15718 | NumBytes /= VecTy.getVectorNumElements(); | |||
15719 | ||||
15720 | if (NumBytes >= 3 * 16 && User.ConstInc != NumBytes) { | |||
15721 | // VLD3/4 and VST3/4 for 128-bit vectors are implemented with two | |||
15722 | // separate instructions that make it harder to use a non-constant update. | |||
15723 | return false; | |||
15724 | } | |||
15725 | ||||
15726 | if (SimpleConstIncOnly && User.ConstInc != NumBytes) | |||
15727 | return false; | |||
15728 | ||||
15729 | // OK, we found an ADD we can fold into the base update. | |||
15730 | // Now, create a _UPD node, taking care of not breaking alignment. | |||
15731 | ||||
15732 | EVT AlignedVecTy = VecTy; | |||
15733 | unsigned Alignment = MemN->getAlignment(); | |||
15734 | ||||
15735 | // If this is a less-than-standard-aligned load/store, change the type to | |||
15736 | // match the standard alignment. | |||
15737 | // The alignment is overlooked when selecting _UPD variants; and it's | |||
15738 | // easier to introduce bitcasts here than fix that. | |||
15739 | // There are 3 ways to get to this base-update combine: | |||
15740 | // - intrinsics: they are assumed to be properly aligned (to the standard | |||
15741 | // alignment of the memory type), so we don't need to do anything. | |||
15742 | // - ARMISD::VLDx nodes: they are only generated from the aforementioned | |||
15743 | // intrinsics, so, likewise, there's nothing to do. | |||
15744 | // - generic load/store instructions: the alignment is specified as an | |||
15745 | // explicit operand, rather than implicitly as the standard alignment | |||
15746 | // of the memory type (like the intrinsics). We need to change the | |||
15747 | // memory type to match the explicit alignment. That way, we don't | |||
15748 | // generate non-standard-aligned ARMISD::VLDx nodes. | |||
15749 | if (isa<LSBaseSDNode>(N)) { | |||
15750 | if (Alignment == 0) | |||
15751 | Alignment = 1; | |||
15752 | if (Alignment < VecTy.getScalarSizeInBits() / 8) { | |||
15753 | MVT EltTy = MVT::getIntegerVT(Alignment * 8); | |||
15754 | assert(NumVecs == 1 && "Unexpected multi-element generic load/store."); | |||
15755 | assert(!isLaneOp && "Unexpected generic load/store lane."); | |||
15756 | unsigned NumElts = NumBytes / (EltTy.getSizeInBits() / 8); | |||
15757 | AlignedVecTy = MVT::getVectorVT(EltTy, NumElts); | |||
15758 | } | |||
15759 | // Don't set an explicit alignment on regular load/stores that we want | |||
15760 | // to transform to VLD/VST 1_UPD nodes. | |||
15761 | // This matches the behavior of regular load/stores, which only get an | |||
15762 | // explicit alignment if the MMO alignment is larger than the standard | |||
15763 | // alignment of the memory type. | |||
15764 | // Intrinsics, however, always get an explicit alignment, set to the | |||
15765 | // alignment of the MMO. | |||
15766 | Alignment = 1; | |||
15767 | } | |||
15768 | ||||
15769 | // Create the new updating load/store node. | |||
15770 | // First, create an SDVTList for the new updating node's results. | |||
15771 | EVT Tys[6]; | |||
15772 | unsigned NumResultVecs = (isLoadOp ? NumVecs : 0); | |||
15773 | unsigned n; | |||
15774 | for (n = 0; n < NumResultVecs; ++n) | |||
15775 | Tys[n] = AlignedVecTy; | |||
15776 | Tys[n++] = MVT::i32; | |||
15777 | Tys[n] = MVT::Other; | |||
15778 | SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumResultVecs + 2)); | |||
15779 | ||||
15780 | // Then, gather the new node's operands. | |||
15781 | SmallVector<SDValue, 8> Ops; | |||
15782 | Ops.push_back(N->getOperand(0)); // incoming chain | |||
15783 | Ops.push_back(N->getOperand(Target.AddrOpIdx)); | |||
15784 | Ops.push_back(User.Inc); | |||
15785 | ||||
15786 | if (StoreSDNode *StN = dyn_cast<StoreSDNode>(N)) { | |||
15787 | // Try to match the intrinsic's signature | |||
15788 | Ops.push_back(StN->getValue()); | |||
15789 | } else { | |||
15790 | // Loads (and of course intrinsics) match the intrinsics' signature, | |||
15791 | // so just add all but the alignment operand. | |||
15792 | unsigned LastOperand = | |||
15793 | hasAlignment ? N->getNumOperands() - 1 : N->getNumOperands(); | |||
15794 | for (unsigned i = Target.AddrOpIdx + 1; i < LastOperand; ++i) | |||
15795 | Ops.push_back(N->getOperand(i)); | |||
15796 | } | |||
15797 | ||||
15798 | // For all node types, the alignment operand is always the last one. | |||
15799 | Ops.push_back(DAG.getConstant(Alignment, dl, MVT::i32)); | |||
15800 | ||||
15801 | // If this is a non-standard-aligned STORE, the penultimate operand is the | |||
15802 | // stored value. Bitcast it to the aligned type. | |||
15803 | if (AlignedVecTy != VecTy && N->getOpcode() == ISD::STORE) { | |||
15804 | SDValue &StVal = Ops[Ops.size() - 2]; | |||
15805 | StVal = DAG.getNode(ISD::BITCAST, dl, AlignedVecTy, StVal); | |||
15806 | } | |||
15807 | ||||
15808 | EVT LoadVT = isLaneOp ? VecTy.getVectorElementType() : AlignedVecTy; | |||
15809 | SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, dl, SDTys, Ops, LoadVT, | |||
15810 | MemN->getMemOperand()); | |||
15811 | ||||
15812 | // Update the uses. | |||
15813 | SmallVector<SDValue, 5> NewResults; | |||
15814 | for (unsigned i = 0; i < NumResultVecs; ++i) | |||
15815 | NewResults.push_back(SDValue(UpdN.getNode(), i)); | |||
15816 | ||||
15817 | // If this is a non-standard-aligned LOAD, the first result is the loaded | |||
15818 | // value. Bitcast it to the expected result type. | |||
15819 | if (AlignedVecTy != VecTy && N->getOpcode() == ISD::LOAD) { | |||
15820 | SDValue &LdVal = NewResults[0]; | |||
15821 | LdVal = DAG.getNode(ISD::BITCAST, dl, VecTy, LdVal); | |||
15822 | } | |||
15823 | ||||
15824 | NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs + 1)); // chain | |||
15825 | DCI.CombineTo(N, NewResults); | |||
15826 | DCI.CombineTo(User.N, SDValue(UpdN.getNode(), NumResultVecs)); | |||
15827 | ||||
15828 | return true; | |||
15829 | } | |||
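| ||||
| // A minimal instance of this fold: for a 128-bit vld1 (NumBytes == 16), | |||
| //   t0 = vld1(ptr);  t1 = add ptr, 16 | |||
| // becomes a single post-incremented load | |||
| //   {vec, ptr.next, chain} = VLD1_UPD(ptr, 16) | |||
| // and the CombineTo calls rewire t1 to the new-pointer result (at index | |||
| // NumResultVecs) and t0's values to the vector and chain results. | |||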
15830 | ||||
15831 | // If (opcode ptr inc) is an ADD-like instruction, return the | |||
15832 | // increment value. Otherwise return 0. | |||
15833 | static unsigned getPointerConstIncrement(unsigned Opcode, SDValue Ptr, | |||
15834 | SDValue Inc, const SelectionDAG &DAG) { | |||
15835 | ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode()); | |||
15836 | if (!CInc) | |||
15837 | return 0; | |||
15838 | ||||
15839 | switch (Opcode) { | |||
15840 | case ARMISD::VLD1_UPD: | |||
15841 | case ISD::ADD: | |||
15842 | return CInc->getZExtValue(); | |||
15843 | case ISD::OR: { | |||
15844 | if (DAG.haveNoCommonBitsSet(Ptr, Inc)) { | |||
15845 | // (OR ptr inc) is the same as (ADD ptr inc) | |||
15846 | return CInc->getZExtValue(); | |||
15847 | } | |||
15848 | return 0; | |||
15849 | } | |||
15850 | default: | |||
15851 | return 0; | |||
15852 | } | |||
15853 | } | |||
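| ||||
| // The OR case covers address math where an add of a constant was | |||
| // canonicalized to an or: e.g. if ptr is known 16-byte aligned, | |||
| // (or ptr, 8) shares no bits with 8 and is equivalent to (add ptr, 8). | |||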
15854 | ||||
15855 | static bool findPointerConstIncrement(SDNode *N, SDValue *Ptr, SDValue *CInc) { | |||
15856 | switch (N->getOpcode()) { | |||
15857 | case ISD::ADD: | |||
15858 | case ISD::OR: { | |||
15859 | if (isa<ConstantSDNode>(N->getOperand(1))) { | |||
15860 | *Ptr = N->getOperand(0); | |||
15861 | *CInc = N->getOperand(1); | |||
15862 | return true; | |||
15863 | } | |||
15864 | return false; | |||
15865 | } | |||
15866 | case ARMISD::VLD1_UPD: { | |||
15867 | if (isa<ConstantSDNode>(N->getOperand(2))) { | |||
15868 | *Ptr = N->getOperand(1); | |||
15869 | *CInc = N->getOperand(2); | |||
15870 | return true; | |||
15871 | } | |||
15872 | return false; | |||
15873 | } | |||
15874 | default: | |||
15875 | return false; | |||
15876 | } | |||
15877 | } | |||
15878 | ||||
15879 | static bool isValidBaseUpdate(SDNode *N, SDNode *User) { | |||
15880 | // Check that the add is independent of the load/store. | |||
15881 | // Otherwise, folding it would create a cycle. Search through Addr | |||
15882 | // as well, since the User may not be a direct user of Addr and | |||
15883 | // only share a base pointer. | |||
15884 | SmallPtrSet<const SDNode *, 32> Visited; | |||
15885 | SmallVector<const SDNode *, 16> Worklist; | |||
15886 | Worklist.push_back(N); | |||
15887 | Worklist.push_back(User); | |||
15888 | if (SDNode::hasPredecessorHelper(N, Visited, Worklist) || | |||
15889 | SDNode::hasPredecessorHelper(User, Visited, Worklist)) | |||
15890 | return false; | |||
15891 | return true; | |||
15892 | } | |||
15893 | ||||
15894 | /// CombineBaseUpdate - Target-specific DAG combine function for VLDDUP, | |||
15895 | /// NEON load/store intrinsics, and generic vector load/stores, to merge | |||
15896 | /// base address updates. | |||
15897 | /// For generic load/stores, the memory type is assumed to be a vector. | |||
15898 | /// The caller is assumed to have checked legality. | |||
15899 | static SDValue CombineBaseUpdate(SDNode *N, | |||
15900 | TargetLowering::DAGCombinerInfo &DCI) { | |||
15901 | const bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID || | |||
15902 | N->getOpcode() == ISD::INTRINSIC_W_CHAIN); | |||
15903 | const bool isStore = N->getOpcode() == ISD::STORE; | |||
15904 | const unsigned AddrOpIdx = ((isIntrinsic || isStore) ? 2 : 1); | |||
15905 | BaseUpdateTarget Target = {N, isIntrinsic, isStore, AddrOpIdx}; | |||
15906 | ||||
15907 | SDValue Addr = N->getOperand(AddrOpIdx); | |||
15908 | ||||
15909 | SmallVector<BaseUpdateUser, 8> BaseUpdates; | |||
15910 | ||||
15911 | // Search for a use of the address operand that is an increment. | |||
15912 | for (SDNode::use_iterator UI = Addr.getNode()->use_begin(), | |||
15913 | UE = Addr.getNode()->use_end(); UI != UE; ++UI) { | |||
15914 | SDNode *User = *UI; | |||
15915 | if (UI.getUse().getResNo() != Addr.getResNo() || | |||
15916 | User->getNumOperands() != 2) | |||
15917 | continue; | |||
15918 | ||||
15919 | SDValue Inc = User->getOperand(UI.getOperandNo() == 1 ? 0 : 1); | |||
15920 | unsigned ConstInc = | |||
15921 | getPointerConstIncrement(User->getOpcode(), Addr, Inc, DCI.DAG); | |||
15922 | ||||
15923 | if (ConstInc || User->getOpcode() == ISD::ADD) | |||
15924 | BaseUpdates.push_back({User, Inc, ConstInc}); | |||
15925 | } | |||
15926 | ||||
15927 | // If the address is a constant pointer increment itself, find | |||
15928 | // another constant increment that has the same base operand | |||
15929 | SDValue Base; | |||
15930 | SDValue CInc; | |||
15931 | if (findPointerConstIncrement(Addr.getNode(), &Base, &CInc)) { | |||
15932 | unsigned Offset = | |||
15933 | getPointerConstIncrement(Addr->getOpcode(), Base, CInc, DCI.DAG); | |||
15934 | for (SDNode::use_iterator UI = Base->use_begin(), UE = Base->use_end(); | |||
15935 | UI != UE; ++UI) { | |||
15936 | ||||
15937 | SDNode *User = *UI; | |||
15938 | if (UI.getUse().getResNo() != Base.getResNo() || User == Addr.getNode() || | |||
15939 | User->getNumOperands() != 2) | |||
15940 | continue; | |||
15941 | ||||
15942 | SDValue UserInc = User->getOperand(UI.getOperandNo() == 0 ? 1 : 0); | |||
15943 | unsigned UserOffset = | |||
15944 | getPointerConstIncrement(User->getOpcode(), Base, UserInc, DCI.DAG); | |||
15945 | ||||
15946 | if (!UserOffset || UserOffset <= Offset) | |||
15947 | continue; | |||
15948 | ||||
15949 | unsigned NewConstInc = UserOffset - Offset; | |||
15950 | SDValue NewInc = DCI.DAG.getConstant(NewConstInc, SDLoc(N), MVT::i32); | |||
15951 | BaseUpdates.push_back({User, NewInc, NewConstInc}); | |||
15952 | } | |||
15953 | } | |||
15954 | ||||
15955 | // Try to fold the load/store with an update that matches the memory | |||
15956 | // access size. This should work well for sequential loads. | |||
15957 | // | |||
15958 | // Filter out invalid updates as well. | |||
15959 | unsigned NumValidUpd = BaseUpdates.size(); | |||
15960 | for (unsigned I = 0; I < NumValidUpd;) { | |||
15961 | BaseUpdateUser &User = BaseUpdates[I]; | |||
15962 | if (!isValidBaseUpdate(N, User.N)) { | |||
15963 | --NumValidUpd; | |||
15964 | std::swap(BaseUpdates[I], BaseUpdates[NumValidUpd]); | |||
15965 | continue; | |||
15966 | } | |||
15967 | ||||
15968 | if (TryCombineBaseUpdate(Target, User, /*SimpleConstIncOnly=*/true, DCI)) | |||
15969 | return SDValue(); | |||
15970 | ++I; | |||
15971 | } | |||
15972 | BaseUpdates.resize(NumValidUpd); | |||
15973 | ||||
15974 | // Try to fold with other users. Non-constant updates are considered | |||
15975 | // first, and constant updates are sorted to not break a sequence of | |||
15976 | // strided accesses (if there is any). | |||
15977 | std::stable_sort(BaseUpdates.begin(), BaseUpdates.end(), | |||
15978 | [](const BaseUpdateUser &LHS, const BaseUpdateUser &RHS) { | |||
15979 | return LHS.ConstInc < RHS.ConstInc; | |||
15980 | }); | |||
15981 | for (BaseUpdateUser &User : BaseUpdates) { | |||
15982 | if (TryCombineBaseUpdate(Target, User, /*SimpleConstIncOnly=*/false, DCI)) | |||
15983 | return SDValue(); | |||
15984 | } | |||
15985 | return SDValue(); | |||
15986 | } | |||
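| ||||
| // To illustrate the same-base search: if Addr = (add base, 16) and another | |||
| // user is (add base, 32), then Offset = 16 and UserOffset = 32, so that | |||
| // user is recorded as an increment of 32 - 16 = 16 relative to Addr and | |||
| // can still be folded into the load/store at Addr. | |||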
15987 | ||||
15988 | static SDValue PerformVLDCombine(SDNode *N, | |||
15989 | TargetLowering::DAGCombinerInfo &DCI) { | |||
15990 | if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) | |||
15991 | return SDValue(); | |||
15992 | ||||
15993 | return CombineBaseUpdate(N, DCI); | |||
15994 | } | |||
15995 | ||||
15996 | static SDValue PerformMVEVLDCombine(SDNode *N, | |||
15997 | TargetLowering::DAGCombinerInfo &DCI) { | |||
15998 | if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) | |||
15999 | return SDValue(); | |||
16000 | ||||
16001 | SelectionDAG &DAG = DCI.DAG; | |||
16002 | SDValue Addr = N->getOperand(2); | |||
16003 | MemSDNode *MemN = cast<MemSDNode>(N); | |||
16004 | SDLoc dl(N); | |||
16005 | ||||
16006 | // For the stores, where there are multiple intrinsics, we only actually want | |||
16007 | // to post-inc the last of them. | |||
16008 | unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); | |||
16009 | if (IntNo == Intrinsic::arm_mve_vst2q && | |||
16010 | cast<ConstantSDNode>(N->getOperand(5))->getZExtValue() != 1) | |||
16011 | return SDValue(); | |||
16012 | if (IntNo == Intrinsic::arm_mve_vst4q && | |||
16013 | cast<ConstantSDNode>(N->getOperand(7))->getZExtValue() != 3) | |||
16014 | return SDValue(); | |||
16015 | ||||
16016 | // Search for a use of the address operand that is an increment. | |||
16017 | for (SDNode::use_iterator UI = Addr.getNode()->use_begin(), | |||
16018 | UE = Addr.getNode()->use_end(); | |||
16019 | UI != UE; ++UI) { | |||
16020 | SDNode *User = *UI; | |||
16021 | if (User->getOpcode() != ISD::ADD || | |||
16022 | UI.getUse().getResNo() != Addr.getResNo()) | |||
16023 | continue; | |||
16024 | ||||
16025 | // Check that the add is independent of the load/store. Otherwise, folding | |||
16026 | // it would create a cycle. We can avoid searching through Addr as it's a | |||
16027 | // predecessor to both. | |||
16028 | SmallPtrSet<const SDNode *, 32> Visited; | |||
16029 | SmallVector<const SDNode *, 16> Worklist; | |||
16030 | Visited.insert(Addr.getNode()); | |||
16031 | Worklist.push_back(N); | |||
16032 | Worklist.push_back(User); | |||
16033 | if (SDNode::hasPredecessorHelper(N, Visited, Worklist) || | |||
16034 | SDNode::hasPredecessorHelper(User, Visited, Worklist)) | |||
16035 | continue; | |||
16036 | ||||
16037 | // Find the new opcode for the updating load/store. | |||
16038 | bool isLoadOp = true; | |||
16039 | unsigned NewOpc = 0; | |||
16040 | unsigned NumVecs = 0; | |||
16041 | switch (IntNo) { | |||
16042 | default: | |||
16043 | llvm_unreachable("unexpected intrinsic for MVE VLDn combine"); | |||
16044 | case Intrinsic::arm_mve_vld2q: | |||
16045 | NewOpc = ARMISD::VLD2_UPD; | |||
16046 | NumVecs = 2; | |||
16047 | break; | |||
16048 | case Intrinsic::arm_mve_vld4q: | |||
16049 | NewOpc = ARMISD::VLD4_UPD; | |||
16050 | NumVecs = 4; | |||
16051 | break; | |||
16052 | case Intrinsic::arm_mve_vst2q: | |||
16053 | NewOpc = ARMISD::VST2_UPD; | |||
16054 | NumVecs = 2; | |||
16055 | isLoadOp = false; | |||
16056 | break; | |||
16057 | case Intrinsic::arm_mve_vst4q: | |||
16058 | NewOpc = ARMISD::VST4_UPD; | |||
16059 | NumVecs = 4; | |||
16060 | isLoadOp = false; | |||
16061 | break; | |||
16062 | } | |||
16063 | ||||
16064 | // Find the size of memory referenced by the load/store. | |||
16065 | EVT VecTy; | |||
16066 | if (isLoadOp) { | |||
16067 | VecTy = N->getValueType(0); | |||
16068 | } else { | |||
16069 | VecTy = N->getOperand(3).getValueType(); | |||
16070 | } | |||
16071 | ||||
16072 | unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8; | |||
16073 | ||||
16074 | // If the increment is a constant, it must match the memory ref size. | |||
16075 | SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0); | |||
16076 | ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode()); | |||
16077 | if (!CInc || CInc->getZExtValue() != NumBytes) | |||
16078 | continue; | |||
16079 | ||||
16080 | // Create the new updating load/store node. | |||
16081 | // First, create an SDVTList for the new updating node's results. | |||
16082 | EVT Tys[6]; | |||
16083 | unsigned NumResultVecs = (isLoadOp ? NumVecs : 0); | |||
16084 | unsigned n; | |||
16085 | for (n = 0; n < NumResultVecs; ++n) | |||
16086 | Tys[n] = VecTy; | |||
16087 | Tys[n++] = MVT::i32; | |||
16088 | Tys[n] = MVT::Other; | |||
16089 | SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumResultVecs + 2)); | |||
16090 | ||||
16091 | // Then, gather the new node's operands. | |||
16092 | SmallVector<SDValue, 8> Ops; | |||
16093 | Ops.push_back(N->getOperand(0)); // incoming chain | |||
16094 | Ops.push_back(N->getOperand(2)); // ptr | |||
16095 | Ops.push_back(Inc); | |||
16096 | ||||
16097 | for (unsigned i = 3; i < N->getNumOperands(); ++i) | |||
16098 | Ops.push_back(N->getOperand(i)); | |||
16099 | ||||
16100 | SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, dl, SDTys, Ops, VecTy, | |||
16101 | MemN->getMemOperand()); | |||
16102 | ||||
16103 | // Update the uses. | |||
16104 | SmallVector<SDValue, 5> NewResults; | |||
16105 | for (unsigned i = 0; i < NumResultVecs; ++i) | |||
16106 | NewResults.push_back(SDValue(UpdN.getNode(), i)); | |||
16107 | ||||
16108 | NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs + 1)); // chain | |||
16109 | DCI.CombineTo(N, NewResults); | |||
16110 | DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs)); | |||
16111 | ||||
16112 | break; | |||
16113 | } | |||
16114 | ||||
16115 | return SDValue(); | |||
16116 | } | |||
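| ||||
| // E.g. for arm_mve_vld2q of two v8i16 results, NumBytes = 2 * 16 = 32, so | |||
| // only an add of exactly 32 to the address is folded, yielding VLD2_UPD | |||
| // whose extra i32 result (the written-back pointer) replaces the ADD. | |||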
16117 | ||||
16118 | /// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a | |||
16119 | /// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic | |||
16120 | /// are also VDUPLANEs. If so, combine them to a vldN-dup operation and | |||
16121 | /// return true. | |||
16122 | static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { | |||
16123 | SelectionDAG &DAG = DCI.DAG; | |||
16124 | EVT VT = N->getValueType(0); | |||
16125 | // vldN-dup instructions only support 64-bit vectors for N > 1. | |||
16126 | if (!VT.is64BitVector()) | |||
16127 | return false; | |||
16128 | ||||
16129 | // Check if the VDUPLANE operand is a vldN-dup intrinsic. | |||
16130 | SDNode *VLD = N->getOperand(0).getNode(); | |||
16131 | if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN) | |||
16132 | return false; | |||
16133 | unsigned NumVecs = 0; | |||
16134 | unsigned NewOpc = 0; | |||
16135 | unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue(); | |||
16136 | if (IntNo == Intrinsic::arm_neon_vld2lane) { | |||
16137 | NumVecs = 2; | |||
16138 | NewOpc = ARMISD::VLD2DUP; | |||
16139 | } else if (IntNo == Intrinsic::arm_neon_vld3lane) { | |||
16140 | NumVecs = 3; | |||
16141 | NewOpc = ARMISD::VLD3DUP; | |||
16142 | } else if (IntNo == Intrinsic::arm_neon_vld4lane) { | |||
16143 | NumVecs = 4; | |||
16144 | NewOpc = ARMISD::VLD4DUP; | |||
16145 | } else { | |||
16146 | return false; | |||
16147 | } | |||
16148 | ||||
16149 | // First check that all the vldN-lane uses are VDUPLANEs and that the lane | |||
16150 | // numbers match the load. | |||
16151 | unsigned VLDLaneNo = | |||
16152 | cast<ConstantSDNode>(VLD->getOperand(NumVecs+3))->getZExtValue(); | |||
16153 | for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); | |||
16154 | UI != UE; ++UI) { | |||
16155 | // Ignore uses of the chain result. | |||
16156 | if (UI.getUse().getResNo() == NumVecs) | |||
16157 | continue; | |||
16158 | SDNode *User = *UI; | |||
16159 | if (User->getOpcode() != ARMISD::VDUPLANE || | |||
16160 | VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue()) | |||
16161 | return false; | |||
16162 | } | |||
16163 | ||||
16164 | // Create the vldN-dup node. | |||
16165 | EVT Tys[5]; | |||
16166 | unsigned n; | |||
16167 | for (n = 0; n < NumVecs; ++n) | |||
16168 | Tys[n] = VT; | |||
16169 | Tys[n] = MVT::Other; | |||
16170 | SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumVecs+1)); | |||
16171 | SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) }; | |||
16172 | MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD); | |||
16173 | SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, SDLoc(VLD), SDTys, | |||
16174 | Ops, VLDMemInt->getMemoryVT(), | |||
16175 | VLDMemInt->getMemOperand()); | |||
16176 | ||||
16177 | // Update the uses. | |||
16178 | for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); | |||
16179 | UI != UE; ++UI) { | |||
16180 | unsigned ResNo = UI.getUse().getResNo(); | |||
16181 | // Ignore uses of the chain result. | |||
16182 | if (ResNo == NumVecs) | |||
16183 | continue; | |||
16184 | SDNode *User = *UI; | |||
16185 | DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo)); | |||
16186 | } | |||
16187 | ||||
16188 | // Now the vldN-lane intrinsic is dead except for its chain result. | |||
16189 | // Update uses of the chain. | |||
16190 | std::vector<SDValue> VLDDupResults; | |||
16191 | for (unsigned n = 0; n < NumVecs; ++n) | |||
16192 | VLDDupResults.push_back(SDValue(VLDDup.getNode(), n)); | |||
16193 | VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs)); | |||
16194 | DCI.CombineTo(VLD, VLDDupResults); | |||
16195 | ||||
16196 | return true; | |||
16197 | } | |||
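| ||||
| // In other words, if every value use of | |||
| //   {v0, v1, chain} = vld2lane(ptr, ..., lane) | |||
| // is VDUPLANE(vN, lane), the lane load plus the dups collapse into | |||
| //   {d0, d1, chain} = VLD2DUP(ptr) | |||
| // which loads two consecutive elements and, in effect, replicates each | |||
| // across its own D register. | |||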
16198 | ||||
16199 | /// PerformVDUPLANECombine - Target-specific dag combine xforms for | |||
16200 | /// ARMISD::VDUPLANE. | |||
16201 | static SDValue PerformVDUPLANECombine(SDNode *N, | |||
16202 | TargetLowering::DAGCombinerInfo &DCI, | |||
16203 | const ARMSubtarget *Subtarget) { | |||
16204 | SDValue Op = N->getOperand(0); | |||
16205 | EVT VT = N->getValueType(0); | |||
16206 | ||||
16207 | // On MVE, we just convert the VDUPLANE to a VDUP with an extract. | |||
16208 | if (Subtarget->hasMVEIntegerOps()) { | |||
16209 | EVT ExtractVT = VT.getVectorElementType(); | |||
16210 | // We need to ensure we are creating a legal type. | |||
16211 | if (!DCI.DAG.getTargetLoweringInfo().isTypeLegal(ExtractVT)) | |||
16212 | ExtractVT = MVT::i32; | |||
16213 | SDValue Extract = DCI.DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), ExtractVT, | |||
16214 | N->getOperand(0), N->getOperand(1)); | |||
16215 | return DCI.DAG.getNode(ARMISD::VDUP, SDLoc(N), VT, Extract); | |||
16216 | } | |||
16217 | ||||
16218 | // If the source is a vldN-lane (N > 1) intrinsic, and all the other uses | |||
16219 | // of that intrinsic are also VDUPLANEs, combine them to a vldN-dup operation. | |||
16220 | if (CombineVLDDUP(N, DCI)) | |||
16221 | return SDValue(N, 0); | |||
16222 | ||||
16223 | // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is | |||
16224 | // redundant. Ignore bit_converts for now; element sizes are checked below. | |||
16225 | while (Op.getOpcode() == ISD::BITCAST) | |||
16226 | Op = Op.getOperand(0); | |||
16227 | if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM) | |||
16228 | return SDValue(); | |||
16229 | ||||
16230 | // Make sure the VMOV element size is not bigger than the VDUPLANE elements. | |||
16231 | unsigned EltSize = Op.getScalarValueSizeInBits(); | |||
16232 | // The canonical VMOV for a zero vector uses a 32-bit element size. | |||
16233 | unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); | |||
16234 | unsigned EltBits; | |||
16235 | if (ARM_AM::decodeVMOVModImm(Imm, EltBits) == 0) | |||
16236 | EltSize = 8; | |||
16237 | if (EltSize > VT.getScalarSizeInBits()) | |||
16238 | return SDValue(); | |||
16239 | ||||
16240 | return DCI.DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op); | |||
16241 | } | |||
16242 | ||||
16243 | /// PerformVDUPCombine - Target-specific dag combine xforms for ARMISD::VDUP. | |||
16244 | static SDValue PerformVDUPCombine(SDNode *N, SelectionDAG &DAG, | |||
16245 | const ARMSubtarget *Subtarget) { | |||
16246 | SDValue Op = N->getOperand(0); | |||
16247 | SDLoc dl(N); | |||
16248 | ||||
16249 | if (Subtarget->hasMVEIntegerOps()) { | |||
16250 | // Convert VDUP f32 -> VDUP BITCAST i32 under MVE, as we know the value will | |||
16251 | // need to come from a GPR. | |||
16252 | if (Op.getValueType() == MVT::f32) | |||
16253 | return DAG.getNode(ARMISD::VDUP, dl, N->getValueType(0), | |||
16254 | DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op)); | |||
16255 | else if (Op.getValueType() == MVT::f16) | |||
16256 | return DAG.getNode(ARMISD::VDUP, dl, N->getValueType(0), | |||
16257 | DAG.getNode(ARMISD::VMOVrh, dl, MVT::i32, Op)); | |||
16258 | } | |||
16259 | ||||
16260 | if (!Subtarget->hasNEON()) | |||
16261 | return SDValue(); | |||
16262 | ||||
16263 | // Match VDUP(LOAD) -> VLD1DUP. | |||
16264 | // We match this pattern here rather than waiting for isel because the | |||
16265 | // transform is only legal for unindexed loads. | |||
16266 | LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode()); | |||
16267 | if (LD && Op.hasOneUse() && LD->isUnindexed() && | |||
16268 | LD->getMemoryVT() == N->getValueType(0).getVectorElementType()) { | |||
16269 | SDValue Ops[] = {LD->getOperand(0), LD->getOperand(1), | |||
16270 | DAG.getConstant(LD->getAlignment(), SDLoc(N), MVT::i32)}; | |||
16271 | SDVTList SDTys = DAG.getVTList(N->getValueType(0), MVT::Other); | |||
16272 | SDValue VLDDup = | |||
16273 | DAG.getMemIntrinsicNode(ARMISD::VLD1DUP, SDLoc(N), SDTys, Ops, | |||
16274 | LD->getMemoryVT(), LD->getMemOperand()); | |||
16275 | DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), VLDDup.getValue(1)); | |||
16276 | return VLDDup; | |||
16277 | } | |||
16278 | ||||
16279 | return SDValue(); | |||
16280 | } | |||
16281 | ||||
16282 | static SDValue PerformLOADCombine(SDNode *N, | |||
16283 | TargetLowering::DAGCombinerInfo &DCI, | |||
16284 | const ARMSubtarget *Subtarget) { | |||
16285 | EVT VT = N->getValueType(0); | |||
16286 | ||||
16287 | // If this is a legal vector load, try to combine it into a VLD1_UPD. | |||
16288 | if (Subtarget->hasNEON() && ISD::isNormalLoad(N) && VT.isVector() && | |||
16289 | DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT)) | |||
16290 | return CombineBaseUpdate(N, DCI); | |||
16291 | ||||
16292 | return SDValue(); | |||
16293 | } | |||
16294 | ||||
16295 | // Optimize trunc store (of multiple scalars) to shuffle and store. First, | |||
16296 | // pack all of the elements in one place. Next, store to memory in fewer | |||
16297 | // chunks. | |||
16298 | static SDValue PerformTruncatingStoreCombine(StoreSDNode *St, | |||
16299 | SelectionDAG &DAG) { | |||
16300 | SDValue StVal = St->getValue(); | |||
16301 | EVT VT = StVal.getValueType(); | |||
16302 | if (!St->isTruncatingStore() || !VT.isVector()) | |||
16303 | return SDValue(); | |||
16304 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | |||
16305 | EVT StVT = St->getMemoryVT(); | |||
16306 | unsigned NumElems = VT.getVectorNumElements(); | |||
16307 | assert(StVT != VT && "Cannot truncate to the same type"); | |||
16308 | unsigned FromEltSz = VT.getScalarSizeInBits(); | |||
16309 | unsigned ToEltSz = StVT.getScalarSizeInBits(); | |||
16310 | ||||
16311 | // The From and To element sizes and the element count must be powers of two. | |||
16312 | if (!isPowerOf2_32(NumElems * FromEltSz * ToEltSz)) | |||
16313 | return SDValue(); | |||
16314 | ||||
16315 | // We are going to use the original vector elt for storing. | |||
16316 | // Accumulated smaller vector elements must be a multiple of the store size. | |||
16317 | if (0 != (NumElems * FromEltSz) % ToEltSz) | |||
16318 | return SDValue(); | |||
16319 | ||||
16320 | unsigned SizeRatio = FromEltSz / ToEltSz; | |||
16321 | assert(SizeRatio * NumElems * ToEltSz == VT.getSizeInBits()); | |||
16322 | ||||
16323 | // Create a type on which we perform the shuffle. | |||
16324 | EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), StVT.getScalarType(), | |||
16325 | NumElems * SizeRatio); | |||
16326 | assert(WideVecVT.getSizeInBits() == VT.getSizeInBits()); | |||
16327 | ||||
16328 | SDLoc DL(St); | |||
16329 | SDValue WideVec = DAG.getNode(ISD::BITCAST, DL, WideVecVT, StVal); | |||
16330 | SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1); | |||
16331 | for (unsigned i = 0; i < NumElems; ++i) | |||
16332 | ShuffleVec[i] = DAG.getDataLayout().isBigEndian() ? (i + 1) * SizeRatio - 1 | |||
16333 | : i * SizeRatio; | |||
16334 | ||||
16335 | // Can't shuffle using an illegal type. | |||
16336 | if (!TLI.isTypeLegal(WideVecVT)) | |||
16337 | return SDValue(); | |||
16338 | ||||
16339 | SDValue Shuff = DAG.getVectorShuffle( | |||
16340 | WideVecVT, DL, WideVec, DAG.getUNDEF(WideVec.getValueType()), ShuffleVec); | |||
16341 | // At this point all of the data is stored at the bottom of the | |||
16342 | // register. We now need to save it to memory. | |||
16343 | ||||
16344 | // Find the largest store unit | |||
16345 | MVT StoreType = MVT::i8; | |||
16346 | for (MVT Tp : MVT::integer_valuetypes()) { | |||
16347 | if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToEltSz) | |||
16348 | StoreType = Tp; | |||
16349 | } | |||
16350 | // Didn't find a legal store type. | |||
16351 | if (!TLI.isTypeLegal(StoreType)) | |||
16352 | return SDValue(); | |||
16353 | ||||
16354 | // Bitcast the original vector into a vector of store-size units | |||
16355 | EVT StoreVecVT = | |||
16356 | EVT::getVectorVT(*DAG.getContext(), StoreType, | |||
16357 | VT.getSizeInBits() / EVT(StoreType).getSizeInBits()); | |||
16358 | assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits()); | |||
16359 | SDValue ShuffWide = DAG.getNode(ISD::BITCAST, DL, StoreVecVT, Shuff); | |||
16360 | SmallVector<SDValue, 8> Chains; | |||
16361 | SDValue Increment = DAG.getConstant(StoreType.getSizeInBits() / 8, DL, | |||
16362 | TLI.getPointerTy(DAG.getDataLayout())); | |||
16363 | SDValue BasePtr = St->getBasePtr(); | |||
16364 | ||||
16365 | // Perform one or more big stores into memory. | |||
16366 | unsigned E = (ToEltSz * NumElems) / StoreType.getSizeInBits(); | |||
16367 | for (unsigned I = 0; I < E; I++) { | |||
16368 | SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, StoreType, | |||
16369 | ShuffWide, DAG.getIntPtrConstant(I, DL)); | |||
16370 | SDValue Ch = | |||
16371 | DAG.getStore(St->getChain(), DL, SubVec, BasePtr, St->getPointerInfo(), | |||
16372 | St->getAlignment(), St->getMemOperand()->getFlags()); | |||
16373 | BasePtr = | |||
16374 | DAG.getNode(ISD::ADD, DL, BasePtr.getValueType(), BasePtr, Increment); | |||
16375 | Chains.push_back(Ch); | |||
16376 | } | |||
16377 | return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains); | |||
16378 | } | |||
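| ||||
| // Walking through a v4i32 -> v4i16 truncating store (SizeRatio = 2, | |||
| // little-endian): the value is bitcast to v8i16, shuffled with mask | |||
| // <0,2,4,6,-1,-1,-1,-1> so the narrowed elements land at the bottom of the | |||
| // register, bitcast to v2i64, and element 0 is written with one i64 store. | |||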
16379 | ||||
16380 | // Try taking a single vector store from an fpround (which would otherwise turn | |||
16381 | // into an expensive buildvector) and splitting it into a series of narrowing | |||
16382 | // stores. | |||
16383 | static SDValue PerformSplittingToNarrowingStores(StoreSDNode *St, | |||
16384 | SelectionDAG &DAG) { | |||
16385 | if (!St->isSimple() || St->isTruncatingStore() || !St->isUnindexed()) | |||
16386 | return SDValue(); | |||
16387 | SDValue Trunc = St->getValue(); | |||
16388 | if (Trunc->getOpcode() != ISD::FP_ROUND) | |||
16389 | return SDValue(); | |||
16390 | EVT FromVT = Trunc->getOperand(0).getValueType(); | |||
16391 | EVT ToVT = Trunc.getValueType(); | |||
16392 | if (!ToVT.isVector()) | |||
16393 | return SDValue(); | |||
16394 | assert(FromVT.getVectorNumElements() == ToVT.getVectorNumElements()); | |||
16395 | EVT ToEltVT = ToVT.getVectorElementType(); | |||
16396 | EVT FromEltVT = FromVT.getVectorElementType(); | |||
16397 | ||||
16398 | if (FromEltVT != MVT::f32 || ToEltVT != MVT::f16) | |||
16399 | return SDValue(); | |||
16400 | ||||
16401 | unsigned NumElements = 4; | |||
16402 | if (FromVT.getVectorNumElements() % NumElements != 0) | |||
16403 | return SDValue(); | |||
16404 | ||||
16405 | // Test if the Trunc will be convertible to a VMOVN with a shuffle, and if so | |||
16406 | // use the VMOVN over splitting the store. We are looking for patterns of: | |||
16407 | // !rev: 0 N 1 N+1 2 N+2 ... | |||
16408 | // rev: N 0 N+1 1 N+2 2 ... | |||
16409 | // The shuffle may either be a single source (in which case N = NumElts/2) or | |||
16410 | // two inputs extended with concat to the same size (in which case N = | |||
16411 | // NumElts). | |||
16412 | auto isVMOVNShuffle = [&](ShuffleVectorSDNode *SVN, bool Rev) { | |||
16413 | ArrayRef<int> M = SVN->getMask(); | |||
16414 | unsigned NumElts = ToVT.getVectorNumElements(); | |||
16415 | if (SVN->getOperand(1).isUndef()) | |||
16416 | NumElts /= 2; | |||
16417 | ||||
16418 | unsigned Off0 = Rev ? NumElts : 0; | |||
16419 | unsigned Off1 = Rev ? 0 : NumElts; | |||
16420 | ||||
16421 | for (unsigned I = 0; I < NumElts; I += 2) { | |||
16422 | if (M[I] >= 0 && M[I] != (int)(Off0 + I / 2)) | |||
16423 | return false; | |||
16424 | if (M[I + 1] >= 0 && M[I + 1] != (int)(Off1 + I / 2)) | |||
16425 | return false; | |||
16426 | } | |||
16427 | ||||
16428 | return true; | |||
16429 | }; | |||
16430 | ||||
16431 | if (auto *Shuffle = dyn_cast<ShuffleVectorSDNode>(Trunc.getOperand(0))) | |||
16432 | if (isVMOVNShuffle(Shuffle, false) || isVMOVNShuffle(Shuffle, true)) | |||
16433 | return SDValue(); | |||
16434 | ||||
16435 | LLVMContext &C = *DAG.getContext(); | |||
16436 | SDLoc DL(St); | |||
16437 | // Details about the old store | |||
16438 | SDValue Ch = St->getChain(); | |||
16439 | SDValue BasePtr = St->getBasePtr(); | |||
16440 | Align Alignment = St->getOriginalAlign(); | |||
16441 | MachineMemOperand::Flags MMOFlags = St->getMemOperand()->getFlags(); | |||
16442 | AAMDNodes AAInfo = St->getAAInfo(); | |||
16443 | ||||
16444 | // We split the store into slices of NumElements. fp16 trunc stores are | |||
16445 | // vcvt'd and then stored as truncating integer stores. | |||
16446 | EVT NewFromVT = EVT::getVectorVT(C, FromEltVT, NumElements); | |||
16447 | EVT NewToVT = EVT::getVectorVT( | |||
16448 | C, EVT::getIntegerVT(C, ToEltVT.getSizeInBits()), NumElements); | |||
16449 | ||||
16450 | SmallVector<SDValue, 4> Stores; | |||
16451 | for (unsigned i = 0; i < FromVT.getVectorNumElements() / NumElements; i++) { | |||
16452 | unsigned NewOffset = i * NumElements * ToEltVT.getSizeInBits() / 8; | |||
16453 | SDValue NewPtr = | |||
16454 | DAG.getObjectPtrOffset(DL, BasePtr, TypeSize::Fixed(NewOffset)); | |||
16455 | ||||
16456 | SDValue Extract = | |||
16457 | DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, NewFromVT, Trunc.getOperand(0), | |||
16458 | DAG.getConstant(i * NumElements, DL, MVT::i32)); | |||
16459 | ||||
16460 | SDValue FPTrunc = | |||
16461 | DAG.getNode(ARMISD::VCVTN, DL, MVT::v8f16, DAG.getUNDEF(MVT::v8f16), | |||
16462 | Extract, DAG.getConstant(0, DL, MVT::i32)); | |||
16463 | Extract = DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, MVT::v4i32, FPTrunc); | |||
16464 | ||||
16465 | SDValue Store = DAG.getTruncStore( | |||
16466 | Ch, DL, Extract, NewPtr, St->getPointerInfo().getWithOffset(NewOffset), | |||
16467 | NewToVT, Alignment.value(), MMOFlags, AAInfo); | |||
16468 | Stores.push_back(Store); | |||
16469 | } | |||
16470 | return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Stores); | |||
16471 | } | |||
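| ||||
| // E.g. a store of fpround(v8f32 x) to v8f16 is emitted as two slices: each | |||
| // v4f32 extract_subvector is narrowed with VCVTN into the bottom f16 lanes, | |||
| // reg-cast to v4i32, and stored as a v4i16-truncating store at byte | |||
| // offsets 0 and 8. | |||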
16472 | ||||
16473 | // Try taking a single vector store from an MVETRUNC (which would otherwise turn | |||
16474 | // into an expensive buildvector) and splitting it into a series of narrowing | |||
16475 | // stores. | |||
16476 | static SDValue PerformSplittingMVETruncToNarrowingStores(StoreSDNode *St, | |||
16477 | SelectionDAG &DAG) { | |||
16478 | if (!St->isSimple() || St->isTruncatingStore() || !St->isUnindexed()) | |||
16479 | return SDValue(); | |||
16480 | SDValue Trunc = St->getValue(); | |||
16481 | if (Trunc->getOpcode() != ARMISD::MVETRUNC) | |||
16482 | return SDValue(); | |||
16483 | EVT FromVT = Trunc->getOperand(0).getValueType(); | |||
16484 | EVT ToVT = Trunc.getValueType(); | |||
16485 | ||||
16486 | LLVMContext &C = *DAG.getContext(); | |||
16487 | SDLoc DL(St); | |||
16488 | // Details about the old store | |||
16489 | SDValue Ch = St->getChain(); | |||
16490 | SDValue BasePtr = St->getBasePtr(); | |||
16491 | Align Alignment = St->getOriginalAlign(); | |||
16492 | MachineMemOperand::Flags MMOFlags = St->getMemOperand()->getFlags(); | |||
16493 | AAMDNodes AAInfo = St->getAAInfo(); | |||
16494 | ||||
16495 | EVT NewToVT = EVT::getVectorVT(C, ToVT.getVectorElementType(), | |||
16496 | FromVT.getVectorNumElements()); | |||
16497 | ||||
16498 | SmallVector<SDValue, 4> Stores; | |||
16499 | for (unsigned i = 0; i < Trunc.getNumOperands(); i++) { | |||
16500 | unsigned NewOffset = | |||
16501 | i * FromVT.getVectorNumElements() * ToVT.getScalarSizeInBits() / 8; | |||
16502 | SDValue NewPtr = | |||
16503 | DAG.getObjectPtrOffset(DL, BasePtr, TypeSize::Fixed(NewOffset)); | |||
16504 | ||||
16505 | SDValue Extract = Trunc.getOperand(i); | |||
16506 | SDValue Store = DAG.getTruncStore( | |||
16507 | Ch, DL, Extract, NewPtr, St->getPointerInfo().getWithOffset(NewOffset), | |||
16508 | NewToVT, Alignment.value(), MMOFlags, AAInfo); | |||
16509 | Stores.push_back(Store); | |||
16510 | } | |||
16511 | return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Stores); | |||
16512 | } | |||
16513 | ||||
16514 | // Given a floating point store from an extracted vector, with an integer | |||
16515 | // VGETLANE that already exists, store the existing VGETLANEu directly. This can | |||
16516 | // help reduce fp register pressure, avoids the fp extract and allows use of | |||
16517 | // the integer post-inc stores that are not available with vstr. | |||
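// An illustrative example (names are placeholders): given
//   store f16 (extractelement v8f16 %v, i32 %lane)
// where an i32 VGETLANEu of (%v, %lane) already exists in the DAG, the store
// is replaced by a truncating i32->i16 store of that VGETLANEu result.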
16518 | static SDValue PerformExtractFpToIntStores(StoreSDNode *St, SelectionDAG &DAG) { | |||
16519 | if (!St->isSimple() || St->isTruncatingStore() || !St->isUnindexed()) | |||
16520 | return SDValue(); | |||
16521 | SDValue Extract = St->getValue(); | |||
16522 | EVT VT = Extract.getValueType(); | |||
16523 | // For now only uses f16. This may be useful for f32 too, but that will | |||
16524 | // be bitcast(extract), not the VGETLANEu we currently check here. | |||
16525 | if (VT != MVT::f16 || Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT) | |||
16526 | return SDValue(); | |||
16527 | ||||
16528 | SDNode *GetLane = | |||
16529 | DAG.getNodeIfExists(ARMISD::VGETLANEu, DAG.getVTList(MVT::i32), | |||
16530 | {Extract.getOperand(0), Extract.getOperand(1)}); | |||
16531 | if (!GetLane) | |||
16532 | return SDValue(); | |||
16533 | ||||
16534 | LLVMContext &C = *DAG.getContext(); | |||
16535 | SDLoc DL(St); | |||
16536 | // Create a new integer store to replace the existing floating point version. | |||
16537 | SDValue Ch = St->getChain(); | |||
16538 | SDValue BasePtr = St->getBasePtr(); | |||
16539 | Align Alignment = St->getOriginalAlign(); | |||
16540 | MachineMemOperand::Flags MMOFlags = St->getMemOperand()->getFlags(); | |||
16541 | AAMDNodes AAInfo = St->getAAInfo(); | |||
16542 | EVT NewToVT = EVT::getIntegerVT(C, VT.getSizeInBits()); | |||
16543 | SDValue Store = DAG.getTruncStore(Ch, DL, SDValue(GetLane, 0), BasePtr, | |||
16544 | St->getPointerInfo(), NewToVT, | |||
16545 | Alignment.value(), MMOFlags, AAInfo); | |||
16546 | ||||
16547 | return Store; | |||
16548 | } | |||
16549 | ||||
16550 | /// PerformSTORECombine - Target-specific dag combine xforms for | |||
16551 | /// ISD::STORE. | |||
16552 | static SDValue PerformSTORECombine(SDNode *N, | |||
16553 | TargetLowering::DAGCombinerInfo &DCI, | |||
16554 | const ARMSubtarget *Subtarget) { | |||
16555 | StoreSDNode *St = cast<StoreSDNode>(N); | |||
16556 | if (St->isVolatile()) | |||
16557 | return SDValue(); | |||
16558 | SDValue StVal = St->getValue(); | |||
16559 | EVT VT = StVal.getValueType(); | |||
16560 | ||||
16561 | if (Subtarget->hasNEON()) | |||
16562 | if (SDValue Store = PerformTruncatingStoreCombine(St, DCI.DAG)) | |||
16563 | return Store; | |||
16564 | ||||
16565 | if (Subtarget->hasMVEIntegerOps()) { | |||
16566 | if (SDValue NewToken = PerformSplittingToNarrowingStores(St, DCI.DAG)) | |||
16567 | return NewToken; | |||
16568 | if (SDValue NewChain = PerformExtractFpToIntStores(St, DCI.DAG)) | |||
16569 | return NewChain; | |||
16570 | if (SDValue NewToken = | |||
16571 | PerformSplittingMVETruncToNarrowingStores(St, DCI.DAG)) | |||
16572 | return NewToken; | |||
16573 | } | |||
16574 | ||||
16575 | if (!ISD::isNormalStore(St)) | |||
16576 | return SDValue(); | |||
16577 | ||||
16578 | // Split a store of a VMOVDRR into two integer stores to avoid mixing NEON and | |||
16579 | // ARM stores of arguments in the same cache line. | |||
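// Illustrative little-endian example: a one-use f64 store of
//   VMOVDRR(i32 %lo, i32 %hi)
// becomes an i32 store of %lo at the base address and an i32 store of %hi at
// base+4; the two operands swap roles on big-endian targets.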
16580 | if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR && | |||
16581 | StVal.getNode()->hasOneUse()) { | |||
16582 | SelectionDAG &DAG = DCI.DAG; | |||
16583 | bool isBigEndian = DAG.getDataLayout().isBigEndian(); | |||
16584 | SDLoc DL(St); | |||
16585 | SDValue BasePtr = St->getBasePtr(); | |||
16586 | SDValue NewST1 = DAG.getStore( | |||
16587 | St->getChain(), DL, StVal.getNode()->getOperand(isBigEndian ? 1 : 0), | |||
16588 | BasePtr, St->getPointerInfo(), St->getOriginalAlign(), | |||
16589 | St->getMemOperand()->getFlags()); | |||
16590 | ||||
16591 | SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr, | |||
16592 | DAG.getConstant(4, DL, MVT::i32)); | |||
16593 | return DAG.getStore(NewST1.getValue(0), DL, | |||
16594 | StVal.getNode()->getOperand(isBigEndian ? 0 : 1), | |||
16595 | OffsetPtr, St->getPointerInfo().getWithOffset(4), | |||
16596 | St->getOriginalAlign(), | |||
16597 | St->getMemOperand()->getFlags()); | |||
16598 | } | |||
16599 | ||||
16600 | if (StVal.getValueType() == MVT::i64 && | |||
16601 | StVal.getNode()->getOpcode() == ISD::EXTRACT_VECTOR_ELT) { | |||
16602 | ||||
16603 | // Bitcast an i64 store extracted from a vector to f64. | |||
16604 | // Otherwise, the i64 value will be legalized to a pair of i32 values. | |||
16605 | SelectionDAG &DAG = DCI.DAG; | |||
16606 | SDLoc dl(StVal); | |||
16607 | SDValue IntVec = StVal.getOperand(0); | |||
16608 | EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, | |||
16609 | IntVec.getValueType().getVectorNumElements()); | |||
16610 | SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, IntVec); | |||
16611 | SDValue ExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, | |||
16612 | Vec, StVal.getOperand(1)); | |||
16613 | dl = SDLoc(N); | |||
16614 | SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ExtElt); | |||
16615 | // Make the DAGCombiner fold the bitcasts. | |||
16616 | DCI.AddToWorklist(Vec.getNode()); | |||
16617 | DCI.AddToWorklist(ExtElt.getNode()); | |||
16618 | DCI.AddToWorklist(V.getNode()); | |||
16619 | return DAG.getStore(St->getChain(), dl, V, St->getBasePtr(), | |||
16620 | St->getPointerInfo(), St->getAlignment(), | |||
16621 | St->getMemOperand()->getFlags(), St->getAAInfo()); | |||
16622 | } | |||
16623 | ||||
16624 | // If this is a legal vector store, try to combine it into a VST1_UPD. | |||
16625 | if (Subtarget->hasNEON() && ISD::isNormalStore(N) && VT.isVector() && | |||
16626 | DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT)) | |||
16627 | return CombineBaseUpdate(N, DCI); | |||
16628 | ||||
16629 | return SDValue(); | |||
16630 | } | |||
16631 | ||||
16632 | /// PerformVCVTCombine - VCVT (floating-point to fixed-point, Advanced SIMD) | |||
16633 | /// can replace combinations of VMUL and VCVT (floating-point to integer) | |||
16634 | /// when the VMUL has a constant operand that is a power of 2. | |||
16635 | /// | |||
16636 | /// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>): | |||
16637 | /// vmul.f32 d16, d17, d16 | |||
16638 | /// vcvt.s32.f32 d16, d16 | |||
16639 | /// becomes: | |||
16640 | /// vcvt.s32.f32 d16, d16, #3 | |||
16641 | static SDValue PerformVCVTCombine(SDNode *N, SelectionDAG &DAG, | |||
16642 | const ARMSubtarget *Subtarget) { | |||
16643 | if (!Subtarget->hasNEON()) | |||
16644 | return SDValue(); | |||
16645 | ||||
16646 | SDValue Op = N->getOperand(0); | |||
16647 | if (!Op.getValueType().isVector() || !Op.getValueType().isSimple() || | |||
16648 | Op.getOpcode() != ISD::FMUL) | |||
16649 | return SDValue(); | |||
16650 | ||||
16651 | SDValue ConstVec = Op->getOperand(1); | |||
16652 | if (!isa<BuildVectorSDNode>(ConstVec)) | |||
16653 | return SDValue(); | |||
16654 | ||||
16655 | MVT FloatTy = Op.getSimpleValueType().getVectorElementType(); | |||
16656 | uint32_t FloatBits = FloatTy.getSizeInBits(); | |||
16657 | MVT IntTy = N->getSimpleValueType(0).getVectorElementType(); | |||
16658 | uint32_t IntBits = IntTy.getSizeInBits(); | |||
16659 | unsigned NumLanes = Op.getValueType().getVectorNumElements(); | |||
16660 | if (FloatBits != 32 || IntBits > 32 || (NumLanes != 4 && NumLanes != 2)) { | |||
16661 | // These instructions only exist converting from f32 to i32. We can handle | |||
16662 | // smaller integers by generating an extra truncate, but larger ones would | |||
16663 | // be lossy. We also can't handle anything other than 2 or 4 lanes, since | |||
16664 | // these instructions only support v2i32/v4i32 types. | |||
16665 | return SDValue(); | |||
16666 | } | |||
16667 | ||||
16668 | BitVector UndefElements; | |||
16669 | BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec); | |||
16670 | int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, 33); | |||
16671 | if (C == -1 || C == 0 || C > 32) | |||
16672 | return SDValue(); | |||
16673 | ||||
16674 | SDLoc dl(N); | |||
16675 | bool isSigned = N->getOpcode() == ISD::FP_TO_SINT; | |||
16676 | unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs : | |||
16677 | Intrinsic::arm_neon_vcvtfp2fxu; | |||
16678 | SDValue FixConv = DAG.getNode( | |||
16679 | ISD::INTRINSIC_WO_CHAIN, dl, NumLanes == 2 ? MVT::v2i32 : MVT::v4i32, | |||
16680 | DAG.getConstant(IntrinsicOpcode, dl, MVT::i32), Op->getOperand(0), | |||
16681 | DAG.getConstant(C, dl, MVT::i32)); | |||
16682 | ||||
16683 | if (IntBits < FloatBits) | |||
16684 | FixConv = DAG.getNode(ISD::TRUNCATE, dl, N->getValueType(0), FixConv); | |||
16685 | ||||
16686 | return FixConv; | |||
16687 | } | |||
16688 | ||||
16689 | static SDValue PerformFAddVSelectCombine(SDNode *N, SelectionDAG &DAG, | |||
16690 | const ARMSubtarget *Subtarget) { | |||
16691 | if (!Subtarget->hasMVEFloatOps()) | |||
16692 | return SDValue(); | |||
16693 | ||||
16694 | // Turn (fadd x, (vselect c, y, -0.0)) into (vselect c, (fadd x, y), x) | |||
16695 | // The second form can be more easily turned into a predicated vadd, and | |||
16696 | // possibly combined into a fma to become a predicated vfma. | |||
16697 | SDValue Op0 = N->getOperand(0); | |||
16698 | SDValue Op1 = N->getOperand(1); | |||
16699 | EVT VT = N->getValueType(0); | |||
16700 | SDLoc DL(N); | |||
16701 | ||||
16702 | // The identity element for a fadd is -0.0, which these VMOVs represent. | |||
16703 | auto isNegativeZeroSplat = [&](SDValue Op) { | |||
16704 | if (Op.getOpcode() != ISD::BITCAST || | |||
16705 | Op.getOperand(0).getOpcode() != ARMISD::VMOVIMM) | |||
16706 | return false; | |||
16707 | if (VT == MVT::v4f32 && Op.getOperand(0).getConstantOperandVal(0) == 1664) | |||
16708 | return true; | |||
16709 | if (VT == MVT::v8f16 && Op.getOperand(0).getConstantOperandVal(0) == 2688) | |||
16710 | return true; | |||
16711 | return false; | |||
16712 | }; | |||
16713 | ||||
16714 | if (Op0.getOpcode() == ISD::VSELECT && Op1.getOpcode() != ISD::VSELECT) | |||
16715 | std::swap(Op0, Op1); | |||
16716 | ||||
16717 | if (Op1.getOpcode() != ISD::VSELECT || | |||
16718 | !isNegativeZeroSplat(Op1.getOperand(2))) | |||
16719 | return SDValue(); | |||
16720 | SDValue FAdd = | |||
16721 | DAG.getNode(ISD::FADD, DL, VT, Op0, Op1.getOperand(1), N->getFlags()); | |||
16722 | return DAG.getNode(ISD::VSELECT, DL, VT, Op1.getOperand(0), FAdd, Op0); | |||
16723 | } | |||
16724 | ||||
16725 | /// PerformVDIVCombine - VCVT (fixed-point to floating-point, Advanced SIMD) | |||
16726 | /// can replace combinations of VCVT (integer to floating-point) and VDIV | |||
16727 | /// when the VDIV has a constant operand that is a power of 2. | |||
16728 | /// | |||
16729 | /// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>): | |||
16730 | /// vcvt.f32.s32 d16, d16 | |||
16731 | /// vdiv.f32 d16, d17, d16 | |||
16732 | /// becomes: | |||
16733 | /// vcvt.f32.s32 d16, d16, #3 | |||
16734 | static SDValue PerformVDIVCombine(SDNode *N, SelectionDAG &DAG, | |||
16735 | const ARMSubtarget *Subtarget) { | |||
16736 | if (!Subtarget->hasNEON()) | |||
16737 | return SDValue(); | |||
16738 | ||||
16739 | SDValue Op = N->getOperand(0); | |||
16740 | unsigned OpOpcode = Op.getNode()->getOpcode(); | |||
16741 | if (!N->getValueType(0).isVector() || !N->getValueType(0).isSimple() || | |||
16742 | (OpOpcode != ISD::SINT_TO_FP && OpOpcode != ISD::UINT_TO_FP)) | |||
16743 | return SDValue(); | |||
16744 | ||||
16745 | SDValue ConstVec = N->getOperand(1); | |||
16746 | if (!isa<BuildVectorSDNode>(ConstVec)) | |||
16747 | return SDValue(); | |||
16748 | ||||
16749 | MVT FloatTy = N->getSimpleValueType(0).getVectorElementType(); | |||
16750 | uint32_t FloatBits = FloatTy.getSizeInBits(); | |||
16751 | MVT IntTy = Op.getOperand(0).getSimpleValueType().getVectorElementType(); | |||
16752 | uint32_t IntBits = IntTy.getSizeInBits(); | |||
16753 | unsigned NumLanes = Op.getValueType().getVectorNumElements(); | |||
16754 | if (FloatBits != 32 || IntBits > 32 || (NumLanes != 4 && NumLanes != 2)) { | |||
16755 | // These instructions only exist converting from i32 to f32. We can handle | |||
16756 | // smaller integers by generating an extra extend, but larger ones would | |||
16757 | // be lossy. We also can't handle anything other than 2 or 4 lanes, since | |||
16758 | // these instructions only support v2i32/v4i32 types. | |||
16759 | return SDValue(); | |||
16760 | } | |||
16761 | ||||
16762 | BitVector UndefElements; | |||
16763 | BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec); | |||
16764 | int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, 33); | |||
16765 | if (C == -1 || C == 0 || C > 32) | |||
16766 | return SDValue(); | |||
16767 | ||||
16768 | SDLoc dl(N); | |||
16769 | bool isSigned = OpOpcode == ISD::SINT_TO_FP; | |||
16770 | SDValue ConvInput = Op.getOperand(0); | |||
16771 | if (IntBits < FloatBits) | |||
16772 | ConvInput = DAG.getNode(isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, | |||
16773 | dl, NumLanes == 2 ? MVT::v2i32 : MVT::v4i32, | |||
16774 | ConvInput); | |||
16775 | ||||
16776 | unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp : | |||
16777 | Intrinsic::arm_neon_vcvtfxu2fp; | |||
16778 | return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, | |||
16779 | Op.getValueType(), | |||
16780 | DAG.getConstant(IntrinsicOpcode, dl, MVT::i32), | |||
16781 | ConvInput, DAG.getConstant(C, dl, MVT::i32)); | |||
16782 | } | |||
16783 | ||||
16784 | static SDValue PerformVECREDUCE_ADDCombine(SDNode *N, SelectionDAG &DAG, | |||
16785 | const ARMSubtarget *ST) { | |||
16786 | if (!ST->hasMVEIntegerOps()) | |||
16787 | return SDValue(); | |||
16788 | ||||
16789 | assert(N->getOpcode() == ISD::VECREDUCE_ADD); | |||
16790 | EVT ResVT = N->getValueType(0); | |||
16791 | SDValue N0 = N->getOperand(0); | |||
16792 | SDLoc dl(N); | |||
16793 | ||||
16794 | // Try to turn vecreduce_add(add(x, y)) into vecreduce(x) + vecreduce(y) | |||
16795 | if (ResVT == MVT::i32 && N0.getOpcode() == ISD::ADD && | |||
16796 | (N0.getValueType() == MVT::v4i32 || N0.getValueType() == MVT::v8i16 || | |||
16797 | N0.getValueType() == MVT::v16i8)) { | |||
16798 | SDValue Red0 = DAG.getNode(ISD::VECREDUCE_ADD, dl, ResVT, N0.getOperand(0)); | |||
16799 | SDValue Red1 = DAG.getNode(ISD::VECREDUCE_ADD, dl, ResVT, N0.getOperand(1)); | |||
16800 | return DAG.getNode(ISD::ADD, dl, ResVT, Red0, Red1); | |||
16801 | } | |||
16802 | ||||
16803 | // We are looking for something that will have illegal types if left alone, | |||
16804 | // but that we can convert to a single instruction under MVE. For example | |||
16805 | // vecreduce_add(sext(A, v8i32)) => VADDV.s16 A | |||
16806 | // or | |||
16807 | // vecreduce_add(mul(zext(A, v16i32), zext(B, v16i32))) => VMLADAV.u8 A, B | |||
16808 | ||||
16809 | // The legal cases are: | |||
16810 | // VADDV u/s 8/16/32 | |||
16811 | // VMLAV u/s 8/16/32 | |||
16812 | // VADDLV u/s 32 | |||
16813 | // VMLALV u/s 16/32 | |||
16814 | ||||
16815 | // If the input vector is smaller than legal (v4i8/v4i16 for example) we can | |||
16816 | // extend it and use v4i32 instead. | |||
16817 | auto ExtTypeMatches = [](SDValue A, ArrayRef<MVT> ExtTypes) { | |||
16818 | EVT AVT = A.getValueType(); | |||
16819 | return any_of(ExtTypes, [&](MVT Ty) { | |||
16820 | return AVT.getVectorNumElements() == Ty.getVectorNumElements() && | |||
16821 | AVT.bitsLE(Ty); | |||
16822 | }); | |||
16823 | }; | |||
16824 | auto ExtendIfNeeded = [&](SDValue A, unsigned ExtendCode) { | |||
16825 | EVT AVT = A.getValueType(); | |||
16826 | if (!AVT.is128BitVector()) | |||
16827 | A = DAG.getNode(ExtendCode, dl, | |||
16828 | AVT.changeVectorElementType(MVT::getIntegerVT( | |||
16829 | 128 / AVT.getVectorMinNumElements())), | |||
16830 | A); | |||
16831 | return A; | |||
16832 | }; | |||
16833 | auto IsVADDV = [&](MVT RetTy, unsigned ExtendCode, ArrayRef<MVT> ExtTypes) { | |||
16834 | if (ResVT != RetTy || N0->getOpcode() != ExtendCode) | |||
16835 | return SDValue(); | |||
16836 | SDValue A = N0->getOperand(0); | |||
16837 | if (ExtTypeMatches(A, ExtTypes)) | |||
16838 | return ExtendIfNeeded(A, ExtendCode); | |||
16839 | return SDValue(); | |||
16840 | }; | |||
16841 | auto IsPredVADDV = [&](MVT RetTy, unsigned ExtendCode, | |||
16842 | ArrayRef<MVT> ExtTypes, SDValue &Mask) { | |||
16843 | if (ResVT != RetTy || N0->getOpcode() != ISD::VSELECT || | |||
16844 | !ISD::isBuildVectorAllZeros(N0->getOperand(2).getNode())) | |||
16845 | return SDValue(); | |||
16846 | Mask = N0->getOperand(0); | |||
16847 | SDValue Ext = N0->getOperand(1); | |||
16848 | if (Ext->getOpcode() != ExtendCode) | |||
16849 | return SDValue(); | |||
16850 | SDValue A = Ext->getOperand(0); | |||
16851 | if (ExtTypeMatches(A, ExtTypes)) | |||
16852 | return ExtendIfNeeded(A, ExtendCode); | |||
16853 | return SDValue(); | |||
16854 | }; | |||
16855 | auto IsVMLAV = [&](MVT RetTy, unsigned ExtendCode, ArrayRef<MVT> ExtTypes, | |||
16856 | SDValue &A, SDValue &B) { | |||
16857 | // For a vmla we are trying to match a larger pattern: | |||
16858 | // ExtA = sext/zext A | |||
16859 | // ExtB = sext/zext B | |||
16860 | // Mul = mul ExtA, ExtB | |||
16861 | // vecreduce.add Mul | |||
16862 | // There might also be an extra extend between the mul and the addreduce, so | |||
16863 | // long as the bitwidth is high enough to make them equivalent (for example | |||
16864 | // original v8i16 might be mul at v8i32 and the reduce happens at v8i64). | |||
16865 | if (ResVT != RetTy) | |||
16866 | return false; | |||
16867 | SDValue Mul = N0; | |||
16868 | if (Mul->getOpcode() == ExtendCode && | |||
16869 | Mul->getOperand(0).getScalarValueSizeInBits() * 2 >= | |||
16870 | ResVT.getScalarSizeInBits()) | |||
16871 | Mul = Mul->getOperand(0); | |||
16872 | if (Mul->getOpcode() != ISD::MUL) | |||
16873 | return false; | |||
16874 | SDValue ExtA = Mul->getOperand(0); | |||
16875 | SDValue ExtB = Mul->getOperand(1); | |||
16876 | if (ExtA->getOpcode() != ExtendCode || ExtB->getOpcode() != ExtendCode) | |||
16877 | return false; | |||
16878 | A = ExtA->getOperand(0); | |||
16879 | B = ExtB->getOperand(0); | |||
16880 | if (ExtTypeMatches(A, ExtTypes) && ExtTypeMatches(B, ExtTypes)) { | |||
16881 | A = ExtendIfNeeded(A, ExtendCode); | |||
16882 | B = ExtendIfNeeded(B, ExtendCode); | |||
16883 | return true; | |||
16884 | } | |||
16885 | return false; | |||
16886 | }; | |||
16887 | auto IsPredVMLAV = [&](MVT RetTy, unsigned ExtendCode, ArrayRef<MVT> ExtTypes, | |||
16888 | SDValue &A, SDValue &B, SDValue &Mask) { | |||
16889 | // Same as the pattern above with a select for the zero predicated lanes | |||
16890 | // ExtA = sext/zext A | |||
16891 | // ExtB = sext/zext B | |||
16892 | // Mul = mul ExtA, ExtB | |||
16893 | // N0 = select Mask, Mul, 0 | |||
16894 | // vecreduce.add N0 | |||
16895 | if (ResVT != RetTy || N0->getOpcode() != ISD::VSELECT || | |||
16896 | !ISD::isBuildVectorAllZeros(N0->getOperand(2).getNode())) | |||
16897 | return false; | |||
16898 | Mask = N0->getOperand(0); | |||
16899 | SDValue Mul = N0->getOperand(1); | |||
16900 | if (Mul->getOpcode() == ExtendCode && | |||
16901 | Mul->getOperand(0).getScalarValueSizeInBits() * 2 >= | |||
16902 | ResVT.getScalarSizeInBits()) | |||
16903 | Mul = Mul->getOperand(0); | |||
16904 | if (Mul->getOpcode() != ISD::MUL) | |||
16905 | return false; | |||
16906 | SDValue ExtA = Mul->getOperand(0); | |||
16907 | SDValue ExtB = Mul->getOperand(1); | |||
16908 | if (ExtA->getOpcode() != ExtendCode || ExtB->getOpcode() != ExtendCode) | |||
16909 | return false; | |||
16910 | A = ExtA->getOperand(0); | |||
16911 | B = ExtB->getOperand(0); | |||
16912 | if (ExtTypeMatches(A, ExtTypes) && ExtTypeMatches(B, ExtTypes)) { | |||
16913 | A = ExtendIfNeeded(A, ExtendCode); | |||
16914 | B = ExtendIfNeeded(B, ExtendCode); | |||
16915 | return true; | |||
16916 | } | |||
16917 | return false; | |||
16918 | }; | |||
16919 | auto Create64bitNode = [&](unsigned Opcode, ArrayRef<SDValue> Ops) { | |||
16920 | // Split illegal MVT::v16i8->i64 vector reductions into two legal v8i16->i64 | |||
16921 | // reductions. The operands are extended with MVEEXT, but as they are | |||
16922 | // reductions the lane orders do not matter. MVEEXT may be combined with | |||
16923 | // loads to produce two extending loads, or else they will be expanded to | |||
16924 | // VREV/VMOVL. | |||
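// An illustrative expansion (operand names are placeholders):
//   i64 VMLALVu(v16i8 %a, v16i8 %b)
// -> %a0,%a1 = MVEZEXT %a; %b0,%b1 = MVEZEXT %b   (two v8i16 halves each)
//    %lo,%hi = VMLALVu(%a0, %b0)
//    %lo,%hi = VMLALVAu(%lo, %hi, %a1, %b1)       (accumulating form)
//    BUILD_PAIR(%lo, %hi)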
16925 | EVT VT = Ops[0].getValueType(); | |||
16926 | if (VT == MVT::v16i8) { | |||
16927 | assert((Opcode == ARMISD::VMLALVs || Opcode == ARMISD::VMLALVu) && | |||
16928 | "Unexpected illegal long reduction opcode"); | |||
16929 | bool IsUnsigned = Opcode == ARMISD::VMLALVu; | |||
16930 | ||||
16931 | SDValue Ext0 = | |||
16932 | DAG.getNode(IsUnsigned ? ARMISD::MVEZEXT : ARMISD::MVESEXT, dl, | |||
16933 | DAG.getVTList(MVT::v8i16, MVT::v8i16), Ops[0]); | |||
16934 | SDValue Ext1 = | |||
16935 | DAG.getNode(IsUnsigned ? ARMISD::MVEZEXT : ARMISD::MVESEXT, dl, | |||
16936 | DAG.getVTList(MVT::v8i16, MVT::v8i16), Ops[1]); | |||
16937 | ||||
16938 | SDValue MLA0 = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32), | |||
16939 | Ext0, Ext1); | |||
16940 | SDValue MLA1 = | |||
16941 | DAG.getNode(IsUnsigned ? ARMISD::VMLALVAu : ARMISD::VMLALVAs, dl, | |||
16942 | DAG.getVTList(MVT::i32, MVT::i32), MLA0, MLA0.getValue(1), | |||
16943 | Ext0.getValue(1), Ext1.getValue(1)); | |||
16944 | return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, MLA1, MLA1.getValue(1)); | |||
16945 | } | |||
16946 | SDValue Node = DAG.getNode(Opcode, dl, {MVT::i32, MVT::i32}, Ops); | |||
16947 | return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Node, | |||
16948 | SDValue(Node.getNode(), 1)); | |||
16949 | }; | |||
16950 | ||||
16951 | SDValue A, B; | |||
16952 | SDValue Mask; | |||
16953 | if (IsVMLAV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B)) | |||
16954 | return DAG.getNode(ARMISD::VMLAVs, dl, ResVT, A, B); | |||
16955 | if (IsVMLAV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B)) | |||
16956 | return DAG.getNode(ARMISD::VMLAVu, dl, ResVT, A, B); | |||
16957 | if (IsVMLAV(MVT::i64, ISD::SIGN_EXTEND, {MVT::v16i8, MVT::v8i16, MVT::v4i32}, | |||
16958 | A, B)) | |||
16959 | return Create64bitNode(ARMISD::VMLALVs, {A, B}); | |||
16960 | if (IsVMLAV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v16i8, MVT::v8i16, MVT::v4i32}, | |||
16961 | A, B)) | |||
16962 | return Create64bitNode(ARMISD::VMLALVu, {A, B}); | |||
16963 | if (IsVMLAV(MVT::i16, ISD::SIGN_EXTEND, {MVT::v16i8}, A, B)) | |||
16964 | return DAG.getNode(ISD::TRUNCATE, dl, ResVT, | |||
16965 | DAG.getNode(ARMISD::VMLAVs, dl, MVT::i32, A, B)); | |||
16966 | if (IsVMLAV(MVT::i16, ISD::ZERO_EXTEND, {MVT::v16i8}, A, B)) | |||
16967 | return DAG.getNode(ISD::TRUNCATE, dl, ResVT, | |||
16968 | DAG.getNode(ARMISD::VMLAVu, dl, MVT::i32, A, B)); | |||
16969 | ||||
16970 | if (IsPredVMLAV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B, | |||
16971 | Mask)) | |||
16972 | return DAG.getNode(ARMISD::VMLAVps, dl, ResVT, A, B, Mask); | |||
16973 | if (IsPredVMLAV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B, | |||
16974 | Mask)) | |||
16975 | return DAG.getNode(ARMISD::VMLAVpu, dl, ResVT, A, B, Mask); | |||
16976 | if (IsPredVMLAV(MVT::i64, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v4i32}, A, B, | |||
16977 | Mask)) | |||
16978 | return Create64bitNode(ARMISD::VMLALVps, {A, B, Mask}); | |||
16979 | if (IsPredVMLAV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v4i32}, A, B, | |||
16980 | Mask)) | |||
16981 | return Create64bitNode(ARMISD::VMLALVpu, {A, B, Mask}); | |||
16982 | if (IsPredVMLAV(MVT::i16, ISD::SIGN_EXTEND, {MVT::v16i8}, A, B, Mask)) | |||
16983 | return DAG.getNode(ISD::TRUNCATE, dl, ResVT, | |||
16984 | DAG.getNode(ARMISD::VMLAVps, dl, MVT::i32, A, B, Mask)); | |||
16985 | if (IsPredVMLAV(MVT::i16, ISD::ZERO_EXTEND, {MVT::v16i8}, A, B, Mask)) | |||
16986 | return DAG.getNode(ISD::TRUNCATE, dl, ResVT, | |||
16987 | DAG.getNode(ARMISD::VMLAVpu, dl, MVT::i32, A, B, Mask)); | |||
16988 | ||||
16989 | if (SDValue A = IsVADDV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8})) | |||
16990 | return DAG.getNode(ARMISD::VADDVs, dl, ResVT, A); | |||
16991 | if (SDValue A = IsVADDV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8})) | |||
16992 | return DAG.getNode(ARMISD::VADDVu, dl, ResVT, A); | |||
16993 | if (SDValue A = IsVADDV(MVT::i64, ISD::SIGN_EXTEND, {MVT::v4i32})) | |||
16994 | return Create64bitNode(ARMISD::VADDLVs, {A}); | |||
16995 | if (SDValue A = IsVADDV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v4i32})) | |||
16996 | return Create64bitNode(ARMISD::VADDLVu, {A}); | |||
16997 | if (SDValue A = IsVADDV(MVT::i16, ISD::SIGN_EXTEND, {MVT::v16i8})) | |||
16998 | return DAG.getNode(ISD::TRUNCATE, dl, ResVT, | |||
16999 | DAG.getNode(ARMISD::VADDVs, dl, MVT::i32, A)); | |||
17000 | if (SDValue A = IsVADDV(MVT::i16, ISD::ZERO_EXTEND, {MVT::v16i8})) | |||
17001 | return DAG.getNode(ISD::TRUNCATE, dl, ResVT, | |||
17002 | DAG.getNode(ARMISD::VADDVu, dl, MVT::i32, A)); | |||
17003 | ||||
17004 | if (SDValue A = IsPredVADDV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8}, Mask)) | |||
17005 | return DAG.getNode(ARMISD::VADDVps, dl, ResVT, A, Mask); | |||
17006 | if (SDValue A = IsPredVADDV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8}, Mask)) | |||
17007 | return DAG.getNode(ARMISD::VADDVpu, dl, ResVT, A, Mask); | |||
17008 | if (SDValue A = IsPredVADDV(MVT::i64, ISD::SIGN_EXTEND, {MVT::v4i32}, Mask)) | |||
17009 | return Create64bitNode(ARMISD::VADDLVps, {A, Mask}); | |||
17010 | if (SDValue A = IsPredVADDV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v4i32}, Mask)) | |||
17011 | return Create64bitNode(ARMISD::VADDLVpu, {A, Mask}); | |||
17012 | if (SDValue A = IsPredVADDV(MVT::i16, ISD::SIGN_EXTEND, {MVT::v16i8}, Mask)) | |||
17013 | return DAG.getNode(ISD::TRUNCATE, dl, ResVT, | |||
17014 | DAG.getNode(ARMISD::VADDVps, dl, MVT::i32, A, Mask)); | |||
17015 | if (SDValue A = IsPredVADDV(MVT::i16, ISD::ZERO_EXTEND, {MVT::v16i8}, Mask)) | |||
17016 | return DAG.getNode(ISD::TRUNCATE, dl, ResVT, | |||
17017 | DAG.getNode(ARMISD::VADDVpu, dl, MVT::i32, A, Mask)); | |||
17018 | ||||
17019 | // Some complications. We can get a case where the two inputs of the mul are | |||
17020 | // the same, then the output sext will have been helpfully converted to a | |||
17021 | // zext (a squared value is known non-negative). Turn it back. | |||
17022 | SDValue Op = N0; | |||
17023 | if (Op->getOpcode() == ISD::VSELECT) | |||
17024 | Op = Op->getOperand(1); | |||
17025 | if (Op->getOpcode() == ISD::ZERO_EXTEND && | |||
17026 | Op->getOperand(0)->getOpcode() == ISD::MUL) { | |||
17027 | SDValue Mul = Op->getOperand(0); | |||
17028 | if (Mul->getOperand(0) == Mul->getOperand(1) && | |||
17029 | Mul->getOperand(0)->getOpcode() == ISD::SIGN_EXTEND) { | |||
17030 | SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, dl, N0->getValueType(0), Mul); | |||
17031 | if (Op != N0) | |||
17032 | Ext = DAG.getNode(ISD::VSELECT, dl, N0->getValueType(0), | |||
17033 | N0->getOperand(0), Ext, N0->getOperand(2)); | |||
17034 | return DAG.getNode(ISD::VECREDUCE_ADD, dl, ResVT, Ext); | |||
17035 | } | |||
17036 | } | |||
17037 | ||||
17038 | return SDValue(); | |||
17039 | } | |||
17040 | ||||
17041 | static SDValue PerformVMOVNCombine(SDNode *N, | |||
17042 | TargetLowering::DAGCombinerInfo &DCI) { | |||
17043 | SDValue Op0 = N->getOperand(0); | |||
17044 | SDValue Op1 = N->getOperand(1); | |||
17045 | unsigned IsTop = N->getConstantOperandVal(2); | |||
17046 | ||||
17047 | // VMOVNT a undef -> a | |||
17048 | // VMOVNB a undef -> a | |||
17049 | // VMOVNB undef a -> a | |||
17050 | if (Op1->isUndef()) | |||
17051 | return Op0; | |||
17052 | if (Op0->isUndef() && !IsTop) | |||
17053 | return Op1; | |||
17054 | ||||
17055 | // VMOVNt(c, VQMOVNb(a, b)) => VQMOVNt(c, b) | |||
17056 | // VMOVNb(c, VQMOVNb(a, b)) => VQMOVNb(c, b) | |||
17057 | if ((Op1->getOpcode() == ARMISD::VQMOVNs || | |||
17058 | Op1->getOpcode() == ARMISD::VQMOVNu) && | |||
17059 | Op1->getConstantOperandVal(2) == 0) | |||
17060 | return DCI.DAG.getNode(Op1->getOpcode(), SDLoc(Op1), N->getValueType(0), | |||
17061 | Op0, Op1->getOperand(1), N->getOperand(2)); | |||
17062 | ||||
17063 | // Only the bottom lanes from Qm (Op1) and either the top or bottom lanes from | |||
17064 | // Qd (Op0) are demanded from a VMOVN, depending on whether we are inserting | |||
17065 | // into the top or bottom lanes. | |||
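// Concretely (illustrative, v8i16 result): Op1's demanded lanes are the even
// ones, 0b01010101; Op0's are the same for a VMOVNT (its top lanes get
// overwritten) and the odd lanes, 0b10101010, for a VMOVNB.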
17066 | unsigned NumElts = N->getValueType(0).getVectorNumElements(); | |||
17067 | APInt Op1DemandedElts = APInt::getSplat(NumElts, APInt::getLowBitsSet(2, 1)); | |||
17068 | APInt Op0DemandedElts = | |||
17069 | IsTop ? Op1DemandedElts | |||
17070 | : APInt::getSplat(NumElts, APInt::getHighBitsSet(2, 1)); | |||
17071 | ||||
17072 | const TargetLowering &TLI = DCI.DAG.getTargetLoweringInfo(); | |||
17073 | if (TLI.SimplifyDemandedVectorElts(Op0, Op0DemandedElts, DCI)) | |||
17074 | return SDValue(N, 0); | |||
17075 | if (TLI.SimplifyDemandedVectorElts(Op1, Op1DemandedElts, DCI)) | |||
17076 | return SDValue(N, 0); | |||
17077 | ||||
17078 | return SDValue(); | |||
17079 | } | |||
17080 | ||||
17081 | static SDValue PerformVQMOVNCombine(SDNode *N, | |||
17082 | TargetLowering::DAGCombinerInfo &DCI) { | |||
17083 | SDValue Op0 = N->getOperand(0); | |||
17084 | unsigned IsTop = N->getConstantOperandVal(2); | |||
17085 | ||||
17086 | unsigned NumElts = N->getValueType(0).getVectorNumElements(); | |||
17087 | APInt Op0DemandedElts = | |||
17088 | APInt::getSplat(NumElts, IsTop ? APInt::getLowBitsSet(2, 1) | |||
17089 | : APInt::getHighBitsSet(2, 1)); | |||
17090 | ||||
17091 | const TargetLowering &TLI = DCI.DAG.getTargetLoweringInfo(); | |||
17092 | if (TLI.SimplifyDemandedVectorElts(Op0, Op0DemandedElts, DCI)) | |||
17093 | return SDValue(N, 0); | |||
17094 | return SDValue(); | |||
17095 | } | |||
17096 | ||||
17097 | static SDValue PerformLongShiftCombine(SDNode *N, SelectionDAG &DAG) { | |||
17098 | SDLoc DL(N); | |||
17099 | SDValue Op0 = N->getOperand(0); | |||
17100 | SDValue Op1 = N->getOperand(1); | |||
17101 | ||||
17102 | // Turn X << -C -> X >> C and vice versa. The negative shifts can come up from | |||
17103 | // uses of the intrinsics. | |||
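// For example (illustrative): LSLL(%lo, %hi, -3) becomes LSRL(%lo, %hi, 3),
// and a shift amount of 0 simply forwards the two inputs unchanged.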
17104 | if (auto C = dyn_cast<ConstantSDNode>(N->getOperand(2))) { | |||
17105 | int ShiftAmt = C->getSExtValue(); | |||
17106 | if (ShiftAmt == 0) { | |||
17107 | SDValue Merge = DAG.getMergeValues({Op0, Op1}, DL); | |||
17108 | DAG.ReplaceAllUsesWith(N, Merge.getNode()); | |||
17109 | return SDValue(); | |||
17110 | } | |||
17111 | ||||
17112 | if (ShiftAmt >= -32 && ShiftAmt < 0) { | |||
17113 | unsigned NewOpcode = | |||
17114 | N->getOpcode() == ARMISD::LSLL ? ARMISD::LSRL : ARMISD::LSLL; | |||
17115 | SDValue NewShift = DAG.getNode(NewOpcode, DL, N->getVTList(), Op0, Op1, | |||
17116 | DAG.getConstant(-ShiftAmt, DL, MVT::i32)); | |||
17117 | DAG.ReplaceAllUsesWith(N, NewShift.getNode()); | |||
17118 | return NewShift; | |||
17119 | } | |||
17120 | } | |||
17121 | ||||
17122 | return SDValue(); | |||
17123 | } | |||
17124 | ||||
17125 | /// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics. | |||
17126 | SDValue ARMTargetLowering::PerformIntrinsicCombine(SDNode *N, | |||
17127 | DAGCombinerInfo &DCI) const { | |||
17128 | SelectionDAG &DAG = DCI.DAG; | |||
17129 | unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); | |||
17130 | switch (IntNo) { | |||
17131 | default: | |||
17132 | // Don't do anything for most intrinsics. | |||
17133 | break; | |||
17134 | ||||
17135 | // Vector shifts: check for immediate versions and lower them. | |||
17136 | // Note: This is done during DAG combining instead of DAG legalizing because | |||
17137 | // the build_vectors for 64-bit vector element shift counts are generally | |||
17138 | // not legal, and it is hard to see their values after they get legalized to | |||
17139 | // loads from a constant pool. | |||
17140 | case Intrinsic::arm_neon_vshifts: | |||
17141 | case Intrinsic::arm_neon_vshiftu: | |||
17142 | case Intrinsic::arm_neon_vrshifts: | |||
17143 | case Intrinsic::arm_neon_vrshiftu: | |||
17144 | case Intrinsic::arm_neon_vrshiftn: | |||
17145 | case Intrinsic::arm_neon_vqshifts: | |||
17146 | case Intrinsic::arm_neon_vqshiftu: | |||
17147 | case Intrinsic::arm_neon_vqshiftsu: | |||
17148 | case Intrinsic::arm_neon_vqshiftns: | |||
17149 | case Intrinsic::arm_neon_vqshiftnu: | |||
17150 | case Intrinsic::arm_neon_vqshiftnsu: | |||
17151 | case Intrinsic::arm_neon_vqrshiftns: | |||
17152 | case Intrinsic::arm_neon_vqrshiftnu: | |||
17153 | case Intrinsic::arm_neon_vqrshiftnsu: { | |||
17154 | EVT VT = N->getOperand(1).getValueType(); | |||
17155 | int64_t Cnt; | |||
17156 | unsigned VShiftOpc = 0; | |||
17157 | ||||
17158 | switch (IntNo) { | |||
17159 | case Intrinsic::arm_neon_vshifts: | |||
17160 | case Intrinsic::arm_neon_vshiftu: | |||
17161 | if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) { | |||
17162 | VShiftOpc = ARMISD::VSHLIMM; | |||
17163 | break; | |||
17164 | } | |||
17165 | if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) { | |||
17166 | VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ? ARMISD::VSHRsIMM | |||
17167 | : ARMISD::VSHRuIMM); | |||
17168 | break; | |||
17169 | } | |||
17170 | return SDValue(); | |||
17171 | ||||
17172 | case Intrinsic::arm_neon_vrshifts: | |||
17173 | case Intrinsic::arm_neon_vrshiftu: | |||
17174 | if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) | |||
17175 | break; | |||
17176 | return SDValue(); | |||
17177 | ||||
17178 | case Intrinsic::arm_neon_vqshifts: | |||
17179 | case Intrinsic::arm_neon_vqshiftu: | |||
17180 | if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) | |||
17181 | break; | |||
17182 | return SDValue(); | |||
17183 | ||||
17184 | case Intrinsic::arm_neon_vqshiftsu: | |||
17185 | if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) | |||
17186 | break; | |||
17187 | llvm_unreachable("invalid shift count for vqshlu intrinsic")::llvm::llvm_unreachable_internal("invalid shift count for vqshlu intrinsic" , "llvm/lib/Target/ARM/ARMISelLowering.cpp", 17187); | |||
17188 | ||||
17189 | case Intrinsic::arm_neon_vrshiftn: | |||
17190 | case Intrinsic::arm_neon_vqshiftns: | |||
17191 | case Intrinsic::arm_neon_vqshiftnu: | |||
17192 | case Intrinsic::arm_neon_vqshiftnsu: | |||
17193 | case Intrinsic::arm_neon_vqrshiftns: | |||
17194 | case Intrinsic::arm_neon_vqrshiftnu: | |||
17195 | case Intrinsic::arm_neon_vqrshiftnsu: | |||
17196 | // Narrowing shifts require an immediate right shift. | |||
17197 | if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt)) | |||
17198 | break; | |||
17199 | llvm_unreachable("invalid shift count for narrowing vector shift "::llvm::llvm_unreachable_internal("invalid shift count for narrowing vector shift " "intrinsic", "llvm/lib/Target/ARM/ARMISelLowering.cpp", 17200 ) | |||
17200 | "intrinsic")::llvm::llvm_unreachable_internal("invalid shift count for narrowing vector shift " "intrinsic", "llvm/lib/Target/ARM/ARMISelLowering.cpp", 17200 ); | |||
17201 | ||||
17202 | default: | |||
17203 | llvm_unreachable("unhandled vector shift")::llvm::llvm_unreachable_internal("unhandled vector shift", "llvm/lib/Target/ARM/ARMISelLowering.cpp" , 17203); | |||
17204 | } | |||
17205 | ||||
17206 | switch (IntNo) { | |||
17207 | case Intrinsic::arm_neon_vshifts: | |||
17208 | case Intrinsic::arm_neon_vshiftu: | |||
17209 | // Opcode already set above. | |||
17210 | break; | |||
17211 | case Intrinsic::arm_neon_vrshifts: | |||
17212 | VShiftOpc = ARMISD::VRSHRsIMM; | |||
17213 | break; | |||
17214 | case Intrinsic::arm_neon_vrshiftu: | |||
17215 | VShiftOpc = ARMISD::VRSHRuIMM; | |||
17216 | break; | |||
17217 | case Intrinsic::arm_neon_vrshiftn: | |||
17218 | VShiftOpc = ARMISD::VRSHRNIMM; | |||
17219 | break; | |||
17220 | case Intrinsic::arm_neon_vqshifts: | |||
17221 | VShiftOpc = ARMISD::VQSHLsIMM; | |||
17222 | break; | |||
17223 | case Intrinsic::arm_neon_vqshiftu: | |||
17224 | VShiftOpc = ARMISD::VQSHLuIMM; | |||
17225 | break; | |||
17226 | case Intrinsic::arm_neon_vqshiftsu: | |||
17227 | VShiftOpc = ARMISD::VQSHLsuIMM; | |||
17228 | break; | |||
17229 | case Intrinsic::arm_neon_vqshiftns: | |||
17230 | VShiftOpc = ARMISD::VQSHRNsIMM; | |||
17231 | break; | |||
17232 | case Intrinsic::arm_neon_vqshiftnu: | |||
17233 | VShiftOpc = ARMISD::VQSHRNuIMM; | |||
17234 | break; | |||
17235 | case Intrinsic::arm_neon_vqshiftnsu: | |||
17236 | VShiftOpc = ARMISD::VQSHRNsuIMM; | |||
17237 | break; | |||
17238 | case Intrinsic::arm_neon_vqrshiftns: | |||
17239 | VShiftOpc = ARMISD::VQRSHRNsIMM; | |||
17240 | break; | |||
17241 | case Intrinsic::arm_neon_vqrshiftnu: | |||
17242 | VShiftOpc = ARMISD::VQRSHRNuIMM; | |||
17243 | break; | |||
17244 | case Intrinsic::arm_neon_vqrshiftnsu: | |||
17245 | VShiftOpc = ARMISD::VQRSHRNsuIMM; | |||
17246 | break; | |||
17247 | } | |||
17248 | ||||
17249 | SDLoc dl(N); | |||
17250 | return DAG.getNode(VShiftOpc, dl, N->getValueType(0), | |||
17251 | N->getOperand(1), DAG.getConstant(Cnt, dl, MVT::i32)); | |||
17252 | } | |||
17253 | ||||
17254 | case Intrinsic::arm_neon_vshiftins: { | |||
17255 | EVT VT = N->getOperand(1).getValueType(); | |||
17256 | int64_t Cnt; | |||
17257 | unsigned VShiftOpc = 0; | |||
17258 | ||||
17259 | if (isVShiftLImm(N->getOperand(3), VT, false, Cnt)) | |||
17260 | VShiftOpc = ARMISD::VSLIIMM; | |||
17261 | else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt)) | |||
17262 | VShiftOpc = ARMISD::VSRIIMM; | |||
17263 | else { | |||
17264 | llvm_unreachable("invalid shift count for vsli/vsri intrinsic")::llvm::llvm_unreachable_internal("invalid shift count for vsli/vsri intrinsic" , "llvm/lib/Target/ARM/ARMISelLowering.cpp", 17264); | |||
17265 | } | |||
17266 | ||||
17267 | SDLoc dl(N); | |||
17268 | return DAG.getNode(VShiftOpc, dl, N->getValueType(0), | |||
17269 | N->getOperand(1), N->getOperand(2), | |||
17270 | DAG.getConstant(Cnt, dl, MVT::i32)); | |||
17271 | } | |||
17272 | ||||
17273 | case Intrinsic::arm_neon_vqrshifts: | |||
17274 | case Intrinsic::arm_neon_vqrshiftu: | |||
17275 | // No immediate versions of these to check for. | |||
17276 | break; | |||
17277 | ||||
17278 | case Intrinsic::arm_mve_vqdmlah: | |||
17279 | case Intrinsic::arm_mve_vqdmlash: | |||
17280 | case Intrinsic::arm_mve_vqrdmlah: | |||
17281 | case Intrinsic::arm_mve_vqrdmlash: | |||
17282 | case Intrinsic::arm_mve_vmla_n_predicated: | |||
17283 | case Intrinsic::arm_mve_vmlas_n_predicated: | |||
17284 | case Intrinsic::arm_mve_vqdmlah_predicated: | |||
17285 | case Intrinsic::arm_mve_vqdmlash_predicated: | |||
17286 | case Intrinsic::arm_mve_vqrdmlah_predicated: | |||
17287 | case Intrinsic::arm_mve_vqrdmlash_predicated: { | |||
17288 | // These intrinsics all take an i32 scalar operand which is narrowed to the | |||
17289 | // size of a single lane of the vector type they return. So we don't need | |||
17290 | // any bits of that operand above that point, which allows us to eliminate | |||
17291 | // uxth/sxth. | |||
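// For instance (illustrative): with a v8i16 result, BitWidth is 16 and
// DemandedMask is 0xffff, so a preceding sxth/uxth feeding the scalar
// operand contributes no demanded bits and can be dropped.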
17292 | unsigned BitWidth = N->getValueType(0).getScalarSizeInBits(); | |||
17293 | APInt DemandedMask = APInt::getLowBitsSet(32, BitWidth); | |||
17294 | if (SimplifyDemandedBits(N->getOperand(3), DemandedMask, DCI)) | |||
17295 | return SDValue(); | |||
17296 | break; | |||
17297 | } | |||
17298 | ||||
17299 | case Intrinsic::arm_mve_minv: | |||
17300 | case Intrinsic::arm_mve_maxv: | |||
17301 | case Intrinsic::arm_mve_minav: | |||
17302 | case Intrinsic::arm_mve_maxav: | |||
17303 | case Intrinsic::arm_mve_minv_predicated: | |||
17304 | case Intrinsic::arm_mve_maxv_predicated: | |||
17305 | case Intrinsic::arm_mve_minav_predicated: | |||
17306 | case Intrinsic::arm_mve_maxav_predicated: { | |||
17307 | // These intrinsics all take an i32 scalar operand which is narrowed to the | |||
17308 | // size of a single lane of the vector type they take as the other input. | |||
17309 | unsigned BitWidth = N->getOperand(2)->getValueType(0).getScalarSizeInBits(); | |||
17310 | APInt DemandedMask = APInt::getLowBitsSet(32, BitWidth); | |||
17311 | if (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI)) | |||
17312 | return SDValue(); | |||
17313 | break; | |||
17314 | } | |||
17315 | ||||
17316 | case Intrinsic::arm_mve_addv: { | |||
17317 | // Turn this intrinsic straight into the appropriate ARMISD::VADDV node, | |||
17318 | // which allows PerformADDVecReduce to turn it into VADDLV when possible. | |||
17319 | bool Unsigned = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue(); | |||
17320 | unsigned Opc = Unsigned ? ARMISD::VADDVu : ARMISD::VADDVs; | |||
17321 | return DAG.getNode(Opc, SDLoc(N), N->getVTList(), N->getOperand(1)); | |||
17322 | } | |||
17323 | ||||
17324 | case Intrinsic::arm_mve_addlv: | |||
17325 | case Intrinsic::arm_mve_addlv_predicated: { | |||
17326 | // Same for these, but ARMISD::VADDLV has to be followed by a BUILD_PAIR | |||
17327 | // which recombines the two outputs into an i64 | |||
17328 | bool Unsigned = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue(); | |||
17329 | unsigned Opc = IntNo == Intrinsic::arm_mve_addlv ? | |||
17330 | (Unsigned ? ARMISD::VADDLVu : ARMISD::VADDLVs) : | |||
17331 | (Unsigned ? ARMISD::VADDLVpu : ARMISD::VADDLVps); | |||
17332 | ||||
17333 | SmallVector<SDValue, 4> Ops; | |||
17334 | for (unsigned i = 1, e = N->getNumOperands(); i < e; i++) | |||
17335 | if (i != 2) // skip the unsigned flag | |||
17336 | Ops.push_back(N->getOperand(i)); | |||
17337 | ||||
17338 | SDLoc dl(N); | |||
17339 | SDValue val = DAG.getNode(Opc, dl, {MVT::i32, MVT::i32}, Ops); | |||
17340 | return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, val.getValue(0), | |||
17341 | val.getValue(1)); | |||
17342 | } | |||
17343 | } | |||
17344 | ||||
17345 | return SDValue(); | |||
17346 | } | |||
17347 | ||||
17348 | /// PerformShiftCombine - Checks for immediate versions of vector shifts and | |||
17349 | /// lowers them. As with the vector shift intrinsics, this is done during DAG | |||
17350 | /// combining instead of DAG legalizing because the build_vectors for 64-bit | |||
17351 | /// vector element shift counts are generally not legal, and it is hard to see | |||
17352 | /// their values after they get legalized to loads from a constant pool. | |||
17353 | static SDValue PerformShiftCombine(SDNode *N, | |||
17354 | TargetLowering::DAGCombinerInfo &DCI, | |||
17355 | const ARMSubtarget *ST) { | |||
17356 | SelectionDAG &DAG = DCI.DAG; | |||
17357 | EVT VT = N->getValueType(0); | |||
17358 | ||||
17359 | if (ST->isThumb1Only() && N->getOpcode() == ISD::SHL && VT == MVT::i32 && | |||
17360 | N->getOperand(0)->getOpcode() == ISD::AND && | |||
17361 | N->getOperand(0)->hasOneUse()) { | |||
17362 | if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) | |||
17363 | return SDValue(); | |||
17364 | // Look for the pattern (shl (and x, AndMask), ShiftAmt). This doesn't | |||
17365 | // usually show up because instcombine prefers to canonicalize it to | |||
17366 | // (and (shl x, ShiftAmt) (shl AndMask, ShiftAmt)), but the shift can come | |||
17367 | // out of GEP lowering in some cases. | |||
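// A worked example (illustrative constants): with AndMask = 0x3ffff
// (MaskedBits = 14) and ShiftAmt = 2,
//   (shl (and x, 0x3ffff), 2)  ->  (srl (shl x, 14), 12)
// which avoids materialising the mask constant in Thumb1.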
17368 | SDValue N0 = N->getOperand(0); | |||
17369 | ConstantSDNode *ShiftAmtNode = dyn_cast<ConstantSDNode>(N->getOperand(1)); | |||
17370 | if (!ShiftAmtNode) | |||
17371 | return SDValue(); | |||
17372 | uint32_t ShiftAmt = static_cast<uint32_t>(ShiftAmtNode->getZExtValue()); | |||
17373 | ConstantSDNode *AndMaskNode = dyn_cast<ConstantSDNode>(N0->getOperand(1)); | |||
17374 | if (!AndMaskNode) | |||
17375 | return SDValue(); | |||
17376 | uint32_t AndMask = static_cast<uint32_t>(AndMaskNode->getZExtValue()); | |||
17377 | // Don't transform uxtb/uxth. | |||
17378 | if (AndMask == 255 || AndMask == 65535) | |||
17379 | return SDValue(); | |||
17380 | if (isMask_32(AndMask)) { | |||
17381 | uint32_t MaskedBits = countLeadingZeros(AndMask); | |||
17382 | if (MaskedBits > ShiftAmt) { | |||
17383 | SDLoc DL(N); | |||
17384 | SDValue SHL = DAG.getNode(ISD::SHL, DL, MVT::i32, N0->getOperand(0), | |||
17385 | DAG.getConstant(MaskedBits, DL, MVT::i32)); | |||
17386 | return DAG.getNode( | |||
17387 | ISD::SRL, DL, MVT::i32, SHL, | |||
17388 | DAG.getConstant(MaskedBits - ShiftAmt, DL, MVT::i32)); | |||
17389 | } | |||
17390 | } | |||
17391 | } | |||
17392 | ||||
17393 | // Nothing to be done for scalar shifts. | |||
17394 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | |||
17395 | if (!VT.isVector() || !TLI.isTypeLegal(VT)) | |||
17396 | return SDValue(); | |||
17397 | if (ST->hasMVEIntegerOps() && VT == MVT::v2i64) | |||
17398 | return SDValue(); | |||
17399 | ||||
17400 | int64_t Cnt; | |||
17401 | ||||
17402 | switch (N->getOpcode()) { | |||
17403 | default: llvm_unreachable("unexpected shift opcode"); | |||
17404 | ||||
17405 | case ISD::SHL: | |||
17406 | if (isVShiftLImm(N->getOperand(1), VT, false, Cnt)) { | |||
17407 | SDLoc dl(N); | |||
17408 | return DAG.getNode(ARMISD::VSHLIMM, dl, VT, N->getOperand(0), | |||
17409 | DAG.getConstant(Cnt, dl, MVT::i32)); | |||
17410 | } | |||
17411 | break; | |||
17412 | ||||
17413 | case ISD::SRA: | |||
17414 | case ISD::SRL: | |||
17415 | if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) { | |||
17416 | unsigned VShiftOpc = | |||
17417 | (N->getOpcode() == ISD::SRA ? ARMISD::VSHRsIMM : ARMISD::VSHRuIMM); | |||
17418 | SDLoc dl(N); | |||
17419 | return DAG.getNode(VShiftOpc, dl, VT, N->getOperand(0), | |||
17420 | DAG.getConstant(Cnt, dl, MVT::i32)); | |||
17421 | } | |||
17422 | } | |||
17423 | return SDValue(); | |||
17424 | } | |||
17425 | ||||
17426 | // Look for a sign/zero/fpextend extend of a larger than legal load. This can be | |||
17427 | // split into multiple extending loads, which are simpler to deal with than an | |||
17428 | // arbitrary extend. For fp extends we use an integer extending load and a VCVTL | |||
17429 | // to convert the type to an f32. | |||
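// For example (illustrative): zext(load v8i8, v8i32) should become two
// v4i8->v4i32 zext-loads at byte offsets 0 and 4, concatenated back together;
// for f16->f32 each slice is loaded as integer and widened with a VCVTL.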
17430 | static SDValue PerformSplittingToWideningLoad(SDNode *N, SelectionDAG &DAG) { | |||
17431 | SDValue N0 = N->getOperand(0); | |||
17432 | if (N0.getOpcode() != ISD::LOAD) | |||
17433 | return SDValue(); | |||
17434 | LoadSDNode *LD = cast<LoadSDNode>(N0.getNode()); | |||
17435 | if (!LD->isSimple() || !N0.hasOneUse() || LD->isIndexed() || | |||
17436 | LD->getExtensionType() != ISD::NON_EXTLOAD) | |||
17437 | return SDValue(); | |||
17438 | EVT FromVT = LD->getValueType(0); | |||
17439 | EVT ToVT = N->getValueType(0); | |||
17440 | if (!ToVT.isVector()) | |||
17441 | return SDValue(); | |||
17442 | assert(FromVT.getVectorNumElements() == ToVT.getVectorNumElements()); | |||
17443 | EVT ToEltVT = ToVT.getVectorElementType(); | |||
17444 | EVT FromEltVT = FromVT.getVectorElementType(); | |||
17445 | ||||
17446 | unsigned NumElements = 0; | |||
17447 | if (ToEltVT == MVT::i32 && FromEltVT == MVT::i8) | |||
17448 | NumElements = 4; | |||
17449 | if (ToEltVT == MVT::f32 && FromEltVT == MVT::f16) | |||
17450 | NumElements = 4; | |||
17451 | if (NumElements == 0 || | |||
17452 | (FromEltVT != MVT::f16 && FromVT.getVectorNumElements() == NumElements) || | |||
17453 | FromVT.getVectorNumElements() % NumElements != 0 || | |||
17454 | !isPowerOf2_32(NumElements)) | |||
17455 | return SDValue(); | |||
17456 | ||||
17457 | LLVMContext &C = *DAG.getContext(); | |||
17458 | SDLoc DL(LD); | |||
17459 | // Details about the old load | |||
17460 | SDValue Ch = LD->getChain(); | |||
17461 | SDValue BasePtr = LD->getBasePtr(); | |||
17462 | Align Alignment = LD->getOriginalAlign(); | |||
17463 | MachineMemOperand::Flags MMOFlags = LD->getMemOperand()->getFlags(); | |||
17464 | AAMDNodes AAInfo = LD->getAAInfo(); | |||
17465 | ||||
17466 | ISD::LoadExtType NewExtType = | |||
17467 | N->getOpcode() == ISD::SIGN_EXTEND ? ISD::SEXTLOAD : ISD::ZEXTLOAD; | |||
17468 | SDValue Offset = DAG.getUNDEF(BasePtr.getValueType()); | |||
17469 | EVT NewFromVT = EVT::getVectorVT( | |||
17470 | C, EVT::getIntegerVT(C, FromEltVT.getScalarSizeInBits()), NumElements); | |||
17471 | EVT NewToVT = EVT::getVectorVT( | |||
17472 | C, EVT::getIntegerVT(C, ToEltVT.getScalarSizeInBits()), NumElements); | |||
17473 | ||||
17474 | SmallVector<SDValue, 4> Loads; | |||
17475 | SmallVector<SDValue, 4> Chains; | |||
17476 | for (unsigned i = 0; i < FromVT.getVectorNumElements() / NumElements; i++) { | |||
17477 | unsigned NewOffset = (i * NewFromVT.getSizeInBits()) / 8; | |||
17478 | SDValue NewPtr = | |||
17479 | DAG.getObjectPtrOffset(DL, BasePtr, TypeSize::Fixed(NewOffset)); | |||
17480 | ||||
17481 | SDValue NewLoad = | |||
17482 | DAG.getLoad(ISD::UNINDEXED, NewExtType, NewToVT, DL, Ch, NewPtr, Offset, | |||
17483 | LD->getPointerInfo().getWithOffset(NewOffset), NewFromVT, | |||
17484 | Alignment, MMOFlags, AAInfo); | |||
17485 | Loads.push_back(NewLoad); | |||
17486 | Chains.push_back(SDValue(NewLoad.getNode(), 1)); | |||
17487 | } | |||
17488 | ||||
17489 | // Float truncs need to be extended with VCVTLs into their floating point types. | |||
17490 | if (FromEltVT == MVT::f16) { | |||
17491 | SmallVector<SDValue, 4> Extends; | |||
17492 | ||||
17493 | for (unsigned i = 0; i < Loads.size(); i++) { | |||
17494 | SDValue LoadBC = | |||
17495 | DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, MVT::v8f16, Loads[i]); | |||
17496 | SDValue FPExt = DAG.getNode(ARMISD::VCVTL, DL, MVT::v4f32, LoadBC, | |||
17497 | DAG.getConstant(0, DL, MVT::i32)); | |||
17498 | Extends.push_back(FPExt); | |||
17499 | } | |||
17500 | ||||
17501 | Loads = Extends; | |||
17502 | } | |||
17503 | ||||
17504 | SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains); | |||
17505 | DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewChain); | |||
17506 | return DAG.getNode(ISD::CONCAT_VECTORS, DL, ToVT, Loads); | |||
17507 | } | |||
17508 | ||||
17509 | /// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND, | |||
17510 | /// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND. | |||
17511 | static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG, | |||
17512 | const ARMSubtarget *ST) { | |||
17513 | SDValue N0 = N->getOperand(0); | |||
17514 | ||||
17515 | // Check for sign- and zero-extensions of vector extract operations of 8- and | |||
17516 | // 16-bit vector elements. NEON and MVE support these directly. They are | |||
17517 | // handled during DAG combining because type legalization will promote them | |||
17518 | // to 32-bit types and it is messy to recognize the operations after that. | |||
17519 | if ((ST->hasNEON() || ST->hasMVEIntegerOps()) && | |||
17520 | N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) { | |||
17521 | SDValue Vec = N0.getOperand(0); | |||
17522 | SDValue Lane = N0.getOperand(1); | |||
17523 | EVT VT = N->getValueType(0); | |||
17524 | EVT EltVT = N0.getValueType(); | |||
17525 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | |||
17526 | ||||
17527 | if (VT == MVT::i32 && | |||
17528 | (EltVT == MVT::i8 || EltVT == MVT::i16) && | |||
17529 | TLI.isTypeLegal(Vec.getValueType()) && | |||
17530 | isa<ConstantSDNode>(Lane)) { | |||
17531 | ||||
17532 | unsigned Opc = 0; | |||
17533 | switch (N->getOpcode()) { | |||
17534 | default: llvm_unreachable("unexpected opcode"); | |||
17535 | case ISD::SIGN_EXTEND: | |||
17536 | Opc = ARMISD::VGETLANEs; | |||
17537 | break; | |||
17538 | case ISD::ZERO_EXTEND: | |||
17539 | case ISD::ANY_EXTEND: | |||
17540 | Opc = ARMISD::VGETLANEu; | |||
17541 | break; | |||
17542 | } | |||
17543 | return DAG.getNode(Opc, SDLoc(N), VT, Vec, Lane); | |||
17544 | } | |||
17545 | } | |||
17546 | ||||
17547 | if (ST->hasMVEIntegerOps()) | |||
17548 | if (SDValue NewLoad = PerformSplittingToWideningLoad(N, DAG)) | |||
17549 | return NewLoad; | |||
17550 | ||||
17551 | return SDValue(); | |||
17552 | } | |||
17553 | ||||
17554 | static SDValue PerformFPExtendCombine(SDNode *N, SelectionDAG &DAG, | |||
17555 | const ARMSubtarget *ST) { | |||
17556 | if (ST->hasMVEFloatOps()) | |||
17557 | if (SDValue NewLoad = PerformSplittingToWideningLoad(N, DAG)) | |||
17558 | return NewLoad; | |||
17559 | ||||
17560 | return SDValue(); | |||
17561 | } | |||
17562 | ||||
17563 | // Lower smin(smax(x, C1), C2) to ssat or usat, if they have saturating | |||
17564 | // constant bounds. | |||
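// Illustrative matches (the immediate is MinC.countTrailingOnes()):
//   smin(smax(x, -128), 127)  ->  ARMISD::SSAT(x, 7)
//   smin(smax(x, 0), 255)     ->  ARMISD::USAT(x, 8)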
17565 | static SDValue PerformMinMaxToSatCombine(SDValue Op, SelectionDAG &DAG, | |||
17566 | const ARMSubtarget *Subtarget) { | |||
17567 | if ((Subtarget->isThumb() || !Subtarget->hasV6Ops()) && | |||
17568 | !Subtarget->isThumb2()) | |||
17569 | return SDValue(); | |||
17570 | ||||
17571 | EVT VT = Op.getValueType(); | |||
17572 | SDValue Op0 = Op.getOperand(0); | |||
17573 | ||||
17574 | if (VT != MVT::i32 || | |||
17575 | (Op0.getOpcode() != ISD::SMIN && Op0.getOpcode() != ISD::SMAX) || | |||
17576 | !isa<ConstantSDNode>(Op.getOperand(1)) || | |||
17577 | !isa<ConstantSDNode>(Op0.getOperand(1))) | |||
17578 | return SDValue(); | |||
17579 | ||||
17580 | SDValue Min = Op; | |||
17581 | SDValue Max = Op0; | |||
17582 | SDValue Input = Op0.getOperand(0); | |||
17583 | if (Min.getOpcode() == ISD::SMAX) | |||
17584 | std::swap(Min, Max); | |||
17585 | ||||
17586 | APInt MinC = Min.getConstantOperandAPInt(1); | |||
17587 | APInt MaxC = Max.getConstantOperandAPInt(1); | |||
17588 | ||||
17589 | if (Min.getOpcode() != ISD::SMIN || Max.getOpcode() != ISD::SMAX || | |||
17590 | !(MinC + 1).isPowerOf2()) | |||
17591 | return SDValue(); | |||
17592 | ||||
17593 | SDLoc DL(Op); | |||
17594 | if (MinC == ~MaxC) | |||
17595 | return DAG.getNode(ARMISD::SSAT, DL, VT, Input, | |||
17596 | DAG.getConstant(MinC.countTrailingOnes(), DL, VT)); | |||
17597 | if (MaxC == 0) | |||
17598 | return DAG.getNode(ARMISD::USAT, DL, VT, Input, | |||
17599 | DAG.getConstant(MinC.countTrailingOnes(), DL, VT)); | |||
17600 | ||||
17601 | return SDValue(); | |||
17602 | } | |||
17603 | ||||
17604 | /// PerformMinMaxCombine - Target-specific DAG combining for creating truncating | |||
17605 | /// saturates. | |||
17606 | static SDValue PerformMinMaxCombine(SDNode *N, SelectionDAG &DAG, | |||
17607 | const ARMSubtarget *ST) { | |||
17608 | EVT VT = N->getValueType(0); | |||
17609 | SDValue N0 = N->getOperand(0); | |||
17610 | ||||
17611 | if (VT == MVT::i32) | |||
17612 | return PerformMinMaxToSatCombine(SDValue(N, 0), DAG, ST); | |||
17613 | ||||
17614 | if (!ST->hasMVEIntegerOps()) | |||
17615 | return SDValue(); | |||
17616 | ||||
17617 | if (SDValue V = PerformVQDMULHCombine(N, DAG)) | |||
17618 | return V; | |||
17619 | ||||
17620 | if (VT != MVT::v4i32 && VT != MVT::v8i16) | |||
17621 | return SDValue(); | |||
17622 | ||||
17623 | auto IsSignedSaturate = [&](SDNode *Min, SDNode *Max) { | |||
17624 | // Check one is a smin and the other is a smax | |||
17625 | if (Min->getOpcode() != ISD::SMIN) | |||
17626 | std::swap(Min, Max); | |||
17627 | if (Min->getOpcode() != ISD::SMIN || Max->getOpcode() != ISD::SMAX) | |||
17628 | return false; | |||
17629 | ||||
17630 | APInt SaturateC; | |||
17631 | if (VT == MVT::v4i32) | |||
17632 | SaturateC = APInt(32, (1 << 15) - 1, true); | |||
17633 | else //if (VT == MVT::v8i16) | |||
17634 | SaturateC = APInt(16, (1 << 7) - 1, true); | |||
17635 | ||||
17636 | APInt MinC, MaxC; | |||
17637 | if (!ISD::isConstantSplatVector(Min->getOperand(1).getNode(), MinC) || | |||
17638 | MinC != SaturateC) | |||
17639 | return false; | |||
17640 | if (!ISD::isConstantSplatVector(Max->getOperand(1).getNode(), MaxC) || | |||
17641 | MaxC != ~SaturateC) | |||
17642 | return false; | |||
17643 | return true; | |||
17644 | }; | |||
17645 | ||||
17646 | if (IsSignedSaturate(N, N0.getNode())) { | |||
17647 | SDLoc DL(N); | |||
17648 | MVT ExtVT, HalfVT; | |||
17649 | if (VT == MVT::v4i32) { | |||
17650 | HalfVT = MVT::v8i16; | |||
17651 | ExtVT = MVT::v4i16; | |||
17652 | } else { // if (VT == MVT::v8i16) | |||
17653 | HalfVT = MVT::v16i8; | |||
17654 | ExtVT = MVT::v8i8; | |||
17655 | } | |||
17656 | ||||
17657 | // Create a VQMOVNB with undef top lanes, then sign extended into the top | |||
17658 | // half. That extend will hopefully be removed if only the bottom bits are | |||
17659 | // demanded (through a truncating store, for example). | |||
17660 | SDValue VQMOVN = | |||
17661 | DAG.getNode(ARMISD::VQMOVNs, DL, HalfVT, DAG.getUNDEF(HalfVT), | |||
17662 | N0->getOperand(0), DAG.getConstant(0, DL, MVT::i32)); | |||
17663 | SDValue Bitcast = DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, VT, VQMOVN); | |||
17664 | return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Bitcast, | |||
17665 | DAG.getValueType(ExtVT)); | |||
17666 | } | |||
17667 | ||||
17668 | auto IsUnsignedSaturate = [&](SDNode *Min) { | |||
17669 | // For unsigned, we just need to check for <= 0xffff | |||
17670 | if (Min->getOpcode() != ISD::UMIN) | |||
17671 | return false; | |||
17672 | ||||
17673 | APInt SaturateC; | |||
17674 | if (VT == MVT::v4i32) | |||
17675 | SaturateC = APInt(32, (1 << 16) - 1, true); | |||
17676 | else //if (VT == MVT::v8i16) | |||
17677 | SaturateC = APInt(16, (1 << 8) - 1, true); | |||
17678 | ||||
17679 | APInt MinC; | |||
17680 | if (!ISD::isConstantSplatVector(Min->getOperand(1).getNode(), MinC) || | |||
17681 | MinC != SaturateC) | |||
17682 | return false; | |||
17683 | return true; | |||
17684 | }; | |||
17685 | ||||
17686 | if (IsUnsignedSaturate(N)) { | |||
17687 | SDLoc DL(N); | |||
17688 | MVT HalfVT; | |||
17689 | unsigned ExtConst; | |||
17690 | if (VT == MVT::v4i32) { | |||
17691 | HalfVT = MVT::v8i16; | |||
17692 | ExtConst = 0x0000FFFF; | |||
17693 | } else { //if (VT == MVT::v8i16) | |||
17694 | HalfVT = MVT::v16i8; | |||
17695 | ExtConst = 0x00FF; | |||
17696 | } | |||
17697 | ||||
17698 | // Create a VQMOVNB with undef top lanes, then ZExt into the top half with | |||
17699 | // an AND. That extend will hopefully be removed if only the bottom bits are | |||
17700 | // demanded (through a truncating store, for example). | |||
17701 | SDValue VQMOVN = | |||
17702 | DAG.getNode(ARMISD::VQMOVNu, DL, HalfVT, DAG.getUNDEF(HalfVT), N0, | |||
17703 | DAG.getConstant(0, DL, MVT::i32)); | |||
17704 | SDValue Bitcast = DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, VT, VQMOVN); | |||
17705 | return DAG.getNode(ISD::AND, DL, VT, Bitcast, | |||
17706 | DAG.getConstant(ExtConst, DL, VT)); | |||
17707 | } | |||
17708 | ||||
17709 | return SDValue(); | |||
17710 | } | |||
17711 | ||||
17712 | static const APInt *isPowerOf2Constant(SDValue V) { | |||
17713 | ConstantSDNode *C = dyn_cast<ConstantSDNode>(V); | |||
17714 | if (!C) | |||
17715 | return nullptr; | |||
17716 | const APInt *CV = &C->getAPIntValue(); | |||
17717 | return CV->isPowerOf2() ? CV : nullptr; | |||
17718 | } | |||
17719 | ||||
17720 | SDValue ARMTargetLowering::PerformCMOVToBFICombine(SDNode *CMOV, SelectionDAG &DAG) const { | |||
17721 | // If we have a CMOV, OR and AND combination such as: | |||
17722 | // if (x & CN) | |||
17723 | // y |= CM; | |||
17724 | // | |||
17725 | // And: | |||
17726 | // * CN is a single bit; | |||
17727 | // * All bits covered by CM are known zero in y | |||
17728 | // | |||
17729 | // Then we can convert this into a sequence of BFI instructions. This will | |||
17730 | // always be a win if CM is a single bit, will always be no worse than the | |||
17731 | // TST&OR sequence if CM is two bits, and for thumb will be no worse if CM is | |||
17732 | // three bits (due to the extra IT instruction). | |||
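// Illustrative sketch (assumed constants and registers, not from the
// original source), with x in r0 and y in r1:
//   if (x & 4)      // CN = 0b100, a single bit
//     y |= 0x30;    // CM = 0b110000, bits known zero in y
// becomes, roughly:
//   lsr  r0, r0, #2        ; move the tested bit down to bit 0
//   bfi  r1, r0, #4, #1    ; insert it at each bit set in CM
//   bfi  r1, r0, #5, #1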
17733 | ||||
17734 | SDValue Op0 = CMOV->getOperand(0); | |||
17735 | SDValue Op1 = CMOV->getOperand(1); | |||
17736 | auto CCNode = cast<ConstantSDNode>(CMOV->getOperand(2)); | |||
17737 | auto CC = CCNode->getAPIntValue().getLimitedValue(); | |||
17738 | SDValue CmpZ = CMOV->getOperand(4); | |||
17739 | ||||
17740 | // The compare must be against zero. | |||
17741 | if (!isNullConstant(CmpZ->getOperand(1))) | |||
17742 | return SDValue(); | |||
17743 | ||||
17744 | assert(CmpZ->getOpcode() == ARMISD::CMPZ); | |||
17745 | SDValue And = CmpZ->getOperand(0); | |||
17746 | if (And->getOpcode() != ISD::AND) | |||
17747 | return SDValue(); | |||
17748 | const APInt *AndC = isPowerOf2Constant(And->getOperand(1)); | |||
17749 | if (!AndC) | |||
17750 | return SDValue(); | |||
17751 | SDValue X = And->getOperand(0); | |||
17752 | ||||
17753 | if (CC == ARMCC::EQ) { | |||
17754 | // We're performing an "equal to zero" compare. Swap the operands so we | |||
17755 | // canonicalize on a "not equal to zero" compare. | |||
17756 | std::swap(Op0, Op1); | |||
17757 | } else { | |||
17758 | assert(CC == ARMCC::NE && "How can a CMPZ node not be EQ or NE?"); | |||
17759 | } | |||
17760 | ||||
17761 | if (Op1->getOpcode() != ISD::OR) | |||
17762 | return SDValue(); | |||
17763 | ||||
17764 | ConstantSDNode *OrC = dyn_cast<ConstantSDNode>(Op1->getOperand(1)); | |||
17765 | if (!OrC) | |||
17766 | return SDValue(); | |||
17767 | SDValue Y = Op1->getOperand(0); | |||
17768 | ||||
17769 | if (Op0 != Y) | |||
17770 | return SDValue(); | |||
17771 | ||||
17772 | // Now, is it profitable to continue? | |||
17773 | APInt OrCI = OrC->getAPIntValue(); | |||
17774 | unsigned Heuristic = Subtarget->isThumb() ? 3 : 2; | |||
17775 | if (OrCI.countPopulation() > Heuristic) | |||
17776 | return SDValue(); | |||
17777 | ||||
17778 | // Lastly, can we determine that the bits defined by OrCI | |||
17779 | // are zero in Y? | |||
17780 | KnownBits Known = DAG.computeKnownBits(Y); | |||
17781 | if ((OrCI & Known.Zero) != OrCI) | |||
17782 | return SDValue(); | |||
17783 | ||||
17784 | // OK, we can do the combine. | |||
17785 | SDValue V = Y; | |||
17786 | SDLoc dl(X); | |||
17787 | EVT VT = X.getValueType(); | |||
17788 | unsigned BitInX = AndC->logBase2(); | |||
17789 | ||||
17790 | if (BitInX != 0) { | |||
17791 | // We must shift X first. | |||
17792 | X = DAG.getNode(ISD::SRL, dl, VT, X, | |||
17793 | DAG.getConstant(BitInX, dl, VT)); | |||
17794 | } | |||
17795 | ||||
17796 | for (unsigned BitInY = 0, NumActiveBits = OrCI.getActiveBits(); | |||
17797 | BitInY < NumActiveBits; ++BitInY) { | |||
17798 | if (OrCI[BitInY] == 0) | |||
17799 | continue; | |||
17800 | APInt Mask(VT.getSizeInBits(), 0); | |||
17801 | Mask.setBit(BitInY); | |||
17802 | V = DAG.getNode(ARMISD::BFI, dl, VT, V, X, | |||
17803 | // Confusingly, the operand is an *inverted* mask. | |||
17804 | DAG.getConstant(~Mask, dl, VT)); | |||
17805 | } | |||
17806 | ||||
17807 | return V; | |||
17808 | } | |||
17809 | ||||
17810 | // Given N, the value controlling the conditional branch, search for the loop | |||
17811 | // intrinsic, returning it, along with how the value is used. We need to handle | |||
17812 | // patterns such as the following: | |||
17813 | // (brcond (xor (setcc (loop.decrement), 0, ne), 1), exit) | |||
17814 | // (brcond (setcc (loop.decrement), 0, eq), exit) | |||
17815 | // (brcond (setcc (loop.decrement), 0, ne), header) | |||
17816 | static SDValue SearchLoopIntrinsic(SDValue N, ISD::CondCode &CC, int &Imm, | |||
17817 | bool &Negate) { | |||
17818 | switch (N->getOpcode()) { | |||
17819 | default: | |||
17820 | break; | |||
17821 | case ISD::XOR: { | |||
17822 | if (!isa<ConstantSDNode>(N.getOperand(1))) | |||
17823 | return SDValue(); | |||
17824 | if (!cast<ConstantSDNode>(N.getOperand(1))->isOne()) | |||
17825 | return SDValue(); | |||
17826 | Negate = !Negate; | |||
17827 | return SearchLoopIntrinsic(N.getOperand(0), CC, Imm, Negate); | |||
17828 | } | |||
17829 | case ISD::SETCC: { | |||
17830 | auto *Const = dyn_cast<ConstantSDNode>(N.getOperand(1)); | |||
17831 | if (!Const) | |||
17832 | return SDValue(); | |||
17833 | if (Const->isZero()) | |||
17834 | Imm = 0; | |||
17835 | else if (Const->isOne()) | |||
17836 | Imm = 1; | |||
17837 | else | |||
17838 | return SDValue(); | |||
17839 | CC = cast<CondCodeSDNode>(N.getOperand(2))->get(); | |||
17840 | return SearchLoopIntrinsic(N->getOperand(0), CC, Imm, Negate); | |||
17841 | } | |||
17842 | case ISD::INTRINSIC_W_CHAIN: { | |||
17843 | unsigned IntOp = cast<ConstantSDNode>(N.getOperand(1))->getZExtValue(); | |||
17844 | if (IntOp != Intrinsic::test_start_loop_iterations && | |||
17845 | IntOp != Intrinsic::loop_decrement_reg) | |||
17846 | return SDValue(); | |||
17847 | return N; | |||
17848 | } | |||
17849 | } | |||
17850 | return SDValue(); | |||
17851 | } | |||
17852 | ||||
17853 | static SDValue PerformHWLoopCombine(SDNode *N, | |||
17854 | TargetLowering::DAGCombinerInfo &DCI, | |||
17855 | const ARMSubtarget *ST) { | |||
17856 | ||||
17857 | // The hwloop intrinsics that we're interested in are used for control flow, | |||
17858 | // either for entering or exiting the loop: | |||
17859 | // - test.start.loop.iterations will test whether its operand is zero. If it | |||
17860 | //   is zero, the following branch should not enter the loop. | |||
17861 | // - loop.decrement.reg also tests whether its operand is zero. If it is | |||
17862 | //   zero, the following branch should not branch back to the beginning of | |||
17863 | //   the loop. | |||
17864 | // So here, we need to check how the brcond uses the result of each of the | |||
17865 | // intrinsics to ensure that we're branching to the right place at the | |||
17866 | // right time. | |||
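// Illustrative mapping (not from the original source):
//   (brcond (setcc (test.start.loop.iterations n), 0, eq), exit)
//     -> (WLS (WLSSETUP n), exit)   ; skip the loop when the count is zero
//   (brcond (setcc (loop.decrement.reg lr), 0, ne), header)
//     -> (LE lr, header)            ; loop back while the count is non-zero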
17867 | ||||
17868 | ISD::CondCode CC; | |||
17869 | SDValue Cond; | |||
17870 | int Imm = 1; | |||
17871 | bool Negate = false; | |||
17872 | SDValue Chain = N->getOperand(0); | |||
17873 | SDValue Dest; | |||
17874 | ||||
17875 | if (N->getOpcode() == ISD::BRCOND) { | |||
17876 | CC = ISD::SETEQ; | |||
17877 | Cond = N->getOperand(1); | |||
17878 | Dest = N->getOperand(2); | |||
17879 | } else { | |||
17880 | assert(N->getOpcode() == ISD::BR_CC && "Expected BRCOND or BR_CC!"); | |||
17881 | CC = cast<CondCodeSDNode>(N->getOperand(1))->get(); | |||
17882 | Cond = N->getOperand(2); | |||
17883 | Dest = N->getOperand(4); | |||
17884 | if (auto *Const = dyn_cast<ConstantSDNode>(N->getOperand(3))) { | |||
17885 | if (!Const->isOne() && !Const->isZero()) | |||
17886 | return SDValue(); | |||
17887 | Imm = Const->getZExtValue(); | |||
17888 | } else | |||
17889 | return SDValue(); | |||
17890 | } | |||
17891 | ||||
17892 | SDValue Int = SearchLoopIntrinsic(Cond, CC, Imm, Negate); | |||
17893 | if (!Int) | |||
17894 | return SDValue(); | |||
17895 | ||||
17896 | if (Negate) | |||
17897 | CC = ISD::getSetCCInverse(CC, /* Integer inverse */ MVT::i32); | |||
17898 | ||||
17899 | auto IsTrueIfZero = [](ISD::CondCode CC, int Imm) { | |||
17900 | return (CC == ISD::SETEQ && Imm == 0) || | |||
17901 | (CC == ISD::SETNE && Imm == 1) || | |||
17902 | (CC == ISD::SETLT && Imm == 1) || | |||
17903 | (CC == ISD::SETULT && Imm == 1); | |||
17904 | }; | |||
17905 | ||||
17906 | auto IsFalseIfZero = [](ISD::CondCode CC, int Imm) { | |||
17907 | return (CC == ISD::SETEQ && Imm == 1) || | |||
17908 | (CC == ISD::SETNE && Imm == 0) || | |||
17909 | (CC == ISD::SETGT && Imm == 0) || | |||
17910 | (CC == ISD::SETUGT && Imm == 0) || | |||
17911 | (CC == ISD::SETGE && Imm == 1) || | |||
17912 | (CC == ISD::SETUGE && Imm == 1); | |||
17913 | }; | |||
17914 | ||||
17915 | assert((IsTrueIfZero(CC, Imm) || IsFalseIfZero(CC, Imm)) && | |||
17916 | "unsupported condition"); | |||
17917 | ||||
17918 | SDLoc dl(Int); | |||
17919 | SelectionDAG &DAG = DCI.DAG; | |||
17920 | SDValue Elements = Int.getOperand(2); | |||
17921 | unsigned IntOp = cast<ConstantSDNode>(Int->getOperand(1))->getZExtValue(); | |||
17922 | assert((N->hasOneUse() && N->use_begin()->getOpcode() == ISD::BR) && | |||
17923 | "expected single br user"); | |||
17924 | SDNode *Br = *N->use_begin(); | |||
17925 | SDValue OtherTarget = Br->getOperand(1); | |||
17926 | ||||
17927 | // Update the unconditional branch to branch to the given Dest. | |||
17928 | auto UpdateUncondBr = [](SDNode *Br, SDValue Dest, SelectionDAG &DAG) { | |||
17929 | SDValue NewBrOps[] = { Br->getOperand(0), Dest }; | |||
17930 | SDValue NewBr = DAG.getNode(ISD::BR, SDLoc(Br), MVT::Other, NewBrOps); | |||
17931 | DAG.ReplaceAllUsesOfValueWith(SDValue(Br, 0), NewBr); | |||
17932 | }; | |||
17933 | ||||
17934 | if (IntOp == Intrinsic::test_start_loop_iterations) { | |||
17935 | SDValue Res; | |||
17936 | SDValue Setup = DAG.getNode(ARMISD::WLSSETUP, dl, MVT::i32, Elements); | |||
17937 | // We expect this 'instruction' to branch when the counter is zero. | |||
17938 | if (IsTrueIfZero(CC, Imm)) { | |||
17939 | SDValue Ops[] = {Chain, Setup, Dest}; | |||
17940 | Res = DAG.getNode(ARMISD::WLS, dl, MVT::Other, Ops); | |||
17941 | } else { | |||
17942 | // The logic is the reverse of what we need for WLS, so find the other | |||
17943 | // basic block target: the target of the following br. | |||
17944 | UpdateUncondBr(Br, Dest, DAG); | |||
17945 | ||||
17946 | SDValue Ops[] = {Chain, Setup, OtherTarget}; | |||
17947 | Res = DAG.getNode(ARMISD::WLS, dl, MVT::Other, Ops); | |||
17948 | } | |||
17949 | // Update LR count to the new value | |||
17950 | DAG.ReplaceAllUsesOfValueWith(Int.getValue(0), Setup); | |||
17951 | // Update chain | |||
17952 | DAG.ReplaceAllUsesOfValueWith(Int.getValue(2), Int.getOperand(0)); | |||
17953 | return Res; | |||
17954 | } else { | |||
17955 | SDValue Size = DAG.getTargetConstant( | |||
17956 | cast<ConstantSDNode>(Int.getOperand(3))->getZExtValue(), dl, MVT::i32); | |||
17957 | SDValue Args[] = { Int.getOperand(0), Elements, Size, }; | |||
17958 | SDValue LoopDec = DAG.getNode(ARMISD::LOOP_DEC, dl, | |||
17959 | DAG.getVTList(MVT::i32, MVT::Other), Args); | |||
17960 | DAG.ReplaceAllUsesWith(Int.getNode(), LoopDec.getNode()); | |||
17961 | ||||
17962 | // We expect this instruction to branch when the count is not zero. | |||
17963 | SDValue Target = IsFalseIfZero(CC, Imm) ? Dest : OtherTarget; | |||
17964 | ||||
17965 | // Update the unconditional branch to target the loop preheader if we've | |||
17966 | // found the condition has been reversed. | |||
17967 | if (Target == OtherTarget) | |||
17968 | UpdateUncondBr(Br, Dest, DAG); | |||
17969 | ||||
17970 | Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, | |||
17971 | SDValue(LoopDec.getNode(), 1), Chain); | |||
17972 | ||||
17973 | SDValue EndArgs[] = { Chain, SDValue(LoopDec.getNode(), 0), Target }; | |||
17974 | return DAG.getNode(ARMISD::LE, dl, MVT::Other, EndArgs); | |||
17975 | } | |||
17976 | return SDValue(); | |||
17977 | } | |||
17978 | ||||
17979 | /// PerformBRCONDCombine - Target-specific DAG combining for ARMISD::BRCOND. | |||
17980 | SDValue | |||
17981 | ARMTargetLowering::PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const { | |||
17982 | SDValue Cmp = N->getOperand(4); | |||
17983 | if (Cmp.getOpcode() != ARMISD::CMPZ) | |||
17984 | // Only looking at NE cases. | |||
17985 | return SDValue(); | |||
17986 | ||||
17987 | EVT VT = N->getValueType(0); | |||
17988 | SDLoc dl(N); | |||
17989 | SDValue LHS = Cmp.getOperand(0); | |||
17990 | SDValue RHS = Cmp.getOperand(1); | |||
17991 | SDValue Chain = N->getOperand(0); | |||
17992 | SDValue BB = N->getOperand(1); | |||
17993 | SDValue ARMcc = N->getOperand(2); | |||
17994 | ARMCC::CondCodes CC = | |||
17995 | (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue(); | |||
17996 | ||||
17997 | // (brcond Chain BB ne CPSR (cmpz (and (cmov 0 1 CC CPSR Cmp) 1) 0)) | |||
17998 | // -> (brcond Chain BB CC CPSR Cmp) | |||
17999 | if (CC == ARMCC::NE && LHS.getOpcode() == ISD::AND && LHS->hasOneUse() && | |||
18000 | LHS->getOperand(0)->getOpcode() == ARMISD::CMOV && | |||
18001 | LHS->getOperand(0)->hasOneUse()) { | |||
18002 | auto *LHS00C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)->getOperand(0)); | |||
18003 | auto *LHS01C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)->getOperand(1)); | |||
18004 | auto *LHS1C = dyn_cast<ConstantSDNode>(LHS->getOperand(1)); | |||
18005 | auto *RHSC = dyn_cast<ConstantSDNode>(RHS); | |||
18006 | if ((LHS00C && LHS00C->getZExtValue() == 0) && | |||
18007 | (LHS01C && LHS01C->getZExtValue() == 1) && | |||
18008 | (LHS1C && LHS1C->getZExtValue() == 1) && | |||
18009 | (RHSC && RHSC->getZExtValue() == 0)) { | |||
18010 | return DAG.getNode( | |||
18011 | ARMISD::BRCOND, dl, VT, Chain, BB, LHS->getOperand(0)->getOperand(2), | |||
18012 | LHS->getOperand(0)->getOperand(3), LHS->getOperand(0)->getOperand(4)); | |||
18013 | } | |||
18014 | } | |||
18015 | ||||
18016 | return SDValue(); | |||
18017 | } | |||
18018 | ||||
18019 | /// PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV. | |||
18020 | SDValue | |||
18021 | ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const { | |||
18022 | SDValue Cmp = N->getOperand(4); | |||
18023 | if (Cmp.getOpcode() != ARMISD::CMPZ) | |||
18024 | // Only looking at EQ and NE cases. | |||
18025 | return SDValue(); | |||
18026 | ||||
18027 | EVT VT = N->getValueType(0); | |||
18028 | SDLoc dl(N); | |||
18029 | SDValue LHS = Cmp.getOperand(0); | |||
18030 | SDValue RHS = Cmp.getOperand(1); | |||
18031 | SDValue FalseVal = N->getOperand(0); | |||
18032 | SDValue TrueVal = N->getOperand(1); | |||
18033 | SDValue ARMcc = N->getOperand(2); | |||
18034 | ARMCC::CondCodes CC = | |||
18035 | (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue(); | |||
18036 | ||||
18037 | // BFI is only available on V6T2+. | |||
18038 | if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops()) { | |||
18039 | SDValue R = PerformCMOVToBFICombine(N, DAG); | |||
18040 | if (R) | |||
18041 | return R; | |||
18042 | } | |||
18043 | ||||
18044 | // Simplify | |||
18045 | // mov r1, r0 | |||
18046 | // cmp r1, x | |||
18047 | // mov r0, y | |||
18048 | // moveq r0, x | |||
18049 | // to | |||
18050 | // cmp r0, x | |||
18051 | // movne r0, y | |||
18052 | // | |||
18053 | // mov r1, r0 | |||
18054 | // cmp r1, x | |||
18055 | // mov r0, x | |||
18056 | // movne r0, y | |||
18057 | // to | |||
18058 | // cmp r0, x | |||
18059 | // movne r0, y | |||
18060 | /// FIXME: Turn this into a target neutral optimization? | |||
18061 | SDValue Res; | |||
18062 | if (CC == ARMCC::NE && FalseVal == RHS && FalseVal != LHS) { | |||
18063 | Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, TrueVal, ARMcc, | |||
18064 | N->getOperand(3), Cmp); | |||
18065 | } else if (CC == ARMCC::EQ && TrueVal == RHS) { | |||
18066 | SDValue ARMcc; | |||
18067 | SDValue NewCmp = getARMCmp(LHS, RHS, ISD::SETNE, ARMcc, DAG, dl); | |||
18068 | Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, FalseVal, ARMcc, | |||
18069 | N->getOperand(3), NewCmp); | |||
18070 | } | |||
18071 | ||||
18072 | // (cmov F T ne CPSR (cmpz (cmov 0 1 CC CPSR Cmp) 0)) | |||
18073 | // -> (cmov F T CC CPSR Cmp) | |||
18074 | if (CC == ARMCC::NE && LHS.getOpcode() == ARMISD::CMOV && LHS->hasOneUse()) { | |||
18075 | auto *LHS0C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)); | |||
18076 | auto *LHS1C = dyn_cast<ConstantSDNode>(LHS->getOperand(1)); | |||
18077 | auto *RHSC = dyn_cast<ConstantSDNode>(RHS); | |||
18078 | if ((LHS0C && LHS0C->getZExtValue() == 0) && | |||
18079 | (LHS1C && LHS1C->getZExtValue() == 1) && | |||
18080 | (RHSC && RHSC->getZExtValue() == 0)) { | |||
18081 | return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, | |||
18082 | LHS->getOperand(2), LHS->getOperand(3), | |||
18083 | LHS->getOperand(4)); | |||
18084 | } | |||
18085 | } | |||
18086 | ||||
18087 | if (!VT.isInteger()) | |||
18088 | return SDValue(); | |||
18089 | ||||
18090 | // Fold away an unnecessary CMPZ/CMOV | |||
18091 | // CMOV A, B, C1, $cpsr, (CMPZ (CMOV 1, 0, C2, D), 0) -> | |||
18092 | // if C1==EQ -> CMOV A, B, C2, $cpsr, D | |||
18093 | // if C1==NE -> CMOV A, B, NOT(C2), $cpsr, D | |||
18094 | if (N->getConstantOperandVal(2) == ARMCC::EQ || | |||
18095 | N->getConstantOperandVal(2) == ARMCC::NE) { | |||
18096 | ARMCC::CondCodes Cond; | |||
18097 | if (SDValue C = IsCMPZCSINC(N->getOperand(4).getNode(), Cond)) { | |||
18098 | if (N->getConstantOperandVal(2) == ARMCC::NE) | |||
18099 | Cond = ARMCC::getOppositeCondition(Cond); | |||
18100 | return DAG.getNode(N->getOpcode(), SDLoc(N), MVT::i32, N->getOperand(0), | |||
18101 | N->getOperand(1), | |||
18102 | DAG.getTargetConstant(Cond, SDLoc(N), MVT::i32), | |||
18103 | N->getOperand(3), C); | |||
18104 | } | |||
18105 | } | |||
18106 | ||||
18107 | // Materialize a boolean comparison for integers so we can avoid branching. | |||
18108 | if (isNullConstant(FalseVal)) { | |||
18109 | if (CC == ARMCC::EQ && isOneConstant(TrueVal)) { | |||
18110 | if (!Subtarget->isThumb1Only() && Subtarget->hasV5TOps()) { | |||
18111 | // If x == y then x - y == 0 and ARM's CLZ will return 32, shifting it | |||
18112 | // right 5 bits will make that 32 be 1, otherwise it will be 0. | |||
18113 | // CMOV 0, 1, ==, (CMPZ x, y) -> SRL (CTLZ (SUB x, y)), 5 | |||
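// Worked example (illustrative): x == y gives Sub == 0 and CTLZ(0) == 32,
// so 32 >> 5 == 1; any nonzero Sub has CTLZ(Sub) < 32, so the shift
// right by 5 yields 0.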
18114 | SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, LHS, RHS); | |||
18115 | Res = DAG.getNode(ISD::SRL, dl, VT, DAG.getNode(ISD::CTLZ, dl, VT, Sub), | |||
18116 | DAG.getConstant(5, dl, MVT::i32)); | |||
18117 | } else { | |||
18118 | // CMOV 0, 1, ==, (CMPZ x, y) -> | |||
18119 | // (ADDCARRY (SUB x, y), t:0, t:1) | |||
18120 | // where t = (SUBCARRY 0, (SUB x, y), 0) | |||
18121 | // | |||
18122 | // The SUBCARRY computes 0 - (x - y) and this will give a borrow when | |||
18123 | // x != y. In other words, a carry C == 1 when x == y, C == 0 | |||
18124 | // otherwise. | |||
18125 | // The final ADDCARRY computes | |||
18126 | // x - y + (0 - (x - y)) + C == C | |||
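// Worked example (illustrative): for x == y, Sub == 0, so 0 - 0 produces
// no borrow and C == 1; the ADDCARRY computes 0 + 0 + 1 == 1. For
// x != y the borrow makes C == 0 and the two subtractions cancel,
// giving 0.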
18127 | SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, LHS, RHS); | |||
18128 | SDVTList VTs = DAG.getVTList(VT, MVT::i32); | |||
18129 | SDValue Neg = DAG.getNode(ISD::USUBO, dl, VTs, FalseVal, Sub); | |||
18130 | // ISD::SUBCARRY returns a borrow, but we actually want the carry | |||
18131 | // here. | |||
18132 | SDValue Carry = | |||
18133 | DAG.getNode(ISD::SUB, dl, MVT::i32, | |||
18134 | DAG.getConstant(1, dl, MVT::i32), Neg.getValue(1)); | |||
18135 | Res = DAG.getNode(ISD::ADDCARRY, dl, VTs, Sub, Neg, Carry); | |||
18136 | } | |||
18137 | } else if (CC == ARMCC::NE && !isNullConstant(RHS) && | |||
18138 | (!Subtarget->isThumb1Only() || isPowerOf2Constant(TrueVal))) { | |||
18139 | // This seems pointless but will allow us to combine it further below. | |||
18140 | // CMOV 0, z, !=, (CMPZ x, y) -> CMOV (SUBS x, y), z, !=, (SUBS x, y):1 | |||
18141 | SDValue Sub = | |||
18142 | DAG.getNode(ARMISD::SUBS, dl, DAG.getVTList(VT, MVT::i32), LHS, RHS); | |||
18143 | SDValue CPSRGlue = DAG.getCopyToReg(DAG.getEntryNode(), dl, ARM::CPSR, | |||
18144 | Sub.getValue(1), SDValue()); | |||
18145 | Res = DAG.getNode(ARMISD::CMOV, dl, VT, Sub, TrueVal, ARMcc, | |||
18146 | N->getOperand(3), CPSRGlue.getValue(1)); | |||
18147 | FalseVal = Sub; | |||
18148 | } | |||
18149 | } else if (isNullConstant(TrueVal)) { | |||
18150 | if (CC == ARMCC::EQ && !isNullConstant(RHS) && | |||
18151 | (!Subtarget->isThumb1Only() || isPowerOf2Constant(FalseVal))) { | |||
18152 | // This seems pointless but will allow us to combine it further below | |||
18153 | // Note that we change == to != as this is the dual of the case above. | |||
18154 | // CMOV z, 0, ==, (CMPZ x, y) -> CMOV (SUBS x, y), z, !=, (SUBS x, y):1 | |||
18155 | SDValue Sub = | |||
18156 | DAG.getNode(ARMISD::SUBS, dl, DAG.getVTList(VT, MVT::i32), LHS, RHS); | |||
18157 | SDValue CPSRGlue = DAG.getCopyToReg(DAG.getEntryNode(), dl, ARM::CPSR, | |||
18158 | Sub.getValue(1), SDValue()); | |||
18159 | Res = DAG.getNode(ARMISD::CMOV, dl, VT, Sub, FalseVal, | |||
18160 | DAG.getConstant(ARMCC::NE, dl, MVT::i32), | |||
18161 | N->getOperand(3), CPSRGlue.getValue(1)); | |||
18162 | FalseVal = Sub; | |||
18163 | } | |||
18164 | } | |||
18165 | ||||
18166 | // On Thumb1, the DAG above may be further combined if z is a power of 2 | |||
18167 | // (z == 2 ^ K). | |||
18168 | // CMOV (SUBS x, y), z, !=, (SUBS x, y):1 -> | |||
18169 | // t1 = (USUBO (SUB x, y), 1) | |||
18170 | // t2 = (SUBCARRY (SUB x, y), t1:0, t1:1) | |||
18171 | // Result = if K != 0 then (SHL t2:0, K) else t2:0 | |||
18172 | // | |||
18173 | // This also handles the special case of comparing against zero; it's | |||
18174 | // essentially the same pattern, except there's no SUBS: | |||
18175 | // CMOV x, z, !=, (CMPZ x, 0) -> | |||
18176 | // t1 = (USUBO x, 1) | |||
18177 | // t2 = (SUBCARRY x, t1:0, t1:1) | |||
18178 | // Result = if K != 0 then (SHL t2:0, K) else t2:0 | |||
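// Worked example (illustrative, z == 4 so K == 2): with Sub = x - y,
// t1 = (USUBO Sub, 1) borrows only when Sub == 0, and t2:0 =
// Sub - (Sub - 1) - borrow is 1 for x != y and 0 for x == y;
// (SHL t2:0, 2) then yields 4 or 0, matching the original CMOV.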
18179 | const APInt *TrueConst; | |||
18180 | if (Subtarget->isThumb1Only() && CC == ARMCC::NE && | |||
18181 | ((FalseVal.getOpcode() == ARMISD::SUBS && | |||
18182 | FalseVal.getOperand(0) == LHS && FalseVal.getOperand(1) == RHS) || | |||
18183 | (FalseVal == LHS && isNullConstant(RHS))) && | |||
18184 | (TrueConst = isPowerOf2Constant(TrueVal))) { | |||
18185 | SDVTList VTs = DAG.getVTList(VT, MVT::i32); | |||
18186 | unsigned ShiftAmount = TrueConst->logBase2(); | |||
18187 | if (ShiftAmount) | |||
18188 | TrueVal = DAG.getConstant(1, dl, VT); | |||
18189 | SDValue Subc = DAG.getNode(ISD::USUBO, dl, VTs, FalseVal, TrueVal); | |||
18190 | Res = DAG.getNode(ISD::SUBCARRY, dl, VTs, FalseVal, Subc, Subc.getValue(1)); | |||
18191 | ||||
18192 | if (ShiftAmount) | |||
18193 | Res = DAG.getNode(ISD::SHL, dl, VT, Res, | |||
18194 | DAG.getConstant(ShiftAmount, dl, MVT::i32)); | |||
18195 | } | |||
18196 | ||||
18197 | if (Res.getNode()) { | |||
18198 | KnownBits Known = DAG.computeKnownBits(SDValue(N,0)); | |||
18199 | // Capture demanded bits information that would be otherwise lost. | |||
18200 | if (Known.Zero == 0xfffffffe) | |||
18201 | Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, | |||
18202 | DAG.getValueType(MVT::i1)); | |||
18203 | else if (Known.Zero == 0xffffff00) | |||
18204 | Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, | |||
18205 | DAG.getValueType(MVT::i8)); | |||
18206 | else if (Known.Zero == 0xffff0000) | |||
18207 | Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, | |||
18208 | DAG.getValueType(MVT::i16)); | |||
18209 | } | |||
18210 | ||||
18211 | return Res; | |||
18212 | } | |||
18213 | ||||
18214 | static SDValue PerformBITCASTCombine(SDNode *N, | |||
18215 | TargetLowering::DAGCombinerInfo &DCI, | |||
18216 | const ARMSubtarget *ST) { | |||
18217 | SelectionDAG &DAG = DCI.DAG; | |||
18218 | SDValue Src = N->getOperand(0); | |||
18219 | EVT DstVT = N->getValueType(0); | |||
18220 | ||||
18221 | // Convert v4f32 bitcast (v4i32 vdup (i32)) -> v4f32 vdup (i32) under MVE. | |||
18222 | if (ST->hasMVEIntegerOps() && Src.getOpcode() == ARMISD::VDUP) { | |||
18223 | EVT SrcVT = Src.getValueType(); | |||
18224 | if (SrcVT.getScalarSizeInBits() == DstVT.getScalarSizeInBits()) | |||
18225 | return DAG.getNode(ARMISD::VDUP, SDLoc(N), DstVT, Src.getOperand(0)); | |||
18226 | } | |||
18227 | ||||
18228 | // We may have a bitcast of something that has already had this bitcast | |||
18229 | // combine performed on it, so skip past any VECTOR_REG_CASTs. | |||
18230 | while (Src.getOpcode() == ARMISD::VECTOR_REG_CAST) | |||
18231 | Src = Src.getOperand(0); | |||
18232 | ||||
18233 | // Bitcast from element-wise VMOV or VMVN doesn't need VREV if the VREV that | |||
18234 | // would be generated is at least the width of the element type. | |||
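// Illustrative example (big-endian, assumed types): a v2f64 bitcast of a
// (v4i32 VMOVIMM c) would normally need a VREV64.32, but that only
// permutes whole 32-bit elements, and VMOVIMM splats the same constant
// into every element, so a VECTOR_REG_CAST suffices.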
18235 | EVT SrcVT = Src.getValueType(); | |||
18236 | if ((Src.getOpcode() == ARMISD::VMOVIMM || | |||
18237 | Src.getOpcode() == ARMISD::VMVNIMM || | |||
18238 | Src.getOpcode() == ARMISD::VMOVFPIMM) && | |||
18239 | SrcVT.getScalarSizeInBits() <= DstVT.getScalarSizeInBits() && | |||
18240 | DAG.getDataLayout().isBigEndian()) | |||
18241 | return DAG.getNode(ARMISD::VECTOR_REG_CAST, SDLoc(N), DstVT, Src); | |||
18242 | ||||
18243 | // bitcast(extract(x, n)); bitcast(extract(x, n+1)) -> VMOVRRD x | |||
18244 | if (SDValue R = PerformExtractEltToVMOVRRD(N, DCI)) | |||
18245 | return R; | |||
18246 | ||||
18247 | return SDValue(); | |||
18248 | } | |||
18249 | ||||
18250 | // Some combines for the MVETrunc truncations legalizer helper. Also lowers the | |||
18251 | // node into stack operations after legalizeOps. | |||
18252 | SDValue ARMTargetLowering::PerformMVETruncCombine( | |||
18253 | SDNode *N, TargetLowering::DAGCombinerInfo &DCI) const { | |||
18254 | SelectionDAG &DAG = DCI.DAG; | |||
18255 | EVT VT = N->getValueType(0); | |||
18256 | SDLoc DL(N); | |||
18257 | ||||
18258 | // MVETrunc(Undef, Undef) -> Undef | |||
18259 | if (all_of(N->ops(), [](SDValue Op) { return Op.isUndef(); })) | |||
18260 | return DAG.getUNDEF(VT); | |||
18261 | ||||
18262 | // MVETrunc(MVETrunc(a, b), MVETrunc(c, d)) -> MVETrunc(a, b, c, d) | |||
18263 | if (N->getNumOperands() == 2 && | |||
18264 | N->getOperand(0).getOpcode() == ARMISD::MVETRUNC && | |||
18265 | N->getOperand(1).getOpcode() == ARMISD::MVETRUNC) | |||
18266 | return DAG.getNode(ARMISD::MVETRUNC, DL, VT, N->getOperand(0).getOperand(0), | |||
18267 | N->getOperand(0).getOperand(1), | |||
18268 | N->getOperand(1).getOperand(0), | |||
18269 | N->getOperand(1).getOperand(1)); | |||
18270 | ||||
18271 | // MVETrunc(shuffle, shuffle) -> VMOVN | |||
18272 | if (N->getNumOperands() == 2 && | |||
18273 | N->getOperand(0).getOpcode() == ISD::VECTOR_SHUFFLE && | |||
18274 | N->getOperand(1).getOpcode() == ISD::VECTOR_SHUFFLE) { | |||
18275 | auto *S0 = cast<ShuffleVectorSDNode>(N->getOperand(0).getNode()); | |||
18276 | auto *S1 = cast<ShuffleVectorSDNode>(N->getOperand(1).getNode()); | |||
18277 | ||||
18278 | if (S0->getOperand(0) == S1->getOperand(0) && | |||
18279 | S0->getOperand(1) == S1->getOperand(1)) { | |||
18280 | // Construct complete shuffle mask | |||
18281 | SmallVector<int, 8> Mask(S0->getMask().begin(), S0->getMask().end()); | |||
18282 | Mask.append(S1->getMask().begin(), S1->getMask().end()); | |||
18283 | ||||
18284 | if (isVMOVNTruncMask(Mask, VT, false)) | |||
18285 | return DAG.getNode( | |||
18286 | ARMISD::VMOVN, DL, VT, | |||
18287 | DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, VT, S0->getOperand(0)), | |||
18288 | DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, VT, S0->getOperand(1)), | |||
18289 | DAG.getConstant(1, DL, MVT::i32)); | |||
18290 | if (isVMOVNTruncMask(Mask, VT, true)) | |||
18291 | return DAG.getNode( | |||
18292 | ARMISD::VMOVN, DL, VT, | |||
18293 | DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, VT, S0->getOperand(1)), | |||
18294 | DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, VT, S0->getOperand(0)), | |||
18295 | DAG.getConstant(1, DL, MVT::i32)); | |||
18296 | } | |||
18297 | } | |||
18298 | ||||
18299 | // For MVETrunc of a buildvector or shuffle, it can be beneficial to lower the | |||
18300 | // truncate to a buildvector to allow the generic optimisations to kick in. | |||
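// Illustrative example (not from the original source): for
//   MVETrunc(v4i32 buildvector(a, b, c, d), v4i32 buildvector(e, f, g, h))
// each i32 lane is extracted and fed into one v8i16 buildvector, whose
// implicit per-lane truncation the generic combines can then optimise.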
18301 | if (all_of(N->ops(), [](SDValue Op) { | |||
18302 | return Op.getOpcode() == ISD::BUILD_VECTOR || | |||
18303 | Op.getOpcode() == ISD::VECTOR_SHUFFLE || | |||
18304 | (Op.getOpcode() == ISD::BITCAST && | |||
18305 | Op.getOperand(0).getOpcode() == ISD::BUILD_VECTOR); | |||
18306 | })) { | |||
18307 | SmallVector<SDValue, 8> Extracts; | |||
18308 | for (unsigned Op = 0; Op < N->getNumOperands(); Op++) { | |||
18309 | SDValue O = N->getOperand(Op); | |||
18310 | for (unsigned i = 0; i < O.getValueType().getVectorNumElements(); i++) { | |||
18311 | SDValue Ext = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, O, | |||
18312 | DAG.getConstant(i, DL, MVT::i32)); | |||
18313 | Extracts.push_back(Ext); | |||
18314 | } | |||
18315 | } | |||
18316 | return DAG.getBuildVector(VT, DL, Extracts); | |||
18317 | } | |||
18318 | ||||
18319 | // If we are late in the legalization process and nothing has optimised | |||
18320 | // the trunc to anything better, lower it to a stack store and reload, | |||
18321 | // performing the truncation whilst keeping the lanes in the correct order: | |||
18322 | // VSTRH.32 a, stack; VSTRH.32 b, stack+8; VLDRW.32 stack; | |||
18323 | if (!DCI.isAfterLegalizeDAG()) | |||
18324 | return SDValue(); | |||
18325 | ||||
18326 | SDValue StackPtr = DAG.CreateStackTemporary(TypeSize::Fixed(16), Align(4)); | |||
18327 | int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex(); | |||
18328 | int NumIns = N->getNumOperands(); | |||
18329 | assert((NumIns == 2 || NumIns == 4) && | |||
18330 | "Expected 2 or 4 inputs to an MVETrunc"); | |||
18331 | EVT StoreVT = VT.getHalfNumVectorElementsVT(*DAG.getContext()); | |||
18332 | if (N->getNumOperands() == 4) | |||
18333 | StoreVT = StoreVT.getHalfNumVectorElementsVT(*DAG.getContext()); | |||
18334 | ||||
18335 | SmallVector<SDValue> Chains; | |||
18336 | for (int I = 0; I < NumIns; I++) { | |||
18337 | SDValue Ptr = DAG.getNode( | |||
18338 | ISD::ADD, DL, StackPtr.getValueType(), StackPtr, | |||
18339 | DAG.getConstant(I * 16 / NumIns, DL, StackPtr.getValueType())); | |||
18340 | MachinePointerInfo MPI = MachinePointerInfo::getFixedStack( | |||
18341 | DAG.getMachineFunction(), SPFI, I * 16 / NumIns); | |||
18342 | SDValue Ch = DAG.getTruncStore(DAG.getEntryNode(), DL, N->getOperand(I), | |||
18343 | Ptr, MPI, StoreVT, Align(4)); | |||
18344 | Chains.push_back(Ch); | |||
18345 | } | |||
18346 | ||||
18347 | SDValue Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains); | |||
18348 | MachinePointerInfo MPI = | |||
18349 | MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI, 0); | |||
18350 | return DAG.getLoad(VT, DL, Chain, StackPtr, MPI, Align(4)); | |||
18351 | } | |||
18352 | ||||
18353 | // Take a MVEEXT(load x) and split that into (extload x, extload x+8) | |||
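// Illustrative example (assumed types): an (MVESEXT (load v16i8 from x))
// with two v8i16 results becomes a v8i16 sextload of v8i8 from x and
// another from x+8, preserving the lane order in memory.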
18354 | static SDValue PerformSplittingMVEEXTToWideningLoad(SDNode *N, | |||
18355 | SelectionDAG &DAG) { | |||
18356 | SDValue N0 = N->getOperand(0); | |||
18357 | LoadSDNode *LD = dyn_cast<LoadSDNode>(N0.getNode()); | |||
18358 | if (!LD || !LD->isSimple() || !N0.hasOneUse() || LD->isIndexed()) | |||
18359 | return SDValue(); | |||
18360 | ||||
18361 | EVT FromVT = LD->getMemoryVT(); | |||
18362 | EVT ToVT = N->getValueType(0); | |||
18363 | if (!ToVT.isVector()) | |||
18364 | return SDValue(); | |||
18365 | assert(FromVT.getVectorNumElements() == ToVT.getVectorNumElements() * 2); | |||
18366 | EVT ToEltVT = ToVT.getVectorElementType(); | |||
18367 | EVT FromEltVT = FromVT.getVectorElementType(); | |||
18368 | ||||
18369 | unsigned NumElements = 0; | |||
18370 | if (ToEltVT == MVT::i32 && (FromEltVT == MVT::i16 || FromEltVT == MVT::i8)) | |||
18371 | NumElements = 4; | |||
18372 | if (ToEltVT == MVT::i16 && FromEltVT == MVT::i8) | |||
18373 | NumElements = 8; | |||
18374 | assert(NumElements != 0); | |||
18375 | ||||
18376 | ISD::LoadExtType NewExtType = | |||
18377 | N->getOpcode() == ARMISD::MVESEXT ? ISD::SEXTLOAD : ISD::ZEXTLOAD; | |||
18378 | if (LD->getExtensionType() != ISD::NON_EXTLOAD && | |||
18379 | LD->getExtensionType() != ISD::EXTLOAD && | |||
18380 | LD->getExtensionType() != NewExtType) | |||
18381 | return SDValue(); | |||
18382 | ||||
18383 | LLVMContext &C = *DAG.getContext(); | |||
18384 | SDLoc DL(LD); | |||
18385 | // Details about the old load | |||
18386 | SDValue Ch = LD->getChain(); | |||
18387 | SDValue BasePtr = LD->getBasePtr(); | |||
18388 | Align Alignment = LD->getOriginalAlign(); | |||
18389 | MachineMemOperand::Flags MMOFlags = LD->getMemOperand()->getFlags(); | |||
18390 | AAMDNodes AAInfo = LD->getAAInfo(); | |||
18391 | ||||
18392 | SDValue Offset = DAG.getUNDEF(BasePtr.getValueType()); | |||
18393 | EVT NewFromVT = EVT::getVectorVT( | |||
18394 | C, EVT::getIntegerVT(C, FromEltVT.getScalarSizeInBits()), NumElements); | |||
18395 | EVT NewToVT = EVT::getVectorVT( | |||
18396 | C, EVT::getIntegerVT(C, ToEltVT.getScalarSizeInBits()), NumElements); | |||
18397 | ||||
18398 | SmallVector<SDValue, 4> Loads; | |||
18399 | SmallVector<SDValue, 4> Chains; | |||
18400 | for (unsigned i = 0; i < FromVT.getVectorNumElements() / NumElements; i++) { | |||
18401 | unsigned NewOffset = (i * NewFromVT.getSizeInBits()) / 8; | |||
18402 | SDValue NewPtr = | |||
18403 | DAG.getObjectPtrOffset(DL, BasePtr, TypeSize::Fixed(NewOffset)); | |||
18404 | ||||
18405 | SDValue NewLoad = | |||
18406 | DAG.getLoad(ISD::UNINDEXED, NewExtType, NewToVT, DL, Ch, NewPtr, Offset, | |||
18407 | LD->getPointerInfo().getWithOffset(NewOffset), NewFromVT, | |||
18408 | Alignment, MMOFlags, AAInfo); | |||
18409 | Loads.push_back(NewLoad); | |||
18410 | Chains.push_back(SDValue(NewLoad.getNode(), 1)); | |||
18411 | } | |||
18412 | ||||
18413 | SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains); | |||
18414 | DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewChain); | |||
18415 | return DAG.getMergeValues(Loads, DL); | |||
18416 | } | |||
18417 | ||||
18418 | // Perform combines for MVEEXT. If it has not been optimized to anything | |||
18419 | // better before lowering, it gets converted to a stack store and extloads | |||
18420 | // performing the extend whilst still keeping the same lane ordering. | |||
18421 | SDValue ARMTargetLowering::PerformMVEExtCombine( | |||
18422 | SDNode *N, TargetLowering::DAGCombinerInfo &DCI) const { | |||
18423 | SelectionDAG &DAG = DCI.DAG; | |||
18424 | EVT VT = N->getValueType(0); | |||
18425 | SDLoc DL(N); | |||
18426 | assert(N->getNumValues() == 2 && "Expected MVEEXT with 2 elements"); | |||
18427 | assert((VT == MVT::v4i32 || VT == MVT::v8i16) && "Unexpected MVEEXT type"); | |||
18428 | ||||
18429 | EVT ExtVT = N->getOperand(0).getValueType().getHalfNumVectorElementsVT( | |||
18430 | *DAG.getContext()); | |||
18431 | auto Extend = [&](SDValue V) { | |||
18432 | SDValue VVT = DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, VT, V); | |||
18433 | return N->getOpcode() == ARMISD::MVESEXT | |||
18434 | ? DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, VVT, | |||
18435 | DAG.getValueType(ExtVT)) | |||
18436 | : DAG.getZeroExtendInReg(VVT, DL, ExtVT); | |||
18437 | }; | |||
18438 | ||||
18439 | // MVEEXT(VDUP) -> SIGN_EXTEND_INREG(VDUP) | |||
18440 | if (N->getOperand(0).getOpcode() == ARMISD::VDUP) { | |||
18441 | SDValue Ext = Extend(N->getOperand(0)); | |||
18442 | return DAG.getMergeValues({Ext, Ext}, DL); | |||
18443 | } | |||
18444 | ||||
18445 | // MVEEXT(shuffle) -> SIGN_EXTEND_INREG/ZERO_EXTEND_INREG | |||
18446 | if (auto *SVN = dyn_cast<ShuffleVectorSDNode>(N->getOperand(0))) { | |||
18447 | ArrayRef<int> Mask = SVN->getMask(); | |||
18448 | assert(Mask.size() == 2 * VT.getVectorNumElements()); | |||
18449 | assert(Mask.size() == SVN->getValueType(0).getVectorNumElements()); | |||
18450 | unsigned Rev = VT == MVT::v4i32 ? ARMISD::VREV32 : ARMISD::VREV16; | |||
18451 | SDValue Op0 = SVN->getOperand(0); | |||
18452 | SDValue Op1 = SVN->getOperand(1); | |||
18453 | ||||
18454 | auto CheckInregMask = [&](int Start, int Offset) { | |||
18455 | for (int Idx = 0, E = VT.getVectorNumElements(); Idx < E; ++Idx) | |||
18456 | if (Mask[Start + Idx] >= 0 && Mask[Start + Idx] != Idx * 2 + Offset) | |||
18457 | return false; | |||
18458 | return true; | |||
18459 | }; | |||
18460 | SDValue V0 = SDValue(N, 0); | |||
18461 | SDValue V1 = SDValue(N, 1); | |||
18462 | if (CheckInregMask(0, 0)) | |||
18463 | V0 = Extend(Op0); | |||
18464 | else if (CheckInregMask(0, 1)) | |||
18465 | V0 = Extend(DAG.getNode(Rev, DL, SVN->getValueType(0), Op0)); | |||
18466 | else if (CheckInregMask(0, Mask.size())) | |||
18467 | V0 = Extend(Op1); | |||
18468 | else if (CheckInregMask(0, Mask.size() + 1)) | |||
18469 | V0 = Extend(DAG.getNode(Rev, DL, SVN->getValueType(0), Op1)); | |||
18470 | ||||
18471 | if (CheckInregMask(VT.getVectorNumElements(), Mask.size())) | |||
18472 | V1 = Extend(Op1); | |||
18473 | else if (CheckInregMask(VT.getVectorNumElements(), Mask.size() + 1)) | |||
18474 | V1 = Extend(DAG.getNode(Rev, DL, SVN->getValueType(0), Op1)); | |||
18475 | else if (CheckInregMask(VT.getVectorNumElements(), 0)) | |||
18476 | V1 = Extend(Op0); | |||
18477 | else if (CheckInregMask(VT.getVectorNumElements(), 1)) | |||
18478 | V1 = Extend(DAG.getNode(Rev, DL, SVN->getValueType(0), Op0)); | |||
18479 | ||||
18480 | if (V0.getNode() != N || V1.getNode() != N) | |||
18481 | return DAG.getMergeValues({V0, V1}, DL); | |||
18482 | } | |||
18483 | ||||
18484 | // MVEEXT(load) -> extload, extload | |||
18485 | if (N->getOperand(0)->getOpcode() == ISD::LOAD) | |||
18486 | if (SDValue L = PerformSplittingMVEEXTToWideningLoad(N, DAG)) | |||
18487 | return L; | |||
18488 | ||||
18489 | if (!DCI.isAfterLegalizeDAG()) | |||
18490 | return SDValue(); | |||
18491 | ||||
18492 | // Lower to a stack store and reload: | |||
18493 | // VSTRW.32 a, stack; VLDRH.32 stack; VLDRH.32 stack+8; | |||
18494 | SDValue StackPtr = DAG.CreateStackTemporary(TypeSize::Fixed(16), Align(4)); | |||
18495 | int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex(); | |||
18496 | int NumOuts = N->getNumValues(); | |||
18497 | assert((NumOuts == 2 || NumOuts == 4) && | |||
18498 | "Expected 2 or 4 outputs to an MVEEXT"); | |||
18499 | EVT LoadVT = N->getOperand(0).getValueType().getHalfNumVectorElementsVT( | |||
18500 | *DAG.getContext()); | |||
18501 | if (N->getNumOperands() == 4) | |||
18502 | LoadVT = LoadVT.getHalfNumVectorElementsVT(*DAG.getContext()); | |||
18503 | ||||
18504 | MachinePointerInfo MPI = | |||
18505 | MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI, 0); | |||
18506 | SDValue Chain = DAG.getStore(DAG.getEntryNode(), DL, N->getOperand(0), | |||
18507 | StackPtr, MPI, Align(4)); | |||
18508 | ||||
18509 | SmallVector<SDValue> Loads; | |||
18510 | for (int I = 0; I < NumOuts; I++) { | |||
18511 | SDValue Ptr = DAG.getNode( | |||
18512 | ISD::ADD, DL, StackPtr.getValueType(), StackPtr, | |||
18513 | DAG.getConstant(I * 16 / NumOuts, DL, StackPtr.getValueType())); | |||
18514 | MachinePointerInfo MPI = MachinePointerInfo::getFixedStack( | |||
18515 | DAG.getMachineFunction(), SPFI, I * 16 / NumOuts); | |||
18516 | SDValue Load = DAG.getExtLoad( | |||
18517 | N->getOpcode() == ARMISD::MVESEXT ? ISD::SEXTLOAD : ISD::ZEXTLOAD, DL, | |||
18518 | VT, Chain, Ptr, MPI, LoadVT, Align(4)); | |||
18519 | Loads.push_back(Load); | |||
18520 | } | |||
18521 | ||||
18522 | return DAG.getMergeValues(Loads, DL); | |||
18523 | } | |||
18524 | ||||
18525 | SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N, | |||
18526 | DAGCombinerInfo &DCI) const { | |||
18527 | switch (N->getOpcode()) { | |||
18528 | default: break; | |||
18529 | case ISD::SELECT_CC: | |||
18530 | case ISD::SELECT: return PerformSELECTCombine(N, DCI, Subtarget); | |||
18531 | case ISD::VSELECT: return PerformVSELECTCombine(N, DCI, Subtarget); | |||
18532 | case ISD::SETCC: return PerformVSetCCToVCTPCombine(N, DCI, Subtarget); | |||
18533 | case ISD::ABS: return PerformABSCombine(N, DCI, Subtarget); | |||
18534 | case ARMISD::ADDE: return PerformADDECombine(N, DCI, Subtarget); | |||
18535 | case ARMISD::UMLAL: return PerformUMLALCombine(N, DCI.DAG, Subtarget); | |||
18536 | case ISD::ADD: return PerformADDCombine(N, DCI, Subtarget); | |||
18537 | case ISD::SUB: return PerformSUBCombine(N, DCI, Subtarget); | |||
18538 | case ISD::MUL: return PerformMULCombine(N, DCI, Subtarget); | |||
18539 | case ISD::OR: return PerformORCombine(N, DCI, Subtarget); | |||
18540 | case ISD::XOR: return PerformXORCombine(N, DCI, Subtarget); | |||
18541 | case ISD::AND: return PerformANDCombine(N, DCI, Subtarget); | |||
18542 | case ISD::BRCOND: | |||
18543 | case ISD::BR_CC: return PerformHWLoopCombine(N, DCI, Subtarget); | |||
18544 | case ARMISD::ADDC: | |||
18545 | case ARMISD::SUBC: return PerformAddcSubcCombine(N, DCI, Subtarget); | |||
18546 | case ARMISD::SUBE: return PerformAddeSubeCombine(N, DCI, Subtarget); | |||
18547 | case ARMISD::BFI: return PerformBFICombine(N, DCI.DAG); | |||
18548 | case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI, Subtarget); | |||
18549 | case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG); | |||
18550 | case ARMISD::VMOVhr: return PerformVMOVhrCombine(N, DCI); | |||
18551 | case ARMISD::VMOVrh: return PerformVMOVrhCombine(N, DCI.DAG); | |||
18552 | case ISD::STORE: return PerformSTORECombine(N, DCI, Subtarget); | |||
18553 | case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI, Subtarget); | |||
18554 | case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI); | |||
18555 | case ISD::EXTRACT_VECTOR_ELT: | |||
18556 | return PerformExtractEltCombine(N, DCI, Subtarget); | |||
18557 | case ISD::SIGN_EXTEND_INREG: return PerformSignExtendInregCombine(N, DCI.DAG); | |||
18558 | case ISD::INSERT_SUBVECTOR: return PerformInsertSubvectorCombine(N, DCI); | |||
18559 | case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG); | |||
18560 | case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI, Subtarget); | |||
18561 | case ARMISD::VDUP: return PerformVDUPCombine(N, DCI.DAG, Subtarget); | |||
18562 | case ISD::FP_TO_SINT: | |||
18563 | case ISD::FP_TO_UINT: | |||
18564 | return PerformVCVTCombine(N, DCI.DAG, Subtarget); | |||
18565 | case ISD::FADD: | |||
18566 | return PerformFAddVSelectCombine(N, DCI.DAG, Subtarget); | |||
18567 | case ISD::FDIV: | |||
18568 | return PerformVDIVCombine(N, DCI.DAG, Subtarget); | |||
18569 | case ISD::INTRINSIC_WO_CHAIN: | |||
18570 | return PerformIntrinsicCombine(N, DCI); | |||
18571 | case ISD::SHL: | |||
18572 | case ISD::SRA: | |||
18573 | case ISD::SRL: | |||
18574 | return PerformShiftCombine(N, DCI, Subtarget); | |||
18575 | case ISD::SIGN_EXTEND: | |||
18576 | case ISD::ZERO_EXTEND: | |||
18577 | case ISD::ANY_EXTEND: | |||
18578 | return PerformExtendCombine(N, DCI.DAG, Subtarget); | |||
18579 | case ISD::FP_EXTEND: | |||
18580 | return PerformFPExtendCombine(N, DCI.DAG, Subtarget); | |||
18581 | case ISD::SMIN: | |||
18582 | case ISD::UMIN: | |||
18583 | case ISD::SMAX: | |||
18584 | case ISD::UMAX: | |||
18585 | return PerformMinMaxCombine(N, DCI.DAG, Subtarget); | |||
18586 | case ARMISD::CMOV: | |||
18587 | return PerformCMOVCombine(N, DCI.DAG); | |||
18588 | case ARMISD::BRCOND: | |||
18589 | return PerformBRCONDCombine(N, DCI.DAG); | |||
18590 | case ARMISD::CMPZ: | |||
18591 | return PerformCMPZCombine(N, DCI.DAG); | |||
18592 | case ARMISD::CSINC: | |||
18593 | case ARMISD::CSINV: | |||
18594 | case ARMISD::CSNEG: | |||
18595 | return PerformCSETCombine(N, DCI.DAG); | |||
18596 | case ISD::LOAD: | |||
18597 | return PerformLOADCombine(N, DCI, Subtarget); | |||
18598 | case ARMISD::VLD1DUP: | |||
18599 | case ARMISD::VLD2DUP: | |||
18600 | case ARMISD::VLD3DUP: | |||
18601 | case ARMISD::VLD4DUP: | |||
18602 | return PerformVLDCombine(N, DCI); | |||
18603 | case ARMISD::BUILD_VECTOR: | |||
18604 | return PerformARMBUILD_VECTORCombine(N, DCI); | |||
18605 | case ISD::BITCAST: | |||
18606 | return PerformBITCASTCombine(N, DCI, Subtarget); | |||
18607 | case ARMISD::PREDICATE_CAST: | |||
18608 | return PerformPREDICATE_CASTCombine(N, DCI); | |||
18609 | case ARMISD::VECTOR_REG_CAST: | |||
18610 | return PerformVECTOR_REG_CASTCombine(N, DCI.DAG, Subtarget); | |||
18611 | case ARMISD::MVETRUNC: | |||
18612 | return PerformMVETruncCombine(N, DCI); | |||
18613 | case ARMISD::MVESEXT: | |||
18614 | case ARMISD::MVEZEXT: | |||
18615 | return PerformMVEExtCombine(N, DCI); | |||
18616 | case ARMISD::VCMP: | |||
18617 | return PerformVCMPCombine(N, DCI.DAG, Subtarget); | |||
18618 | case ISD::VECREDUCE_ADD: | |||
18619 | return PerformVECREDUCE_ADDCombine(N, DCI.DAG, Subtarget); | |||
18620 | case ARMISD::VMOVN: | |||
18621 | return PerformVMOVNCombine(N, DCI); | |||
18622 | case ARMISD::VQMOVNs: | |||
18623 | case ARMISD::VQMOVNu: | |||
18624 | return PerformVQMOVNCombine(N, DCI); | |||
18625 | case ARMISD::ASRL: | |||
18626 | case ARMISD::LSRL: | |||
18627 | case ARMISD::LSLL: | |||
18628 | return PerformLongShiftCombine(N, DCI.DAG); | |||
18629 | case ARMISD::SMULWB: { | |||
18630 | unsigned BitWidth = N->getValueType(0).getSizeInBits(); | |||
18631 | APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16); | |||
18632 | if (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI)) | |||
18633 | return SDValue(); | |||
18634 | break; | |||
18635 | } | |||
18636 | case ARMISD::SMULWT: { | |||
18637 | unsigned BitWidth = N->getValueType(0).getSizeInBits(); | |||
18638 | APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 16); | |||
18639 | if (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI)) | |||
18640 | return SDValue(); | |||
18641 | break; | |||
18642 | } | |||
18643 | case ARMISD::SMLALBB: | |||
18644 | case ARMISD::QADD16b: | |||
18645 | case ARMISD::QSUB16b: | |||
18646 | case ARMISD::UQADD16b: | |||
18647 | case ARMISD::UQSUB16b: { | |||
18648 | unsigned BitWidth = N->getValueType(0).getSizeInBits(); | |||
18649 | APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16); | |||
18650 | if ((SimplifyDemandedBits(N->getOperand(0), DemandedMask, DCI)) || | |||
18651 | (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI))) | |||
18652 | return SDValue(); | |||
18653 | break; | |||
18654 | } | |||
18655 | case ARMISD::SMLALBT: { | |||
18656 | unsigned LowWidth = N->getOperand(0).getValueType().getSizeInBits(); | |||
18657 | APInt LowMask = APInt::getLowBitsSet(LowWidth, 16); | |||
18658 | unsigned HighWidth = N->getOperand(1).getValueType().getSizeInBits(); | |||
18659 | APInt HighMask = APInt::getHighBitsSet(HighWidth, 16); | |||
18660 | if ((SimplifyDemandedBits(N->getOperand(0), LowMask, DCI)) || | |||
18661 | (SimplifyDemandedBits(N->getOperand(1), HighMask, DCI))) | |||
18662 | return SDValue(); | |||
18663 | break; | |||
18664 | } | |||
18665 | case ARMISD::SMLALTB: { | |||
18666 | unsigned HighWidth = N->getOperand(0).getValueType().getSizeInBits(); | |||
18667 | APInt HighMask = APInt::getHighBitsSet(HighWidth, 16); | |||
18668 | unsigned LowWidth = N->getOperand(1).getValueType().getSizeInBits(); | |||
18669 | APInt LowMask = APInt::getLowBitsSet(LowWidth, 16); | |||
18670 | if ((SimplifyDemandedBits(N->getOperand(0), HighMask, DCI)) || | |||
18671 | (SimplifyDemandedBits(N->getOperand(1), LowMask, DCI))) | |||
18672 | return SDValue(); | |||
18673 | break; | |||
18674 | } | |||
18675 | case ARMISD::SMLALTT: { | |||
18676 | unsigned BitWidth = N->getValueType(0).getSizeInBits(); | |||
18677 | APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 16); | |||
18678 | if ((SimplifyDemandedBits(N->getOperand(0), DemandedMask, DCI)) || | |||
18679 | (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI))) | |||
18680 | return SDValue(); | |||
18681 | break; | |||
18682 | } | |||
18683 | case ARMISD::QADD8b: | |||
18684 | case ARMISD::QSUB8b: | |||
18685 | case ARMISD::UQADD8b: | |||
18686 | case ARMISD::UQSUB8b: { | |||
18687 | unsigned BitWidth = N->getValueType(0).getSizeInBits(); | |||
18688 | APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 8); | |||
18689 | if ((SimplifyDemandedBits(N->getOperand(0), DemandedMask, DCI)) || | |||
18690 | (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI))) | |||
18691 | return SDValue(); | |||
18692 | break; | |||
18693 | } | |||
18694 | case ISD::INTRINSIC_VOID: | |||
18695 | case ISD::INTRINSIC_W_CHAIN: | |||
18696 | switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { | |||
18697 | case Intrinsic::arm_neon_vld1: | |||
18698 | case Intrinsic::arm_neon_vld1x2: | |||
18699 | case Intrinsic::arm_neon_vld1x3: | |||
18700 | case Intrinsic::arm_neon_vld1x4: | |||
18701 | case Intrinsic::arm_neon_vld2: | |||
18702 | case Intrinsic::arm_neon_vld3: | |||
18703 | case Intrinsic::arm_neon_vld4: | |||
18704 | case Intrinsic::arm_neon_vld2lane: | |||
18705 | case Intrinsic::arm_neon_vld3lane: | |||
18706 | case Intrinsic::arm_neon_vld4lane: | |||
18707 | case Intrinsic::arm_neon_vld2dup: | |||
18708 | case Intrinsic::arm_neon_vld3dup: | |||
18709 | case Intrinsic::arm_neon_vld4dup: | |||
18710 | case Intrinsic::arm_neon_vst1: | |||
18711 | case Intrinsic::arm_neon_vst1x2: | |||
18712 | case Intrinsic::arm_neon_vst1x3: | |||
18713 | case Intrinsic::arm_neon_vst1x4: | |||
18714 | case Intrinsic::arm_neon_vst2: | |||
18715 | case Intrinsic::arm_neon_vst3: | |||
18716 | case Intrinsic::arm_neon_vst4: | |||
18717 | case Intrinsic::arm_neon_vst2lane: | |||
18718 | case Intrinsic::arm_neon_vst3lane: | |||
18719 | case Intrinsic::arm_neon_vst4lane: | |||
18720 | return PerformVLDCombine(N, DCI); | |||
18721 | case Intrinsic::arm_mve_vld2q: | |||
18722 | case Intrinsic::arm_mve_vld4q: | |||
18723 | case Intrinsic::arm_mve_vst2q: | |||
18724 | case Intrinsic::arm_mve_vst4q: | |||
18725 | return PerformMVEVLDCombine(N, DCI); | |||
18726 | default: break; | |||
18727 | } | |||
18728 | break; | |||
18729 | } | |||
18730 | return SDValue(); | |||
18731 | } | |||
18732 | ||||
18733 | bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc, | |||
18734 | EVT VT) const { | |||
18735 | return (VT == MVT::f32) && (Opc == ISD::LOAD || Opc == ISD::STORE); | |||
18736 | } | |||
18737 | ||||
18738 | bool ARMTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, unsigned, | |||
18739 | Align Alignment, | |||
18740 | MachineMemOperand::Flags, | |||
18741 | bool *Fast) const { | |||
18742 | // Depends on what it gets converted into if the type is weird. | |||
18743 | if (!VT.isSimple()) | |||
18744 | return false; | |||
18745 | ||||
18746 | // The AllowsUnaligned flag models the SCTLR.A setting in ARM cpus | |||
18747 | bool AllowsUnaligned = Subtarget->allowsUnalignedMem(); | |||
18748 | auto Ty = VT.getSimpleVT().SimpleTy; | |||
18749 | ||||
18750 | if (Ty == MVT::i8 || Ty == MVT::i16 || Ty == MVT::i32) { | |||
18751 | // Unaligned accesses can use (for example) LDRB, LDRH, LDR | |||
18752 | if (AllowsUnaligned) { | |||
18753 | if (Fast) | |||
18754 | *Fast = Subtarget->hasV7Ops(); | |||
18755 | return true; | |||
18756 | } | |||
18757 | } | |||
18758 | ||||
18759 | if (Ty == MVT::f64 || Ty == MVT::v2f64) { | |||
18760 | // For any little-endian targets with neon, we can support unaligned ld/st | |||
18761 | // of D and Q (e.g. {D0,D1}) registers by using vld1.i8/vst1.i8. | |||
18762 | // A big-endian target may also explicitly support unaligned accesses | |||
18763 | if (Subtarget->hasNEON() && (AllowsUnaligned || Subtarget->isLittle())) { | |||
18764 | if (Fast) | |||
18765 | *Fast = true; | |||
18766 | return true; | |||
18767 | } | |||
18768 | } | |||
18769 | ||||
18770 | if (!Subtarget->hasMVEIntegerOps()) | |||
18771 | return false; | |||
18772 | ||||
18773 | // These are for predicates | |||
18774 | if ((Ty == MVT::v16i1 || Ty == MVT::v8i1 || Ty == MVT::v4i1 || | |||
18775 | Ty == MVT::v2i1)) { | |||
18776 | if (Fast) | |||
18777 | *Fast = true; | |||
18778 | return true; | |||
18779 | } | |||
18780 | ||||
18781 | // These are for truncated stores/narrowing loads. They are fine so long as | |||
18782 | // the alignment is at least the size of the item being loaded | |||
18783 | if ((Ty == MVT::v4i8 || Ty == MVT::v8i8 || Ty == MVT::v4i16) && | |||
18784 | Alignment >= VT.getScalarSizeInBits() / 8) { | |||
18785 | if (Fast) | |||
18786 | *Fast = true; | |||
18787 | return true; | |||
18788 | } | |||
18789 | ||||
18790 | // In little-endian MVE, the store instructions VSTRB.U8, VSTRH.U16 and | |||
18791 | // VSTRW.U32 all store the vector register in exactly the same format, and | |||
18792 | // differ only in the range of their immediate offset field and the required | |||
18793 | // alignment. So there is always a store that can be used, regardless of | |||
18794 | // actual type. | |||
18795 | // | |||
18796 | // For big endian, that is not the case. But we can still emit a (VSTRB.U8; | |||
18797 | // VREV64.8) pair and get the same effect. This will likely be better than | |||
18798 | // aligning the vector through the stack. | |||
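| // For example (illustrative): an unaligned v4i32 store in little-endian | |||
| // mode can simply be emitted as VSTRB.U8, which has no alignment | |||
| // requirement, so the check below reports it as both legal and fast. | |||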
18799 | if (Ty == MVT::v16i8 || Ty == MVT::v8i16 || Ty == MVT::v8f16 || | |||
18800 | Ty == MVT::v4i32 || Ty == MVT::v4f32 || Ty == MVT::v2i64 || | |||
18801 | Ty == MVT::v2f64) { | |||
18802 | if (Fast) | |||
18803 | *Fast = true; | |||
18804 | return true; | |||
18805 | } | |||
18806 | ||||
18807 | return false; | |||
18808 | } | |||
18809 | ||||
18810 | ||||
18811 | EVT ARMTargetLowering::getOptimalMemOpType( | |||
18812 | const MemOp &Op, const AttributeList &FuncAttributes) const { | |||
18813 | // See if we can use NEON instructions for this... | |||
18814 | if ((Op.isMemcpy() || Op.isZeroMemset()) && Subtarget->hasNEON() && | |||
18815 | !FuncAttributes.hasFnAttr(Attribute::NoImplicitFloat)) { | |||
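| // Note: Fast is only read when allowsMisalignedMemoryAccesses returns true | |||
| // (see the short-circuiting && below), and that call always sets it before | |||
| // returning true, so it is never read uninitialized. | |||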
18816 | bool Fast; | |||
18817 | if (Op.size() >= 16 && | |||
18818 | (Op.isAligned(Align(16)) || | |||
18819 | (allowsMisalignedMemoryAccesses(MVT::v2f64, 0, Align(1), | |||
18820 | MachineMemOperand::MONone, &Fast) && | |||
18821 | Fast))) { | |||
18822 | return MVT::v2f64; | |||
18823 | } else if (Op.size() >= 8 && | |||
18824 | (Op.isAligned(Align(8)) || | |||
18825 | (allowsMisalignedMemoryAccesses( | |||
18826 | MVT::f64, 0, Align(1), MachineMemOperand::MONone, &Fast) && | |||
18827 | Fast))) { | |||
18828 | return MVT::f64; | |||
18829 | } | |||
18830 | } | |||
18831 | ||||
18832 | // Let the target-independent logic figure it out. | |||
18833 | return MVT::Other; | |||
18834 | } | |||
18835 | ||||
18836 | // 64-bit integers are split into their high and low parts and held in two | |||
18837 | // different registers, so the trunc is free since the low register can just | |||
18838 | // be used. | |||
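| // For example, 'trunc i64 %x to i32' compiles to no instructions: the i64 | |||
| // lives in a GPR pair and the truncate simply uses the low register. | |||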
18839 | bool ARMTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const { | |||
18840 | if (!SrcTy->isIntegerTy() || !DstTy->isIntegerTy()) | |||
18841 | return false; | |||
18842 | unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); | |||
18843 | unsigned DestBits = DstTy->getPrimitiveSizeInBits(); | |||
18844 | return (SrcBits == 64 && DestBits == 32); | |||
18845 | } | |||
18846 | ||||
18847 | bool ARMTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const { | |||
18848 | if (SrcVT.isVector() || DstVT.isVector() || !SrcVT.isInteger() || | |||
18849 | !DstVT.isInteger()) | |||
18850 | return false; | |||
18851 | unsigned SrcBits = SrcVT.getSizeInBits(); | |||
18852 | unsigned DestBits = DstVT.getSizeInBits(); | |||
18853 | return (SrcBits == 64 && DestBits == 32); | |||
18854 | } | |||
18855 | ||||
18856 | bool ARMTargetLowering::isZExtFree(SDValue Val, EVT VT2) const { | |||
18857 | if (Val.getOpcode() != ISD::LOAD) | |||
18858 | return false; | |||
18859 | ||||
18860 | EVT VT1 = Val.getValueType(); | |||
18861 | if (!VT1.isSimple() || !VT1.isInteger() || | |||
18862 | !VT2.isSimple() || !VT2.isInteger()) | |||
18863 | return false; | |||
18864 | ||||
18865 | switch (VT1.getSimpleVT().SimpleTy) { | |||
18866 | default: break; | |||
18867 | case MVT::i1: | |||
18868 | case MVT::i8: | |||
18869 | case MVT::i16: | |||
18870 | // 8-bit and 16-bit loads implicitly zero-extend to 32-bits. | |||
18871 | return true; | |||
18872 | } | |||
18873 | ||||
18874 | return false; | |||
18875 | } | |||
18876 | ||||
18877 | bool ARMTargetLowering::isFNegFree(EVT VT) const { | |||
18878 | if (!VT.isSimple()) | |||
18879 | return false; | |||
18880 | ||||
18881 | // There are quite a few FP16 instructions (e.g. VNMLA, VNMLS, etc.) that | |||
18882 | // negate values directly (fneg is free). So, we don't want to let the DAG | |||
18883 | // combiner rewrite fneg into xors and some other instructions. For f16 and | |||
18884 | // FullFP16 argument passing, some bitcast nodes may be introduced, | |||
18885 | // triggering this DAG combine rewrite, so we are avoiding that with this. | |||
18886 | switch (VT.getSimpleVT().SimpleTy) { | |||
18887 | default: break; | |||
18888 | case MVT::f16: | |||
18889 | return Subtarget->hasFullFP16(); | |||
18890 | } | |||
18891 | ||||
18892 | return false; | |||
18893 | } | |||
18894 | ||||
18895 | /// Check if Ext1 and Ext2 are extends of the same type, doubling the bitwidth | |||
18896 | /// of the vector elements. | |||
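| /// For example, two operands of the form 'sext <8 x i8> to <8 x i16>' | |||
| /// qualify; that is the shape NEON widening ops such as vaddl/vsubl take. | |||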
18897 | static bool areExtractExts(Value *Ext1, Value *Ext2) { | |||
18898 | auto areExtDoubled = [](Instruction *Ext) { | |||
18899 | return Ext->getType()->getScalarSizeInBits() == | |||
18900 | 2 * Ext->getOperand(0)->getType()->getScalarSizeInBits(); | |||
18901 | }; | |||
18902 | ||||
18903 | if (!match(Ext1, m_ZExtOrSExt(m_Value())) || | |||
18904 | !match(Ext2, m_ZExtOrSExt(m_Value())) || | |||
18905 | !areExtDoubled(cast<Instruction>(Ext1)) || | |||
18906 | !areExtDoubled(cast<Instruction>(Ext2))) | |||
18907 | return false; | |||
18908 | ||||
18909 | return true; | |||
18910 | } | |||
18911 | ||||
18912 | /// Check if sinking \p I's operands to I's basic block is profitable, because | |||
18913 | /// the operands can be folded into a target instruction, e.g. | |||
18914 | /// sext/zext can be folded into vsubl. | |||
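| /// An illustrative sketch of the MVE splat case handled below: | |||
| /// %i = insertelement <4 x i32> undef, i32 %s, i32 0 | |||
| /// %v = shufflevector <4 x i32> %i, <4 x i32> undef, <4 x i32> zeroinitializer | |||
| /// %r = mul <4 x i32> %x, %v | |||
| /// Sinking the splat next to the mul lets ISel pick a vector-by-scalar form | |||
| /// instead of keeping %s live in both GPR and vector registers. | |||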
18915 | bool ARMTargetLowering::shouldSinkOperands(Instruction *I, | |||
18916 | SmallVectorImpl<Use *> &Ops) const { | |||
18917 | if (!I->getType()->isVectorTy()) | |||
18918 | return false; | |||
18919 | ||||
18920 | if (Subtarget->hasNEON()) { | |||
18921 | switch (I->getOpcode()) { | |||
18922 | case Instruction::Sub: | |||
18923 | case Instruction::Add: { | |||
18924 | if (!areExtractExts(I->getOperand(0), I->getOperand(1))) | |||
18925 | return false; | |||
18926 | Ops.push_back(&I->getOperandUse(0)); | |||
18927 | Ops.push_back(&I->getOperandUse(1)); | |||
18928 | return true; | |||
18929 | } | |||
18930 | default: | |||
18931 | return false; | |||
18932 | } | |||
18933 | } | |||
18934 | ||||
18935 | if (!Subtarget->hasMVEIntegerOps()) | |||
18936 | return false; | |||
18937 | ||||
18938 | auto IsFMSMul = [&](Instruction *I) { | |||
18939 | if (!I->hasOneUse()) | |||
18940 | return false; | |||
18941 | auto *Sub = cast<Instruction>(*I->users().begin()); | |||
18942 | return Sub->getOpcode() == Instruction::FSub && Sub->getOperand(1) == I; | |||
18943 | }; | |||
18944 | auto IsFMS = [&](Instruction *I) { | |||
18945 | if (match(I->getOperand(0), m_FNeg(m_Value())) || | |||
18946 | match(I->getOperand(1), m_FNeg(m_Value()))) | |||
18947 | return true; | |||
18948 | return false; | |||
18949 | }; | |||
18950 | ||||
18951 | auto IsSinker = [&](Instruction *I, int Operand) { | |||
18952 | switch (I->getOpcode()) { | |||
18953 | case Instruction::Add: | |||
18954 | case Instruction::Mul: | |||
18955 | case Instruction::FAdd: | |||
18956 | case Instruction::ICmp: | |||
18957 | case Instruction::FCmp: | |||
18958 | return true; | |||
18959 | case Instruction::FMul: | |||
18960 | return !IsFMSMul(I); | |||
18961 | case Instruction::Sub: | |||
18962 | case Instruction::FSub: | |||
18963 | case Instruction::Shl: | |||
18964 | case Instruction::LShr: | |||
18965 | case Instruction::AShr: | |||
18966 | return Operand == 1; | |||
18967 | case Instruction::Call: | |||
18968 | if (auto *II = dyn_cast<IntrinsicInst>(I)) { | |||
18969 | switch (II->getIntrinsicID()) { | |||
18970 | case Intrinsic::fma: | |||
18971 | return !IsFMS(I); | |||
18972 | case Intrinsic::sadd_sat: | |||
18973 | case Intrinsic::uadd_sat: | |||
18974 | case Intrinsic::arm_mve_add_predicated: | |||
18975 | case Intrinsic::arm_mve_mul_predicated: | |||
18976 | case Intrinsic::arm_mve_qadd_predicated: | |||
18977 | case Intrinsic::arm_mve_vhadd: | |||
18978 | case Intrinsic::arm_mve_hadd_predicated: | |||
18979 | case Intrinsic::arm_mve_vqdmull: | |||
18980 | case Intrinsic::arm_mve_vqdmull_predicated: | |||
18981 | case Intrinsic::arm_mve_vqdmulh: | |||
18982 | case Intrinsic::arm_mve_qdmulh_predicated: | |||
18983 | case Intrinsic::arm_mve_vqrdmulh: | |||
18984 | case Intrinsic::arm_mve_qrdmulh_predicated: | |||
18985 | case Intrinsic::arm_mve_fma_predicated: | |||
18986 | return true; | |||
18987 | case Intrinsic::ssub_sat: | |||
18988 | case Intrinsic::usub_sat: | |||
18989 | case Intrinsic::arm_mve_sub_predicated: | |||
18990 | case Intrinsic::arm_mve_qsub_predicated: | |||
18991 | case Intrinsic::arm_mve_hsub_predicated: | |||
18992 | case Intrinsic::arm_mve_vhsub: | |||
18993 | return Operand == 1; | |||
18994 | default: | |||
18995 | return false; | |||
18996 | } | |||
18997 | } | |||
18998 | return false; | |||
18999 | default: | |||
19000 | return false; | |||
19001 | } | |||
19002 | }; | |||
19003 | ||||
19004 | for (auto OpIdx : enumerate(I->operands())) { | |||
19005 | Instruction *Op = dyn_cast<Instruction>(OpIdx.value().get()); | |||
19006 | // Make sure we are not already sinking this operand | |||
19007 | if (!Op || any_of(Ops, [&](Use *U) { return U->get() == Op; })) | |||
19008 | continue; | |||
19009 | ||||
19010 | Instruction *Shuffle = Op; | |||
19011 | if (Shuffle->getOpcode() == Instruction::BitCast) | |||
19012 | Shuffle = dyn_cast<Instruction>(Shuffle->getOperand(0)); | |||
19013 | // We are looking for a splat that can be sunk. | |||
19014 | if (!Shuffle || | |||
19015 | !match(Shuffle, m_Shuffle( | |||
19016 | m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()), | |||
19017 | m_Undef(), m_ZeroMask()))) | |||
19018 | continue; | |||
19019 | if (!IsSinker(I, OpIdx.index())) | |||
19020 | continue; | |||
19021 | ||||
19022 | // All uses of the shuffle should be sunk to avoid duplicating it across gpr | |||
19023 | // and vector registers | |||
19024 | for (Use &U : Op->uses()) { | |||
19025 | Instruction *Insn = cast<Instruction>(U.getUser()); | |||
19026 | if (!IsSinker(Insn, U.getOperandNo())) | |||
19027 | return false; | |||
19028 | } | |||
19029 | ||||
19030 | Ops.push_back(&Shuffle->getOperandUse(0)); | |||
19031 | if (Shuffle != Op) | |||
19032 | Ops.push_back(&Op->getOperandUse(0)); | |||
19033 | Ops.push_back(&OpIdx.value()); | |||
19034 | } | |||
19035 | return true; | |||
19036 | } | |||
19037 | ||||
19038 | Type *ARMTargetLowering::shouldConvertSplatType(ShuffleVectorInst *SVI) const { | |||
19039 | if (!Subtarget->hasMVEIntegerOps()) | |||
19040 | return nullptr; | |||
19041 | Type *SVIType = SVI->getType(); | |||
19042 | Type *ScalarType = SVIType->getScalarType(); | |||
19043 | ||||
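| // Splatting via the equally sized integer type lets the splat become a | |||
| // plain VDUP from a GPR, which is likely why only float->i32 and | |||
| // half->i16 are converted here. | |||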
19044 | if (ScalarType->isFloatTy()) | |||
19045 | return Type::getInt32Ty(SVIType->getContext()); | |||
19046 | if (ScalarType->isHalfTy()) | |||
19047 | return Type::getInt16Ty(SVIType->getContext()); | |||
19048 | return nullptr; | |||
19049 | } | |||
19050 | ||||
19051 | bool ARMTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const { | |||
19052 | EVT VT = ExtVal.getValueType(); | |||
19053 | ||||
19054 | if (!isTypeLegal(VT)) | |||
19055 | return false; | |||
19056 | ||||
19057 | if (auto *Ld = dyn_cast<MaskedLoadSDNode>(ExtVal.getOperand(0))) { | |||
19058 | if (Ld->isExpandingLoad()) | |||
19059 | return false; | |||
19060 | } | |||
19061 | ||||
19062 | if (Subtarget->hasMVEIntegerOps()) | |||
19063 | return true; | |||
19064 | ||||
19065 | // Don't create a loadext if we can fold the extension into a wide/long | |||
19066 | // instruction. | |||
19067 | // If there's more than one user instruction, the loadext is desirable no | |||
19068 | // matter what. Note that a single instruction can also use the value twice. | |||
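| // For example, a sextload with a single ADD user is kept as a separate | |||
| // load + extend so the extend can fold into a widening op such as VADDL. | |||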
19069 | if (ExtVal->use_empty() || | |||
19070 | !ExtVal->use_begin()->isOnlyUserOf(ExtVal.getNode())) | |||
19071 | return true; | |||
19072 | ||||
19073 | SDNode *U = *ExtVal->use_begin(); | |||
19074 | if ((U->getOpcode() == ISD::ADD || U->getOpcode() == ISD::SUB || | |||
19075 | U->getOpcode() == ISD::SHL || U->getOpcode() == ARMISD::VSHLIMM)) | |||
19076 | return false; | |||
19077 | ||||
19078 | return true; | |||
19079 | } | |||
19080 | ||||
19081 | bool ARMTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const { | |||
19082 | if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) | |||
19083 | return false; | |||
19084 | ||||
19085 | if (!isTypeLegal(EVT::getEVT(Ty1))) | |||
19086 | return false; | |||
19087 | ||||
19088 | assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop"); | |||
19089 | ||||
19090 | // Assuming the caller doesn't have a zeroext or signext return parameter, | |||
19091 | // truncation all the way down to i1 is valid. | |||
19092 | return true; | |||
19093 | } | |||
19094 | ||||
19095 | InstructionCost ARMTargetLowering::getScalingFactorCost(const DataLayout &DL, | |||
19096 | const AddrMode &AM, | |||
19097 | Type *Ty, | |||
19098 | unsigned AS) const { | |||
19099 | if (isLegalAddressingMode(DL, AM, Ty, AS)) { | |||
19100 | if (Subtarget->hasFPAO()) | |||
19101 | return AM.Scale < 0 ? 1 : 0; // positive offsets execute faster | |||
19102 | return 0; | |||
19103 | } | |||
19104 | return -1; | |||
19105 | } | |||
19106 | ||||
19107 | /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster | |||
19108 | /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be | |||
19109 | /// expanded to FMAs when this method returns true, otherwise fmuladd is | |||
19110 | /// expanded to fmul + fadd. | |||
19111 | /// | |||
19112 | /// ARM supports both fused and unfused multiply-add operations; we already | |||
19113 | /// lower a pair of fmul and fadd to the latter so it's not clear that there | |||
19114 | /// would be a gain or that the gain would be worthwhile enough to risk | |||
19115 | /// correctness bugs. | |||
19116 | /// | |||
19117 | /// For MVE, we set this to true as it helps simplify the need for some | |||
19118 | /// patterns (and we don't have the non-fused floating point instruction). | |||
19119 | bool ARMTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, | |||
19120 | EVT VT) const { | |||
19121 | if (!VT.isSimple()) | |||
19122 | return false; | |||
19123 | ||||
19124 | switch (VT.getSimpleVT().SimpleTy) { | |||
19125 | case MVT::v4f32: | |||
19126 | case MVT::v8f16: | |||
19127 | return Subtarget->hasMVEFloatOps(); | |||
19128 | case MVT::f16: | |||
19129 | return Subtarget->useFPVFMx16(); | |||
19130 | case MVT::f32: | |||
19131 | return Subtarget->useFPVFMx(); | |||
19132 | case MVT::f64: | |||
19133 | return Subtarget->useFPVFMx64(); | |||
19134 | default: | |||
19135 | break; | |||
19136 | } | |||
19137 | ||||
19138 | return false; | |||
19139 | } | |||
19140 | ||||
19141 | static bool isLegalT1AddressImmediate(int64_t V, EVT VT) { | |||
19142 | if (V < 0) | |||
19143 | return false; | |||
19144 | ||||
19145 | unsigned Scale = 1; | |||
19146 | switch (VT.getSimpleVT().SimpleTy) { | |||
19147 | case MVT::i1: | |||
19148 | case MVT::i8: | |||
19149 | // Scale == 1; | |||
19150 | break; | |||
19151 | case MVT::i16: | |||
19152 | // Scale == 2; | |||
19153 | Scale = 2; | |||
19154 | break; | |||
19155 | default: | |||
19156 | // On thumb1 we load most things (i32, i64, floats, etc) with a LDR | |||
19157 | // Scale == 4; | |||
19158 | Scale = 4; | |||
19159 | break; | |||
19160 | } | |||
19161 | ||||
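| // e.g. for an i16 access (Scale == 2) this accepts the even offsets 0..62, | |||
| // matching the scaled 5-bit immediate of Thumb1 LDRH/STRH. | |||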
19162 | if ((V & (Scale - 1)) != 0) | |||
19163 | return false; | |||
19164 | return isUInt<5>(V / Scale); | |||
19165 | } | |||
19166 | ||||
19167 | static bool isLegalT2AddressImmediate(int64_t V, EVT VT, | |||
19168 | const ARMSubtarget *Subtarget) { | |||
19169 | if (!VT.isInteger() && !VT.isFloatingPoint()) | |||
19170 | return false; | |||
19171 | if (VT.isVector() && Subtarget->hasNEON()) | |||
19172 | return false; | |||
19173 | if (VT.isVector() && VT.isFloatingPoint() && Subtarget->hasMVEIntegerOps() && | |||
19174 | !Subtarget->hasMVEFloatOps()) | |||
19175 | return false; | |||
19176 | ||||
19177 | bool IsNeg = false; | |||
19178 | if (V < 0) { | |||
19179 | IsNeg = true; | |||
19180 | V = -V; | |||
19181 | } | |||
19182 | ||||
19183 | unsigned NumBytes = std::max((unsigned)VT.getSizeInBits() / 8, 1U); | |||
19184 | ||||
19185 | // MVE: size * imm7 | |||
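| // e.g. a v4i32 access accepts offset magnitudes of the form imm7 * 4, | |||
| // i.e. 0..508 in steps of 4, as used by VLDRW/VSTRW. | |||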
19186 | if (VT.isVector() && Subtarget->hasMVEIntegerOps()) { | |||
19187 | switch (VT.getSimpleVT().getVectorElementType().SimpleTy) { | |||
19188 | case MVT::i32: | |||
19189 | case MVT::f32: | |||
19190 | return isShiftedUInt<7,2>(V); | |||
19191 | case MVT::i16: | |||
19192 | case MVT::f16: | |||
19193 | return isShiftedUInt<7,1>(V); | |||
19194 | case MVT::i8: | |||
19195 | return isUInt<7>(V); | |||
19196 | default: | |||
19197 | return false; | |||
19198 | } | |||
19199 | } | |||
19200 | ||||
19201 | // half VLDR: 2 * imm8 | |||
19202 | if (VT.isFloatingPoint() && NumBytes == 2 && Subtarget->hasFPRegs16()) | |||
19203 | return isShiftedUInt<8, 1>(V); | |||
19204 | // VLDR and LDRD: 4 * imm8 | |||
19205 | if ((VT.isFloatingPoint() && Subtarget->hasVFP2Base()) || NumBytes == 8) | |||
19206 | return isShiftedUInt<8, 2>(V); | |||
19207 | ||||
19208 | if (NumBytes == 1 || NumBytes == 2 || NumBytes == 4) { | |||
19209 | // + imm12 or - imm8 | |||
19210 | if (IsNeg) | |||
19211 | return isUInt<8>(V); | |||
19212 | return isUInt<12>(V); | |||
19213 | } | |||
19214 | ||||
19215 | return false; | |||
19216 | } | |||
19217 | ||||
19218 | /// isLegalAddressImmediate - Return true if the integer value can be used | |||
19219 | /// as the offset of the target addressing mode for load / store of the | |||
19220 | /// given type. | |||
19221 | static bool isLegalAddressImmediate(int64_t V, EVT VT, | |||
19222 | const ARMSubtarget *Subtarget) { | |||
19223 | if (V == 0) | |||
19224 | return true; | |||
19225 | ||||
19226 | if (!VT.isSimple()) | |||
19227 | return false; | |||
19228 | ||||
19229 | if (Subtarget->isThumb1Only()) | |||
19230 | return isLegalT1AddressImmediate(V, VT); | |||
19231 | else if (Subtarget->isThumb2()) | |||
19232 | return isLegalT2AddressImmediate(V, VT, Subtarget); | |||
19233 | ||||
19234 | // ARM mode. | |||
19235 | if (V < 0) | |||
19236 | V = -V; | |||
19237 | switch (VT.getSimpleVT().SimpleTy) { | |||
19238 | default: return false; | |||
19239 | case MVT::i1: | |||
19240 | case MVT::i8: | |||
19241 | case MVT::i32: | |||
19242 | // +- imm12 | |||
19243 | return isUInt<12>(V); | |||
19244 | case MVT::i16: | |||
19245 | // +- imm8 | |||
19246 | return isUInt<8>(V); | |||
19247 | case MVT::f32: | |||
19248 | case MVT::f64: | |||
19249 | if (!Subtarget->hasVFP2Base()) // FIXME: NEON? | |||
19250 | return false; | |||
19251 | return isShiftedUInt<8, 2>(V); | |||
19252 | } | |||
19253 | } | |||
19254 | ||||
19255 | bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM, | |||
19256 | EVT VT) const { | |||
19257 | int Scale = AM.Scale; | |||
19258 | if (Scale < 0) | |||
19259 | return false; | |||
19260 | ||||
19261 | switch (VT.getSimpleVT().SimpleTy) { | |||
19262 | default: return false; | |||
19263 | case MVT::i1: | |||
19264 | case MVT::i8: | |||
19265 | case MVT::i16: | |||
19266 | case MVT::i32: | |||
19267 | if (Scale == 1) | |||
19268 | return true; | |||
19269 | // r + r << imm | |||
19270 | Scale = Scale & ~1; | |||
19271 | return Scale == 2 || Scale == 4 || Scale == 8; | |||
19272 | case MVT::i64: | |||
19273 | // FIXME: What are we trying to model here? ldrd doesn't have an r + r | |||
19274 | // version in Thumb mode. | |||
19275 | // r + r | |||
19276 | if (Scale == 1) | |||
19277 | return true; | |||
19278 | // r * 2 (this can be lowered to r + r). | |||
19279 | if (!AM.HasBaseReg && Scale == 2) | |||
19280 | return true; | |||
19281 | return false; | |||
19282 | case MVT::isVoid: | |||
19283 | // Note, we allow "void" uses (basically, uses that aren't loads or | |||
19284 | // stores), because arm allows folding a scale into many arithmetic | |||
19285 | // operations. This should be made more precise and revisited later. | |||
19286 | ||||
19287 | // Allow r << imm, but the imm has to be a multiple of two. | |||
19288 | if (Scale & 1) return false; | |||
19289 | return isPowerOf2_32(Scale); | |||
19290 | } | |||
19291 | } | |||
19292 | ||||
19293 | bool ARMTargetLowering::isLegalT1ScaledAddressingMode(const AddrMode &AM, | |||
19294 | EVT VT) const { | |||
19295 | const int Scale = AM.Scale; | |||
19296 | ||||
19297 | // Negative scales are not supported in Thumb1. | |||
19298 | if (Scale < 0) | |||
19299 | return false; | |||
19300 | ||||
19301 | // Thumb1 addressing modes do not support register scaling excepting the | |||
19302 | // following cases: | |||
19303 | // 1. Scale == 1 means no scaling. | |||
19304 | // 2. Scale == 2 this can be lowered to r + r if there is no base register. | |||
19305 | return (Scale == 1) || (!AM.HasBaseReg && Scale == 2); | |||
19306 | } | |||
19307 | ||||
19308 | /// isLegalAddressingMode - Return true if the addressing mode represented | |||
19309 | /// by AM is legal for this target, for a load/store of the specified type. | |||
19310 | bool ARMTargetLowering::isLegalAddressingMode(const DataLayout &DL, | |||
19311 | const AddrMode &AM, Type *Ty, | |||
19312 | unsigned AS, Instruction *I) const { | |||
19313 | EVT VT = getValueType(DL, Ty, true); | |||
19314 | if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget)) | |||
19315 | return false; | |||
19316 | ||||
19317 | // Can never fold addr of global into load/store. | |||
19318 | if (AM.BaseGV) | |||
19319 | return false; | |||
19320 | ||||
19321 | switch (AM.Scale) { | |||
19322 | case 0: // no scale reg, must be "r+i" or "r", or "i". | |||
19323 | break; | |||
19324 | default: | |||
19325 | // ARM doesn't support any R+R*scale+imm addr modes. | |||
19326 | if (AM.BaseOffs) | |||
19327 | return false; | |||
19328 | ||||
19329 | if (!VT.isSimple()) | |||
19330 | return false; | |||
19331 | ||||
19332 | if (Subtarget->isThumb1Only()) | |||
19333 | return isLegalT1ScaledAddressingMode(AM, VT); | |||
19334 | ||||
19335 | if (Subtarget->isThumb2()) | |||
19336 | return isLegalT2ScaledAddressingMode(AM, VT); | |||
19337 | ||||
19338 | int Scale = AM.Scale; | |||
19339 | switch (VT.getSimpleVT().SimpleTy) { | |||
19340 | default: return false; | |||
19341 | case MVT::i1: | |||
19342 | case MVT::i8: | |||
19343 | case MVT::i32: | |||
19344 | if (Scale < 0) Scale = -Scale; | |||
19345 | if (Scale == 1) | |||
19346 | return true; | |||
19347 | // r + r << imm | |||
19348 | return isPowerOf2_32(Scale & ~1); | |||
19349 | case MVT::i16: | |||
19350 | case MVT::i64: | |||
19351 | // r +/- r | |||
19352 | if (Scale == 1 || (AM.HasBaseReg && Scale == -1)) | |||
19353 | return true; | |||
19354 | // r * 2 (this can be lowered to r + r). | |||
19355 | if (!AM.HasBaseReg && Scale == 2) | |||
19356 | return true; | |||
19357 | return false; | |||
19358 | ||||
19359 | case MVT::isVoid: | |||
19360 | // Note, we allow "void" uses (basically, uses that aren't loads or | |||
19361 | // stores), because arm allows folding a scale into many arithmetic | |||
19362 | // operations. This should be made more precise and revisited later. | |||
19363 | ||||
19364 | // Allow r << imm, but the imm has to be a multiple of two. | |||
19365 | if (Scale & 1) return false; | |||
19366 | return isPowerOf2_32(Scale); | |||
19367 | } | |||
19368 | } | |||
19369 | return true; | |||
19370 | } | |||
19371 | ||||
19372 | /// isLegalICmpImmediate - Return true if the specified immediate is legal | |||
19373 | /// icmp immediate, that is the target has icmp instructions which can compare | |||
19374 | /// a register against the immediate without having to materialize the | |||
19375 | /// immediate into a register. | |||
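| /// For example, 'icmp eq r0, -2' is legal in ARM/Thumb2 mode because it | |||
| /// can be selected as 'cmn r0, #2'; Thumb1 has no cmn, hence 0..255 only. | |||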
19376 | bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const { | |||
19377 | // Thumb2 and ARM modes can use cmn for negative immediates. | |||
19378 | if (!Subtarget->isThumb()) | |||
19379 | return ARM_AM::getSOImmVal((uint32_t)Imm) != -1 || | |||
19380 | ARM_AM::getSOImmVal(-(uint32_t)Imm) != -1; | |||
19381 | if (Subtarget->isThumb2()) | |||
19382 | return ARM_AM::getT2SOImmVal((uint32_t)Imm) != -1 || | |||
19383 | ARM_AM::getT2SOImmVal(-(uint32_t)Imm) != -1; | |||
19384 | // Thumb1 doesn't have cmn, and cmp only takes 8-bit immediates. | |||
19385 | return Imm >= 0 && Imm <= 255; | |||
19386 | } | |||
19387 | ||||
19388 | /// isLegalAddImmediate - Return true if the specified immediate is a legal add | |||
19389 | /// *or sub* immediate, that is the target has add or sub instructions which can | |||
19390 | /// add a register with the immediate without having to materialize the | |||
19391 | /// immediate into a register. | |||
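| /// For example, an add of -8 is accepted since it can be encoded as | |||
| /// 'sub rD, rN, #8' (the same immediate encoding with the sign flipped). | |||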
19392 | bool ARMTargetLowering::isLegalAddImmediate(int64_t Imm) const { | |||
19393 | // Same encoding for add/sub, just flip the sign. | |||
19394 | int64_t AbsImm = std::abs(Imm); | |||
19395 | if (!Subtarget->isThumb()) | |||
19396 | return ARM_AM::getSOImmVal(AbsImm) != -1; | |||
19397 | if (Subtarget->isThumb2()) | |||
19398 | return ARM_AM::getT2SOImmVal(AbsImm) != -1; | |||
19399 | // Thumb1 only has an 8-bit unsigned immediate. | |||
19400 | return AbsImm >= 0 && AbsImm <= 255; | |||
19401 | } | |||
19402 | ||||
19403 | // Return false to prevent folding | |||
19404 | // (mul (add r, c0), c1) -> (add (mul r, c1), c0*c1) in DAGCombine, | |||
19405 | // if the folding leads to worse code. | |||
19406 | bool ARMTargetLowering::isMulAddWithConstProfitable(SDValue AddNode, | |||
19407 | SDValue ConstNode) const { | |||
19408 | // Let the DAGCombiner decide for vector types and large types. | |||
19409 | const EVT VT = AddNode.getValueType(); | |||
19410 | if (VT.isVector() || VT.getScalarSizeInBits() > 32) | |||
19411 | return true; | |||
19412 | ||||
19413 | // It is worse if c0 is legal add immediate, while c1*c0 is not | |||
19414 | // and has to be composed by at least two instructions. | |||
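| // e.g. with c0 = 1 and c1 = 0x12340078, c0*c1 needs a movw+movt pair, so | |||
| // the fold would trade one add-immediate for a two-instruction constant. | |||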
19415 | const ConstantSDNode *C0Node = cast<ConstantSDNode>(AddNode.getOperand(1)); | |||
19416 | const ConstantSDNode *C1Node = cast<ConstantSDNode>(ConstNode); | |||
19417 | const int64_t C0 = C0Node->getSExtValue(); | |||
19418 | APInt CA = C0Node->getAPIntValue() * C1Node->getAPIntValue(); | |||
19419 | if (!isLegalAddImmediate(C0) || isLegalAddImmediate(CA.getSExtValue())) | |||
19420 | return true; | |||
19421 | if (ConstantMaterializationCost((unsigned)CA.getZExtValue(), Subtarget) > 1) | |||
19422 | return false; | |||
19423 | ||||
19424 | // Default to true and let the DAGCombiner decide. | |||
19425 | return true; | |||
19426 | } | |||
19427 | ||||
19428 | static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT, | |||
19429 | bool isSEXTLoad, SDValue &Base, | |||
19430 | SDValue &Offset, bool &isInc, | |||
19431 | SelectionDAG &DAG) { | |||
19432 | if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) | |||
19433 | return false; | |||
19434 | ||||
19435 | if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) { | |||
19436 | // AddressingMode 3 | |||
19437 | Base = Ptr->getOperand(0); | |||
19438 | if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { | |||
19439 | int RHSC = (int)RHS->getZExtValue(); | |||
19440 | if (RHSC < 0 && RHSC > -256) { | |||
19441 | assert(Ptr->getOpcode() == ISD::ADD); | |||
19442 | isInc = false; | |||
19443 | Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0)); | |||
19444 | return true; | |||
19445 | } | |||
19446 | } | |||
19447 | isInc = (Ptr->getOpcode() == ISD::ADD); | |||
19448 | Offset = Ptr->getOperand(1); | |||
19449 | return true; | |||
19450 | } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) { | |||
19451 | // AddressingMode 2 | |||
19452 | if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { | |||
19453 | int RHSC = (int)RHS->getZExtValue(); | |||
19454 | if (RHSC < 0 && RHSC > -0x1000) { | |||
19455 | assert(Ptr->getOpcode() == ISD::ADD); | |||
19456 | isInc = false; | |||
19457 | Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0)); | |||
19458 | Base = Ptr->getOperand(0); | |||
19459 | return true; | |||
19460 | } | |||
19461 | } | |||
19462 | ||||
19463 | if (Ptr->getOpcode() == ISD::ADD) { | |||
19464 | isInc = true; | |||
19465 | ARM_AM::ShiftOpc ShOpcVal= | |||
19466 | ARM_AM::getShiftOpcForNode(Ptr->getOperand(0).getOpcode()); | |||
19467 | if (ShOpcVal != ARM_AM::no_shift) { | |||
19468 | Base = Ptr->getOperand(1); | |||
19469 | Offset = Ptr->getOperand(0); | |||
19470 | } else { | |||
19471 | Base = Ptr->getOperand(0); | |||
19472 | Offset = Ptr->getOperand(1); | |||
19473 | } | |||
19474 | return true; | |||
19475 | } | |||
19476 | ||||
19477 | isInc = (Ptr->getOpcode() == ISD::ADD); | |||
19478 | Base = Ptr->getOperand(0); | |||
19479 | Offset = Ptr->getOperand(1); | |||
19480 | return true; | |||
19481 | } | |||
19482 | ||||
19483 | // FIXME: Use VLDM / VSTM to emulate indexed FP load / store. | |||
19484 | return false; | |||
19485 | } | |||
19486 | ||||
19487 | static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT, | |||
19488 | bool isSEXTLoad, SDValue &Base, | |||
19489 | SDValue &Offset, bool &isInc, | |||
19490 | SelectionDAG &DAG) { | |||
19491 | if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) | |||
19492 | return false; | |||
19493 | ||||
19494 | Base = Ptr->getOperand(0); | |||
19495 | if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { | |||
19496 | int RHSC = (int)RHS->getZExtValue(); | |||
19497 | if (RHSC < 0 && RHSC > -0x100) { // 8 bits. | |||
19498 | assert(Ptr->getOpcode() == ISD::ADD); | |||
19499 | isInc = false; | |||
19500 | Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0)); | |||
19501 | return true; | |||
19502 | } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero. | |||
19503 | isInc = Ptr->getOpcode() == ISD::ADD; | |||
19504 | Offset = DAG.getConstant(RHSC, SDLoc(Ptr), RHS->getValueType(0)); | |||
19505 | return true; | |||
19506 | } | |||
19507 | } | |||
19508 | ||||
19509 | return false; | |||
19510 | } | |||
19511 | ||||
19512 | static bool getMVEIndexedAddressParts(SDNode *Ptr, EVT VT, Align Alignment, | |||
19513 | bool isSEXTLoad, bool IsMasked, bool isLE, | |||
19514 | SDValue &Base, SDValue &Offset, | |||
19515 | bool &isInc, SelectionDAG &DAG) { | |||
19516 | if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) | |||
19517 | return false; | |||
19518 | if (!isa<ConstantSDNode>(Ptr->getOperand(1))) | |||
19519 | return false; | |||
19520 | ||||
19521 | // We allow LE non-masked loads to change the type (for example use a vldrb.8 | |||
19522 | // as opposed to a vldrw.32). This can allow extra addressing modes or | |||
19523 | // alignments for what is otherwise an equivalent instruction. | |||
19524 | bool CanChangeType = isLE && !IsMasked; | |||
19525 | ||||
19526 | ConstantSDNode *RHS = cast<ConstantSDNode>(Ptr->getOperand(1)); | |||
19527 | int RHSC = (int)RHS->getZExtValue(); | |||
19528 | ||||
19529 | auto IsInRange = [&](int RHSC, int Limit, int Scale) { | |||
19530 | if (RHSC < 0 && RHSC > -Limit * Scale && RHSC % Scale == 0) { | |||
19531 | assert(Ptr->getOpcode() == ISD::ADD); | |||
19532 | isInc = false; | |||
19533 | Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0)); | |||
19534 | return true; | |||
19535 | } else if (RHSC > 0 && RHSC < Limit * Scale && RHSC % Scale == 0) { | |||
19536 | isInc = Ptr->getOpcode() == ISD::ADD; | |||
19537 | Offset = DAG.getConstant(RHSC, SDLoc(Ptr), RHS->getValueType(0)); | |||
19538 | return true; | |||
19539 | } | |||
19540 | return false; | |||
19541 | }; | |||
19542 | ||||
19543 | // Try to find a matching instruction based on s/zext, Alignment, Offset and | |||
19544 | // (in BE/masked) type. | |||
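| // e.g. for v8i16 with alignment >= 2, offsets that are multiples of 2 and | |||
| // below 0x80 * 2 in magnitude are accepted, matching the scaled imm7 field | |||
| // of VLDRH/VSTRH. | |||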
19545 | Base = Ptr->getOperand(0); | |||
19546 | if (VT == MVT::v4i16) { | |||
19547 | if (Alignment >= 2 && IsInRange(RHSC, 0x80, 2)) | |||
19548 | return true; | |||
19549 | } else if (VT == MVT::v4i8 || VT == MVT::v8i8) { | |||
19550 | if (IsInRange(RHSC, 0x80, 1)) | |||
19551 | return true; | |||
19552 | } else if (Alignment >= 4 && | |||
19553 | (CanChangeType || VT == MVT::v4i32 || VT == MVT::v4f32) && | |||
19554 | IsInRange(RHSC, 0x80, 4)) | |||
19555 | return true; | |||
19556 | else if (Alignment >= 2 && | |||
19557 | (CanChangeType || VT == MVT::v8i16 || VT == MVT::v8f16) && | |||
19558 | IsInRange(RHSC, 0x80, 2)) | |||
19559 | return true; | |||
19560 | else if ((CanChangeType || VT == MVT::v16i8) && IsInRange(RHSC, 0x80, 1)) | |||
19561 | return true; | |||
19562 | return false; | |||
19563 | } | |||
19564 | ||||
19565 | /// getPreIndexedAddressParts - returns true by value, base pointer and | |||
19566 | /// offset pointer and addressing mode by reference if the node's address | |||
19567 | /// can be legally represented as pre-indexed load / store address. | |||
19568 | bool | |||
19569 | ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, | |||
19570 | SDValue &Offset, | |||
19571 | ISD::MemIndexedMode &AM, | |||
19572 | SelectionDAG &DAG) const { | |||
19573 | if (Subtarget->isThumb1Only()) | |||
19574 | return false; | |||
19575 | ||||
19576 | EVT VT; | |||
19577 | SDValue Ptr; | |||
19578 | Align Alignment; | |||
19579 | bool isSEXTLoad = false; | |||
19580 | bool IsMasked = false; | |||
19581 | if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { | |||
19582 | Ptr = LD->getBasePtr(); | |||
19583 | VT = LD->getMemoryVT(); | |||
19584 | Alignment = LD->getAlign(); | |||
19585 | isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; | |||
19586 | } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { | |||
19587 | Ptr = ST->getBasePtr(); | |||
19588 | VT = ST->getMemoryVT(); | |||
19589 | Alignment = ST->getAlign(); | |||
19590 | } else if (MaskedLoadSDNode *LD = dyn_cast<MaskedLoadSDNode>(N)) { | |||
19591 | Ptr = LD->getBasePtr(); | |||
19592 | VT = LD->getMemoryVT(); | |||
19593 | Alignment = LD->getAlign(); | |||
19594 | isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; | |||
19595 | IsMasked = true; | |||
19596 | } else if (MaskedStoreSDNode *ST = dyn_cast<MaskedStoreSDNode>(N)) { | |||
19597 | Ptr = ST->getBasePtr(); | |||
19598 | VT = ST->getMemoryVT(); | |||
19599 | Alignment = ST->getAlign(); | |||
19600 | IsMasked = true; | |||
19601 | } else | |||
19602 | return false; | |||
19603 | ||||
19604 | bool isInc; | |||
19605 | bool isLegal = false; | |||
19606 | if (VT.isVector()) | |||
19607 | isLegal = Subtarget->hasMVEIntegerOps() && | |||
19608 | getMVEIndexedAddressParts( | |||
19609 | Ptr.getNode(), VT, Alignment, isSEXTLoad, IsMasked, | |||
19610 | Subtarget->isLittle(), Base, Offset, isInc, DAG); | |||
19611 | else { | |||
19612 | if (Subtarget->isThumb2()) | |||
19613 | isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, | |||
19614 | Offset, isInc, DAG); | |||
19615 | else | |||
19616 | isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, | |||
19617 | Offset, isInc, DAG); | |||
19618 | } | |||
19619 | if (!isLegal) | |||
19620 | return false; | |||
19621 | ||||
19622 | AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC; | |||
19623 | return true; | |||
19624 | } | |||
19625 | ||||
19626 | /// getPostIndexedAddressParts - returns true by value, base pointer and | |||
19627 | /// offset pointer and addressing mode by reference if this node can be | |||
19628 | /// combined with a load / store to form a post-indexed load / store. | |||
19629 | bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op, | |||
19630 | SDValue &Base, | |||
19631 | SDValue &Offset, | |||
19632 | ISD::MemIndexedMode &AM, | |||
19633 | SelectionDAG &DAG) const { | |||
19634 | EVT VT; | |||
19635 | SDValue Ptr; | |||
19636 | Align Alignment; | |||
19637 | bool isSEXTLoad = false, isNonExt; | |||
19638 | bool IsMasked = false; | |||
19639 | if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { | |||
19640 | VT = LD->getMemoryVT(); | |||
19641 | Ptr = LD->getBasePtr(); | |||
19642 | Alignment = LD->getAlign(); | |||
19643 | isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; | |||
19644 | isNonExt = LD->getExtensionType() == ISD::NON_EXTLOAD; | |||
19645 | } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { | |||
19646 | VT = ST->getMemoryVT(); | |||
19647 | Ptr = ST->getBasePtr(); | |||
19648 | Alignment = ST->getAlign(); | |||
19649 | isNonExt = !ST->isTruncatingStore(); | |||
19650 | } else if (MaskedLoadSDNode *LD = dyn_cast<MaskedLoadSDNode>(N)) { | |||
19651 | VT = LD->getMemoryVT(); | |||
19652 | Ptr = LD->getBasePtr(); | |||
19653 | Alignment = LD->getAlign(); | |||
19654 | isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; | |||
19655 | isNonExt = LD->getExtensionType() == ISD::NON_EXTLOAD; | |||
19656 | IsMasked = true; | |||
19657 | } else if (MaskedStoreSDNode *ST = dyn_cast<MaskedStoreSDNode>(N)) { | |||
19658 | VT = ST->getMemoryVT(); | |||
19659 | Ptr = ST->getBasePtr(); | |||
19660 | Alignment = ST->getAlign(); | |||
19661 | isNonExt = !ST->isTruncatingStore(); | |||
19662 | IsMasked = true; | |||
19663 | } else | |||
19664 | return false; | |||
19665 | ||||
19666 | if (Subtarget->isThumb1Only()) { | |||
19667 | // Thumb-1 can do a limited post-inc load or store as an updating LDM. It | |||
19668 | // must be non-extending/truncating, i32, with an offset of 4. | |||
19669 | assert(Op->getValueType(0) == MVT::i32 && "Non-i32 post-inc op?!"); | |||
19670 | if (Op->getOpcode() != ISD::ADD || !isNonExt) | |||
19671 | return false; | |||
19672 | auto *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1)); | |||
19673 | if (!RHS || RHS->getZExtValue() != 4) | |||
19674 | return false; | |||
19675 | if (Alignment < Align(4)) | |||
19676 | return false; | |||
19677 | ||||
19678 | Offset = Op->getOperand(1); | |||
19679 | Base = Op->getOperand(0); | |||
19680 | AM = ISD::POST_INC; | |||
19681 | return true; | |||
19682 | } | |||
19683 | ||||
19684 | bool isInc; | |||
19685 | bool isLegal = false; | |||
19686 | if (VT.isVector()) | |||
19687 | isLegal = Subtarget->hasMVEIntegerOps() && | |||
19688 | getMVEIndexedAddressParts(Op, VT, Alignment, isSEXTLoad, IsMasked, | |||
19689 | Subtarget->isLittle(), Base, Offset, | |||
19690 | isInc, DAG); | |||
19691 | else { | |||
19692 | if (Subtarget->isThumb2()) | |||
19693 | isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, | |||
19694 | isInc, DAG); | |||
19695 | else | |||
19696 | isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, | |||
19697 | isInc, DAG); | |||
19698 | } | |||
19699 | if (!isLegal) | |||
19700 | return false; | |||
19701 | ||||
19702 | if (Ptr != Base) { | |||
19703 | // Swap base ptr and offset to catch more post-index load / store when | |||
19704 | // it's legal. In Thumb2 mode, offset must be an immediate. | |||
19705 | if (Ptr == Offset && Op->getOpcode() == ISD::ADD && | |||
19706 | !Subtarget->isThumb2()) | |||
19707 | std::swap(Base, Offset); | |||
19708 | ||||
19709 | // Post-indexed load / store update the base pointer. | |||
19710 | if (Ptr != Base) | |||
19711 | return false; | |||
19712 | } | |||
19713 | ||||
19714 | AM = isInc ? ISD::POST_INC : ISD::POST_DEC; | |||
19715 | return true; | |||
19716 | } | |||
19717 | ||||
19718 | void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op, | |||
19719 | KnownBits &Known, | |||
19720 | const APInt &DemandedElts, | |||
19721 | const SelectionDAG &DAG, | |||
19722 | unsigned Depth) const { | |||
19723 | unsigned BitWidth = Known.getBitWidth(); | |||
19724 | Known.resetAll(); | |||
19725 | switch (Op.getOpcode()) { | |||
19726 | default: break; | |||
19727 | case ARMISD::ADDC: | |||
19728 | case ARMISD::ADDE: | |||
19729 | case ARMISD::SUBC: | |||
19730 | case ARMISD::SUBE: | |||
19731 | // Special cases when we convert a carry to a boolean. | |||
19732 | if (Op.getResNo() == 0) { | |||
19733 | SDValue LHS = Op.getOperand(0); | |||
19734 | SDValue RHS = Op.getOperand(1); | |||
19735 | // (ADDE 0, 0, C) will give us a single bit. | |||
19736 | if (Op->getOpcode() == ARMISD::ADDE && isNullConstant(LHS) && | |||
19737 | isNullConstant(RHS)) { | |||
19738 | Known.Zero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1); | |||
19739 | return; | |||
19740 | } | |||
19741 | } | |||
19742 | break; | |||
19743 | case ARMISD::CMOV: { | |||
19744 | // Bits are known zero/one if known on the LHS and RHS. | |||
19745 | Known = DAG.computeKnownBits(Op.getOperand(0), Depth+1); | |||
19746 | if (Known.isUnknown()) | |||
19747 | return; | |||
19748 | ||||
19749 | KnownBits KnownRHS = DAG.computeKnownBits(Op.getOperand(1), Depth+1); | |||
19750 | Known = KnownBits::commonBits(Known, KnownRHS); | |||
19751 | return; | |||
19752 | } | |||
19753 | case ISD::INTRINSIC_W_CHAIN: { | |||
19754 | ConstantSDNode *CN = cast<ConstantSDNode>(Op->getOperand(1)); | |||
19755 | Intrinsic::ID IntID = static_cast<Intrinsic::ID>(CN->getZExtValue()); | |||
19756 | switch (IntID) { | |||
19757 | default: return; | |||
19758 | case Intrinsic::arm_ldaex: | |||
19759 | case Intrinsic::arm_ldrex: { | |||
19760 | EVT VT = cast<MemIntrinsicSDNode>(Op)->getMemoryVT(); | |||
19761 | unsigned MemBits = VT.getScalarSizeInBits(); | |||
19762 | Known.Zero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits); | |||
19763 | return; | |||
19764 | } | |||
19765 | } | |||
19766 | } | |||
19767 | case ARMISD::BFI: { | |||
19768 | // Conservatively, we can recurse down the first operand | |||
19769 | // and just mask out all affected bits. | |||
19770 | Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1); | |||
19771 | ||||
19772 | // The operand to BFI is already a mask suitable for removing the bits it | |||
19773 | // sets. | |||
19774 | ConstantSDNode *CI = cast<ConstantSDNode>(Op.getOperand(2)); | |||
19775 | const APInt &Mask = CI->getAPIntValue(); | |||
19776 | Known.Zero &= Mask; | |||
19777 | Known.One &= Mask; | |||
19778 | return; | |||
19779 | } | |||
19780 | case ARMISD::VGETLANEs: | |||
19781 | case ARMISD::VGETLANEu: { | |||
19782 | const SDValue &SrcSV = Op.getOperand(0); | |||
19783 | EVT VecVT = SrcSV.getValueType(); | |||
19784 | assert(VecVT.isVector() && "VGETLANE expected a vector type"); | |||
19785 | const unsigned NumSrcElts = VecVT.getVectorNumElements(); | |||
19786 | ConstantSDNode *Pos = cast<ConstantSDNode>(Op.getOperand(1).getNode()); | |||
19787 | assert(Pos->getAPIntValue().ult(NumSrcElts) && | |||
19788 | "VGETLANE index out of bounds"); | |||
19789 | unsigned Idx = Pos->getZExtValue(); | |||
19790 | APInt DemandedElt = APInt::getOneBitSet(NumSrcElts, Idx); | |||
19791 | Known = DAG.computeKnownBits(SrcSV, DemandedElt, Depth + 1); | |||
19792 | ||||
19793 | EVT VT = Op.getValueType(); | |||
19794 | const unsigned DstSz = VT.getScalarSizeInBits(); | |||
19795 | const unsigned SrcSz = VecVT.getVectorElementType().getSizeInBits(); | |||
19796 | (void)SrcSz; | |||
19797 | assert(SrcSz == Known.getBitWidth()); | |||
19798 | assert(DstSz > SrcSz); | |||
19799 | if (Op.getOpcode() == ARMISD::VGETLANEs) | |||
19800 | Known = Known.sext(DstSz); | |||
19801 | else { | |||
19802 | Known = Known.zext(DstSz); | |||
19803 | } | |||
19804 | assert(DstSz == Known.getBitWidth()); | |||
19805 | break; | |||
19806 | } | |||
19807 | case ARMISD::VMOVrh: { | |||
19808 | KnownBits KnownOp = DAG.computeKnownBits(Op->getOperand(0), Depth + 1); | |||
19809 | assert(KnownOp.getBitWidth() == 16); | |||
19810 | Known = KnownOp.zext(32); | |||
19811 | break; | |||
19812 | } | |||
19813 | case ARMISD::CSINC: | |||
19814 | case ARMISD::CSINV: | |||
19815 | case ARMISD::CSNEG: { | |||
19816 | KnownBits KnownOp0 = DAG.computeKnownBits(Op->getOperand(0), Depth + 1); | |||
19817 | KnownBits KnownOp1 = DAG.computeKnownBits(Op->getOperand(1), Depth + 1); | |||
19818 | ||||
19819 | // The result is either: | |||
19820 | // CSINC: KnownOp0 or KnownOp1 + 1 | |||
19821 | // CSINV: KnownOp0 or ~KnownOp1 | |||
19822 | // CSNEG: KnownOp0 or KnownOp1 * -1 | |||
19823 | if (Op.getOpcode() == ARMISD::CSINC) | |||
19824 | KnownOp1 = KnownBits::computeForAddSub( | |||
19825 | true, false, KnownOp1, KnownBits::makeConstant(APInt(32, 1))); | |||
19826 | else if (Op.getOpcode() == ARMISD::CSINV) | |||
19827 | std::swap(KnownOp1.Zero, KnownOp1.One); | |||
19828 | else if (Op.getOpcode() == ARMISD::CSNEG) | |||
19829 | KnownOp1 = KnownBits::mul( | |||
19830 | KnownOp1, KnownBits::makeConstant(APInt(32, -1))); | |||
19831 | ||||
19832 | Known = KnownBits::commonBits(KnownOp0, KnownOp1); | |||
19833 | break; | |||
19834 | } | |||
19835 | } | |||
19836 | } | |||
19837 | ||||
19838 | bool ARMTargetLowering::targetShrinkDemandedConstant( | |||
19839 | SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, | |||
19840 | TargetLoweringOpt &TLO) const { | |||
19841 | // Delay optimization, so we don't have to deal with illegal types, or block | |||
19842 | // optimizations. | |||
19843 | if (!TLO.LegalOps) | |||
19844 | return false; | |||
19845 | ||||
19846 | // Only optimize AND for now. | |||
19847 | if (Op.getOpcode() != ISD::AND) | |||
19848 | return false; | |||
19849 | ||||
19850 | EVT VT = Op.getValueType(); | |||
19851 | ||||
19852 | // Ignore vectors. | |||
19853 | if (VT.isVector()) | |||
19854 | return false; | |||
19855 | ||||
19856 | assert(VT == MVT::i32 && "Unexpected integer type"); | |||
19857 | ||||
19858 | // Make sure the RHS really is a constant. | |||
19859 | ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)); | |||
19860 | if (!C) | |||
19861 | return false; | |||
19862 | ||||
19863 | unsigned Mask = C->getZExtValue(); | |||
19864 | ||||
19865 | unsigned Demanded = DemandedBits.getZExtValue(); | |||
19866 | unsigned ShrunkMask = Mask & Demanded; | |||
19867 | unsigned ExpandedMask = Mask | ~Demanded; | |||
19868 | ||||
19869 | // If the mask is all zeros, let the target-independent code replace the | |||
19870 | // result with zero. | |||
19871 | if (ShrunkMask == 0) | |||
19872 | return false; | |||
19873 | ||||
19874 | // If the mask is all ones, erase the AND. (Currently, the target-independent | |||
19875 | // code won't do this, so we have to do it explicitly to avoid an infinite | |||
19876 | // loop in obscure cases.) | |||
19877 | if (ExpandedMask == ~0U) | |||
19878 | return TLO.CombineTo(Op, Op.getOperand(0)); | |||
19879 | ||||
19880 | auto IsLegalMask = [ShrunkMask, ExpandedMask](unsigned Mask) -> bool { | |||
19881 | return (ShrunkMask & Mask) == ShrunkMask && (~ExpandedMask & Mask) == 0; | |||
19882 | }; | |||
19883 | auto UseMask = [Mask, Op, VT, &TLO](unsigned NewMask) -> bool { | |||
19884 | if (NewMask == Mask) | |||
19885 | return true; | |||
19886 | SDLoc DL(Op); | |||
19887 | SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT); | |||
19888 | SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC); | |||
19889 | return TLO.CombineTo(Op, NewOp); | |||
19890 | }; | |||
19891 | ||||
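| // Worked example: for (and r, 0x3F) where bits 6 and 7 of the result are | |||
| // never demanded, ExpandedMask becomes 0xFF, so the mask is widened to | |||
| // 0xFF below and the node can be selected as a single uxtb. | |||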
19892 | // Prefer uxtb mask. | |||
19893 | if (IsLegalMask(0xFF)) | |||
19894 | return UseMask(0xFF); | |||
19895 | ||||
19896 | // Prefer uxth mask. | |||
19897 | if (IsLegalMask(0xFFFF)) | |||
19898 | return UseMask(0xFFFF); | |||
19899 | ||||
19900 | // [1, 255] is Thumb1 movs+ands, legal immediate for ARM/Thumb2. | |||
19901 | // FIXME: Prefer a contiguous sequence of bits for other optimizations. | |||
19902 | if (ShrunkMask < 256) | |||
19903 | return UseMask(ShrunkMask); | |||
19904 | ||||
19905 | // [-256, -2] is Thumb1 movs+bics, legal immediate for ARM/Thumb2. | |||
19906 | // FIXME: Prefer a contiguous sequence of bits for other optimizations. | |||
19907 | if ((int)ExpandedMask <= -2 && (int)ExpandedMask >= -256) | |||
19908 | return UseMask(ExpandedMask); | |||
19909 | ||||
19910 | // Potential improvements: | |||
19911 | // | |||
19912 | // We could try to recognize lsls+lsrs or lsrs+lsls pairs here. | |||
19913 | // We could try to prefer Thumb1 immediates which can be lowered to a | |||
19914 | // two-instruction sequence. | |||
19915 | // We could try to recognize more legal ARM/Thumb2 immediates here. | |||
19916 | ||||
19917 | return false; | |||
19918 | } | |||
19919 | ||||
19920 | bool ARMTargetLowering::SimplifyDemandedBitsForTargetNode( | |||
19921 | SDValue Op, const APInt &OriginalDemandedBits, | |||
19922 | const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, | |||
19923 | unsigned Depth) const { | |||
19924 | unsigned Opc = Op.getOpcode(); | |||
19925 | ||||
19926 | switch (Opc) { | |||
19927 | case ARMISD::ASRL: | |||
19928 | case ARMISD::LSRL: { | |||
19929 | // If this is result 0 and the other result is unused, see if the demand | |||
19930 | // bits allow us to shrink this long shift into a standard small shift in | |||
19931 | // the opposite direction. | |||
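| // e.g. (LSRL lo, hi, 8) with only the top 8 bits of the low result demanded | |||
| // becomes (shl hi, 24): those bits come entirely from the high input. | |||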
19932 | if (Op.getResNo() == 0 && !Op->hasAnyUseOfValue(1) && | |||
19933 | isa<ConstantSDNode>(Op->getOperand(2))) { | |||
19934 | unsigned ShAmt = Op->getConstantOperandVal(2); | |||
19935 | if (ShAmt < 32 && OriginalDemandedBits.isSubsetOf(APInt::getAllOnes(32) | |||
19936 | << (32 - ShAmt))) | |||
19937 | return TLO.CombineTo( | |||
19938 | Op, TLO.DAG.getNode( | |||
19939 | ISD::SHL, SDLoc(Op), MVT::i32, Op.getOperand(1), | |||
19940 | TLO.DAG.getConstant(32 - ShAmt, SDLoc(Op), MVT::i32))); | |||
19941 | } | |||
19942 | break; | |||
19943 | } | |||
19944 | } | |||
19945 | ||||
19946 | return TargetLowering::SimplifyDemandedBitsForTargetNode( | |||
19947 | Op, OriginalDemandedBits, OriginalDemandedElts, Known, TLO, Depth); | |||
19948 | } | |||
19949 | ||||
19950 | //===----------------------------------------------------------------------===// | |||
19951 | // ARM Inline Assembly Support | |||
19952 | //===----------------------------------------------------------------------===// | |||
19953 | ||||
19954 | bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const { | |||
19955 | // Looking for "rev" which is V6+. | |||
19956 | if (!Subtarget->hasV6Ops()) | |||
19957 | return false; | |||
19958 | ||||
19959 | InlineAsm *IA = cast<InlineAsm>(CI->getCalledOperand()); | |||
19960 | std::string AsmStr = IA->getAsmString(); | |||
19961 | SmallVector<StringRef, 4> AsmPieces; | |||
19962 | SplitString(AsmStr, AsmPieces, ";\n"); | |||
19963 | ||||
19964 | switch (AsmPieces.size()) { | |||
19965 | default: return false; | |||
19966 | case 1: | |||
19967 | AsmStr = std::string(AsmPieces[0]); | |||
19968 | AsmPieces.clear(); | |||
19969 | SplitString(AsmStr, AsmPieces, " \t,"); | |||
19970 | ||||
19971 | // rev $0, $1 | |||
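| // For example (illustrative): asm("rev %0, %1" : "=l"(Out) : "l"(In)) | |||
| // on an ARMv6+ target produces exactly this pattern and is rewritten | |||
| // below as a call to llvm.bswap.i32. | |||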
19972 | if (AsmPieces.size() == 3 && | |||
19973 | AsmPieces[0] == "rev" && AsmPieces[1] == "$0" && AsmPieces[2] == "$1" && | |||
19974 | IA->getConstraintString().compare(0, 4, "=l,l") == 0) { | |||
19975 | IntegerType *Ty = dyn_cast<IntegerType>(CI->getType()); | |||
19976 | if (Ty && Ty->getBitWidth() == 32) | |||
19977 | return IntrinsicLowering::LowerToByteSwap(CI); | |||
19978 | } | |||
19979 | break; | |||
19980 | } | |||
19981 | ||||
19982 | return false; | |||
19983 | } | |||
19984 | ||||
19985 | const char *ARMTargetLowering::LowerXConstraint(EVT ConstraintVT) const { | |||
19986 | // At this point, we have to lower this constraint to something else, so we | |||
19987 | // lower it to an "r" or "w". However, by doing this we will force the result | |||
19988 | // to be in register, while the X constraint is much more permissive. | |||
19989 | // | |||
19990 | // Although we are correct (we are free to emit anything, without | |||
19991 | // constraints), we might break use cases that would expect us to be more | |||
19992 | // efficient and emit something else. | |||
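| // For example (illustrative): an "X" operand holding a float is | |||
| // re-lowered as "w" on a VFP target and ends up in an FP register, | |||
| // while on a soft-float-only target it falls through to "r" and lands | |||
| // in a core register. | |||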
19993 | if (!Subtarget->hasVFP2Base()) | |||
19994 | return "r"; | |||
19995 | if (ConstraintVT.isFloatingPoint()) | |||
19996 | return "w"; | |||
19997 | if (ConstraintVT.isVector() && Subtarget->hasNEON() && | |||
19998 | (ConstraintVT.getSizeInBits() == 64 || | |||
19999 | ConstraintVT.getSizeInBits() == 128)) | |||
20000 | return "w"; | |||
20001 | ||||
20002 | return "r"; | |||
20003 | } | |||
20004 | ||||
20005 | /// getConstraintType - Given a constraint letter, return the type of | |||
20006 | /// constraint it is for this target. | |||
20007 | ARMTargetLowering::ConstraintType | |||
20008 | ARMTargetLowering::getConstraintType(StringRef Constraint) const { | |||
20009 | unsigned S = Constraint.size(); | |||
20010 | if (S == 1) { | |||
20011 | switch (Constraint[0]) { | |||
20012 | default: break; | |||
20013 | case 'l': return C_RegisterClass; | |||
20014 | case 'w': return C_RegisterClass; | |||
20015 | case 'h': return C_RegisterClass; | |||
20016 | case 'x': return C_RegisterClass; | |||
20017 | case 't': return C_RegisterClass; | |||
20018 | case 'j': return C_Immediate; // Constant for movw. | |||
20019 | // An address with a single base register. Due to the way we | |||
20020 | // currently handle addresses it is the same as an 'r' memory constraint. | |||
20021 | case 'Q': return C_Memory; | |||
20022 | } | |||
20023 | } else if (S == 2) { | |||
20024 | switch (Constraint[0]) { | |||
20025 | default: break; | |||
20026 | case 'T': return C_RegisterClass; | |||
20027 | // All 'U+' constraints are addresses. | |||
20028 | case 'U': return C_Memory; | |||
20029 | } | |||
20030 | } | |||
20031 | return TargetLowering::getConstraintType(Constraint); | |||
20032 | } | |||
20033 | ||||
20034 | /// Examine constraint type and operand type and determine a weight value. | |||
20035 | /// This object must already have been set up with the operand type | |||
20036 | /// and the current alternative constraint selected. | |||
20037 | TargetLowering::ConstraintWeight | |||
20038 | ARMTargetLowering::getSingleConstraintMatchWeight( | |||
20039 | AsmOperandInfo &info, const char *constraint) const { | |||
20040 | ConstraintWeight weight = CW_Invalid; | |||
20041 | Value *CallOperandVal = info.CallOperandVal; | |||
20042 | // If we don't have a value, we can't do a match, | |||
20043 | // but allow it at the lowest weight. | |||
20044 | if (!CallOperandVal) | |||
20045 | return CW_Default; | |||
20046 | Type *type = CallOperandVal->getType(); | |||
20047 | // Look at the constraint type. | |||
20048 | switch (*constraint) { | |||
20049 | default: | |||
20050 | weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); | |||
20051 | break; | |||
20052 | case 'l': | |||
20053 | if (type->isIntegerTy()) { | |||
20054 | if (Subtarget->isThumb()) | |||
20055 | weight = CW_SpecificReg; | |||
20056 | else | |||
20057 | weight = CW_Register; | |||
20058 | } | |||
20059 | break; | |||
20060 | case 'w': | |||
20061 | if (type->isFloatingPointTy()) | |||
20062 | weight = CW_Register; | |||
20063 | break; | |||
20064 | } | |||
20065 | return weight; | |||
20066 | } | |||
20067 | ||||
20068 | using RCPair = std::pair<unsigned, const TargetRegisterClass *>; | |||
20069 | ||||
20070 | RCPair ARMTargetLowering::getRegForInlineAsmConstraint( | |||
20071 | const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const { | |||
20072 | switch (Constraint.size()) { | |||
20073 | case 1: | |||
20074 | // GCC ARM Constraint Letters | |||
20075 | switch (Constraint[0]) { | |||
20076 | case 'l': // Low regs or general regs. | |||
20077 | if (Subtarget->isThumb()) | |||
20078 | return RCPair(0U, &ARM::tGPRRegClass); | |||
20079 | return RCPair(0U, &ARM::GPRRegClass); | |||
20080 | case 'h': // High regs or no regs. | |||
20081 | if (Subtarget->isThumb()) | |||
20082 | return RCPair(0U, &ARM::hGPRRegClass); | |||
20083 | break; | |||
20084 | case 'r': | |||
20085 | if (Subtarget->isThumb1Only()) | |||
20086 | return RCPair(0U, &ARM::tGPRRegClass); | |||
20087 | return RCPair(0U, &ARM::GPRRegClass); | |||
20088 | case 'w': | |||
20089 | if (VT == MVT::Other) | |||
20090 | break; | |||
20091 | if (VT == MVT::f32) | |||
20092 | return RCPair(0U, &ARM::SPRRegClass); | |||
20093 | if (VT.getSizeInBits() == 64) | |||
20094 | return RCPair(0U, &ARM::DPRRegClass); | |||
20095 | if (VT.getSizeInBits() == 128) | |||
20096 | return RCPair(0U, &ARM::QPRRegClass); | |||
20097 | break; | |||
20098 | case 'x': | |||
20099 | if (VT == MVT::Other) | |||
20100 | break; | |||
20101 | if (VT == MVT::f32) | |||
20102 | return RCPair(0U, &ARM::SPR_8RegClass); | |||
20103 | if (VT.getSizeInBits() == 64) | |||
20104 | return RCPair(0U, &ARM::DPR_8RegClass); | |||
20105 | if (VT.getSizeInBits() == 128) | |||
20106 | return RCPair(0U, &ARM::QPR_8RegClass); | |||
20107 | break; | |||
20108 | case 't': | |||
20109 | if (VT == MVT::Other) | |||
20110 | break; | |||
20111 | if (VT == MVT::f32 || VT == MVT::i32) | |||
20112 | return RCPair(0U, &ARM::SPRRegClass); | |||
20113 | if (VT.getSizeInBits() == 64) | |||
20114 | return RCPair(0U, &ARM::DPR_VFP2RegClass); | |||
20115 | if (VT.getSizeInBits() == 128) | |||
20116 | return RCPair(0U, &ARM::QPR_VFP2RegClass); | |||
20117 | break; | |||
20118 | } | |||
20119 | break; | |||
20120 | ||||
20121 | case 2: | |||
20122 | if (Constraint[0] == 'T') { | |||
20123 | switch (Constraint[1]) { | |||
20124 | default: | |||
20125 | break; | |||
20126 | case 'e': | |||
20127 | return RCPair(0U, &ARM::tGPREvenRegClass); | |||
20128 | case 'o': | |||
20129 | return RCPair(0U, &ARM::tGPROddRegClass); | |||
20130 | } | |||
20131 | } | |||
20132 | break; | |||
20133 | ||||
20134 | default: | |||
20135 | break; | |||
20136 | } | |||
20137 | ||||
20138 | if (StringRef("{cc}").equals_insensitive(Constraint)) | |||
20139 | return std::make_pair(unsigned(ARM::CPSR), &ARM::CCRRegClass); | |||
20140 | ||||
20141 | return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); | |||
20142 | } | |||
20143 | ||||
20144 | /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops | |||
20145 | /// vector. If it is invalid, don't add anything to Ops. | |||
20146 | void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op, | |||
20147 | std::string &Constraint, | |||
20148 | std::vector<SDValue>&Ops, | |||
20149 | SelectionDAG &DAG) const { | |||
20150 | SDValue Result; | |||
20151 | ||||
20152 | // Currently only support length 1 constraints. | |||
20153 | if (Constraint.length() != 1) return; | |||
20154 | ||||
20155 | char ConstraintLetter = Constraint[0]; | |||
20156 | switch (ConstraintLetter) { | |||
20157 | default: break; | |||
20158 | case 'j': | |||
20159 | case 'I': case 'J': case 'K': case 'L': | |||
20160 | case 'M': case 'N': case 'O': | |||
20161 | ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); | |||
20162 | if (!C) | |||
20163 | return; | |||
20164 | ||||
20165 | int64_t CVal64 = C->getSExtValue(); | |||
20166 | int CVal = (int) CVal64; | |||
20167 | // None of these constraints allow values larger than 32 bits. Check | |||
20168 | // that the value fits in an int. | |||
20169 | if (CVal != CVal64) | |||
20170 | return; | |||
20171 | ||||
20172 | switch (ConstraintLetter) { | |||
20173 | case 'j': | |||
20174 | // Constant suitable for movw, must be between 0 and | |||
20175 | // 65535. | |||
20176 | if (Subtarget->hasV6T2Ops() || Subtarget->hasV8MBaselineOps()) | |||
20177 | if (CVal >= 0 && CVal <= 65535) | |||
20178 | break; | |||
20179 | return; | |||
20180 | case 'I': | |||
20181 | if (Subtarget->isThumb1Only()) { | |||
20182 | // This must be a constant between 0 and 255, for ADD | |||
20183 | // immediates. | |||
20184 | if (CVal >= 0 && CVal <= 255) | |||
20185 | break; | |||
20186 | } else if (Subtarget->isThumb2()) { | |||
20187 | // A constant that can be used as an immediate value in a | |||
20188 | // data-processing instruction. | |||
20189 | if (ARM_AM::getT2SOImmVal(CVal) != -1) | |||
20190 | break; | |||
20191 | } else { | |||
20192 | // A constant that can be used as an immediate value in a | |||
20193 | // data-processing instruction. | |||
20194 | if (ARM_AM::getSOImmVal(CVal) != -1) | |||
20195 | break; | |||
20196 | } | |||
20197 | return; | |||
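| // For example (illustrative): on Thumb2, 0xFF00FF00 matches the | |||
| // "byte replicated in both half-words" modified-immediate encoding, so | |||
| // getT2SOImmVal succeeds and 'I' accepts it. | |||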
20198 | ||||
20199 | case 'J': | |||
20200 | if (Subtarget->isThumb1Only()) { | |||
20201 | // This must be a constant between -255 and -1, for negated ADD | |||
20202 | // immediates. This can be used in GCC with an "n" modifier that | |||
20203 | // prints the negated value, for use with SUB instructions. It is | |||
20204 | // not useful otherwise but is implemented for compatibility. | |||
20205 | if (CVal >= -255 && CVal <= -1) | |||
20206 | break; | |||
20207 | } else { | |||
20208 | // This must be a constant between -4095 and 4095. It is not clear | |||
20209 | // what this constraint is intended for. Implemented for | |||
20210 | // compatibility with GCC. | |||
20211 | if (CVal >= -4095 && CVal <= 4095) | |||
20212 | break; | |||
20213 | } | |||
20214 | return; | |||
20215 | ||||
20216 | case 'K': | |||
20217 | if (Subtarget->isThumb1Only()) { | |||
20218 | // A 32-bit value where only one byte has a nonzero value. Exclude | |||
20219 | // zero to match GCC. This constraint is used by GCC internally for | |||
20220 | // constants that can be loaded with a move/shift combination. | |||
20221 | // It is not useful otherwise but is implemented for compatibility. | |||
20222 | if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal)) | |||
20223 | break; | |||
20224 | } else if (Subtarget->isThumb2()) { | |||
20225 | // A constant whose bitwise inverse can be used as an immediate | |||
20226 | // value in a data-processing instruction. This can be used in GCC | |||
20227 | // with a "B" modifier that prints the inverted value, for use with | |||
20228 | // BIC and MVN instructions. It is not useful otherwise but is | |||
20229 | // implemented for compatibility. | |||
20230 | if (ARM_AM::getT2SOImmVal(~CVal) != -1) | |||
20231 | break; | |||
20232 | } else { | |||
20233 | // A constant whose bitwise inverse can be used as an immediate | |||
20234 | // value in a data-processing instruction. This can be used in GCC | |||
20235 | // with a "B" modifier that prints the inverted value, for use with | |||
20236 | // BIC and MVN instructions. It is not useful otherwise but is | |||
20237 | // implemented for compatibility. | |||
20238 | if (ARM_AM::getSOImmVal(~CVal) != -1) | |||
20239 | break; | |||
20240 | } | |||
20241 | return; | |||
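| // For example (illustrative): on ARM or Thumb2, CVal = 0xFFFFFF00 is | |||
| // accepted because ~CVal = 0xFF is a valid modified immediate, suitable | |||
| // for the BIC/MVN forms described above. | |||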
20242 | ||||
20243 | case 'L': | |||
20244 | if (Subtarget->isThumb1Only()) { | |||
20245 | // This must be a constant between -7 and 7, | |||
20246 | // for 3-operand ADD/SUB immediate instructions. | |||
20247 | if (CVal >= -7 && CVal <= 7) | |||
20248 | break; | |||
20249 | } else if (Subtarget->isThumb2()) { | |||
20250 | // A constant whose negation can be used as an immediate value in a | |||
20251 | // data-processing instruction. This can be used in GCC with an "n" | |||
20252 | // modifier that prints the negated value, for use with SUB | |||
20253 | // instructions. It is not useful otherwise but is implemented for | |||
20254 | // compatibility. | |||
20255 | if (ARM_AM::getT2SOImmVal(-CVal) != -1) | |||
20256 | break; | |||
20257 | } else { | |||
20258 | // A constant whose negation can be used as an immediate value in a | |||
20259 | // data-processing instruction. This can be used in GCC with an "n" | |||
20260 | // modifier that prints the negated value, for use with SUB | |||
20261 | // instructions. It is not useful otherwise but is implemented for | |||
20262 | // compatibility. | |||
20263 | if (ARM_AM::getSOImmVal(-CVal) != -1) | |||
20264 | break; | |||
20265 | } | |||
20266 | return; | |||
20267 | ||||
20268 | case 'M': | |||
20269 | if (Subtarget->isThumb1Only()) { | |||
20270 | // This must be a multiple of 4 between 0 and 1020, for | |||
20271 | // ADD sp + immediate. | |||
20272 | if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0)) | |||
20273 | break; | |||
20274 | } else { | |||
20275 | // A power of two or a constant between 0 and 32. This is used in | |||
20276 | // GCC for the shift amount on shifted register operands, but it is | |||
20277 | // useful in general for any shift amounts. | |||
20278 | if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0)) | |||
20279 | break; | |||
20280 | } | |||
20281 | return; | |||
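| // For example (illustrative): outside Thumb1, CVal = 64 is accepted | |||
| // because (64 & 63) == 0 marks it as a power of two, even though it | |||
| // exceeds 32. | |||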
20282 | ||||
20283 | case 'N': | |||
20284 | if (Subtarget->isThumb1Only()) { | |||
20285 | // This must be a constant between 0 and 31, for shift amounts. | |||
20286 | if (CVal >= 0 && CVal <= 31) | |||
20287 | break; | |||
20288 | } | |||
20289 | return; | |||
20290 | ||||
20291 | case 'O': | |||
20292 | if (Subtarget->isThumb1Only()) { | |||
20293 | // This must be a multiple of 4 between -508 and 508, for | |||
20294 | // ADD/SUB sp = sp + immediate. | |||
20295 | if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0)) | |||
20296 | break; | |||
20297 | } | |||
20298 | return; | |||
20299 | } | |||
20300 | Result = DAG.getTargetConstant(CVal, SDLoc(Op), Op.getValueType()); | |||
20301 | break; | |||
20302 | } | |||
20303 | ||||
20304 | if (Result.getNode()) { | |||
20305 | Ops.push_back(Result); | |||
20306 | return; | |||
20307 | } | |||
20308 | return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); | |||
20309 | } | |||
20310 | ||||
20311 | static RTLIB::Libcall getDivRemLibcall( | |||
20312 | const SDNode *N, MVT::SimpleValueType SVT) { | |||
20313 | assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM || | |||
20314 | N->getOpcode() == ISD::SREM || N->getOpcode() == ISD::UREM) && | |||
20315 | "Unhandled Opcode in getDivRemLibcall"); | |||
20316 | bool isSigned = N->getOpcode() == ISD::SDIVREM || | |||
20317 | N->getOpcode() == ISD::SREM; | |||
20318 | RTLIB::Libcall LC; | |||
20319 | switch (SVT) { | |||
20320 | default: llvm_unreachable("Unexpected request for libcall!"); | |||
20321 | case MVT::i8: LC = isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break; | |||
20322 | case MVT::i16: LC = isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break; | |||
20323 | case MVT::i32: LC = isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break; | |||
20324 | case MVT::i64: LC = isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break; | |||
20325 | } | |||
20326 | return LC; | |||
20327 | } | |||
20328 | ||||
20329 | static TargetLowering::ArgListTy getDivRemArgList( | |||
20330 | const SDNode *N, LLVMContext *Context, const ARMSubtarget *Subtarget) { | |||
20331 | assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM || | |||
20332 | N->getOpcode() == ISD::SREM || N->getOpcode() == ISD::UREM) && | |||
20333 | "Unhandled Opcode in getDivRemArgList"); | |||
20334 | bool isSigned = N->getOpcode() == ISD::SDIVREM || | |||
20335 | N->getOpcode() == ISD::SREM; | |||
20336 | TargetLowering::ArgListTy Args; | |||
20337 | TargetLowering::ArgListEntry Entry; | |||
20338 | for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { | |||
20339 | EVT ArgVT = N->getOperand(i).getValueType(); | |||
20340 | Type *ArgTy = ArgVT.getTypeForEVT(*Context); | |||
20341 | Entry.Node = N->getOperand(i); | |||
20342 | Entry.Ty = ArgTy; | |||
20343 | Entry.IsSExt = isSigned; | |||
20344 | Entry.IsZExt = !isSigned; | |||
20345 | Args.push_back(Entry); | |||
20346 | } | |||
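| // Note (assumption, not confirmed by this source): Windows' RTABI | |||
| // division helpers take the divisor before the dividend, which is why | |||
| // the operands are swapped below on Windows targets. | |||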
20347 | if (Subtarget->isTargetWindows() && Args.size() >= 2) | |||
20348 | std::swap(Args[0], Args[1]); | |||
20349 | return Args; | |||
20350 | } | |||
20351 | ||||
20352 | SDValue ARMTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const { | |||
20353 | assert((Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() || | |||
20354 | Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI() || | |||
20355 | Subtarget->isTargetWindows()) && | |||
20356 | "Register-based DivRem lowering only"); | |||
20357 | unsigned Opcode = Op->getOpcode(); | |||
20358 | assert((Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) && | |||
20359 | "Invalid opcode for Div/Rem lowering"); | |||
20360 | bool isSigned = (Opcode == ISD::SDIVREM); | |||
20361 | EVT VT = Op->getValueType(0); | |||
20362 | Type *Ty = VT.getTypeForEVT(*DAG.getContext()); | |||
20363 | SDLoc dl(Op); | |||
20364 | ||||
20365 | // If the target has hardware divide, use divide + multiply + subtract: | |||
20366 | // div = a / b | |||
20367 | // rem = a - b * div | |||
20368 | // return {div, rem} | |||
20369 | // This should be lowered into UDIV/SDIV + MLS later on. | |||
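| // For example (illustrative): i32 {div, rem} = sdivrem(a, b) becomes | |||
| // div = sdiv a, b; mul = mul div, b; rem = sub a, mul, and the mul+sub | |||
| // pair can later be combined into a single MLS. | |||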
20370 | bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode() | |||
20371 | : Subtarget->hasDivideInARMMode(); | |||
20372 | if (hasDivide && Op->getValueType(0).isSimple() && | |||
20373 | Op->getSimpleValueType(0) == MVT::i32) { | |||
20374 | unsigned DivOpcode = isSigned ? ISD::SDIV : ISD::UDIV; | |||
20375 | const SDValue Dividend = Op->getOperand(0); | |||
20376 | const SDValue Divisor = Op->getOperand(1); | |||
20377 | SDValue Div = DAG.getNode(DivOpcode, dl, VT, Dividend, Divisor); | |||
20378 | SDValue Mul = DAG.getNode(ISD::MUL, dl, VT, Div, Divisor); | |||
20379 | SDValue Rem = DAG.getNode(ISD::SUB, dl, VT, Dividend, Mul); | |||
20380 | ||||
20381 | SDValue Values[2] = {Div, Rem}; | |||
20382 | return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VT, VT), Values); | |||
20383 | } | |||
20384 | ||||
20385 | RTLIB::Libcall LC = getDivRemLibcall(Op.getNode(), | |||
20386 | VT.getSimpleVT().SimpleTy); | |||
20387 | SDValue InChain = DAG.getEntryNode(); | |||
20388 | ||||
20389 | TargetLowering::ArgListTy Args = getDivRemArgList(Op.getNode(), | |||
20390 | DAG.getContext(), | |||
20391 | Subtarget); | |||
20392 | ||||
20393 | SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC), | |||
20394 | getPointerTy(DAG.getDataLayout())); | |||
20395 | ||||
20396 | Type *RetTy = StructType::get(Ty, Ty); | |||
20397 | ||||
20398 | if (Subtarget->isTargetWindows()) | |||
20399 | InChain = WinDBZCheckDenominator(DAG, Op.getNode(), InChain); | |||
20400 | ||||
20401 | TargetLowering::CallLoweringInfo CLI(DAG); | |||
20402 | CLI.setDebugLoc(dl).setChain(InChain) | |||
20403 | .setCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args)) | |||
20404 | .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned); | |||
20405 | ||||
20406 | std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI); | |||
20407 | return CallInfo.first; | |||
20408 | } | |||
20409 | ||||
20410 | // Lowers REM using divmod helpers | |||
20411 | // see RTABI section 4.2/4.3 | |||
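| // For example (illustrative): an i32 srem on an AEABI target becomes a | |||
| // call to __aeabi_idivmod, which returns {div, rem}; the remainder is | |||
| // then extracted as the call's second result below. | |||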
20412 | SDValue ARMTargetLowering::LowerREM(SDNode *N, SelectionDAG &DAG) const { | |||
20413 | // Build return types (div and rem) | |||
20414 | std::vector<Type*> RetTyParams; | |||
20415 | Type *RetTyElement; | |||
20416 | ||||
20417 | switch (N->getValueType(0).getSimpleVT().SimpleTy) { | |||
20418 | default: llvm_unreachable("Unexpected request for libcall!"); | |||
20419 | case MVT::i8: RetTyElement = Type::getInt8Ty(*DAG.getContext()); break; | |||
20420 | case MVT::i16: RetTyElement = Type::getInt16Ty(*DAG.getContext()); break; | |||
20421 | case MVT::i32: RetTyElement = Type::getInt32Ty(*DAG.getContext()); break; | |||
20422 | case MVT::i64: RetTyElement = Type::getInt64Ty(*DAG.getContext()); break; | |||
20423 | } | |||
20424 | ||||
20425 | RetTyParams.push_back(RetTyElement); | |||
20426 | RetTyParams.push_back(RetTyElement); | |||
20427 | ArrayRef<Type*> ret = ArrayRef<Type*>(RetTyParams); | |||
20428 | Type *RetTy = StructType::get(*DAG.getContext(), ret); | |||
20429 | ||||
20430 | RTLIB::Libcall LC = getDivRemLibcall(N, N->getValueType(0).getSimpleVT(). | |||
20431 | SimpleTy); | |||
20432 | SDValue InChain = DAG.getEntryNode(); | |||
20433 | TargetLowering::ArgListTy Args = getDivRemArgList(N, DAG.getContext(), | |||
20434 | Subtarget); | |||
20435 | bool isSigned = N->getOpcode() == ISD::SREM; | |||
20436 | SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC), | |||
20437 | getPointerTy(DAG.getDataLayout())); | |||
20438 | ||||
20439 | if (Subtarget->isTargetWindows()) | |||
20440 | InChain = WinDBZCheckDenominator(DAG, N, InChain); | |||
20441 | ||||
20442 | // Lower call | |||
20443 | CallLoweringInfo CLI(DAG); | |||
20444 | CLI.setChain(InChain) | |||
20445 | .setCallee(CallingConv::ARM_AAPCS, RetTy, Callee, std::move(Args)) | |||
20446 | .setSExtResult(isSigned).setZExtResult(!isSigned).setDebugLoc(SDLoc(N)); | |||
20447 | std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); | |||
20448 | ||||
20449 | // Return second (rem) result operand (first contains div) | |||
20450 | SDNode *ResNode = CallResult.first.getNode(); | |||
20451 | assert(ResNode->getNumOperands() == 2 && "divmod should return two operands"); | |||
20452 | return ResNode->getOperand(1); | |||
20453 | } | |||
20454 | ||||
20455 | SDValue | |||
20456 | ARMTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const { | |||
20457 | assert(Subtarget->isTargetWindows() && "unsupported target platform"); | |||
20458 | SDLoc DL(Op); | |||
20459 | ||||
20460 | // Get the inputs. | |||
20461 | SDValue Chain = Op.getOperand(0); | |||
20462 | SDValue Size = Op.getOperand(1); | |||
20463 | ||||
20464 | if (DAG.getMachineFunction().getFunction().hasFnAttribute( | |||
20465 | "no-stack-arg-probe")) { | |||
20466 | MaybeAlign Align = | |||
20467 | cast<ConstantSDNode>(Op.getOperand(2))->getMaybeAlignValue(); | |||
20468 | SDValue SP = DAG.getCopyFromReg(Chain, DL, ARM::SP, MVT::i32); | |||
20469 | Chain = SP.getValue(1); | |||
20470 | SP = DAG.getNode(ISD::SUB, DL, MVT::i32, SP, Size); | |||
20471 | if (Align) | |||
20472 | SP = | |||
20473 | DAG.getNode(ISD::AND, DL, MVT::i32, SP.getValue(0), | |||
20474 | DAG.getConstant(-(uint64_t)Align->value(), DL, MVT::i32)); | |||
20475 | Chain = DAG.getCopyToReg(Chain, DL, ARM::SP, SP); | |||
20476 | SDValue Ops[2] = { SP, Chain }; | |||
20477 | return DAG.getMergeValues(Ops, DL); | |||
20478 | } | |||
20479 | ||||
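| // Illustrative summary (assumed protocol): Windows' stack probe helper | |||
| // takes the allocation size in 4-byte words in R4; the WIN__CHKSTK node | |||
| // expands to the __chkstk call plus the SP adjustment, after which the | |||
| // updated SP is read back below. | |||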
20480 | SDValue Words = DAG.getNode(ISD::SRL, DL, MVT::i32, Size, | |||
20481 | DAG.getConstant(2, DL, MVT::i32)); | |||
20482 | ||||
20483 | SDValue Flag; | |||
20484 | Chain = DAG.getCopyToReg(Chain, DL, ARM::R4, Words, Flag); | |||
20485 | Flag = Chain.getValue(1); | |||
20486 | ||||
20487 | SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); | |||
20488 | Chain = DAG.getNode(ARMISD::WIN__CHKSTK, DL, NodeTys, Chain, Flag); | |||
20489 | ||||
20490 | SDValue NewSP = DAG.getCopyFromReg(Chain, DL, ARM::SP, MVT::i32); | |||
20491 | Chain = NewSP.getValue(1); | |||
20492 | ||||
20493 | SDValue Ops[2] = { NewSP, Chain }; | |||
20494 | return DAG.getMergeValues(Ops, DL); | |||
20495 | } | |||
20496 | ||||
20497 | SDValue ARMTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const { | |||
20498 | bool IsStrict = Op->isStrictFPOpcode(); | |||
20499 | SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0); | |||
20500 | const unsigned DstSz = Op.getValueType().getSizeInBits(); | |||
20501 | const unsigned SrcSz = SrcVal.getValueType().getSizeInBits(); | |||
20502 | assert(DstSz > SrcSz && DstSz <= 64 && SrcSz >= 16 && | |||
20503 | "Unexpected type for custom-lowering FP_EXTEND"); | |||
20504 | ||||
20505 | assert((!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) && | |||
20506 | "With both FP DP and 16, any FP conversion is legal!"); | |||
20507 | ||||
20508 | assert(!(DstSz == 32 && Subtarget->hasFP16()) && | |||
20509 | "With FP16, 16 to 32 conversion is legal!"); | |||
20510 | ||||
20511 | // Converting from 32 -> 64 is valid if we have FP64. | |||
20512 | if (SrcSz == 32 && DstSz == 64 && Subtarget->hasFP64()) { | |||
20513 | // FIXME: Remove this when we have strict fp instruction selection patterns | |||
20514 | if (IsStrict) { | |||
20515 | SDLoc Loc(Op); | |||
20516 | SDValue Result = DAG.getNode(ISD::FP_EXTEND, | |||
20517 | Loc, Op.getValueType(), SrcVal); | |||
20518 | return DAG.getMergeValues({Result, Op.getOperand(0)}, Loc); | |||
20519 | } | |||
20520 | return Op; | |||
20521 | } | |||
20522 | ||||
20523 | // Either we are converting from 16 -> 64 (without FP16 and/or without | |||
20524 | // double-precision FP, or without Armv8-fp), in which case we must do | |||
20525 | // it in two steps. | |||
20526 | // Or we are converting from 32 -> 64 without double-precision FP, or | |||
20527 | // from 16 -> 32 without FP16, in which case we must use a libcall. | |||
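| // For example (illustrative): extending f16 -> f64 with FP64 but no | |||
| // FP16 first makes the RTLIB::getFPEXT(f16, f32) libcall and then | |||
| // emits a native FP_EXTEND for the f32 -> f64 half. | |||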
20528 | SDLoc Loc(Op); | |||
20529 | RTLIB::Libcall LC; | |||
20530 | MakeLibCallOptions CallOptions; | |||
20531 | SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue(); | |||
20532 | for (unsigned Sz = SrcSz; Sz <= 32 && Sz < DstSz; Sz *= 2) { | |||
20533 | bool Supported = (Sz == 16 ? Subtarget->hasFP16() : Subtarget->hasFP64()); | |||
20534 | MVT SrcVT = (Sz == 16 ? MVT::f16 : MVT::f32); | |||
20535 | MVT DstVT = (Sz == 16 ? MVT::f32 : MVT::f64); | |||
20536 | if (Supported) { | |||
20537 | if (IsStrict) { | |||
20538 | SrcVal = DAG.getNode(ISD::STRICT_FP_EXTEND, Loc, | |||
20539 | {DstVT, MVT::Other}, {Chain, SrcVal}); | |||
20540 | Chain = SrcVal.getValue(1); | |||
20541 | } else { | |||
20542 | SrcVal = DAG.getNode(ISD::FP_EXTEND, Loc, DstVT, SrcVal); | |||
20543 | } | |||
20544 | } else { | |||
20545 | LC = RTLIB::getFPEXT(SrcVT, DstVT); | |||
20546 | assert(LC != RTLIB::UNKNOWN_LIBCALL && | |||
20547 | "Unexpected type for custom-lowering FP_EXTEND"); | |||
20548 | std::tie(SrcVal, Chain) = makeLibCall(DAG, LC, DstVT, SrcVal, CallOptions, | |||
20549 | Loc, Chain); | |||
20550 | } | |||
20551 | } | |||
20552 | ||||
20553 | return IsStrict ? DAG.getMergeValues({SrcVal, Chain}, Loc) : SrcVal; | |||
20554 | } | |||
20555 | ||||
20556 | SDValue ARMTargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const { | |||
20557 | bool IsStrict = Op->isStrictFPOpcode(); | |||
20558 | ||||
20559 | SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0); | |||
20560 | EVT SrcVT = SrcVal.getValueType(); | |||
20561 | EVT DstVT = Op.getValueType(); | |||
20562 | const unsigned DstSz = Op.getValueType().getSizeInBits(); | |||
20563 | const unsigned SrcSz = SrcVT.getSizeInBits(); | |||
20564 | (void)DstSz; | |||
20565 | assert(DstSz < SrcSz && SrcSz <= 64 && DstSz >= 16 && | |||
20566 | "Unexpected type for custom-lowering FP_ROUND"); | |||
20567 | ||||
20568 | assert((!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) && | |||
20569 | "With both FP DP and 16, any FP conversion is legal!"); | |||
20570 | ||||
20571 | SDLoc Loc(Op); | |||
20572 | ||||
20573 | // A single instruction handles 32 -> 16 if the subtarget has FP16. | |||
20574 | if (SrcSz == 32 && Subtarget->hasFP16()) | |||
20575 | return Op; | |||
20576 | ||||
20577 | // Libcall for 32 -> 16 or 64 -> [32, 16] | |||
20578 | RTLIB::Libcall LC = RTLIB::getFPROUND(SrcVT, DstVT); | |||
20579 | assert(LC != RTLIB::UNKNOWN_LIBCALL && | |||
20580 | "Unexpected type for custom-lowering FP_ROUND"); | |||
20581 | MakeLibCallOptions CallOptions; | |||
20582 | SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue(); | |||
20583 | SDValue Result; | |||
20584 | std::tie(Result, Chain) = makeLibCall(DAG, LC, DstVT, SrcVal, CallOptions, | |||
20585 | Loc, Chain); | |||
20586 | return IsStrict ? DAG.getMergeValues({Result, Chain}, Loc) : Result; | |||
20587 | } | |||
20588 | ||||
20589 | bool | |||
20590 | ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { | |||
20591 | // The ARM target isn't yet aware of offsets. | |||
20592 | return false; | |||
20593 | } | |||
20594 | ||||
20595 | bool ARM::isBitFieldInvertedMask(unsigned v) { | |||
20596 | if (v == 0xffffffff) | |||
20597 | return false; | |||
20598 | ||||
20599 | // There can be 1's on either or both "outsides"; all the "inside" | |||
20600 | // bits must be 0's. | |||
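| // For example (illustrative): v = 0xFF0000FF gives ~v = 0x00FFFF00, a | |||
| // single contiguous run of ones, so this returns true and v can feed a | |||
| // BFC/BFI-style bitfield instruction. | |||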
20601 | return isShiftedMask_32(~v); | |||
20602 | } | |||
20603 | ||||
20604 | /// isFPImmLegal - Returns true if the target can instruction select the | |||
20605 | /// specified FP immediate natively. If false, the legalizer will | |||
20606 | /// materialize the FP immediate as a load from a constant pool. | |||
20607 | bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT, | |||
20608 | bool ForCodeSize) const { | |||
20609 | if (!Subtarget->hasVFP3Base()) | |||
20610 | return false; | |||
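| // For example (illustrative): 1.0f and 0.5f fit the 8-bit VFP immediate | |||
| // encoding and are accepted below, while 0.1f is not encodable and gets | |||
| // loaded from the constant pool instead. | |||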
20611 | if (VT == MVT::f16 && Subtarget->hasFullFP16()) | |||
20612 | return ARM_AM::getFP16Imm(Imm) != -1; | |||
20613 | if (VT == MVT::f32 && Subtarget->hasFullFP16() && | |||
20614 | ARM_AM::getFP32FP16Imm(Imm) != -1) | |||
20615 | return true; | |||
20616 | if (VT == MVT::f32) | |||
20617 | return ARM_AM::getFP32Imm(Imm) != -1; | |||
20618 | if (VT == MVT::f64 && Subtarget->hasFP64()) | |||
20619 | return ARM_AM::getFP64Imm(Imm) != -1; | |||
20620 | return false; | |||
20621 | } | |||
20622 | ||||
20623 | /// getTgtMemIntrinsic - Represent NEON load and store intrinsics as | |||
20624 | /// MemIntrinsicNodes. The associated MachineMemOperands record the alignment | |||
20625 | /// specified in the intrinsic calls. | |||
20626 | bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, | |||
20627 | const CallInst &I, | |||
20628 | MachineFunction &MF, | |||
20629 | unsigned Intrinsic) const { | |||
20630 | switch (Intrinsic) { | |||
20631 | case Intrinsic::arm_neon_vld1: | |||
20632 | case Intrinsic::arm_neon_vld2: | |||
20633 | case Intrinsic::arm_neon_vld3: | |||
20634 | case Intrinsic::arm_neon_vld4: | |||
20635 | case Intrinsic::arm_neon_vld2lane: | |||
20636 | case Intrinsic::arm_neon_vld3lane: | |||
20637 | case Intrinsic::arm_neon_vld4lane: | |||
20638 | case Intrinsic::arm_neon_vld2dup: | |||
20639 | case Intrinsic::arm_neon_vld3dup: | |||
20640 | case Intrinsic::arm_neon_vld4dup: { | |||
20641 | Info.opc = ISD::INTRINSIC_W_CHAIN; | |||
20642 | // Conservatively set memVT to the entire set of vectors loaded. | |||
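| // For example (illustrative): a vld3 of three 4 x i16 vectors covers | |||
| // 192 bits, so memVT becomes v3i64 here. | |||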
20643 | auto &DL = I.getCalledFunction()->getParent()->getDataLayout(); | |||
20644 | uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64; | |||
20645 | Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); | |||
20646 | Info.ptrVal = I.getArgOperand(0); | |||
20647 | Info.offset = 0; | |||
20648 | Value *AlignArg = I.getArgOperand(I.arg_size() - 1); | |||
20649 | Info.align = cast<ConstantInt>(AlignArg)->getMaybeAlignValue(); | |||
20650 | // volatile loads with NEON intrinsics not supported | |||
20651 | Info.flags = MachineMemOperand::MOLoad; | |||
20652 | return true; | |||
20653 | } | |||
20654 | case Intrinsic::arm_neon_vld1x2: | |||
20655 | case Intrinsic::arm_neon_vld1x3: | |||
20656 | case Intrinsic::arm_neon_vld1x4: { | |||
20657 | Info.opc = ISD::INTRINSIC_W_CHAIN; | |||
20658 | // Conservatively set memVT to the entire set of vectors loaded. | |||
20659 | auto &DL = I.getCalledFunction()->getParent()->getDataLayout(); | |||
20660 | uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64; | |||
20661 | Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); | |||
20662 | Info.ptrVal = I.getArgOperand(I.arg_size() - 1); | |||
20663 | Info.offset = 0; | |||
20664 | Info.align.reset(); | |||
20665 | // volatile loads with NEON intrinsics not supported | |||
20666 | Info.flags = MachineMemOperand::MOLoad; | |||
20667 | return true; | |||
20668 | } | |||
20669 | case Intrinsic::arm_neon_vst1: | |||
20670 | case Intrinsic::arm_neon_vst2: | |||
20671 | case Intrinsic::arm_neon_vst3: | |||
20672 | case Intrinsic::arm_neon_vst4: | |||
20673 | case Intrinsic::arm_neon_vst2lane: | |||
20674 | case Intrinsic::arm_neon_vst3lane: | |||
20675 | case Intrinsic::arm_neon_vst4lane: { | |||
20676 | Info.opc = ISD::INTRINSIC_VOID; | |||
20677 | // Conservatively set memVT to the entire set of vectors stored. | |||
20678 | auto &DL = I.getCalledFunction()->getParent()->getDataLayout(); | |||
20679 | unsigned NumElts = 0; | |||
20680 | for (unsigned ArgI = 1, ArgE = I.arg_size(); ArgI < ArgE; ++ArgI) { | |||
20681 | Type *ArgTy = I.getArgOperand(ArgI)->getType(); | |||
20682 | if (!ArgTy->isVectorTy()) | |||
20683 | break; | |||
20684 | NumElts += DL.getTypeSizeInBits(ArgTy) / 64; | |||
20685 | } | |||
20686 | Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); | |||
20687 | Info.ptrVal = I.getArgOperand(0); | |||
20688 | Info.offset = 0; | |||
20689 | Value *AlignArg = I.getArgOperand(I.arg_size() - 1); | |||
20690 | Info.align = cast<ConstantInt>(AlignArg)->getMaybeAlignValue(); | |||
20691 | // volatile stores with NEON intrinsics not supported | |||
20692 | Info.flags = MachineMemOperand::MOStore; | |||
20693 | return true; | |||
20694 | } | |||
20695 | case Intrinsic::arm_neon_vst1x2: | |||
20696 | case Intrinsic::arm_neon_vst1x3: | |||
20697 | case Intrinsic::arm_neon_vst1x4: { | |||
20698 | Info.opc = ISD::INTRINSIC_VOID; | |||
20699 | // Conservatively set memVT to the entire set of vectors stored. | |||
20700 | auto &DL = I.getCalledFunction()->getParent()->getDataLayout(); | |||
20701 | unsigned NumElts = 0; | |||
20702 | for (unsigned ArgI = 1, ArgE = I.arg_size(); ArgI < ArgE; ++ArgI) { | |||
20703 | Type *ArgTy = I.getArgOperand(ArgI)->getType(); | |||
20704 | if (!ArgTy->isVectorTy()) | |||
20705 | break; | |||
20706 | NumElts += DL.getTypeSizeInBits(ArgTy) / 64; | |||
20707 | } | |||
20708 | Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); | |||
20709 | Info.ptrVal = I.getArgOperand(0); | |||
20710 | Info.offset = 0; | |||
20711 | Info.align.reset(); | |||
20712 | // volatile stores with NEON intrinsics not supported | |||
20713 | Info.flags = MachineMemOperand::MOStore; | |||
20714 | return true; | |||
20715 | } | |||
20716 | case Intrinsic::arm_mve_vld2q: | |||
20717 | case Intrinsic::arm_mve_vld4q: { | |||
20718 | Info.opc = ISD::INTRINSIC_W_CHAIN; | |||
20719 | // Conservatively set memVT to the entire set of vectors loaded. | |||
20720 | Type *VecTy = cast<StructType>(I.getType())->getElementType(1); | |||
20721 | unsigned Factor = Intrinsic == Intrinsic::arm_mve_vld2q ? 2 : 4; | |||
20722 | Info.memVT = EVT::getVectorVT(VecTy->getContext(), MVT::i64, Factor * 2); | |||
20723 | Info.ptrVal = I.getArgOperand(0); | |||
20724 | Info.offset = 0; | |||
20725 | Info.align = Align(VecTy->getScalarSizeInBits() / 8); | |||
20726 | // volatile loads with MVE intrinsics not supported | |||
20727 | Info.flags = MachineMemOperand::MOLoad; | |||
20728 | return true; | |||
20729 | } | |||
20730 | case Intrinsic::arm_mve_vst2q: | |||
20731 | case Intrinsic::arm_mve_vst4q: { | |||
20732 | Info.opc = ISD::INTRINSIC_VOID; | |||
20733 | // Conservatively set memVT to the entire set of vectors stored. | |||
20734 | Type *VecTy = I.getArgOperand(1)->getType(); | |||
20735 | unsigned Factor = Intrinsic == Intrinsic::arm_mve_vst2q ? 2 : 4; | |||
20736 | Info.memVT = EVT::getVectorVT(VecTy->getContext(), MVT::i64, Factor * 2); | |||
20737 | Info.ptrVal = I.getArgOperand(0); | |||
20738 | Info.offset = 0; | |||
20739 | Info.align = Align(VecTy->getScalarSizeInBits() / 8); | |||
20740 | // volatile stores with MVE intrinsics not supported | |||
20741 | Info.flags = MachineMemOperand::MOStore; | |||
20742 | return true; | |||
20743 | } | |||
20744 | case Intrinsic::arm_mve_vldr_gather_base: | |||
20745 | case Intrinsic::arm_mve_vldr_gather_base_predicated: { | |||
20746 | Info.opc = ISD::INTRINSIC_W_CHAIN; | |||
20747 | Info.ptrVal = nullptr; | |||
20748 | Info.memVT = MVT::getVT(I.getType()); | |||
20749 | Info.align = Align(1); | |||
20750 | Info.flags |= MachineMemOperand::MOLoad; | |||
20751 | return true; | |||
20752 | } | |||
20753 | case Intrinsic::arm_mve_vldr_gather_base_wb: | |||
20754 | case Intrinsic::arm_mve_vldr_gather_base_wb_predicated: { | |||
20755 | Info.opc = ISD::INTRINSIC_W_CHAIN; | |||
20756 | Info.ptrVal = nullptr; | |||
20757 | Info.memVT = MVT::getVT(I.getType()->getContainedType(0)); | |||
20758 | Info.align = Align(1); | |||
20759 | Info.flags |= MachineMemOperand::MOLoad; | |||
20760 | return true; | |||
20761 | } | |||
20762 | case Intrinsic::arm_mve_vldr_gather_offset: | |||
20763 | case Intrinsic::arm_mve_vldr_gather_offset_predicated: { | |||
20764 | Info.opc = ISD::INTRINSIC_W_CHAIN; | |||
20765 | Info.ptrVal = nullptr; | |||
20766 | MVT DataVT = MVT::getVT(I.getType()); | |||
20767 | unsigned MemSize = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue(); | |||
20768 | Info.memVT = MVT::getVectorVT(MVT::getIntegerVT(MemSize), | |||
20769 | DataVT.getVectorNumElements()); | |||
20770 | Info.align = Align(1); | |||
20771 | Info.flags |= MachineMemOperand::MOLoad; | |||
20772 | return true; | |||
20773 | } | |||
20774 | case Intrinsic::arm_mve_vstr_scatter_base: | |||
20775 | case Intrinsic::arm_mve_vstr_scatter_base_predicated: { | |||
20776 | Info.opc = ISD::INTRINSIC_VOID; | |||
20777 | Info.ptrVal = nullptr; | |||
20778 | Info.memVT = MVT::getVT(I.getArgOperand(2)->getType()); | |||
20779 | Info.align = Align(1); | |||
20780 | Info.flags |= MachineMemOperand::MOStore; | |||
20781 | return true; | |||
20782 | } | |||
20783 | case Intrinsic::arm_mve_vstr_scatter_base_wb: | |||
20784 | case Intrinsic::arm_mve_vstr_scatter_base_wb_predicated: { | |||
20785 | Info.opc = ISD::INTRINSIC_W_CHAIN; | |||
20786 | Info.ptrVal = nullptr; | |||
20787 | Info.memVT = MVT::getVT(I.getArgOperand(2)->getType()); | |||
20788 | Info.align = Align(1); | |||
20789 | Info.flags |= MachineMemOperand::MOStore; | |||
20790 | return true; | |||
20791 | } | |||
20792 | case Intrinsic::arm_mve_vstr_scatter_offset: | |||
20793 | case Intrinsic::arm_mve_vstr_scatter_offset_predicated: { | |||
20794 | Info.opc = ISD::INTRINSIC_VOID; | |||
20795 | Info.ptrVal = nullptr; | |||
20796 | MVT DataVT = MVT::getVT(I.getArgOperand(2)->getType()); | |||
20797 | unsigned MemSize = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue(); | |||
20798 | Info.memVT = MVT::getVectorVT(MVT::getIntegerVT(MemSize), | |||
20799 | DataVT.getVectorNumElements()); | |||
20800 | Info.align = Align(1); | |||
20801 | Info.flags |= MachineMemOperand::MOStore; | |||
20802 | return true; | |||
20803 | } | |||
20804 | case Intrinsic::arm_ldaex: | |||
20805 | case Intrinsic::arm_ldrex: { | |||
20806 | auto &DL = I.getCalledFunction()->getParent()->getDataLayout(); | |||
20807 | Type *ValTy = I.getParamElementType(0); | |||
20808 | Info.opc = ISD::INTRINSIC_W_CHAIN; | |||
20809 | Info.memVT = MVT::getVT(ValTy); | |||
20810 | Info.ptrVal = I.getArgOperand(0); | |||
20811 | Info.offset = 0; | |||
20812 | Info.align = DL.getABITypeAlign(ValTy); | |||
20813 | Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile; | |||
20814 | return true; | |||
20815 | } | |||
20816 | case Intrinsic::arm_stlex: | |||
20817 | case Intrinsic::arm_strex: { | |||
20818 | auto &DL = I.getCalledFunction()->getParent()->getDataLayout(); | |||
20819 | Type *ValTy = I.getParamElementType(1); | |||
20820 | Info.opc = ISD::INTRINSIC_W_CHAIN; | |||
20821 | Info.memVT = MVT::getVT(ValTy); | |||
20822 | Info.ptrVal = I.getArgOperand(1); | |||
20823 | Info.offset = 0; | |||
20824 | Info.align = DL.getABITypeAlign(ValTy); | |||
20825 | Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile; | |||
20826 | return true; | |||
20827 | } | |||
20828 | case Intrinsic::arm_stlexd: | |||
20829 | case Intrinsic::arm_strexd: | |||
20830 | Info.opc = ISD::INTRINSIC_W_CHAIN; | |||
20831 | Info.memVT = MVT::i64; | |||
20832 | Info.ptrVal = I.getArgOperand(2); | |||
20833 | Info.offset = 0; | |||
20834 | Info.align = Align(8); | |||
20835 | Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile; | |||
20836 | return true; | |||
20837 | ||||
20838 | case Intrinsic::arm_ldaexd: | |||
20839 | case Intrinsic::arm_ldrexd: | |||
20840 | Info.opc = ISD::INTRINSIC_W_CHAIN; | |||
20841 | Info.memVT = MVT::i64; | |||
20842 | Info.ptrVal = I.getArgOperand(0); | |||
20843 | Info.offset = 0; | |||
20844 | Info.align = Align(8); | |||
20845 | Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile; | |||
20846 | return true; | |||
20847 | ||||
20848 | default: | |||
20849 | break; | |||
20850 | } | |||
20851 | ||||
20852 | return false; | |||
20853 | } | |||
20854 | ||||
20855 | /// Returns true if it is beneficial to convert a load of a constant | |||
20856 | /// to just the constant itself. | |||
20857 | bool ARMTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, | |||
20858 | Type *Ty) const { | |||
20859 | assert(Ty->isIntegerTy()); | |||
20860 | ||||
20861 | unsigned Bits = Ty->getPrimitiveSizeInBits(); | |||
20862 | if (Bits == 0 || Bits > 32) | |||
20863 | return false; | |||
20864 | return true; | |||
20865 | } | |||
20866 | ||||
20867 | bool ARMTargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, | |||
20868 | unsigned Index) const { | |||
20869 | if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT)) | |||
20870 | return false; | |||
20871 | ||||
20872 | return (Index == 0 || Index == ResVT.getVectorNumElements()); | |||
20873 | } | |||
20874 | ||||
20875 | Instruction *ARMTargetLowering::makeDMB(IRBuilderBase &Builder, | |||
20876 | ARM_MB::MemBOpt Domain) const { | |||
20877 | Module *M = Builder.GetInsertBlock()->getParent()->getParent(); | |||
20878 | ||||
20879 | // First, if the target has no DMB, see what fallback we can use. | |||
20880 | if (!Subtarget->hasDataBarrier()) { | |||
20881 | // Some ARMv6 CPUs can support data barriers with an mcr instruction. | |||
20882 | // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get | |||
20883 | // here. | |||
20884 | if (Subtarget->hasV6Ops() && !Subtarget->isThumb()) { | |||
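| // mcr p15, 0, <Rt>, c7, c10, 5 is the ARMv6 CP15 encoding of a data | |||
| // memory barrier; the six constants below spell out that operation. | |||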
20885 | Function *MCR = Intrinsic::getDeclaration(M, Intrinsic::arm_mcr); | |||
20886 | Value* args[6] = {Builder.getInt32(15), Builder.getInt32(0), | |||
20887 | Builder.getInt32(0), Builder.getInt32(7), | |||
20888 | Builder.getInt32(10), Builder.getInt32(5)}; | |||
20889 | return Builder.CreateCall(MCR, args); | |||
20890 | } else { | |||
20891 | // Instead of using barriers, atomic accesses on these subtargets use | |||
20892 | // libcalls. | |||
20893 | llvm_unreachable("makeDMB on a target so old that it has no barriers"); | |||
20894 | } | |||
20895 | } else { | |||
20896 | Function *DMB = Intrinsic::getDeclaration(M, Intrinsic::arm_dmb); | |||
20897 | // Only a full system barrier exists in the M-class architectures. | |||
20898 | Domain = Subtarget->isMClass() ? ARM_MB::SY : Domain; | |||
20899 | Constant *CDomain = Builder.getInt32(Domain); | |||
20900 | return Builder.CreateCall(DMB, CDomain); | |||
20901 | } | |||
20902 | } | |||
20903 | ||||
20904 | // Based on http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html | |||
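| // For example (illustrative): under that mapping a seq_cst store is | |||
| // "dmb ish; str; dmb ish"; this hook emits the leading barrier and | |||
| // emitTrailingFence below emits the trailing one. | |||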
20905 | Instruction *ARMTargetLowering::emitLeadingFence(IRBuilderBase &Builder, | |||
20906 | Instruction *Inst, | |||
20907 | AtomicOrdering Ord) const { | |||
20908 | switch (Ord) { | |||
20909 | case AtomicOrdering::NotAtomic: | |||
20910 | case AtomicOrdering::Unordered: | |||
20911 | llvm_unreachable("Invalid fence: unordered/non-atomic"); | |||
20912 | case AtomicOrdering::Monotonic: | |||
20913 | case AtomicOrdering::Acquire: | |||
20914 | return nullptr; // Nothing to do | |||
20915 | case AtomicOrdering::SequentiallyConsistent: | |||
20916 | if (!Inst->hasAtomicStore()) | |||
20917 | return nullptr; // Nothing to do | |||
20918 | LLVM_FALLTHROUGH; | |||
20919 | case AtomicOrdering::Release: | |||
20920 | case AtomicOrdering::AcquireRelease: | |||
20921 | if (Subtarget->preferISHSTBarriers()) | |||
20922 | return makeDMB(Builder, ARM_MB::ISHST); | |||
20923 | // FIXME: add a comment with a link to documentation justifying this. | |||
20924 | else | |||
20925 | return makeDMB(Builder, ARM_MB::ISH); | |||
20926 | } | |||
20927 | llvm_unreachable("Unknown fence ordering in emitLeadingFence")::llvm::llvm_unreachable_internal("Unknown fence ordering in emitLeadingFence" , "llvm/lib/Target/ARM/ARMISelLowering.cpp", 20927); | |||
20928 | } | |||
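// Illustrative effect (assuming a subtarget without preferISHSTBarriers):
// a store such as
//   store atomic i32 %v, i32* %p seq_cst, align 4
// picks up a leading "dmb ish" here, while monotonic and acquire operations
// are left alone and rely only on the trailing fence below if needed.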
20929 | ||||
20930 | Instruction *ARMTargetLowering::emitTrailingFence(IRBuilderBase &Builder, | |||
20931 | Instruction *Inst, | |||
20932 | AtomicOrdering Ord) const { | |||
20933 | switch (Ord) { | |||
20934 | case AtomicOrdering::NotAtomic: | |||
20935 | case AtomicOrdering::Unordered: | |||
20936 | llvm_unreachable("Invalid fence: unordered/not-atomic")::llvm::llvm_unreachable_internal("Invalid fence: unordered/not-atomic" , "llvm/lib/Target/ARM/ARMISelLowering.cpp", 20936); | |||
20937 | case AtomicOrdering::Monotonic: | |||
20938 | case AtomicOrdering::Release: | |||
20939 | return nullptr; // Nothing to do | |||
20940 | case AtomicOrdering::Acquire: | |||
20941 | case AtomicOrdering::AcquireRelease: | |||
20942 | case AtomicOrdering::SequentiallyConsistent: | |||
20943 | return makeDMB(Builder, ARM_MB::ISH); | |||
20944 | } | |||
20945 | llvm_unreachable("Unknown fence ordering in emitTrailingFence")::llvm::llvm_unreachable_internal("Unknown fence ordering in emitTrailingFence" , "llvm/lib/Target/ARM/ARMISelLowering.cpp", 20945); | |||
20946 | } | |||
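// Putting both halves together, the fence-based (pre-v8 style) mapping this
// implements is roughly:
//   acquire load : ldr ; dmb ish
//   release store: dmb ish ; str
//   seq_cst store: dmb ish ; str ; dmb ish
// (a sketch of the cl.cam.ac.uk mapping cited above, not verbatim compiler
// output).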
20947 | ||||
20948 | // Loads and stores less than 64 bits are already atomic; ones above that | |||
20949 | // are doomed anyway, so defer to the default libcall and blame the OS when | |||
20950 | // things go wrong. Cortex-M doesn't have ldrexd/strexd though, so don't emit | |||
20951 | // anything for those. | |||
20952 | TargetLoweringBase::AtomicExpansionKind | |||
20953 | ARMTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const { | |||
20954 | bool has64BitAtomicStore; | |||
20955 | if (Subtarget->isMClass()) | |||
20956 | has64BitAtomicStore = false; | |||
20957 | else if (Subtarget->isThumb()) | |||
20958 | has64BitAtomicStore = Subtarget->hasV7Ops(); | |||
20959 | else | |||
20960 | has64BitAtomicStore = Subtarget->hasV6Ops(); | |||
20961 | ||||
20962 | unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits(); | |||
20963 | return Size == 64 && has64BitAtomicStore ? AtomicExpansionKind::Expand | |||
20964 | : AtomicExpansionKind::None; | |||
20965 | } | |||
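// Illustrative expansion (a sketch): with AtomicExpansionKind::Expand,
// AtomicExpandPass rewrites a 64-bit "store atomic i64" into a
// load-linked/store-conditional loop built from the ldrexd/strexd helpers
// below, since a plain strd is not guaranteed single-copy atomic without
// LPAE (see the FIXME below).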
20966 | ||||
20967 | // Loads and stores less than 64 bits are already atomic; ones above that | |||
20968 | // are doomed anyway, so defer to the default libcall and blame the OS when | |||
20969 | // things go wrong. Cortex-M doesn't have ldrexd/strexd though, so don't emit | |||
20970 | // anything for those. | |||
20971 | // FIXME: ldrd and strd are atomic if the CPU has LPAE (e.g. A15 has that | |||
20972 | // guarantee, see DDI0406C ARM architecture reference manual, | |||
20973 | // sections A8.8.72-74 LDRD) | |||
20974 | TargetLowering::AtomicExpansionKind | |||
20975 | ARMTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const { | |||
20976 | bool has64BitAtomicLoad; | |||
20977 | if (Subtarget->isMClass()) | |||
20978 | has64BitAtomicLoad = false; | |||
20979 | else if (Subtarget->isThumb()) | |||
20980 | has64BitAtomicLoad = Subtarget->hasV7Ops(); | |||
20981 | else | |||
20982 | has64BitAtomicLoad = Subtarget->hasV6Ops(); | |||
20983 | ||||
20984 | unsigned Size = LI->getType()->getPrimitiveSizeInBits(); | |||
20985 | return (Size == 64 && has64BitAtomicLoad) ? AtomicExpansionKind::LLOnly | |||
20986 | : AtomicExpansionKind::None; | |||
20987 | } | |||
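// Illustrative expansion (a sketch): AtomicExpansionKind::LLOnly turns a
// 64-bit atomic load into a bare load-linked, i.e. a single ldrexd whose
// {i32, i32} result is recombined into an i64 (see emitLoadLinked below);
// ldrexd is single-copy atomic on its own, so no matching strexd is needed
// just to read.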
20988 | ||||
20989 | // For the real atomic operations, we have ldrex/strex up to 32 bits, | |||
20990 | // and up to 64 bits on the non-M profiles | |||
20991 | TargetLowering::AtomicExpansionKind | |||
20992 | ARMTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const { | |||
20993 | if (AI->isFloatingPointOperation()) | |||
20994 | return AtomicExpansionKind::CmpXChg; | |||
20995 | ||||
20996 | unsigned Size = AI->getType()->getPrimitiveSizeInBits(); | |||
20997 | bool hasAtomicRMW; | |||
20998 | if (Subtarget->isMClass()) | |||
20999 | hasAtomicRMW = Subtarget->hasV8MBaselineOps(); | |||
21000 | else if (Subtarget->isThumb()) | |||
21001 | hasAtomicRMW = Subtarget->hasV7Ops(); | |||
21002 | else | |||
21003 | hasAtomicRMW = Subtarget->hasV6Ops(); | |||
21004 | if (Size <= (Subtarget->isMClass() ? 32U : 64U) && hasAtomicRMW) { | |||
21005 | // At -O0, fast-regalloc cannot cope with the live vregs necessary to | |||
21006 | // implement atomicrmw without spilling. If the target address is also on | |||
21007 | // the stack and close enough to the spill slot, this can lead to a | |||
21008 | // situation where the monitor always gets cleared and the atomic operation | |||
21009 | // can never succeed. So at -O0 lower this operation to a CAS loop. | |||
21010 | if (getTargetMachine().getOptLevel() == CodeGenOpt::None) | |||
21011 | return AtomicExpansionKind::CmpXChg; | |||
21012 | return AtomicExpansionKind::LLSC; | |||
21013 | } | |||
21014 | return AtomicExpansionKind::None; | |||
21015 | } | |||
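// A sketch of the LLSC expansion for e.g. "atomicrmw add i32* %p, i32 %x"
// (barriers omitted, registers chosen purely for illustration):
//   1: ldrex   r1, [r0]
//      add     r1, r1, r2
//      strex   r3, r1, [r0]
//      cmp     r3, #0
//      bne     1b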
21016 | ||||
21017 | // Similar to shouldExpandAtomicRMWInIR, ldrex/strex can be used up to 32 | |||
21018 | // bits, and up to 64 bits on the non-M profiles. | |||
21019 | TargetLowering::AtomicExpansionKind | |||
21020 | ARMTargetLowering::shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const { | |||
21021 | // At -O0, fast-regalloc cannot cope with the live vregs necessary to | |||
21022 | // implement cmpxchg without spilling. If the address being exchanged is also | |||
21023 | // on the stack and close enough to the spill slot, this can lead to a | |||
21024 | // situation where the monitor always gets cleared and the atomic operation | |||
21025 | // can never succeed. So at -O0 we need a late-expanded pseudo-inst instead. | |||
21026 | unsigned Size = AI->getOperand(1)->getType()->getPrimitiveSizeInBits(); | |||
21027 | bool HasAtomicCmpXchg; | |||
21028 | if (Subtarget->isMClass()) | |||
21029 | HasAtomicCmpXchg = Subtarget->hasV8MBaselineOps(); | |||
21030 | else if (Subtarget->isThumb()) | |||
21031 | HasAtomicCmpXchg = Subtarget->hasV7Ops(); | |||
21032 | else | |||
21033 | HasAtomicCmpXchg = Subtarget->hasV6Ops(); | |||
21034 | if (getTargetMachine().getOptLevel() != 0 && HasAtomicCmpXchg && | |||
21035 | Size <= (Subtarget->isMClass() ? 32U : 64U)) | |||
21036 | return AtomicExpansionKind::LLSC; | |||
21037 | return AtomicExpansionKind::None; | |||
21038 | } | |||
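// The corresponding LLSC loop for "cmpxchg i32* %p, i32 %old, i32 %new"
// looks roughly like (illustrative register assignment):
//   1: ldrex   r1, [r0]
//      cmp     r1, r2        @ expected value
//      bne     2f            @ failure path: no strex executed...
//      strex   r3, r4, [r0]
//      cmp     r3, #0
//      bne     1b
//   2: @ ...so emitAtomicCmpXchgNoStoreLLBalance below issues a clrex here
//      @ to clear the exclusive monitor.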
21039 | ||||
21040 | bool ARMTargetLowering::shouldInsertFencesForAtomic( | |||
21041 | const Instruction *I) const { | |||
21042 | return InsertFencesForAtomic; | |||
21043 | } | |||
21044 | ||||
21045 | bool ARMTargetLowering::useLoadStackGuardNode() const { return true; } | |||
21046 | ||||
21047 | void ARMTargetLowering::insertSSPDeclarations(Module &M) const { | |||
21048 | if (!Subtarget->getTargetTriple().isWindowsMSVCEnvironment()) | |||
21049 | return TargetLowering::insertSSPDeclarations(M); | |||
21050 | ||||
21051 | // MSVC CRT has a global variable holding security cookie. | |||
21052 | M.getOrInsertGlobal("__security_cookie", | |||
21053 | Type::getInt8PtrTy(M.getContext())); | |||
21054 | ||||
21055 | // MSVC CRT has a function to validate security cookie. | |||
21056 | FunctionCallee SecurityCheckCookie = M.getOrInsertFunction( | |||
21057 | "__security_check_cookie", Type::getVoidTy(M.getContext()), | |||
21058 | Type::getInt8PtrTy(M.getContext())); | |||
21059 | if (Function *F = dyn_cast<Function>(SecurityCheckCookie.getCallee())) | |||
21060 | F->addParamAttr(0, Attribute::AttrKind::InReg); | |||
21061 | } | |||
21062 | ||||
21063 | Value *ARMTargetLowering::getSDagStackGuard(const Module &M) const { | |||
21064 | // MSVC CRT has a global variable holding security cookie. | |||
21065 | if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment()) | |||
21066 | return M.getGlobalVariable("__security_cookie"); | |||
21067 | return TargetLowering::getSDagStackGuard(M); | |||
21068 | } | |||
21069 | ||||
21070 | Function *ARMTargetLowering::getSSPStackGuardCheck(const Module &M) const { | |||
21071 | // MSVC CRT has a function to validate security cookie. | |||
21072 | if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment()) | |||
21073 | return M.getFunction("__security_check_cookie"); | |||
21074 | return TargetLowering::getSSPStackGuardCheck(M); | |||
21075 | } | |||
21076 | ||||
21077 | bool ARMTargetLowering::canCombineStoreAndExtract(Type *VectorTy, Value *Idx, | |||
21078 | unsigned &Cost) const { | |||
21079 | // If we do not have NEON, vector types are not natively supported. | |||
21080 | if (!Subtarget->hasNEON()) | |||
21081 | return false; | |||
21082 | ||||
21083 | // Floating point values and vector values map to the same register file. | |||
21084 | // Therefore, although we could do a store-extract of a vector type, it is | |||
21085 | // better to leave FP values alone, as we have more freedom in the | |||
21086 | // addressing modes for them. | |||
21087 | if (VectorTy->isFPOrFPVectorTy()) | |||
21088 | return false; | |||
21089 | ||||
21090 | // If the index is unknown at compile time, this is very expensive to lower | |||
21091 | // and it is not possible to combine the store with the extract. | |||
21092 | if (!isa<ConstantInt>(Idx)) | |||
21093 | return false; | |||
21094 | ||||
21095 | assert(VectorTy->isVectorTy() && "VectorTy is not a vector type"); | |||
21096 | unsigned BitWidth = VectorTy->getPrimitiveSizeInBits().getFixedSize(); | |||
21097 | // We can do a store + vector extract on any vector that fits perfectly in a D | |||
21098 | // or Q register. | |||
21099 | if (BitWidth == 64 || BitWidth == 128) { | |||
21100 | Cost = 0; | |||
21101 | return true; | |||
21102 | } | |||
21103 | return false; | |||
21104 | } | |||
21105 | ||||
21106 | bool ARMTargetLowering::isCheapToSpeculateCttz() const { | |||
21107 | return Subtarget->hasV6T2Ops(); | |||
21108 | } | |||
21109 | ||||
21110 | bool ARMTargetLowering::isCheapToSpeculateCtlz() const { | |||
21111 | return Subtarget->hasV6T2Ops(); | |||
21112 | } | |||
21113 | ||||
21114 | bool ARMTargetLowering::shouldExpandShift(SelectionDAG &DAG, SDNode *N) const { | |||
21115 | return !Subtarget->hasMinSize() || Subtarget->isTargetWindows(); | |||
21116 | } | |||
21117 | ||||
21118 | Value *ARMTargetLowering::emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, | |||
21119 | Value *Addr, | |||
21120 | AtomicOrdering Ord) const { | |||
21121 | Module *M = Builder.GetInsertBlock()->getParent()->getParent(); | |||
21122 | bool IsAcquire = isAcquireOrStronger(Ord); | |||
21123 | ||||
21124 | // Since i64 isn't legal and intrinsics don't get type-lowered, the ldrexd | |||
21125 | // intrinsic must return {i32, i32} and we have to recombine them into a | |||
21126 | // single i64 here. | |||
21127 | if (ValueTy->getPrimitiveSizeInBits() == 64) { | |||
21128 | Intrinsic::ID Int = | |||
21129 | IsAcquire ? Intrinsic::arm_ldaexd : Intrinsic::arm_ldrexd; | |||
21130 | Function *Ldrex = Intrinsic::getDeclaration(M, Int); | |||
21131 | ||||
21132 | Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext())); | |||
21133 | Value *LoHi = Builder.CreateCall(Ldrex, Addr, "lohi"); | |||
21134 | ||||
21135 | Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo"); | |||
21136 | Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi"); | |||
21137 | if (!Subtarget->isLittle()) | |||
21138 | std::swap(Lo, Hi); | |||
21139 | Lo = Builder.CreateZExt(Lo, ValueTy, "lo64"); | |||
21140 | Hi = Builder.CreateZExt(Hi, ValueTy, "hi64"); | |||
21141 | return Builder.CreateOr( | |||
21142 | Lo, Builder.CreateShl(Hi, ConstantInt::get(ValueTy, 32)), "val64"); | |||
21143 | } | |||
21144 | ||||
21145 | Type *Tys[] = { Addr->getType() }; | |||
21146 | Intrinsic::ID Int = IsAcquire ? Intrinsic::arm_ldaex : Intrinsic::arm_ldrex; | |||
21147 | Function *Ldrex = Intrinsic::getDeclaration(M, Int, Tys); | |||
21148 | CallInst *CI = Builder.CreateCall(Ldrex, Addr); | |||
21149 | ||||
21150 | CI->addParamAttr( | |||
21151 | 0, Attribute::get(M->getContext(), Attribute::ElementType, ValueTy)); | |||
21152 | return Builder.CreateTruncOrBitCast(CI, ValueTy); | |||
21153 | } | |||
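// For a 64-bit acquire load the 64-bit path above builds IR along these
// lines (a sketch; %addr and the value names are illustrative):
//   %lohi = call { i32, i32 } @llvm.arm.ldaexd(i8* %addr)
//   %lo   = zext i32 (extractvalue %lohi, 0) to i64
//   %hi   = zext i32 (extractvalue %lohi, 1) to i64
//   %val  = or i64 %lo, (shl i64 %hi, 32)
// with %lo/%hi swapped first on big-endian subtargets.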
21154 | ||||
21155 | void ARMTargetLowering::emitAtomicCmpXchgNoStoreLLBalance( | |||
21156 | IRBuilderBase &Builder) const { | |||
21157 | if (!Subtarget->hasV7Ops()) | |||
21158 | return; | |||
21159 | Module *M = Builder.GetInsertBlock()->getParent()->getParent(); | |||
21160 | Builder.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::arm_clrex)); | |||
21161 | } | |||
21162 | ||||
21163 | Value *ARMTargetLowering::emitStoreConditional(IRBuilderBase &Builder, | |||
21164 | Value *Val, Value *Addr, | |||
21165 | AtomicOrdering Ord) const { | |||
21166 | Module *M = Builder.GetInsertBlock()->getParent()->getParent(); | |||
21167 | bool IsRelease = isReleaseOrStronger(Ord); | |||
21168 | ||||
21169 | // Since the intrinsics must have legal type, the i64 intrinsics take two | |||
21170 | // parameters: "i32, i32". We must marshal Val into the appropriate form | |||
21171 | // before the call. | |||
21172 | if (Val->getType()->getPrimitiveSizeInBits() == 64) { | |||
21173 | Intrinsic::ID Int = | |||
21174 | IsRelease ? Intrinsic::arm_stlexd : Intrinsic::arm_strexd; | |||
21175 | Function *Strex = Intrinsic::getDeclaration(M, Int); | |||
21176 | Type *Int32Ty = Type::getInt32Ty(M->getContext()); | |||
21177 | ||||
21178 | Value *Lo = Builder.CreateTrunc(Val, Int32Ty, "lo"); | |||
21179 | Value *Hi = Builder.CreateTrunc(Builder.CreateLShr(Val, 32), Int32Ty, "hi"); | |||
21180 | if (!Subtarget->isLittle()) | |||
21181 | std::swap(Lo, Hi); | |||
21182 | Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext())); | |||
21183 | return Builder.CreateCall(Strex, {Lo, Hi, Addr}); | |||
21184 | } | |||
21185 | ||||
21186 | Intrinsic::ID Int = IsRelease ? Intrinsic::arm_stlex : Intrinsic::arm_strex; | |||
21187 | Type *Tys[] = { Addr->getType() }; | |||
21188 | Function *Strex = Intrinsic::getDeclaration(M, Int, Tys); | |||
21189 | ||||
21190 | CallInst *CI = Builder.CreateCall( | |||
21191 | Strex, {Builder.CreateZExtOrBitCast( | |||
21192 | Val, Strex->getFunctionType()->getParamType(0)), | |||
21193 | Addr}); | |||
21194 | CI->addParamAttr(1, Attribute::get(M->getContext(), Attribute::ElementType, | |||
21195 | Val->getType())); | |||
21196 | return CI; | |||
21197 | } | |||
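// The 64-bit path is the mirror image (a sketch):
//   %lo = trunc i64 %val to i32
//   %hi = trunc i64 (lshr i64 %val, 32) to i32
//   %ok = call i32 @llvm.arm.strexd(i32 %lo, i32 %hi, i8* %addr)
// where %ok is 0 on success and 1 if the exclusive monitor was lost.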
21198 | ||||
21199 | ||||
21200 | bool ARMTargetLowering::alignLoopsWithOptSize() const { | |||
21201 | return Subtarget->isMClass(); | |||
21202 | } | |||
21203 | ||||
21204 | /// A helper function for determining the number of interleaved accesses we | |||
21205 | /// will generate when lowering accesses of the given type. | |||
21206 | unsigned | |||
21207 | ARMTargetLowering::getNumInterleavedAccesses(VectorType *VecTy, | |||
21208 | const DataLayout &DL) const { | |||
21209 | return (DL.getTypeSizeInBits(VecTy) + 127) / 128; | |||
21210 | } | |||
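// This is ceil(bits / 128), i.e. one access per 128-bit chunk. For example,
// a 512-bit vector type gives (512 + 127) / 128 = 4 interleaved accesses,
// while a 64-bit type still counts as one.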
21211 | ||||
21212 | bool ARMTargetLowering::isLegalInterleavedAccessType( | |||
21213 | unsigned Factor, FixedVectorType *VecTy, Align Alignment, | |||
21214 | const DataLayout &DL) const { | |||
21215 | ||||
21216 | unsigned VecSize = DL.getTypeSizeInBits(VecTy); | |||
21217 | unsigned ElSize = DL.getTypeSizeInBits(VecTy->getElementType()); | |||
21218 | ||||
21219 | if (!Subtarget->hasNEON() && !Subtarget->hasMVEIntegerOps()) | |||
21220 | return false; | |||
21221 | ||||
21222 | // Ensure the vector doesn't have f16 elements. Even though we could do an | |||
21223 | // i16 vldN, we can't hold the f16 vectors and will end up converting via | |||
21224 | // f32. | |||
21225 | if (Subtarget->hasNEON() && VecTy->getElementType()->isHalfTy()) | |||
21226 | return false; | |||
21227 | if (Subtarget->hasMVEIntegerOps() && Factor == 3) | |||
21228 | return false; | |||
21229 | ||||
21230 | // Ensure the number of vector elements is greater than 1. | |||
21231 | if (VecTy->getNumElements() < 2) | |||
21232 | return false; | |||
21233 | ||||
21234 | // Ensure the element type is legal. | |||
21235 | if (ElSize != 8 && ElSize != 16 && ElSize != 32) | |||
21236 | return false; | |||
21237 | // And that the alignment is high enough under MVE. | |||
21238 | if (Subtarget->hasMVEIntegerOps() && Alignment < ElSize / 8) | |||
21239 | return false; | |||
21240 | ||||
21241 | // Ensure the total vector size is 64 or a multiple of 128. Types larger than | |||
21242 | // 128 will be split into multiple interleaved accesses. | |||
21243 | if (Subtarget->hasNEON() && VecSize == 64) | |||
21244 | return true; | |||
21245 | return VecSize % 128 == 0; | |||
21246 | } | |||
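// A few concrete cases of the rules above (illustrative): <8 x i16> with
// Factor 2 is 128 bits of legal i16 elements, so a NEON vld2.16 works;
// <8 x half> is rejected on NEON by the f16 check; and Factor 3 is rejected
// under MVE because there is no 3-way MVE deinterleaving load/store.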
21247 | ||||
21248 | unsigned ARMTargetLowering::getMaxSupportedInterleaveFactor() const { | |||
21249 | if (Subtarget->hasNEON()) | |||
21250 | return 4; | |||
21251 | if (Subtarget->hasMVEIntegerOps()) | |||
21252 | return MVEMaxSupportedInterleaveFactor; | |||
21253 | return TargetLoweringBase::getMaxSupportedInterleaveFactor(); | |||
21254 | } | |||
21255 | ||||
21256 | /// Lower an interleaved load into a vldN intrinsic. | |||
21257 | /// | |||
21258 | /// E.g. Lower an interleaved load (Factor = 2): | |||
21259 | /// %wide.vec = load <8 x i32>, <8 x i32>* %ptr, align 4 | |||
21260 | /// %v0 = shuffle %wide.vec, undef, <0, 2, 4, 6> ; Extract even elements | |||
21261 | /// %v1 = shuffle %wide.vec, undef, <1, 3, 5, 7> ; Extract odd elements | |||
21262 | /// | |||
21263 | /// Into: | |||
21264 | /// %vld2 = { <4 x i32>, <4 x i32> } call llvm.arm.neon.vld2(%ptr, 4) | |||
21265 | /// %vec0 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 0 | |||
21266 | /// %vec1 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 1 | |||
21267 | bool ARMTargetLowering::lowerInterleavedLoad( | |||
21268 | LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles, | |||
21269 | ArrayRef<unsigned> Indices, unsigned Factor) const { | |||
21270 | assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() && | |||
21271 | "Invalid interleave factor"); | |||
21272 | assert(!Shuffles.empty() && "Empty shufflevector input"); | |||
21273 | assert(Shuffles.size() == Indices.size() && | |||
21274 | "Unmatched number of shufflevectors and indices"); | |||
21275 | ||||
21276 | auto *VecTy = cast<FixedVectorType>(Shuffles[0]->getType()); | |||
21277 | Type *EltTy = VecTy->getElementType(); | |||
21278 | ||||
21279 | const DataLayout &DL = LI->getModule()->getDataLayout(); | |||
21280 | Align Alignment = LI->getAlign(); | |||
21281 | ||||
21282 | // Skip if we do not have NEON and skip illegal vector types. We can | |||
21283 | // "legalize" wide vector types into multiple interleaved accesses as long as | |||
21284 | // the vector types are divisible by 128. | |||
21285 | if (!isLegalInterleavedAccessType(Factor, VecTy, Alignment, DL)) | |||
21286 | return false; | |||
21287 | ||||
21288 | unsigned NumLoads = getNumInterleavedAccesses(VecTy, DL); | |||
21289 | ||||
21290 | // A pointer vector cannot be the return type of the ldN intrinsics. Need to | |||
21291 | // load integer vectors first and then convert to pointer vectors. | |||
21292 | if (EltTy->isPointerTy()) | |||
21293 | VecTy = FixedVectorType::get(DL.getIntPtrType(EltTy), VecTy); | |||
21294 | ||||
21295 | IRBuilder<> Builder(LI); | |||
21296 | ||||
21297 | // The base address of the load. | |||
21298 | Value *BaseAddr = LI->getPointerOperand(); | |||
21299 | ||||
21300 | if (NumLoads > 1) { | |||
21301 | // If we're going to generate more than one load, reset the sub-vector type | |||
21302 | // to something legal. | |||
21303 | VecTy = FixedVectorType::get(VecTy->getElementType(), | |||
21304 | VecTy->getNumElements() / NumLoads); | |||
21305 | ||||
21306 | // We will compute the pointer operand of each load from the original base | |||
21307 | // address using GEPs. Cast the base address to a pointer to the scalar | |||
21308 | // element type. | |||
21309 | BaseAddr = Builder.CreateBitCast( | |||
21310 | BaseAddr, | |||
21311 | VecTy->getElementType()->getPointerTo(LI->getPointerAddressSpace())); | |||
21312 | } | |||
21313 | ||||
21314 | assert(isTypeLegal(EVT::getEVT(VecTy)) && "Illegal vldN vector type!"); | |||
21315 | ||||
21316 | auto createLoadIntrinsic = [&](Value *BaseAddr) { | |||
21317 | if (Subtarget->hasNEON()) { | |||
21318 | Type *Int8Ptr = Builder.getInt8PtrTy(LI->getPointerAddressSpace()); | |||
21319 | Type *Tys[] = {VecTy, Int8Ptr}; | |||
21320 | static const Intrinsic::ID LoadInts[3] = {Intrinsic::arm_neon_vld2, | |||
21321 | Intrinsic::arm_neon_vld3, | |||
21322 | Intrinsic::arm_neon_vld4}; | |||
21323 | Function *VldnFunc = | |||
21324 | Intrinsic::getDeclaration(LI->getModule(), LoadInts[Factor - 2], Tys); | |||
21325 | ||||
21326 | SmallVector<Value *, 2> Ops; | |||
21327 | Ops.push_back(Builder.CreateBitCast(BaseAddr, Int8Ptr)); | |||
21328 | Ops.push_back(Builder.getInt32(LI->getAlignment())); | |||
21329 | ||||
21330 | return Builder.CreateCall(VldnFunc, Ops, "vldN"); | |||
21331 | } else { | |||
21332 | assert((Factor == 2 || Factor == 4) && | |||
21333 | "expected interleave factor of 2 or 4 for MVE"); | |||
21334 | Intrinsic::ID LoadInts = | |||
21335 | Factor == 2 ? Intrinsic::arm_mve_vld2q : Intrinsic::arm_mve_vld4q; | |||
21336 | Type *VecEltTy = | |||
21337 | VecTy->getElementType()->getPointerTo(LI->getPointerAddressSpace()); | |||
21338 | Type *Tys[] = {VecTy, VecEltTy}; | |||
21339 | Function *VldnFunc = | |||
21340 | Intrinsic::getDeclaration(LI->getModule(), LoadInts, Tys); | |||
21341 | ||||
21342 | SmallVector<Value *, 2> Ops; | |||
21343 | Ops.push_back(Builder.CreateBitCast(BaseAddr, VecEltTy)); | |||
21344 | return Builder.CreateCall(VldnFunc, Ops, "vldN"); | |||
21345 | } | |||
21346 | }; | |||
21347 | ||||
21348 | // Holds sub-vectors extracted from the load intrinsic return values. The | |||
21349 | // sub-vectors are associated with the shufflevector instructions they will | |||
21350 | // replace. | |||
21351 | DenseMap<ShuffleVectorInst *, SmallVector<Value *, 4>> SubVecs; | |||
21352 | ||||
21353 | for (unsigned LoadCount = 0; LoadCount < NumLoads; ++LoadCount) { | |||
21354 | // If we're generating more than one load, compute the base address of | |||
21355 | // subsequent loads as an offset from the previous. | |||
21356 | if (LoadCount > 0) | |||
21357 | BaseAddr = Builder.CreateConstGEP1_32(VecTy->getElementType(), BaseAddr, | |||
21358 | VecTy->getNumElements() * Factor); | |||
21359 | ||||
21360 | CallInst *VldN = createLoadIntrinsic(BaseAddr); | |||
21361 | ||||
21362 | // Replace uses of each shufflevector with the corresponding vector loaded | |||
21363 | // by ldN. | |||
21364 | for (unsigned i = 0; i < Shuffles.size(); i++) { | |||
21365 | ShuffleVectorInst *SV = Shuffles[i]; | |||
21366 | unsigned Index = Indices[i]; | |||
21367 | ||||
21368 | Value *SubVec = Builder.CreateExtractValue(VldN, Index); | |||
21369 | ||||
21370 | // Convert the integer vector to pointer vector if the element is pointer. | |||
21371 | if (EltTy->isPointerTy()) | |||
21372 | SubVec = Builder.CreateIntToPtr( | |||
21373 | SubVec, | |||
21374 | FixedVectorType::get(SV->getType()->getElementType(), VecTy)); | |||
21375 | ||||
21376 | SubVecs[SV].push_back(SubVec); | |||
21377 | } | |||
21378 | } | |||
21379 | ||||
21380 | // Replace uses of the shufflevector instructions with the sub-vectors | |||
21381 | // returned by the load intrinsic. If a shufflevector instruction is | |||
21382 | // associated with more than one sub-vector, those sub-vectors will be | |||
21383 | // concatenated into a single wide vector. | |||
21384 | for (ShuffleVectorInst *SVI : Shuffles) { | |||
21385 | auto &SubVec = SubVecs[SVI]; | |||
21386 | auto *WideVec = | |||
21387 | SubVec.size() > 1 ? concatenateVectors(Builder, SubVec) : SubVec[0]; | |||
21388 | SVI->replaceAllUsesWith(WideVec); | |||
21389 | } | |||
21390 | ||||
21391 | return true; | |||
21392 | } | |||
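// Worked example of the NumLoads > 1 path (a sketch): a Factor-2 load whose
// shuffle results are <8 x i32> (256 bits) gives NumLoads = 2, so VecTy is
// reset to <4 x i32> and two vld2 calls are emitted, the second at an
// offset of 4 * 2 = 8 elements from the base; each shufflevector is then
// replaced by the concatenation of its two <4 x i32> sub-vectors.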
21393 | ||||
21394 | /// Lower an interleaved store into a vstN intrinsic. | |||
21395 | /// | |||
21396 | /// E.g. Lower an interleaved store (Factor = 3): | |||
21397 | /// %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1, | |||
21398 | /// <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> | |||
21399 | /// store <12 x i32> %i.vec, <12 x i32>* %ptr, align 4 | |||
21400 | /// | |||
21401 | /// Into: | |||
21402 | /// %sub.v0 = shuffle <8 x i32> %v0, <8 x i32> v1, <0, 1, 2, 3> | |||
21403 | /// %sub.v1 = shuffle <8 x i32> %v0, <8 x i32> v1, <4, 5, 6, 7> | |||
21404 | /// %sub.v2 = shuffle <8 x i32> %v0, <8 x i32> v1, <8, 9, 10, 11> | |||
21405 | /// call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4) | |||
21406 | /// | |||
21407 | /// Note that the new shufflevectors will be removed and we'll only generate one | |||
21408 | /// vst3 instruction in CodeGen. | |||
21409 | /// | |||
21410 | /// Example for a more general valid mask (Factor 3). Lower: | |||
21411 | /// %i.vec = shuffle <32 x i32> %v0, <32 x i32> %v1, | |||
21412 | /// <4, 32, 16, 5, 33, 17, 6, 34, 18, 7, 35, 19> | |||
21413 | /// store <12 x i32> %i.vec, <12 x i32>* %ptr | |||
21414 | /// | |||
21415 | /// Into: | |||
21416 | /// %sub.v0 = shuffle <32 x i32> %v0, <32 x i32> v1, <4, 5, 6, 7> | |||
21417 | /// %sub.v1 = shuffle <32 x i32> %v0, <32 x i32> v1, <32, 33, 34, 35> | |||
21418 | /// %sub.v2 = shuffle <32 x i32> %v0, <32 x i32> v1, <16, 17, 18, 19> | |||
21419 | /// call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4) | |||
21420 | bool ARMTargetLowering::lowerInterleavedStore(StoreInst *SI, | |||
21421 | ShuffleVectorInst *SVI, | |||
21422 | unsigned Factor) const { | |||
21423 | assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() && | |||
21424 | "Invalid interleave factor"); | |||
21425 | ||||
21426 | auto *VecTy = cast<FixedVectorType>(SVI->getType()); | |||
21427 | assert(VecTy->getNumElements() % Factor == 0 && "Invalid interleaved store"); | |||
21428 | ||||
21429 | unsigned LaneLen = VecTy->getNumElements() / Factor; | |||
21430 | Type *EltTy = VecTy->getElementType(); | |||
21431 | auto *SubVecTy = FixedVectorType::get(EltTy, LaneLen); | |||
21432 | ||||
21433 | const DataLayout &DL = SI->getModule()->getDataLayout(); | |||
21434 | Align Alignment = SI->getAlign(); | |||
21435 | ||||
21436 | // Skip if we do not have NEON and skip illegal vector types. We can | |||
21437 | // "legalize" wide vector types into multiple interleaved accesses as long as | |||
21438 | // the vector types are divisible by 128. | |||
21439 | if (!isLegalInterleavedAccessType(Factor, SubVecTy, Alignment, DL)) | |||
21440 | return false; | |||
21441 | ||||
21442 | unsigned NumStores = getNumInterleavedAccesses(SubVecTy, DL); | |||
21443 | ||||
21444 | Value *Op0 = SVI->getOperand(0); | |||
21445 | Value *Op1 = SVI->getOperand(1); | |||
21446 | IRBuilder<> Builder(SI); | |||
21447 | ||||
21448 | // StN intrinsics don't support pointer vectors as arguments. Convert pointer | |||
21449 | // vectors to integer vectors. | |||
21450 | if (EltTy->isPointerTy()) { | |||
21451 | Type *IntTy = DL.getIntPtrType(EltTy); | |||
21452 | ||||
21453 | // Convert to the corresponding integer vector. | |||
21454 | auto *IntVecTy = | |||
21455 | FixedVectorType::get(IntTy, cast<FixedVectorType>(Op0->getType())); | |||
21456 | Op0 = Builder.CreatePtrToInt(Op0, IntVecTy); | |||
21457 | Op1 = Builder.CreatePtrToInt(Op1, IntVecTy); | |||
21458 | ||||
21459 | SubVecTy = FixedVectorType::get(IntTy, LaneLen); | |||
21460 | } | |||
21461 | ||||
21462 | // The base address of the store. | |||
21463 | Value *BaseAddr = SI->getPointerOperand(); | |||
21464 | ||||
21465 | if (NumStores > 1) { | |||
21466 | // If we're going to generate more than one store, reset the lane length | |||
21467 | // and sub-vector type to something legal. | |||
21468 | LaneLen /= NumStores; | |||
21469 | SubVecTy = FixedVectorType::get(SubVecTy->getElementType(), LaneLen); | |||
21470 | ||||
21471 | // We will compute the pointer operand of each store from the original base | |||
21472 | // address using GEPs. Cast the base address to a pointer to the scalar | |||
21473 | // element type. | |||
21474 | BaseAddr = Builder.CreateBitCast( | |||
21475 | BaseAddr, | |||
21476 | SubVecTy->getElementType()->getPointerTo(SI->getPointerAddressSpace())); | |||
21477 | } | |||
21478 | ||||
21479 | assert(isTypeLegal(EVT::getEVT(SubVecTy)) && "Illegal vstN vector type!"); | |||
21480 | ||||
21481 | auto Mask = SVI->getShuffleMask(); | |||
21482 | ||||
21483 | auto createStoreIntrinsic = [&](Value *BaseAddr, | |||
21484 | SmallVectorImpl<Value *> &Shuffles) { | |||
21485 | if (Subtarget->hasNEON()) { | |||
21486 | static const Intrinsic::ID StoreInts[3] = {Intrinsic::arm_neon_vst2, | |||
21487 | Intrinsic::arm_neon_vst3, | |||
21488 | Intrinsic::arm_neon_vst4}; | |||
21489 | Type *Int8Ptr = Builder.getInt8PtrTy(SI->getPointerAddressSpace()); | |||
21490 | Type *Tys[] = {Int8Ptr, SubVecTy}; | |||
21491 | ||||
21492 | Function *VstNFunc = Intrinsic::getDeclaration( | |||
21493 | SI->getModule(), StoreInts[Factor - 2], Tys); | |||
21494 | ||||
21495 | SmallVector<Value *, 6> Ops; | |||
21496 | Ops.push_back(Builder.CreateBitCast(BaseAddr, Int8Ptr)); | |||
21497 | append_range(Ops, Shuffles); | |||
21498 | Ops.push_back(Builder.getInt32(SI->getAlignment())); | |||
21499 | Builder.CreateCall(VstNFunc, Ops); | |||
21500 | } else { | |||
21501 | assert((Factor == 2 || Factor == 4) && | |||
21502 | "expected interleave factor of 2 or 4 for MVE"); | |||
21503 | Intrinsic::ID StoreInts = | |||
21504 | Factor == 2 ? Intrinsic::arm_mve_vst2q : Intrinsic::arm_mve_vst4q; | |||
21505 | Type *EltPtrTy = SubVecTy->getElementType()->getPointerTo( | |||
21506 | SI->getPointerAddressSpace()); | |||
21507 | Type *Tys[] = {EltPtrTy, SubVecTy}; | |||
21508 | Function *VstNFunc = | |||
21509 | Intrinsic::getDeclaration(SI->getModule(), StoreInts, Tys); | |||
21510 | ||||
21511 | SmallVector<Value *, 6> Ops; | |||
21512 | Ops.push_back(Builder.CreateBitCast(BaseAddr, EltPtrTy)); | |||
21513 | append_range(Ops, Shuffles); | |||
21514 | for (unsigned F = 0; F < Factor; F++) { | |||
21515 | Ops.push_back(Builder.getInt32(F)); | |||
21516 | Builder.CreateCall(VstNFunc, Ops); | |||
21517 | Ops.pop_back(); | |||
21518 | } | |||
21519 | } | |||
21520 | }; | |||
21521 | ||||
21522 | for (unsigned StoreCount = 0; StoreCount < NumStores; ++StoreCount) { | |||
21523 | // If we're generating more than one store, compute the base address of | |||
21524 | // subsequent stores as an offset from the previous. | |||
21525 | if (StoreCount > 0) | |||
21526 | BaseAddr = Builder.CreateConstGEP1_32(SubVecTy->getElementType(), | |||
21527 | BaseAddr, LaneLen * Factor); | |||
21528 | ||||
21529 | SmallVector<Value *, 4> Shuffles; | |||
21530 | ||||
21531 | // Split the shufflevector operands into sub vectors for the new vstN call. | |||
21532 | for (unsigned i = 0; i < Factor; i++) { | |||
21533 | unsigned IdxI = StoreCount * LaneLen * Factor + i; | |||
21534 | if (Mask[IdxI] >= 0) { | |||
21535 | Shuffles.push_back(Builder.CreateShuffleVector( | |||
21536 | Op0, Op1, createSequentialMask(Mask[IdxI], LaneLen, 0))); | |||
21537 | } else { | |||
21538 | unsigned StartMask = 0; | |||
21539 | for (unsigned j = 1; j < LaneLen; j++) { | |||
21540 | unsigned IdxJ = StoreCount * LaneLen * Factor + j; | |||
21541 | if (Mask[IdxJ * Factor + IdxI] >= 0) { | |||
21542 | StartMask = Mask[IdxJ * Factor + IdxI] - IdxJ; | |||
21543 | break; | |||
21544 | } | |||
21545 | } | |||
21546 | // Note: if all elements in a chunk are undef, StartMask stays 0. | |||
21547 | // Filling undef gaps with arbitrary elements is fine, since those | |||
21548 | // elements were being written anyway (as undef). | |||
21549 | // In the all-undef case we default to using elements from index 0. | |||
21550 | // Note: StartMask cannot be negative; that is checked in | |||
21551 | // isReInterleaveMask. | |||
21552 | Shuffles.push_back(Builder.CreateShuffleVector( | |||
21553 | Op0, Op1, createSequentialMask(StartMask, LaneLen, 0))); | |||
21554 | } | |||
21555 | } | |||
21556 | ||||
21557 | createStoreIntrinsic(BaseAddr, Shuffles); | |||
21558 | } | |||
21559 | return true; | |||
21560 | } | |||
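// Note that the MVE path above emits one vst2q/vst4q call per factor, with
// the trailing i32 operand (F in the loop) apparently selecting which
// interleaved stage that particular call writes; this is why the operand is
// pushed, the call emitted, and the operand popped again on each iteration.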
21561 | ||||
21562 | enum HABaseType { | |||
21563 | HA_UNKNOWN = 0, | |||
21564 | HA_FLOAT, | |||
21565 | HA_DOUBLE, | |||
21566 | HA_VECT64, | |||
21567 | HA_VECT128 | |||
21568 | }; | |||
21569 | ||||
21570 | static bool isHomogeneousAggregate(Type *Ty, HABaseType &Base, | |||
21571 | uint64_t &Members) { | |||
21572 | if (auto *ST = dyn_cast<StructType>(Ty)) { | |||
21573 | for (unsigned i = 0; i < ST->getNumElements(); ++i) { | |||
21574 | uint64_t SubMembers = 0; | |||
21575 | if (!isHomogeneousAggregate(ST->getElementType(i), Base, SubMembers)) | |||
21576 | return false; | |||
21577 | Members += SubMembers; | |||
21578 | } | |||
21579 | } else if (auto *AT = dyn_cast<ArrayType>(Ty)) { | |||
21580 | uint64_t SubMembers = 0; | |||
21581 | if (!isHomogeneousAggregate(AT->getElementType(), Base, SubMembers)) | |||
21582 | return false; | |||
21583 | Members += SubMembers * AT->getNumElements(); | |||
21584 | } else if (Ty->isFloatTy()) { | |||
21585 | if (Base != HA_UNKNOWN && Base != HA_FLOAT) | |||
21586 | return false; | |||
21587 | Members = 1; | |||
21588 | Base = HA_FLOAT; | |||
21589 | } else if (Ty->isDoubleTy()) { | |||
21590 | if (Base != HA_UNKNOWN && Base != HA_DOUBLE) | |||
21591 | return false; | |||
21592 | Members = 1; | |||
21593 | Base = HA_DOUBLE; | |||
21594 | } else if (auto *VT = dyn_cast<VectorType>(Ty)) { | |||
21595 | Members = 1; | |||
21596 | switch (Base) { | |||
21597 | case HA_FLOAT: | |||
21598 | case HA_DOUBLE: | |||
21599 | return false; | |||
21600 | case HA_VECT64: | |||
21601 | return VT->getPrimitiveSizeInBits().getFixedSize() == 64; | |||
21602 | case HA_VECT128: | |||
21603 | return VT->getPrimitiveSizeInBits().getFixedSize() == 128; | |||
21604 | case HA_UNKNOWN: | |||
21605 | switch (VT->getPrimitiveSizeInBits().getFixedSize()) { | |||
21606 | case 64: | |||
21607 | Base = HA_VECT64; | |||
21608 | return true; | |||
21609 | case 128: | |||
21610 | Base = HA_VECT128; | |||
21611 | return true; | |||
21612 | default: | |||
21613 | return false; | |||
21614 | } | |||
21615 | } | |||
21616 | } | |||
21617 | ||||
21618 | return (Members > 0 && Members <= 4); | |||
21619 | } | |||
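// Examples of what this accepts (following the checks above): a struct of
// three floats is an HA with Base = HA_FLOAT and Members = 3; a
// [4 x double] array is an HA with four members; struct { float; double; }
// fails the base-type match; and anything with more than four members is
// rejected by the final check.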
21620 | ||||
21621 | /// Return the correct alignment for the current calling convention. | |||
21622 | Align ARMTargetLowering::getABIAlignmentForCallingConv( | |||
21623 | Type *ArgTy, const DataLayout &DL) const { | |||
21624 | const Align ABITypeAlign = DL.getABITypeAlign(ArgTy); | |||
21625 | if (!ArgTy->isVectorTy()) | |||
21626 | return ABITypeAlign; | |||
21627 | ||||
21628 | // Avoid over-aligning vector parameters. It would require realigning the | |||
21629 | // stack and waste space for no real benefit. | |||
21630 | return std::min(ABITypeAlign, DL.getStackAlignment()); | |||
21631 | } | |||
21632 | ||||
21633 | /// Return true if a type is an AAPCS-VFP homogeneous aggregate or one of | |||
21634 | /// [N x i32] or [N x i64]. This allows front-ends to skip emitting padding when | |||
21635 | /// passing according to AAPCS rules. | |||
21636 | bool ARMTargetLowering::functionArgumentNeedsConsecutiveRegisters( | |||
21637 | Type *Ty, CallingConv::ID CallConv, bool isVarArg, | |||
21638 | const DataLayout &DL) const { | |||
21639 | if (getEffectiveCallingConv(CallConv, isVarArg) != | |||
21640 | CallingConv::ARM_AAPCS_VFP) | |||
21641 | return false; | |||
21642 | ||||
21643 | HABaseType Base = HA_UNKNOWN; | |||
21644 | uint64_t Members = 0; | |||
21645 | bool IsHA = isHomogeneousAggregate(Ty, Base, Members); | |||
21646 | LLVM_DEBUG(dbgs() << "isHA: " << IsHA << " "; Ty->dump()); | |||
21647 | ||||
21648 | bool IsIntArray = Ty->isArrayTy() && Ty->getArrayElementType()->isIntegerTy(); | |||
21649 | return IsHA || IsIntArray; | |||
21650 | } | |||
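// Under AAPCS-VFP such an aggregate must be passed in consecutive registers
// or wholly on the stack, e.g. a three-float HA presumably landing in
// s0-s2; returning true here tells the calling-convention code not to split
// it between registers and the stack. (Illustrative; the exact assignment
// is made by the calling-convention lowering.)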
21651 | ||||
21652 | Register ARMTargetLowering::getExceptionPointerRegister( | |||
21653 | const Constant *PersonalityFn) const { | |||
21654 | // Platforms which do not use SjLj EH may return values in these registers | |||
21655 | // via the personality function. | |||
21656 | return Subtarget->useSjLjEH() ? Register() : ARM::R0; | |||
21657 | } | |||
21658 | ||||
21659 | Register ARMTargetLowering::getExceptionSelectorRegister( | |||
21660 | const Constant *PersonalityFn) const { | |||
21661 | // Platforms which do not use SjLj EH may return values in these registers | |||
21662 | // via the personality function. | |||
21663 | return Subtarget->useSjLjEH() ? Register() : ARM::R1; | |||
21664 | } | |||
21665 | ||||
21666 | void ARMTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const { | |||
21667 | // Update IsSplitCSR in ARMFunctionInfo. | |||
21668 | ARMFunctionInfo *AFI = Entry->getParent()->getInfo<ARMFunctionInfo>(); | |||
21669 | AFI->setIsSplitCSR(true); | |||
21670 | } | |||
21671 | ||||
21672 | void ARMTargetLowering::insertCopiesSplitCSR( | |||
21673 | MachineBasicBlock *Entry, | |||
21674 | const SmallVectorImpl<MachineBasicBlock *> &Exits) const { | |||
21675 | const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo(); | |||
21676 | const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent()); | |||
21677 | if (!IStart) | |||
21678 | return; | |||
21679 | ||||
21680 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); | |||
21681 | MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo(); | |||
21682 | MachineBasicBlock::iterator MBBI = Entry->begin(); | |||
21683 | for (const MCPhysReg *I = IStart; *I; ++I) { | |||
21684 | const TargetRegisterClass *RC = nullptr; | |||
21685 | if (ARM::GPRRegClass.contains(*I)) | |||
21686 | RC = &ARM::GPRRegClass; | |||
21687 | else if (ARM::DPRRegClass.contains(*I)) | |||
21688 | RC = &ARM::DPRRegClass; | |||
21689 | else | |||
21690 | llvm_unreachable("Unexpected register class in CSRsViaCopy!")::llvm::llvm_unreachable_internal("Unexpected register class in CSRsViaCopy!" , "llvm/lib/Target/ARM/ARMISelLowering.cpp", 21690); | |||
21691 | ||||
21692 | Register NewVR = MRI->createVirtualRegister(RC); | |||
21693 | // Create copy from CSR to a virtual register. | |||
21694 | // FIXME: this currently does not emit CFI pseudo-instructions, it works | |||
21695 | // fine for CXX_FAST_TLS since the C++-style TLS access functions should be | |||
21696 | // nounwind. If we want to generalize this later, we may need to emit | |||
21697 | // CFI pseudo-instructions. | |||
21698 | assert(Entry->getParent()->getFunction().hasFnAttribute( | |||
21699 | Attribute::NoUnwind) && | |||
21700 | "Function should be nounwind in insertCopiesSplitCSR!"); | |||
21701 | Entry->addLiveIn(*I); | |||
21702 | BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR) | |||
21703 | .addReg(*I); | |||
21704 | ||||
21705 | // Insert the copy-back instructions right before the terminator. | |||
21706 | for (auto *Exit : Exits) | |||
21707 | BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(), | |||
21708 | TII->get(TargetOpcode::COPY), *I) | |||
21709 | .addReg(NewVR); | |||
21710 | } | |||
21711 | } | |||
21712 | ||||
21713 | void ARMTargetLowering::finalizeLowering(MachineFunction &MF) const { | |||
21714 | MF.getFrameInfo().computeMaxCallFrameSize(MF); | |||
21715 | TargetLoweringBase::finalizeLowering(MF); | |||
21716 | } |