File: | lib/Target/ARM/ARMISelLowering.cpp |
Warning: | line 245, column 20 Excessive padding in 'struct (anonymous at /build/llvm-toolchain-snapshot-7~svn329677/lib/Target/ARM/ARMISelLowering.cpp:245:20)' (8 padding bytes, where 0 is optimal). Optimal fields order: Name, Op, Cond, consider reordering the fields or adding explicit padding members |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
1 | //===- ARMISelLowering.cpp - ARM DAG Lowering Implementation --------------===// |
2 | // |
3 | // The LLVM Compiler Infrastructure |
4 | // |
5 | // This file is distributed under the University of Illinois Open Source |
6 | // License. See LICENSE.TXT for details. |
7 | // |
8 | //===----------------------------------------------------------------------===// |
9 | // |
10 | // This file defines the interfaces that ARM uses to lower LLVM code into a |
11 | // selection DAG. |
12 | // |
13 | //===----------------------------------------------------------------------===// |
14 | |
15 | #include "ARMISelLowering.h" |
16 | #include "ARMBaseInstrInfo.h" |
17 | #include "ARMBaseRegisterInfo.h" |
18 | #include "ARMCallingConv.h" |
19 | #include "ARMConstantPoolValue.h" |
20 | #include "ARMMachineFunctionInfo.h" |
21 | #include "ARMPerfectShuffle.h" |
22 | #include "ARMRegisterInfo.h" |
23 | #include "ARMSelectionDAGInfo.h" |
24 | #include "ARMSubtarget.h" |
25 | #include "MCTargetDesc/ARMAddressingModes.h" |
26 | #include "MCTargetDesc/ARMBaseInfo.h" |
27 | #include "Utils/ARMBaseInfo.h" |
28 | #include "llvm/ADT/APFloat.h" |
29 | #include "llvm/ADT/APInt.h" |
30 | #include "llvm/ADT/ArrayRef.h" |
31 | #include "llvm/ADT/BitVector.h" |
32 | #include "llvm/ADT/DenseMap.h" |
33 | #include "llvm/ADT/STLExtras.h" |
34 | #include "llvm/ADT/SmallPtrSet.h" |
35 | #include "llvm/ADT/SmallVector.h" |
36 | #include "llvm/ADT/Statistic.h" |
37 | #include "llvm/ADT/StringExtras.h" |
38 | #include "llvm/ADT/StringRef.h" |
39 | #include "llvm/ADT/StringSwitch.h" |
40 | #include "llvm/ADT/Triple.h" |
41 | #include "llvm/ADT/Twine.h" |
42 | #include "llvm/Analysis/VectorUtils.h" |
43 | #include "llvm/CodeGen/CallingConvLower.h" |
44 | #include "llvm/CodeGen/ISDOpcodes.h" |
45 | #include "llvm/CodeGen/IntrinsicLowering.h" |
46 | #include "llvm/CodeGen/MachineBasicBlock.h" |
47 | #include "llvm/CodeGen/MachineConstantPool.h" |
48 | #include "llvm/CodeGen/MachineFrameInfo.h" |
49 | #include "llvm/CodeGen/MachineFunction.h" |
50 | #include "llvm/CodeGen/MachineInstr.h" |
51 | #include "llvm/CodeGen/MachineInstrBuilder.h" |
52 | #include "llvm/CodeGen/MachineJumpTableInfo.h" |
53 | #include "llvm/CodeGen/MachineMemOperand.h" |
54 | #include "llvm/CodeGen/MachineOperand.h" |
55 | #include "llvm/CodeGen/MachineRegisterInfo.h" |
56 | #include "llvm/CodeGen/RuntimeLibcalls.h" |
57 | #include "llvm/CodeGen/SelectionDAG.h" |
58 | #include "llvm/CodeGen/SelectionDAGNodes.h" |
59 | #include "llvm/CodeGen/TargetInstrInfo.h" |
60 | #include "llvm/CodeGen/TargetLowering.h" |
61 | #include "llvm/CodeGen/TargetOpcodes.h" |
62 | #include "llvm/CodeGen/TargetRegisterInfo.h" |
63 | #include "llvm/CodeGen/TargetSubtargetInfo.h" |
64 | #include "llvm/CodeGen/ValueTypes.h" |
65 | #include "llvm/IR/Attributes.h" |
66 | #include "llvm/IR/CallingConv.h" |
67 | #include "llvm/IR/Constant.h" |
68 | #include "llvm/IR/Constants.h" |
69 | #include "llvm/IR/DataLayout.h" |
70 | #include "llvm/IR/DebugLoc.h" |
71 | #include "llvm/IR/DerivedTypes.h" |
72 | #include "llvm/IR/Function.h" |
73 | #include "llvm/IR/GlobalAlias.h" |
74 | #include "llvm/IR/GlobalValue.h" |
75 | #include "llvm/IR/GlobalVariable.h" |
76 | #include "llvm/IR/IRBuilder.h" |
77 | #include "llvm/IR/InlineAsm.h" |
78 | #include "llvm/IR/Instruction.h" |
79 | #include "llvm/IR/Instructions.h" |
80 | #include "llvm/IR/IntrinsicInst.h" |
81 | #include "llvm/IR/Intrinsics.h" |
82 | #include "llvm/IR/Module.h" |
83 | #include "llvm/IR/Type.h" |
84 | #include "llvm/IR/User.h" |
85 | #include "llvm/IR/Value.h" |
86 | #include "llvm/MC/MCInstrDesc.h" |
87 | #include "llvm/MC/MCInstrItineraries.h" |
88 | #include "llvm/MC/MCRegisterInfo.h" |
89 | #include "llvm/MC/MCSchedule.h" |
90 | #include "llvm/Support/AtomicOrdering.h" |
91 | #include "llvm/Support/BranchProbability.h" |
92 | #include "llvm/Support/Casting.h" |
93 | #include "llvm/Support/CodeGen.h" |
94 | #include "llvm/Support/CommandLine.h" |
95 | #include "llvm/Support/Compiler.h" |
96 | #include "llvm/Support/Debug.h" |
97 | #include "llvm/Support/ErrorHandling.h" |
98 | #include "llvm/Support/KnownBits.h" |
99 | #include "llvm/Support/MachineValueType.h" |
100 | #include "llvm/Support/MathExtras.h" |
101 | #include "llvm/Support/raw_ostream.h" |
102 | #include "llvm/Target/TargetMachine.h" |
103 | #include "llvm/Target/TargetOptions.h" |
104 | #include <algorithm> |
105 | #include <cassert> |
106 | #include <cstdint> |
107 | #include <cstdlib> |
108 | #include <iterator> |
109 | #include <limits> |
110 | #include <string> |
111 | #include <tuple> |
112 | #include <utility> |
113 | #include <vector> |
114 | |
115 | using namespace llvm; |
116 | |
117 | #define DEBUG_TYPE"arm-isel" "arm-isel" |
118 | |
119 | STATISTIC(NumTailCalls, "Number of tail calls")static llvm::Statistic NumTailCalls = {"arm-isel", "NumTailCalls" , "Number of tail calls", {0}, {false}}; |
120 | STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt")static llvm::Statistic NumMovwMovt = {"arm-isel", "NumMovwMovt" , "Number of GAs materialized with movw + movt", {0}, {false} }; |
121 | STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments")static llvm::Statistic NumLoopByVals = {"arm-isel", "NumLoopByVals" , "Number of loops generated for byval arguments", {0}, {false }}; |
122 | STATISTIC(NumConstpoolPromoted,static llvm::Statistic NumConstpoolPromoted = {"arm-isel", "NumConstpoolPromoted" , "Number of constants with their storage promoted into constant pools" , {0}, {false}} |
123 | "Number of constants with their storage promoted into constant pools")static llvm::Statistic NumConstpoolPromoted = {"arm-isel", "NumConstpoolPromoted" , "Number of constants with their storage promoted into constant pools" , {0}, {false}}; |
124 | |
125 | static cl::opt<bool> |
126 | ARMInterworking("arm-interworking", cl::Hidden, |
127 | cl::desc("Enable / disable ARM interworking (for debugging only)"), |
128 | cl::init(true)); |
129 | |
130 | static cl::opt<bool> EnableConstpoolPromotion( |
131 | "arm-promote-constant", cl::Hidden, |
132 | cl::desc("Enable / disable promotion of unnamed_addr constants into " |
133 | "constant pools"), |
134 | cl::init(false)); // FIXME: set to true by default once PR32780 is fixed |
135 | static cl::opt<unsigned> ConstpoolPromotionMaxSize( |
136 | "arm-promote-constant-max-size", cl::Hidden, |
137 | cl::desc("Maximum size of constant to promote into a constant pool"), |
138 | cl::init(64)); |
139 | static cl::opt<unsigned> ConstpoolPromotionMaxTotal( |
140 | "arm-promote-constant-max-total", cl::Hidden, |
141 | cl::desc("Maximum size of ALL constants to promote into a constant pool"), |
142 | cl::init(128)); |
143 | |
144 | // The APCS parameter registers. |
145 | static const MCPhysReg GPRArgRegs[] = { |
146 | ARM::R0, ARM::R1, ARM::R2, ARM::R3 |
147 | }; |
148 | |
149 | void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT, |
150 | MVT PromotedBitwiseVT) { |
151 | if (VT != PromotedLdStVT) { |
152 | setOperationAction(ISD::LOAD, VT, Promote); |
153 | AddPromotedToType (ISD::LOAD, VT, PromotedLdStVT); |
154 | |
155 | setOperationAction(ISD::STORE, VT, Promote); |
156 | AddPromotedToType (ISD::STORE, VT, PromotedLdStVT); |
157 | } |
158 | |
159 | MVT ElemTy = VT.getVectorElementType(); |
160 | if (ElemTy != MVT::f64) |
161 | setOperationAction(ISD::SETCC, VT, Custom); |
162 | setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); |
163 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); |
164 | if (ElemTy == MVT::i32) { |
165 | setOperationAction(ISD::SINT_TO_FP, VT, Custom); |
166 | setOperationAction(ISD::UINT_TO_FP, VT, Custom); |
167 | setOperationAction(ISD::FP_TO_SINT, VT, Custom); |
168 | setOperationAction(ISD::FP_TO_UINT, VT, Custom); |
169 | } else { |
170 | setOperationAction(ISD::SINT_TO_FP, VT, Expand); |
171 | setOperationAction(ISD::UINT_TO_FP, VT, Expand); |
172 | setOperationAction(ISD::FP_TO_SINT, VT, Expand); |
173 | setOperationAction(ISD::FP_TO_UINT, VT, Expand); |
174 | } |
175 | setOperationAction(ISD::BUILD_VECTOR, VT, Custom); |
176 | setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); |
177 | setOperationAction(ISD::CONCAT_VECTORS, VT, Legal); |
178 | setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal); |
179 | setOperationAction(ISD::SELECT, VT, Expand); |
180 | setOperationAction(ISD::SELECT_CC, VT, Expand); |
181 | setOperationAction(ISD::VSELECT, VT, Expand); |
182 | setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand); |
183 | if (VT.isInteger()) { |
184 | setOperationAction(ISD::SHL, VT, Custom); |
185 | setOperationAction(ISD::SRA, VT, Custom); |
186 | setOperationAction(ISD::SRL, VT, Custom); |
187 | } |
188 | |
189 | // Promote all bit-wise operations. |
190 | if (VT.isInteger() && VT != PromotedBitwiseVT) { |
191 | setOperationAction(ISD::AND, VT, Promote); |
192 | AddPromotedToType (ISD::AND, VT, PromotedBitwiseVT); |
193 | setOperationAction(ISD::OR, VT, Promote); |
194 | AddPromotedToType (ISD::OR, VT, PromotedBitwiseVT); |
195 | setOperationAction(ISD::XOR, VT, Promote); |
196 | AddPromotedToType (ISD::XOR, VT, PromotedBitwiseVT); |
197 | } |
198 | |
199 | // Neon does not support vector divide/remainder operations. |
200 | setOperationAction(ISD::SDIV, VT, Expand); |
201 | setOperationAction(ISD::UDIV, VT, Expand); |
202 | setOperationAction(ISD::FDIV, VT, Expand); |
203 | setOperationAction(ISD::SREM, VT, Expand); |
204 | setOperationAction(ISD::UREM, VT, Expand); |
205 | setOperationAction(ISD::FREM, VT, Expand); |
206 | |
207 | if (!VT.isFloatingPoint() && |
208 | VT != MVT::v2i64 && VT != MVT::v1i64) |
209 | for (auto Opcode : {ISD::ABS, ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}) |
210 | setOperationAction(Opcode, VT, Legal); |
211 | } |
212 | |
213 | void ARMTargetLowering::addDRTypeForNEON(MVT VT) { |
214 | addRegisterClass(VT, &ARM::DPRRegClass); |
215 | addTypeForNEON(VT, MVT::f64, MVT::v2i32); |
216 | } |
217 | |
218 | void ARMTargetLowering::addQRTypeForNEON(MVT VT) { |
219 | addRegisterClass(VT, &ARM::DPairRegClass); |
220 | addTypeForNEON(VT, MVT::v2f64, MVT::v4i32); |
221 | } |
222 | |
223 | ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM, |
224 | const ARMSubtarget &STI) |
225 | : TargetLowering(TM), Subtarget(&STI) { |
226 | RegInfo = Subtarget->getRegisterInfo(); |
227 | Itins = Subtarget->getInstrItineraryData(); |
228 | |
229 | setBooleanContents(ZeroOrOneBooleanContent); |
230 | setBooleanVectorContents(ZeroOrNegativeOneBooleanContent); |
231 | |
232 | if (!Subtarget->isTargetDarwin() && !Subtarget->isTargetIOS() && |
233 | !Subtarget->isTargetWatchOS()) { |
234 | bool IsHFTarget = TM.Options.FloatABIType == FloatABI::Hard; |
235 | for (int LCID = 0; LCID < RTLIB::UNKNOWN_LIBCALL; ++LCID) |
236 | setLibcallCallingConv(static_cast<RTLIB::Libcall>(LCID), |
237 | IsHFTarget ? CallingConv::ARM_AAPCS_VFP |
238 | : CallingConv::ARM_AAPCS); |
239 | } |
240 | |
241 | if (Subtarget->isTargetMachO()) { |
242 | // Uses VFP for Thumb libfuncs if available. |
243 | if (Subtarget->isThumb() && Subtarget->hasVFP2() && |
244 | Subtarget->hasARMOps() && !Subtarget->useSoftFloat()) { |
245 | static const struct { |
Excessive padding in 'struct (anonymous at /build/llvm-toolchain-snapshot-7~svn329677/lib/Target/ARM/ARMISelLowering.cpp:245:20)' (8 padding bytes, where 0 is optimal).
Optimal fields order:
Name,
Op,
Cond,
consider reordering the fields or adding explicit padding members | |
246 | const RTLIB::Libcall Op; |
247 | const char * const Name; |
248 | const ISD::CondCode Cond; |
249 | } LibraryCalls[] = { |
250 | // Single-precision floating-point arithmetic. |
251 | { RTLIB::ADD_F32, "__addsf3vfp", ISD::SETCC_INVALID }, |
252 | { RTLIB::SUB_F32, "__subsf3vfp", ISD::SETCC_INVALID }, |
253 | { RTLIB::MUL_F32, "__mulsf3vfp", ISD::SETCC_INVALID }, |
254 | { RTLIB::DIV_F32, "__divsf3vfp", ISD::SETCC_INVALID }, |
255 | |
256 | // Double-precision floating-point arithmetic. |
257 | { RTLIB::ADD_F64, "__adddf3vfp", ISD::SETCC_INVALID }, |
258 | { RTLIB::SUB_F64, "__subdf3vfp", ISD::SETCC_INVALID }, |
259 | { RTLIB::MUL_F64, "__muldf3vfp", ISD::SETCC_INVALID }, |
260 | { RTLIB::DIV_F64, "__divdf3vfp", ISD::SETCC_INVALID }, |
261 | |
262 | // Single-precision comparisons. |
263 | { RTLIB::OEQ_F32, "__eqsf2vfp", ISD::SETNE }, |
264 | { RTLIB::UNE_F32, "__nesf2vfp", ISD::SETNE }, |
265 | { RTLIB::OLT_F32, "__ltsf2vfp", ISD::SETNE }, |
266 | { RTLIB::OLE_F32, "__lesf2vfp", ISD::SETNE }, |
267 | { RTLIB::OGE_F32, "__gesf2vfp", ISD::SETNE }, |
268 | { RTLIB::OGT_F32, "__gtsf2vfp", ISD::SETNE }, |
269 | { RTLIB::UO_F32, "__unordsf2vfp", ISD::SETNE }, |
270 | { RTLIB::O_F32, "__unordsf2vfp", ISD::SETEQ }, |
271 | |
272 | // Double-precision comparisons. |
273 | { RTLIB::OEQ_F64, "__eqdf2vfp", ISD::SETNE }, |
274 | { RTLIB::UNE_F64, "__nedf2vfp", ISD::SETNE }, |
275 | { RTLIB::OLT_F64, "__ltdf2vfp", ISD::SETNE }, |
276 | { RTLIB::OLE_F64, "__ledf2vfp", ISD::SETNE }, |
277 | { RTLIB::OGE_F64, "__gedf2vfp", ISD::SETNE }, |
278 | { RTLIB::OGT_F64, "__gtdf2vfp", ISD::SETNE }, |
279 | { RTLIB::UO_F64, "__unorddf2vfp", ISD::SETNE }, |
280 | { RTLIB::O_F64, "__unorddf2vfp", ISD::SETEQ }, |
281 | |
282 | // Floating-point to integer conversions. |
283 | // i64 conversions are done via library routines even when generating VFP |
284 | // instructions, so use the same ones. |
285 | { RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp", ISD::SETCC_INVALID }, |
286 | { RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp", ISD::SETCC_INVALID }, |
287 | { RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp", ISD::SETCC_INVALID }, |
288 | { RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp", ISD::SETCC_INVALID }, |
289 | |
290 | // Conversions between floating types. |
291 | { RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp", ISD::SETCC_INVALID }, |
292 | { RTLIB::FPEXT_F32_F64, "__extendsfdf2vfp", ISD::SETCC_INVALID }, |
293 | |
294 | // Integer to floating-point conversions. |
295 | // i64 conversions are done via library routines even when generating VFP |
296 | // instructions, so use the same ones. |
297 | // FIXME: There appears to be some naming inconsistency in ARM libgcc: |
298 | // e.g., __floatunsidf vs. __floatunssidfvfp. |
299 | { RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp", ISD::SETCC_INVALID }, |
300 | { RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp", ISD::SETCC_INVALID }, |
301 | { RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp", ISD::SETCC_INVALID }, |
302 | { RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp", ISD::SETCC_INVALID }, |
303 | }; |
304 | |
305 | for (const auto &LC : LibraryCalls) { |
306 | setLibcallName(LC.Op, LC.Name); |
307 | if (LC.Cond != ISD::SETCC_INVALID) |
308 | setCmpLibcallCC(LC.Op, LC.Cond); |
309 | } |
310 | } |
311 | |
312 | // Set the correct calling convention for ARMv7k WatchOS. It's just |
313 | // AAPCS_VFP for functions as simple as libcalls. |
314 | if (Subtarget->isTargetWatchABI()) { |
315 | for (int i = 0; i < RTLIB::UNKNOWN_LIBCALL; ++i) |
316 | setLibcallCallingConv((RTLIB::Libcall)i, CallingConv::ARM_AAPCS_VFP); |
317 | } |
318 | } |
319 | |
320 | // These libcalls are not available in 32-bit. |
321 | setLibcallName(RTLIB::SHL_I128, nullptr); |
322 | setLibcallName(RTLIB::SRL_I128, nullptr); |
323 | setLibcallName(RTLIB::SRA_I128, nullptr); |
324 | |
325 | // RTLIB |
326 | if (Subtarget->isAAPCS_ABI() && |
327 | (Subtarget->isTargetAEABI() || Subtarget->isTargetGNUAEABI() || |
328 | Subtarget->isTargetMuslAEABI() || Subtarget->isTargetAndroid())) { |
329 | static const struct { |
330 | const RTLIB::Libcall Op; |
331 | const char * const Name; |
332 | const CallingConv::ID CC; |
333 | const ISD::CondCode Cond; |
334 | } LibraryCalls[] = { |
335 | // Double-precision floating-point arithmetic helper functions |
336 | // RTABI chapter 4.1.2, Table 2 |
337 | { RTLIB::ADD_F64, "__aeabi_dadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, |
338 | { RTLIB::DIV_F64, "__aeabi_ddiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, |
339 | { RTLIB::MUL_F64, "__aeabi_dmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, |
340 | { RTLIB::SUB_F64, "__aeabi_dsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, |
341 | |
342 | // Double-precision floating-point comparison helper functions |
343 | // RTABI chapter 4.1.2, Table 3 |
344 | { RTLIB::OEQ_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE }, |
345 | { RTLIB::UNE_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ }, |
346 | { RTLIB::OLT_F64, "__aeabi_dcmplt", CallingConv::ARM_AAPCS, ISD::SETNE }, |
347 | { RTLIB::OLE_F64, "__aeabi_dcmple", CallingConv::ARM_AAPCS, ISD::SETNE }, |
348 | { RTLIB::OGE_F64, "__aeabi_dcmpge", CallingConv::ARM_AAPCS, ISD::SETNE }, |
349 | { RTLIB::OGT_F64, "__aeabi_dcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE }, |
350 | { RTLIB::UO_F64, "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETNE }, |
351 | { RTLIB::O_F64, "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETEQ }, |
352 | |
353 | // Single-precision floating-point arithmetic helper functions |
354 | // RTABI chapter 4.1.2, Table 4 |
355 | { RTLIB::ADD_F32, "__aeabi_fadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, |
356 | { RTLIB::DIV_F32, "__aeabi_fdiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, |
357 | { RTLIB::MUL_F32, "__aeabi_fmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, |
358 | { RTLIB::SUB_F32, "__aeabi_fsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, |
359 | |
360 | // Single-precision floating-point comparison helper functions |
361 | // RTABI chapter 4.1.2, Table 5 |
362 | { RTLIB::OEQ_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE }, |
363 | { RTLIB::UNE_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ }, |
364 | { RTLIB::OLT_F32, "__aeabi_fcmplt", CallingConv::ARM_AAPCS, ISD::SETNE }, |
365 | { RTLIB::OLE_F32, "__aeabi_fcmple", CallingConv::ARM_AAPCS, ISD::SETNE }, |
366 | { RTLIB::OGE_F32, "__aeabi_fcmpge", CallingConv::ARM_AAPCS, ISD::SETNE }, |
367 | { RTLIB::OGT_F32, "__aeabi_fcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE }, |
368 | { RTLIB::UO_F32, "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETNE }, |
369 | { RTLIB::O_F32, "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETEQ }, |
370 | |
371 | // Floating-point to integer conversions. |
372 | // RTABI chapter 4.1.2, Table 6 |
373 | { RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, |
374 | { RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, |
375 | { RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, |
376 | { RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, |
377 | { RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, |
378 | { RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, |
379 | { RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, |
380 | { RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, |
381 | |
382 | // Conversions between floating types. |
383 | // RTABI chapter 4.1.2, Table 7 |
384 | { RTLIB::FPROUND_F64_F32, "__aeabi_d2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, |
385 | { RTLIB::FPROUND_F64_F16, "__aeabi_d2h", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, |
386 | { RTLIB::FPEXT_F32_F64, "__aeabi_f2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, |
387 | |
388 | // Integer to floating-point conversions. |
389 | // RTABI chapter 4.1.2, Table 8 |
390 | { RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, |
391 | { RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, |
392 | { RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, |
393 | { RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, |
394 | { RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, |
395 | { RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, |
396 | { RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, |
397 | { RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, |
398 | |
399 | // Long long helper functions |
400 | // RTABI chapter 4.2, Table 9 |
401 | { RTLIB::MUL_I64, "__aeabi_lmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, |
402 | { RTLIB::SHL_I64, "__aeabi_llsl", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, |
403 | { RTLIB::SRL_I64, "__aeabi_llsr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, |
404 | { RTLIB::SRA_I64, "__aeabi_lasr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, |
405 | |
406 | // Integer division functions |
407 | // RTABI chapter 4.3.1 |
408 | { RTLIB::SDIV_I8, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, |
409 | { RTLIB::SDIV_I16, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, |
410 | { RTLIB::SDIV_I32, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, |
411 | { RTLIB::SDIV_I64, "__aeabi_ldivmod", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, |
412 | { RTLIB::UDIV_I8, "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, |
413 | { RTLIB::UDIV_I16, "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, |
414 | { RTLIB::UDIV_I32, "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, |
415 | { RTLIB::UDIV_I64, "__aeabi_uldivmod", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, |
416 | }; |
417 | |
418 | for (const auto &LC : LibraryCalls) { |
419 | setLibcallName(LC.Op, LC.Name); |
420 | setLibcallCallingConv(LC.Op, LC.CC); |
421 | if (LC.Cond != ISD::SETCC_INVALID) |
422 | setCmpLibcallCC(LC.Op, LC.Cond); |
423 | } |
424 | |
425 | // EABI dependent RTLIB |
426 | if (TM.Options.EABIVersion == EABI::EABI4 || |
427 | TM.Options.EABIVersion == EABI::EABI5) { |
428 | static const struct { |
429 | const RTLIB::Libcall Op; |
430 | const char *const Name; |
431 | const CallingConv::ID CC; |
432 | const ISD::CondCode Cond; |
433 | } MemOpsLibraryCalls[] = { |
434 | // Memory operations |
435 | // RTABI chapter 4.3.4 |
436 | { RTLIB::MEMCPY, "__aeabi_memcpy", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, |
437 | { RTLIB::MEMMOVE, "__aeabi_memmove", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, |
438 | { RTLIB::MEMSET, "__aeabi_memset", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, |
439 | }; |
440 | |
441 | for (const auto &LC : MemOpsLibraryCalls) { |
442 | setLibcallName(LC.Op, LC.Name); |
443 | setLibcallCallingConv(LC.Op, LC.CC); |
444 | if (LC.Cond != ISD::SETCC_INVALID) |
445 | setCmpLibcallCC(LC.Op, LC.Cond); |
446 | } |
447 | } |
448 | } |
449 | |
450 | if (Subtarget->isTargetWindows()) { |
451 | static const struct { |
452 | const RTLIB::Libcall Op; |
453 | const char * const Name; |
454 | const CallingConv::ID CC; |
455 | } LibraryCalls[] = { |
456 | { RTLIB::FPTOSINT_F32_I64, "__stoi64", CallingConv::ARM_AAPCS_VFP }, |
457 | { RTLIB::FPTOSINT_F64_I64, "__dtoi64", CallingConv::ARM_AAPCS_VFP }, |
458 | { RTLIB::FPTOUINT_F32_I64, "__stou64", CallingConv::ARM_AAPCS_VFP }, |
459 | { RTLIB::FPTOUINT_F64_I64, "__dtou64", CallingConv::ARM_AAPCS_VFP }, |
460 | { RTLIB::SINTTOFP_I64_F32, "__i64tos", CallingConv::ARM_AAPCS_VFP }, |
461 | { RTLIB::SINTTOFP_I64_F64, "__i64tod", CallingConv::ARM_AAPCS_VFP }, |
462 | { RTLIB::UINTTOFP_I64_F32, "__u64tos", CallingConv::ARM_AAPCS_VFP }, |
463 | { RTLIB::UINTTOFP_I64_F64, "__u64tod", CallingConv::ARM_AAPCS_VFP }, |
464 | }; |
465 | |
466 | for (const auto &LC : LibraryCalls) { |
467 | setLibcallName(LC.Op, LC.Name); |
468 | setLibcallCallingConv(LC.Op, LC.CC); |
469 | } |
470 | } |
471 | |
472 | // Use divmod compiler-rt calls for iOS 5.0 and later. |
473 | if (Subtarget->isTargetMachO() && |
474 | !(Subtarget->isTargetIOS() && |
475 | Subtarget->getTargetTriple().isOSVersionLT(5, 0))) { |
476 | setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4"); |
477 | setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4"); |
478 | } |
479 | |
480 | // The half <-> float conversion functions are always soft-float on |
481 | // non-watchos platforms, but are needed for some targets which use a |
482 | // hard-float calling convention by default. |
483 | if (!Subtarget->isTargetWatchABI()) { |
484 | if (Subtarget->isAAPCS_ABI()) { |
485 | setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_AAPCS); |
486 | setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_AAPCS); |
487 | setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_AAPCS); |
488 | } else { |
489 | setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_APCS); |
490 | setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_APCS); |
491 | setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_APCS); |
492 | } |
493 | } |
494 | |
495 | // In EABI, these functions have an __aeabi_ prefix, but in GNUEABI they have |
496 | // a __gnu_ prefix (which is the default). |
497 | if (Subtarget->isTargetAEABI()) { |
498 | static const struct { |
499 | const RTLIB::Libcall Op; |
500 | const char * const Name; |
501 | const CallingConv::ID CC; |
502 | } LibraryCalls[] = { |
503 | { RTLIB::FPROUND_F32_F16, "__aeabi_f2h", CallingConv::ARM_AAPCS }, |
504 | { RTLIB::FPROUND_F64_F16, "__aeabi_d2h", CallingConv::ARM_AAPCS }, |
505 | { RTLIB::FPEXT_F16_F32, "__aeabi_h2f", CallingConv::ARM_AAPCS }, |
506 | }; |
507 | |
508 | for (const auto &LC : LibraryCalls) { |
509 | setLibcallName(LC.Op, LC.Name); |
510 | setLibcallCallingConv(LC.Op, LC.CC); |
511 | } |
512 | } |
513 | |
514 | if (Subtarget->isThumb1Only()) |
515 | addRegisterClass(MVT::i32, &ARM::tGPRRegClass); |
516 | else |
517 | addRegisterClass(MVT::i32, &ARM::GPRRegClass); |
518 | |
519 | if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2() && |
520 | !Subtarget->isThumb1Only()) { |
521 | addRegisterClass(MVT::f32, &ARM::SPRRegClass); |
522 | addRegisterClass(MVT::f64, &ARM::DPRRegClass); |
523 | } |
524 | |
525 | if (Subtarget->hasFullFP16()) { |
526 | addRegisterClass(MVT::f16, &ARM::HPRRegClass); |
527 | setOperationAction(ISD::BITCAST, MVT::i16, Custom); |
528 | setOperationAction(ISD::BITCAST, MVT::i32, Custom); |
529 | setOperationAction(ISD::BITCAST, MVT::f16, Custom); |
530 | } |
531 | |
532 | for (MVT VT : MVT::vector_valuetypes()) { |
533 | for (MVT InnerVT : MVT::vector_valuetypes()) { |
534 | setTruncStoreAction(VT, InnerVT, Expand); |
535 | setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand); |
536 | setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand); |
537 | setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand); |
538 | } |
539 | |
540 | setOperationAction(ISD::MULHS, VT, Expand); |
541 | setOperationAction(ISD::SMUL_LOHI, VT, Expand); |
542 | setOperationAction(ISD::MULHU, VT, Expand); |
543 | setOperationAction(ISD::UMUL_LOHI, VT, Expand); |
544 | |
545 | setOperationAction(ISD::BSWAP, VT, Expand); |
546 | } |
547 | |
548 | setOperationAction(ISD::ConstantFP, MVT::f32, Custom); |
549 | setOperationAction(ISD::ConstantFP, MVT::f64, Custom); |
550 | |
551 | setOperationAction(ISD::READ_REGISTER, MVT::i64, Custom); |
552 | setOperationAction(ISD::WRITE_REGISTER, MVT::i64, Custom); |
553 | |
554 | if (Subtarget->hasNEON()) { |
555 | addDRTypeForNEON(MVT::v2f32); |
556 | addDRTypeForNEON(MVT::v8i8); |
557 | addDRTypeForNEON(MVT::v4i16); |
558 | addDRTypeForNEON(MVT::v2i32); |
559 | addDRTypeForNEON(MVT::v1i64); |
560 | |
561 | addQRTypeForNEON(MVT::v4f32); |
562 | addQRTypeForNEON(MVT::v2f64); |
563 | addQRTypeForNEON(MVT::v16i8); |
564 | addQRTypeForNEON(MVT::v8i16); |
565 | addQRTypeForNEON(MVT::v4i32); |
566 | addQRTypeForNEON(MVT::v2i64); |
567 | |
568 | if (Subtarget->hasFullFP16()) { |
569 | addQRTypeForNEON(MVT::v8f16); |
570 | addDRTypeForNEON(MVT::v4f16); |
571 | } |
572 | |
573 | // v2f64 is legal so that QR subregs can be extracted as f64 elements, but |
574 | // neither Neon nor VFP support any arithmetic operations on it. |
575 | // The same with v4f32. But keep in mind that vadd, vsub, vmul are natively |
576 | // supported for v4f32. |
577 | setOperationAction(ISD::FADD, MVT::v2f64, Expand); |
578 | setOperationAction(ISD::FSUB, MVT::v2f64, Expand); |
579 | setOperationAction(ISD::FMUL, MVT::v2f64, Expand); |
580 | // FIXME: Code duplication: FDIV and FREM are expanded always, see |
581 | // ARMTargetLowering::addTypeForNEON method for details. |
582 | setOperationAction(ISD::FDIV, MVT::v2f64, Expand); |
583 | setOperationAction(ISD::FREM, MVT::v2f64, Expand); |
584 | // FIXME: Create unittest. |
585 | // In another words, find a way when "copysign" appears in DAG with vector |
586 | // operands. |
587 | setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand); |
588 | // FIXME: Code duplication: SETCC has custom operation action, see |
589 | // ARMTargetLowering::addTypeForNEON method for details. |
590 | setOperationAction(ISD::SETCC, MVT::v2f64, Expand); |
591 | // FIXME: Create unittest for FNEG and for FABS. |
592 | setOperationAction(ISD::FNEG, MVT::v2f64, Expand); |
593 | setOperationAction(ISD::FABS, MVT::v2f64, Expand); |
594 | setOperationAction(ISD::FSQRT, MVT::v2f64, Expand); |
595 | setOperationAction(ISD::FSIN, MVT::v2f64, Expand); |
596 | setOperationAction(ISD::FCOS, MVT::v2f64, Expand); |
597 | setOperationAction(ISD::FPOW, MVT::v2f64, Expand); |
598 | setOperationAction(ISD::FLOG, MVT::v2f64, Expand); |
599 | setOperationAction(ISD::FLOG2, MVT::v2f64, Expand); |
600 | setOperationAction(ISD::FLOG10, MVT::v2f64, Expand); |
601 | setOperationAction(ISD::FEXP, MVT::v2f64, Expand); |
602 | setOperationAction(ISD::FEXP2, MVT::v2f64, Expand); |
603 | // FIXME: Create unittest for FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR. |
604 | setOperationAction(ISD::FCEIL, MVT::v2f64, Expand); |
605 | setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand); |
606 | setOperationAction(ISD::FRINT, MVT::v2f64, Expand); |
607 | setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand); |
608 | setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand); |
609 | setOperationAction(ISD::FMA, MVT::v2f64, Expand); |
610 | |
611 | setOperationAction(ISD::FSQRT, MVT::v4f32, Expand); |
612 | setOperationAction(ISD::FSIN, MVT::v4f32, Expand); |
613 | setOperationAction(ISD::FCOS, MVT::v4f32, Expand); |
614 | setOperationAction(ISD::FPOW, MVT::v4f32, Expand); |
615 | setOperationAction(ISD::FLOG, MVT::v4f32, Expand); |
616 | setOperationAction(ISD::FLOG2, MVT::v4f32, Expand); |
617 | setOperationAction(ISD::FLOG10, MVT::v4f32, Expand); |
618 | setOperationAction(ISD::FEXP, MVT::v4f32, Expand); |
619 | setOperationAction(ISD::FEXP2, MVT::v4f32, Expand); |
620 | setOperationAction(ISD::FCEIL, MVT::v4f32, Expand); |
621 | setOperationAction(ISD::FTRUNC, MVT::v4f32, Expand); |
622 | setOperationAction(ISD::FRINT, MVT::v4f32, Expand); |
623 | setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand); |
624 | setOperationAction(ISD::FFLOOR, MVT::v4f32, Expand); |
625 | |
626 | // Mark v2f32 intrinsics. |
627 | setOperationAction(ISD::FSQRT, MVT::v2f32, Expand); |
628 | setOperationAction(ISD::FSIN, MVT::v2f32, Expand); |
629 | setOperationAction(ISD::FCOS, MVT::v2f32, Expand); |
630 | setOperationAction(ISD::FPOW, MVT::v2f32, Expand); |
631 | setOperationAction(ISD::FLOG, MVT::v2f32, Expand); |
632 | setOperationAction(ISD::FLOG2, MVT::v2f32, Expand); |
633 | setOperationAction(ISD::FLOG10, MVT::v2f32, Expand); |
634 | setOperationAction(ISD::FEXP, MVT::v2f32, Expand); |
635 | setOperationAction(ISD::FEXP2, MVT::v2f32, Expand); |
636 | setOperationAction(ISD::FCEIL, MVT::v2f32, Expand); |
637 | setOperationAction(ISD::FTRUNC, MVT::v2f32, Expand); |
638 | setOperationAction(ISD::FRINT, MVT::v2f32, Expand); |
639 | setOperationAction(ISD::FNEARBYINT, MVT::v2f32, Expand); |
640 | setOperationAction(ISD::FFLOOR, MVT::v2f32, Expand); |
641 | |
642 | // Neon does not support some operations on v1i64 and v2i64 types. |
643 | setOperationAction(ISD::MUL, MVT::v1i64, Expand); |
644 | // Custom handling for some quad-vector types to detect VMULL. |
645 | setOperationAction(ISD::MUL, MVT::v8i16, Custom); |
646 | setOperationAction(ISD::MUL, MVT::v4i32, Custom); |
647 | setOperationAction(ISD::MUL, MVT::v2i64, Custom); |
648 | // Custom handling for some vector types to avoid expensive expansions |
649 | setOperationAction(ISD::SDIV, MVT::v4i16, Custom); |
650 | setOperationAction(ISD::SDIV, MVT::v8i8, Custom); |
651 | setOperationAction(ISD::UDIV, MVT::v4i16, Custom); |
652 | setOperationAction(ISD::UDIV, MVT::v8i8, Custom); |
653 | // Neon does not have single instruction SINT_TO_FP and UINT_TO_FP with |
654 | // a destination type that is wider than the source, and nor does |
655 | // it have a FP_TO_[SU]INT instruction with a narrower destination than |
656 | // source. |
657 | setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom); |
658 | setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom); |
659 | setOperationAction(ISD::FP_TO_UINT, MVT::v4i16, Custom); |
660 | setOperationAction(ISD::FP_TO_SINT, MVT::v4i16, Custom); |
661 | |
662 | setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand); |
663 | setOperationAction(ISD::FP_EXTEND, MVT::v2f64, Expand); |
664 | |
665 | // NEON does not have single instruction CTPOP for vectors with element |
666 | // types wider than 8-bits. However, custom lowering can leverage the |
667 | // v8i8/v16i8 vcnt instruction. |
668 | setOperationAction(ISD::CTPOP, MVT::v2i32, Custom); |
669 | setOperationAction(ISD::CTPOP, MVT::v4i32, Custom); |
670 | setOperationAction(ISD::CTPOP, MVT::v4i16, Custom); |
671 | setOperationAction(ISD::CTPOP, MVT::v8i16, Custom); |
672 | setOperationAction(ISD::CTPOP, MVT::v1i64, Expand); |
673 | setOperationAction(ISD::CTPOP, MVT::v2i64, Expand); |
674 | |
675 | setOperationAction(ISD::CTLZ, MVT::v1i64, Expand); |
676 | setOperationAction(ISD::CTLZ, MVT::v2i64, Expand); |
677 | |
678 | // NEON does not have single instruction CTTZ for vectors. |
679 | setOperationAction(ISD::CTTZ, MVT::v8i8, Custom); |
680 | setOperationAction(ISD::CTTZ, MVT::v4i16, Custom); |
681 | setOperationAction(ISD::CTTZ, MVT::v2i32, Custom); |
682 | setOperationAction(ISD::CTTZ, MVT::v1i64, Custom); |
683 | |
684 | setOperationAction(ISD::CTTZ, MVT::v16i8, Custom); |
685 | setOperationAction(ISD::CTTZ, MVT::v8i16, Custom); |
686 | setOperationAction(ISD::CTTZ, MVT::v4i32, Custom); |
687 | setOperationAction(ISD::CTTZ, MVT::v2i64, Custom); |
688 | |
689 | setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i8, Custom); |
690 | setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i16, Custom); |
691 | setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i32, Custom); |
692 | setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v1i64, Custom); |
693 | |
694 | setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v16i8, Custom); |
695 | setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i16, Custom); |
696 | setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i32, Custom); |
697 | setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i64, Custom); |
698 | |
699 | // NEON only has FMA instructions as of VFP4. |
700 | if (!Subtarget->hasVFP4()) { |
701 | setOperationAction(ISD::FMA, MVT::v2f32, Expand); |
702 | setOperationAction(ISD::FMA, MVT::v4f32, Expand); |
703 | } |
704 | |
705 | setTargetDAGCombine(ISD::INTRINSIC_VOID); |
706 | setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN); |
707 | setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN); |
708 | setTargetDAGCombine(ISD::SHL); |
709 | setTargetDAGCombine(ISD::SRL); |
710 | setTargetDAGCombine(ISD::SRA); |
711 | setTargetDAGCombine(ISD::SIGN_EXTEND); |
712 | setTargetDAGCombine(ISD::ZERO_EXTEND); |
713 | setTargetDAGCombine(ISD::ANY_EXTEND); |
714 | setTargetDAGCombine(ISD::BUILD_VECTOR); |
715 | setTargetDAGCombine(ISD::VECTOR_SHUFFLE); |
716 | setTargetDAGCombine(ISD::INSERT_VECTOR_ELT); |
717 | setTargetDAGCombine(ISD::STORE); |
718 | setTargetDAGCombine(ISD::FP_TO_SINT); |
719 | setTargetDAGCombine(ISD::FP_TO_UINT); |
720 | setTargetDAGCombine(ISD::FDIV); |
721 | setTargetDAGCombine(ISD::LOAD); |
722 | |
723 | // It is legal to extload from v4i8 to v4i16 or v4i32. |
724 | for (MVT Ty : {MVT::v8i8, MVT::v4i8, MVT::v2i8, MVT::v4i16, MVT::v2i16, |
725 | MVT::v2i32}) { |
726 | for (MVT VT : MVT::integer_vector_valuetypes()) { |
727 | setLoadExtAction(ISD::EXTLOAD, VT, Ty, Legal); |
728 | setLoadExtAction(ISD::ZEXTLOAD, VT, Ty, Legal); |
729 | setLoadExtAction(ISD::SEXTLOAD, VT, Ty, Legal); |
730 | } |
731 | } |
732 | } |
733 | |
734 | if (Subtarget->isFPOnlySP()) { |
735 | // When targeting a floating-point unit with only single-precision |
736 | // operations, f64 is legal for the few double-precision instructions which |
737 | // are present However, no double-precision operations other than moves, |
738 | // loads and stores are provided by the hardware. |
739 | setOperationAction(ISD::FADD, MVT::f64, Expand); |
740 | setOperationAction(ISD::FSUB, MVT::f64, Expand); |
741 | setOperationAction(ISD::FMUL, MVT::f64, Expand); |
742 | setOperationAction(ISD::FMA, MVT::f64, Expand); |
743 | setOperationAction(ISD::FDIV, MVT::f64, Expand); |
744 | setOperationAction(ISD::FREM, MVT::f64, Expand); |
745 | setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); |
746 | setOperationAction(ISD::FGETSIGN, MVT::f64, Expand); |
747 | setOperationAction(ISD::FNEG, MVT::f64, Expand); |
748 | setOperationAction(ISD::FABS, MVT::f64, Expand); |
749 | setOperationAction(ISD::FSQRT, MVT::f64, Expand); |
750 | setOperationAction(ISD::FSIN, MVT::f64, Expand); |
751 | setOperationAction(ISD::FCOS, MVT::f64, Expand); |
752 | setOperationAction(ISD::FPOW, MVT::f64, Expand); |
753 | setOperationAction(ISD::FLOG, MVT::f64, Expand); |
754 | setOperationAction(ISD::FLOG2, MVT::f64, Expand); |
755 | setOperationAction(ISD::FLOG10, MVT::f64, Expand); |
756 | setOperationAction(ISD::FEXP, MVT::f64, Expand); |
757 | setOperationAction(ISD::FEXP2, MVT::f64, Expand); |
758 | setOperationAction(ISD::FCEIL, MVT::f64, Expand); |
759 | setOperationAction(ISD::FTRUNC, MVT::f64, Expand); |
760 | setOperationAction(ISD::FRINT, MVT::f64, Expand); |
761 | setOperationAction(ISD::FNEARBYINT, MVT::f64, Expand); |
762 | setOperationAction(ISD::FFLOOR, MVT::f64, Expand); |
763 | setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom); |
764 | setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom); |
765 | setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom); |
766 | setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom); |
767 | setOperationAction(ISD::FP_TO_SINT, MVT::f64, Custom); |
768 | setOperationAction(ISD::FP_TO_UINT, MVT::f64, Custom); |
769 | setOperationAction(ISD::FP_ROUND, MVT::f32, Custom); |
770 | setOperationAction(ISD::FP_EXTEND, MVT::f64, Custom); |
771 | } |
772 | |
773 | computeRegisterProperties(Subtarget->getRegisterInfo()); |
774 | |
775 | // ARM does not have floating-point extending loads. |
776 | for (MVT VT : MVT::fp_valuetypes()) { |
777 | setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand); |
778 | setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand); |
779 | } |
780 | |
781 | // ... or truncating stores |
782 | setTruncStoreAction(MVT::f64, MVT::f32, Expand); |
783 | setTruncStoreAction(MVT::f32, MVT::f16, Expand); |
784 | setTruncStoreAction(MVT::f64, MVT::f16, Expand); |
785 | |
786 | // ARM does not have i1 sign extending load. |
787 | for (MVT VT : MVT::integer_valuetypes()) |
788 | setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote); |
789 | |
790 | // ARM supports all 4 flavors of integer indexed load / store. |
791 | if (!Subtarget->isThumb1Only()) { |
792 | for (unsigned im = (unsigned)ISD::PRE_INC; |
793 | im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) { |
794 | setIndexedLoadAction(im, MVT::i1, Legal); |
795 | setIndexedLoadAction(im, MVT::i8, Legal); |
796 | setIndexedLoadAction(im, MVT::i16, Legal); |
797 | setIndexedLoadAction(im, MVT::i32, Legal); |
798 | setIndexedStoreAction(im, MVT::i1, Legal); |
799 | setIndexedStoreAction(im, MVT::i8, Legal); |
800 | setIndexedStoreAction(im, MVT::i16, Legal); |
801 | setIndexedStoreAction(im, MVT::i32, Legal); |
802 | } |
803 | } else { |
804 | // Thumb-1 has limited post-inc load/store support - LDM r0!, {r1}. |
805 | setIndexedLoadAction(ISD::POST_INC, MVT::i32, Legal); |
806 | setIndexedStoreAction(ISD::POST_INC, MVT::i32, Legal); |
807 | } |
808 | |
809 | setOperationAction(ISD::SADDO, MVT::i32, Custom); |
810 | setOperationAction(ISD::UADDO, MVT::i32, Custom); |
811 | setOperationAction(ISD::SSUBO, MVT::i32, Custom); |
812 | setOperationAction(ISD::USUBO, MVT::i32, Custom); |
813 | |
814 | setOperationAction(ISD::ADDCARRY, MVT::i32, Custom); |
815 | setOperationAction(ISD::SUBCARRY, MVT::i32, Custom); |
816 | |
817 | // i64 operation support. |
818 | setOperationAction(ISD::MUL, MVT::i64, Expand); |
819 | setOperationAction(ISD::MULHU, MVT::i32, Expand); |
820 | if (Subtarget->isThumb1Only()) { |
821 | setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand); |
822 | setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand); |
823 | } |
824 | if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops() |
825 | || (Subtarget->isThumb2() && !Subtarget->hasDSP())) |
826 | setOperationAction(ISD::MULHS, MVT::i32, Expand); |
827 | |
828 | setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom); |
829 | setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom); |
830 | setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom); |
831 | setOperationAction(ISD::SRL, MVT::i64, Custom); |
832 | setOperationAction(ISD::SRA, MVT::i64, Custom); |
833 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom); |
834 | |
835 | // Expand to __aeabi_l{lsl,lsr,asr} calls for Thumb1. |
836 | if (Subtarget->isThumb1Only()) { |
837 | setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand); |
838 | setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand); |
839 | setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand); |
840 | } |
841 | |
842 | setOperationAction(ISD::ADDC, MVT::i32, Custom); |
843 | setOperationAction(ISD::ADDE, MVT::i32, Custom); |
844 | setOperationAction(ISD::SUBC, MVT::i32, Custom); |
845 | setOperationAction(ISD::SUBE, MVT::i32, Custom); |
846 | |
847 | if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops()) |
848 | setOperationAction(ISD::BITREVERSE, MVT::i32, Legal); |
849 | |
850 | // ARM does not have ROTL. |
851 | setOperationAction(ISD::ROTL, MVT::i32, Expand); |
852 | for (MVT VT : MVT::vector_valuetypes()) { |
853 | setOperationAction(ISD::ROTL, VT, Expand); |
854 | setOperationAction(ISD::ROTR, VT, Expand); |
855 | } |
856 | setOperationAction(ISD::CTTZ, MVT::i32, Custom); |
857 | setOperationAction(ISD::CTPOP, MVT::i32, Expand); |
858 | if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only()) |
859 | setOperationAction(ISD::CTLZ, MVT::i32, Expand); |
860 | |
861 | // @llvm.readcyclecounter requires the Performance Monitors extension. |
862 | // Default to the 0 expansion on unsupported platforms. |
863 | // FIXME: Technically there are older ARM CPUs that have |
864 | // implementation-specific ways of obtaining this information. |
865 | if (Subtarget->hasPerfMon()) |
866 | setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom); |
867 | |
868 | // Only ARMv6 has BSWAP. |
869 | if (!Subtarget->hasV6Ops()) |
870 | setOperationAction(ISD::BSWAP, MVT::i32, Expand); |
871 | |
872 | bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode() |
873 | : Subtarget->hasDivideInARMMode(); |
874 | if (!hasDivide) { |
875 | // These are expanded into libcalls if the cpu doesn't have HW divider. |
876 | setOperationAction(ISD::SDIV, MVT::i32, LibCall); |
877 | setOperationAction(ISD::UDIV, MVT::i32, LibCall); |
878 | } |
879 | |
880 | if (Subtarget->isTargetWindows() && !Subtarget->hasDivideInThumbMode()) { |
881 | setOperationAction(ISD::SDIV, MVT::i32, Custom); |
882 | setOperationAction(ISD::UDIV, MVT::i32, Custom); |
883 | |
884 | setOperationAction(ISD::SDIV, MVT::i64, Custom); |
885 | setOperationAction(ISD::UDIV, MVT::i64, Custom); |
886 | } |
887 | |
888 | setOperationAction(ISD::SREM, MVT::i32, Expand); |
889 | setOperationAction(ISD::UREM, MVT::i32, Expand); |
890 | |
891 | // Register based DivRem for AEABI (RTABI 4.2) |
892 | if (Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() || |
893 | Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI() || |
894 | Subtarget->isTargetWindows()) { |
895 | setOperationAction(ISD::SREM, MVT::i64, Custom); |
896 | setOperationAction(ISD::UREM, MVT::i64, Custom); |
897 | HasStandaloneRem = false; |
898 | |
899 | if (Subtarget->isTargetWindows()) { |
900 | const struct { |
901 | const RTLIB::Libcall Op; |
902 | const char * const Name; |
903 | const CallingConv::ID CC; |
904 | } LibraryCalls[] = { |
905 | { RTLIB::SDIVREM_I8, "__rt_sdiv", CallingConv::ARM_AAPCS }, |
906 | { RTLIB::SDIVREM_I16, "__rt_sdiv", CallingConv::ARM_AAPCS }, |
907 | { RTLIB::SDIVREM_I32, "__rt_sdiv", CallingConv::ARM_AAPCS }, |
908 | { RTLIB::SDIVREM_I64, "__rt_sdiv64", CallingConv::ARM_AAPCS }, |
909 | |
910 | { RTLIB::UDIVREM_I8, "__rt_udiv", CallingConv::ARM_AAPCS }, |
911 | { RTLIB::UDIVREM_I16, "__rt_udiv", CallingConv::ARM_AAPCS }, |
912 | { RTLIB::UDIVREM_I32, "__rt_udiv", CallingConv::ARM_AAPCS }, |
913 | { RTLIB::UDIVREM_I64, "__rt_udiv64", CallingConv::ARM_AAPCS }, |
914 | }; |
915 | |
916 | for (const auto &LC : LibraryCalls) { |
917 | setLibcallName(LC.Op, LC.Name); |
918 | setLibcallCallingConv(LC.Op, LC.CC); |
919 | } |
920 | } else { |
921 | const struct { |
922 | const RTLIB::Libcall Op; |
923 | const char * const Name; |
924 | const CallingConv::ID CC; |
925 | } LibraryCalls[] = { |
926 | { RTLIB::SDIVREM_I8, "__aeabi_idivmod", CallingConv::ARM_AAPCS }, |
927 | { RTLIB::SDIVREM_I16, "__aeabi_idivmod", CallingConv::ARM_AAPCS }, |
928 | { RTLIB::SDIVREM_I32, "__aeabi_idivmod", CallingConv::ARM_AAPCS }, |
929 | { RTLIB::SDIVREM_I64, "__aeabi_ldivmod", CallingConv::ARM_AAPCS }, |
930 | |
931 | { RTLIB::UDIVREM_I8, "__aeabi_uidivmod", CallingConv::ARM_AAPCS }, |
932 | { RTLIB::UDIVREM_I16, "__aeabi_uidivmod", CallingConv::ARM_AAPCS }, |
933 | { RTLIB::UDIVREM_I32, "__aeabi_uidivmod", CallingConv::ARM_AAPCS }, |
934 | { RTLIB::UDIVREM_I64, "__aeabi_uldivmod", CallingConv::ARM_AAPCS }, |
935 | }; |
936 | |
937 | for (const auto &LC : LibraryCalls) { |
938 | setLibcallName(LC.Op, LC.Name); |
939 | setLibcallCallingConv(LC.Op, LC.CC); |
940 | } |
941 | } |
942 | |
943 | setOperationAction(ISD::SDIVREM, MVT::i32, Custom); |
944 | setOperationAction(ISD::UDIVREM, MVT::i32, Custom); |
945 | setOperationAction(ISD::SDIVREM, MVT::i64, Custom); |
946 | setOperationAction(ISD::UDIVREM, MVT::i64, Custom); |
947 | } else { |
948 | setOperationAction(ISD::SDIVREM, MVT::i32, Expand); |
949 | setOperationAction(ISD::UDIVREM, MVT::i32, Expand); |
950 | } |
951 | |
952 | if (Subtarget->isTargetWindows() && Subtarget->getTargetTriple().isOSMSVCRT()) |
953 | for (auto &VT : {MVT::f32, MVT::f64}) |
954 | setOperationAction(ISD::FPOWI, VT, Custom); |
955 | |
956 | setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); |
957 | setOperationAction(ISD::ConstantPool, MVT::i32, Custom); |
958 | setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom); |
959 | setOperationAction(ISD::BlockAddress, MVT::i32, Custom); |
960 | |
961 | setOperationAction(ISD::TRAP, MVT::Other, Legal); |
962 | |
963 | // Use the default implementation. |
964 | setOperationAction(ISD::VASTART, MVT::Other, Custom); |
965 | setOperationAction(ISD::VAARG, MVT::Other, Expand); |
966 | setOperationAction(ISD::VACOPY, MVT::Other, Expand); |
967 | setOperationAction(ISD::VAEND, MVT::Other, Expand); |
968 | setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); |
969 | setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); |
970 | |
971 | if (Subtarget->isTargetWindows()) |
972 | setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom); |
973 | else |
974 | setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand); |
975 | |
976 | // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use |
977 | // the default expansion. |
978 | InsertFencesForAtomic = false; |
979 | if (Subtarget->hasAnyDataBarrier() && |
980 | (!Subtarget->isThumb() || Subtarget->hasV8MBaselineOps())) { |
981 | // ATOMIC_FENCE needs custom lowering; the others should have been expanded |
982 | // to ldrex/strex loops already. |
983 | setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom); |
984 | if (!Subtarget->isThumb() || !Subtarget->isMClass()) |
985 | setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom); |
986 | |
987 | // On v8, we have particularly efficient implementations of atomic fences |
988 | // if they can be combined with nearby atomic loads and stores. |
989 | if (!Subtarget->hasV8Ops() || getTargetMachine().getOptLevel() == 0) { |
990 | // Automatically insert fences (dmb ish) around ATOMIC_SWAP etc. |
991 | InsertFencesForAtomic = true; |
992 | } |
993 | } else { |
994 | // If there's anything we can use as a barrier, go through custom lowering |
995 | // for ATOMIC_FENCE. |
996 | // If target has DMB in thumb, Fences can be inserted. |
997 | if (Subtarget->hasDataBarrier()) |
998 | InsertFencesForAtomic = true; |
999 | |
1000 | setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, |
1001 | Subtarget->hasAnyDataBarrier() ? Custom : Expand); |
1002 | |
1003 | // Set them all for expansion, which will force libcalls. |
1004 | setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Expand); |
1005 | setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Expand); |
1006 | setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Expand); |
1007 | setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Expand); |
1008 | setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Expand); |
1009 | setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Expand); |
1010 | setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Expand); |
1011 | setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand); |
1012 | setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Expand); |
1013 | setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Expand); |
1014 | setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand); |
1015 | setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand); |
1016 | // Mark ATOMIC_LOAD and ATOMIC_STORE custom so we can handle the |
1017 | // Unordered/Monotonic case. |
1018 | if (!InsertFencesForAtomic) { |
1019 | setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom); |
1020 | setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom); |
1021 | } |
1022 | } |
1023 | |
1024 | setOperationAction(ISD::PREFETCH, MVT::Other, Custom); |
1025 | |
1026 | // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes. |
1027 | if (!Subtarget->hasV6Ops()) { |
1028 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand); |
1029 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand); |
1030 | } |
1031 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); |
1032 | |
1033 | if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2() && |
1034 | !Subtarget->isThumb1Only()) { |
1035 | // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR |
1036 | // iff target supports vfp2. |
1037 | setOperationAction(ISD::BITCAST, MVT::i64, Custom); |
1038 | setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom); |
1039 | } |
1040 | |
1041 | // We want to custom lower some of our intrinsics. |
1042 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); |
1043 | setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom); |
1044 | setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom); |
1045 | setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom); |
1046 | if (Subtarget->useSjLjEH()) |
1047 | setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume"); |
1048 | |
1049 | setOperationAction(ISD::SETCC, MVT::i32, Expand); |
1050 | setOperationAction(ISD::SETCC, MVT::f16, Expand); |
1051 | setOperationAction(ISD::SETCC, MVT::f32, Expand); |
1052 | setOperationAction(ISD::SETCC, MVT::f64, Expand); |
1053 | setOperationAction(ISD::SELECT, MVT::i32, Custom); |
1054 | setOperationAction(ISD::SELECT, MVT::f32, Custom); |
1055 | setOperationAction(ISD::SELECT, MVT::f64, Custom); |
1056 | setOperationAction(ISD::SELECT_CC, MVT::f16, Custom); |
1057 | setOperationAction(ISD::SELECT_CC, MVT::i32, Custom); |
1058 | setOperationAction(ISD::SELECT_CC, MVT::f32, Custom); |
1059 | setOperationAction(ISD::SELECT_CC, MVT::f64, Custom); |
1060 | |
1061 | // Thumb-1 cannot currently select ARMISD::SUBE. |
1062 | if (!Subtarget->isThumb1Only()) |
1063 | setOperationAction(ISD::SETCCE, MVT::i32, Custom); |
1064 | |
1065 | setOperationAction(ISD::BRCOND, MVT::Other, Custom); |
1066 | setOperationAction(ISD::BR_CC, MVT::i32, Custom); |
1067 | setOperationAction(ISD::BR_CC, MVT::f16, Custom); |
1068 | setOperationAction(ISD::BR_CC, MVT::f32, Custom); |
1069 | setOperationAction(ISD::BR_CC, MVT::f64, Custom); |
1070 | setOperationAction(ISD::BR_JT, MVT::Other, Custom); |
1071 | |
1072 | // We don't support sin/cos/fmod/copysign/pow |
1073 | setOperationAction(ISD::FSIN, MVT::f64, Expand); |
1074 | setOperationAction(ISD::FSIN, MVT::f32, Expand); |
1075 | setOperationAction(ISD::FCOS, MVT::f32, Expand); |
1076 | setOperationAction(ISD::FCOS, MVT::f64, Expand); |
1077 | setOperationAction(ISD::FSINCOS, MVT::f64, Expand); |
1078 | setOperationAction(ISD::FSINCOS, MVT::f32, Expand); |
1079 | setOperationAction(ISD::FREM, MVT::f64, Expand); |
1080 | setOperationAction(ISD::FREM, MVT::f32, Expand); |
1081 | if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2() && |
1082 | !Subtarget->isThumb1Only()) { |
1083 | setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom); |
1084 | setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom); |
1085 | } |
1086 | setOperationAction(ISD::FPOW, MVT::f64, Expand); |
1087 | setOperationAction(ISD::FPOW, MVT::f32, Expand); |
1088 | |
1089 | if (!Subtarget->hasVFP4()) { |
1090 | setOperationAction(ISD::FMA, MVT::f64, Expand); |
1091 | setOperationAction(ISD::FMA, MVT::f32, Expand); |
1092 | } |
1093 | |
1094 | // Various VFP goodness |
1095 | if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only()) { |
1096 | // FP-ARMv8 adds f64 <-> f16 conversion. Before that it should be expanded. |
1097 | if (!Subtarget->hasFPARMv8() || Subtarget->isFPOnlySP()) { |
1098 | setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand); |
1099 | setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand); |
1100 | } |
1101 | |
1102 | // fp16 is a special v7 extension that adds f16 <-> f32 conversions. |
1103 | if (!Subtarget->hasFP16()) { |
1104 | setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand); |
1105 | setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand); |
1106 | } |
1107 | } |
1108 | |
1109 | // Use __sincos_stret if available. |
1110 | if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr && |
1111 | getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) { |
1112 | setOperationAction(ISD::FSINCOS, MVT::f64, Custom); |
1113 | setOperationAction(ISD::FSINCOS, MVT::f32, Custom); |
1114 | } |
1115 | |
1116 | // FP-ARMv8 implements a lot of rounding-like FP operations. |
1117 | if (Subtarget->hasFPARMv8()) { |
1118 | setOperationAction(ISD::FFLOOR, MVT::f32, Legal); |
1119 | setOperationAction(ISD::FCEIL, MVT::f32, Legal); |
1120 | setOperationAction(ISD::FROUND, MVT::f32, Legal); |
1121 | setOperationAction(ISD::FTRUNC, MVT::f32, Legal); |
1122 | setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal); |
1123 | setOperationAction(ISD::FRINT, MVT::f32, Legal); |
1124 | setOperationAction(ISD::FMINNUM, MVT::f32, Legal); |
1125 | setOperationAction(ISD::FMAXNUM, MVT::f32, Legal); |
1126 | setOperationAction(ISD::FMINNUM, MVT::v2f32, Legal); |
1127 | setOperationAction(ISD::FMAXNUM, MVT::v2f32, Legal); |
1128 | setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal); |
1129 | setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal); |
1130 | |
1131 | if (!Subtarget->isFPOnlySP()) { |
1132 | setOperationAction(ISD::FFLOOR, MVT::f64, Legal); |
1133 | setOperationAction(ISD::FCEIL, MVT::f64, Legal); |
1134 | setOperationAction(ISD::FROUND, MVT::f64, Legal); |
1135 | setOperationAction(ISD::FTRUNC, MVT::f64, Legal); |
1136 | setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal); |
1137 | setOperationAction(ISD::FRINT, MVT::f64, Legal); |
1138 | setOperationAction(ISD::FMINNUM, MVT::f64, Legal); |
1139 | setOperationAction(ISD::FMAXNUM, MVT::f64, Legal); |
1140 | } |
1141 | } |
1142 | |
1143 | if (Subtarget->hasNEON()) { |
1144 | // vmin and vmax aren't available in a scalar form, so we use |
1145 | // a NEON instruction with an undef lane instead. |
1146 | setOperationAction(ISD::FMINNAN, MVT::f32, Legal); |
1147 | setOperationAction(ISD::FMAXNAN, MVT::f32, Legal); |
1148 | setOperationAction(ISD::FMINNAN, MVT::v2f32, Legal); |
1149 | setOperationAction(ISD::FMAXNAN, MVT::v2f32, Legal); |
1150 | setOperationAction(ISD::FMINNAN, MVT::v4f32, Legal); |
1151 | setOperationAction(ISD::FMAXNAN, MVT::v4f32, Legal); |
1152 | } |
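| |
| // Illustrative sketch (not in the original source): a scalar f32 fminnan |
| // is matched to the vector form described above, e.g. |
| //   vmin.f32 d0, d0, d1   @ only lane 0 carries the scalar result |
| // with the unused upper lane left undef. |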
1153 | |
1154 | // We have target-specific dag combine patterns for the following nodes: |
1155 | // ARMISD::VMOVRRD - No need to call setTargetDAGCombine |
1156 | setTargetDAGCombine(ISD::ADD); |
1157 | setTargetDAGCombine(ISD::SUB); |
1158 | setTargetDAGCombine(ISD::MUL); |
1159 | setTargetDAGCombine(ISD::AND); |
1160 | setTargetDAGCombine(ISD::OR); |
1161 | setTargetDAGCombine(ISD::XOR); |
1162 | |
1163 | if (Subtarget->hasV6Ops()) |
1164 | setTargetDAGCombine(ISD::SRL); |
1165 | |
1166 | setStackPointerRegisterToSaveRestore(ARM::SP); |
1167 | |
1168 | if (Subtarget->useSoftFloat() || Subtarget->isThumb1Only() || |
1169 | !Subtarget->hasVFP2()) |
1170 | setSchedulingPreference(Sched::RegPressure); |
1171 | else |
1172 | setSchedulingPreference(Sched::Hybrid); |
1173 | |
1174 | //// temporary - rewrite interface to use type |
1175 | MaxStoresPerMemset = 8; |
1176 | MaxStoresPerMemsetOptSize = 4; |
1177 | MaxStoresPerMemcpy = 4; // For @llvm.memcpy -> sequence of stores |
1178 | MaxStoresPerMemcpyOptSize = 2; |
1179 | MaxStoresPerMemmove = 4; // For @llvm.memmove -> sequence of stores |
1180 | MaxStoresPerMemmoveOptSize = 2; |
1181 | |
1182 | // On ARM arguments smaller than 4 bytes are extended, so all arguments |
1183 | // are at least 4 bytes aligned. |
1184 | setMinStackArgumentAlignment(4); |
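| |
| // Worked example (illustrative): an i8 argument is extended to i32 before |
| // being passed, so even a stack-passed char occupies a full 4-byte, |
| // 4-byte-aligned slot, which is what the minimum above models. |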
1185 | |
1186 | // Prefer likely predicted branches to selects on out-of-order cores. |
1187 | PredictableSelectIsExpensive = Subtarget->getSchedModel().isOutOfOrder(); |
1188 | |
1189 | setMinFunctionAlignment(Subtarget->isThumb() ? 1 : 2); |
1190 | } |
1191 | |
1192 | bool ARMTargetLowering::useSoftFloat() const { |
1193 | return Subtarget->useSoftFloat(); |
1194 | } |
1195 | |
1196 | // FIXME: It might make sense to define the representative register class as the |
1197 | // nearest super-register that has a non-null superset. For example, DPR_VFP2 is |
1198 | // a super-register of SPR, and DPR is a superset of DPR_VFP2. Consequently, |
1199 | // SPR's representative would be DPR_VFP2. This should work well if register |
1200 | // pressure tracking were modified such that a register use would increment the |
1201 | // pressure of the register class's representative and all of its super |
1202 | // classes' representatives transitively. We have not implemented this because |
1203 | // of the difficulty prior to coalescing of modeling operand register classes |
1204 | // due to the common occurrence of cross class copies and subregister insertions |
1205 | // and extractions. |
1206 | std::pair<const TargetRegisterClass *, uint8_t> |
1207 | ARMTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI, |
1208 | MVT VT) const { |
1209 | const TargetRegisterClass *RRC = nullptr; |
1210 | uint8_t Cost = 1; |
1211 | switch (VT.SimpleTy) { |
1212 | default: |
1213 | return TargetLowering::findRepresentativeClass(TRI, VT); |
1214 | // Use DPR as representative register class for all floating point |
1215 | // and vector types. Since there are 32 SPR registers and 32 DPR |
1216 | // registers, the cost is 1 for both f32 and f64. |
1217 | case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16: |
1218 | case MVT::v2i32: case MVT::v1i64: case MVT::v2f32: |
1219 | RRC = &ARM::DPRRegClass; |
1220 | // When NEON is used for SP, only half of the register file is available |
1221 | // because operations that define both SP and DP results will be constrained |
1222 | // to the VFP2 class (D0-D15). We currently model this constraint prior to |
1223 | // coalescing by double-counting the SP regs. See the FIXME above. |
1224 | if (Subtarget->useNEONForSinglePrecisionFP()) |
1225 | Cost = 2; |
1226 | break; |
1227 | case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64: |
1228 | case MVT::v4f32: case MVT::v2f64: |
1229 | RRC = &ARM::DPRRegClass; |
1230 | Cost = 2; |
1231 | break; |
1232 | case MVT::v4i64: |
1233 | RRC = &ARM::DPRRegClass; |
1234 | Cost = 4; |
1235 | break; |
1236 | case MVT::v8i64: |
1237 | RRC = &ARM::DPRRegClass; |
1238 | Cost = 8; |
1239 | break; |
1240 | } |
1241 | return std::make_pair(RRC, Cost); |
1242 | } |
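| |
| // Worked example (illustrative): MVT::v4f32 lives in a Q register, which |
| // aliases two consecutive D registers, so with DPR as the representative |
| // class its cost is 2; v8i64 spans a QQQQ tuple of eight D registers, |
| // hence cost 8. |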
1243 | |
1244 | const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const { |
1245 | switch ((ARMISD::NodeType)Opcode) { |
1246 | case ARMISD::FIRST_NUMBER: break; |
1247 | case ARMISD::Wrapper: return "ARMISD::Wrapper"; |
1248 | case ARMISD::WrapperPIC: return "ARMISD::WrapperPIC"; |
1249 | case ARMISD::WrapperJT: return "ARMISD::WrapperJT"; |
1250 | case ARMISD::COPY_STRUCT_BYVAL: return "ARMISD::COPY_STRUCT_BYVAL"; |
1251 | case ARMISD::CALL: return "ARMISD::CALL"; |
1252 | case ARMISD::CALL_PRED: return "ARMISD::CALL_PRED"; |
1253 | case ARMISD::CALL_NOLINK: return "ARMISD::CALL_NOLINK"; |
1254 | case ARMISD::BRCOND: return "ARMISD::BRCOND"; |
1255 | case ARMISD::BR_JT: return "ARMISD::BR_JT"; |
1256 | case ARMISD::BR2_JT: return "ARMISD::BR2_JT"; |
1257 | case ARMISD::RET_FLAG: return "ARMISD::RET_FLAG"; |
1258 | case ARMISD::INTRET_FLAG: return "ARMISD::INTRET_FLAG"; |
1259 | case ARMISD::PIC_ADD: return "ARMISD::PIC_ADD"; |
1260 | case ARMISD::CMP: return "ARMISD::CMP"; |
1261 | case ARMISD::CMN: return "ARMISD::CMN"; |
1262 | case ARMISD::CMPZ: return "ARMISD::CMPZ"; |
1263 | case ARMISD::CMPFP: return "ARMISD::CMPFP"; |
1264 | case ARMISD::CMPFPw0: return "ARMISD::CMPFPw0"; |
1265 | case ARMISD::BCC_i64: return "ARMISD::BCC_i64"; |
1266 | case ARMISD::FMSTAT: return "ARMISD::FMSTAT"; |
1267 | |
1268 | case ARMISD::CMOV: return "ARMISD::CMOV"; |
1269 | |
1270 | case ARMISD::SSAT: return "ARMISD::SSAT"; |
1271 | case ARMISD::USAT: return "ARMISD::USAT"; |
1272 | |
1273 | case ARMISD::SRL_FLAG: return "ARMISD::SRL_FLAG"; |
1274 | case ARMISD::SRA_FLAG: return "ARMISD::SRA_FLAG"; |
1275 | case ARMISD::RRX: return "ARMISD::RRX"; |
1276 | |
1277 | case ARMISD::ADDC: return "ARMISD::ADDC"; |
1278 | case ARMISD::ADDE: return "ARMISD::ADDE"; |
1279 | case ARMISD::SUBC: return "ARMISD::SUBC"; |
1280 | case ARMISD::SUBE: return "ARMISD::SUBE"; |
1281 | |
1282 | case ARMISD::VMOVRRD: return "ARMISD::VMOVRRD"; |
1283 | case ARMISD::VMOVDRR: return "ARMISD::VMOVDRR"; |
1284 | case ARMISD::VMOVhr: return "ARMISD::VMOVhr"; |
1285 | case ARMISD::VMOVrh: return "ARMISD::VMOVrh"; |
1286 | case ARMISD::VMOVSR: return "ARMISD::VMOVSR"; |
1287 | |
1288 | case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP"; |
1289 | case ARMISD::EH_SJLJ_LONGJMP: return "ARMISD::EH_SJLJ_LONGJMP"; |
1290 | case ARMISD::EH_SJLJ_SETUP_DISPATCH: return "ARMISD::EH_SJLJ_SETUP_DISPATCH"; |
1291 | |
1292 | case ARMISD::TC_RETURN: return "ARMISD::TC_RETURN"; |
1293 | |
1294 | case ARMISD::THREAD_POINTER: return "ARMISD::THREAD_POINTER"; |
1295 | |
1296 | case ARMISD::DYN_ALLOC: return "ARMISD::DYN_ALLOC"; |
1297 | |
1298 | case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR"; |
1299 | |
1300 | case ARMISD::PRELOAD: return "ARMISD::PRELOAD"; |
1301 | |
1302 | case ARMISD::WIN__CHKSTK: return "ARMISD::WIN__CHKSTK"; |
1303 | case ARMISD::WIN__DBZCHK: return "ARMISD::WIN__DBZCHK"; |
1304 | |
1305 | case ARMISD::VCEQ: return "ARMISD::VCEQ"; |
1306 | case ARMISD::VCEQZ: return "ARMISD::VCEQZ"; |
1307 | case ARMISD::VCGE: return "ARMISD::VCGE"; |
1308 | case ARMISD::VCGEZ: return "ARMISD::VCGEZ"; |
1309 | case ARMISD::VCLEZ: return "ARMISD::VCLEZ"; |
1310 | case ARMISD::VCGEU: return "ARMISD::VCGEU"; |
1311 | case ARMISD::VCGT: return "ARMISD::VCGT"; |
1312 | case ARMISD::VCGTZ: return "ARMISD::VCGTZ"; |
1313 | case ARMISD::VCLTZ: return "ARMISD::VCLTZ"; |
1314 | case ARMISD::VCGTU: return "ARMISD::VCGTU"; |
1315 | case ARMISD::VTST: return "ARMISD::VTST"; |
1316 | |
1317 | case ARMISD::VSHL: return "ARMISD::VSHL"; |
1318 | case ARMISD::VSHRs: return "ARMISD::VSHRs"; |
1319 | case ARMISD::VSHRu: return "ARMISD::VSHRu"; |
1320 | case ARMISD::VRSHRs: return "ARMISD::VRSHRs"; |
1321 | case ARMISD::VRSHRu: return "ARMISD::VRSHRu"; |
1322 | case ARMISD::VRSHRN: return "ARMISD::VRSHRN"; |
1323 | case ARMISD::VQSHLs: return "ARMISD::VQSHLs"; |
1324 | case ARMISD::VQSHLu: return "ARMISD::VQSHLu"; |
1325 | case ARMISD::VQSHLsu: return "ARMISD::VQSHLsu"; |
1326 | case ARMISD::VQSHRNs: return "ARMISD::VQSHRNs"; |
1327 | case ARMISD::VQSHRNu: return "ARMISD::VQSHRNu"; |
1328 | case ARMISD::VQSHRNsu: return "ARMISD::VQSHRNsu"; |
1329 | case ARMISD::VQRSHRNs: return "ARMISD::VQRSHRNs"; |
1330 | case ARMISD::VQRSHRNu: return "ARMISD::VQRSHRNu"; |
1331 | case ARMISD::VQRSHRNsu: return "ARMISD::VQRSHRNsu"; |
1332 | case ARMISD::VSLI: return "ARMISD::VSLI"; |
1333 | case ARMISD::VSRI: return "ARMISD::VSRI"; |
1334 | case ARMISD::VGETLANEu: return "ARMISD::VGETLANEu"; |
1335 | case ARMISD::VGETLANEs: return "ARMISD::VGETLANEs"; |
1336 | case ARMISD::VMOVIMM: return "ARMISD::VMOVIMM"; |
1337 | case ARMISD::VMVNIMM: return "ARMISD::VMVNIMM"; |
1338 | case ARMISD::VMOVFPIMM: return "ARMISD::VMOVFPIMM"; |
1339 | case ARMISD::VDUP: return "ARMISD::VDUP"; |
1340 | case ARMISD::VDUPLANE: return "ARMISD::VDUPLANE"; |
1341 | case ARMISD::VEXT: return "ARMISD::VEXT"; |
1342 | case ARMISD::VREV64: return "ARMISD::VREV64"; |
1343 | case ARMISD::VREV32: return "ARMISD::VREV32"; |
1344 | case ARMISD::VREV16: return "ARMISD::VREV16"; |
1345 | case ARMISD::VZIP: return "ARMISD::VZIP"; |
1346 | case ARMISD::VUZP: return "ARMISD::VUZP"; |
1347 | case ARMISD::VTRN: return "ARMISD::VTRN"; |
1348 | case ARMISD::VTBL1: return "ARMISD::VTBL1"; |
1349 | case ARMISD::VTBL2: return "ARMISD::VTBL2"; |
1350 | case ARMISD::VMULLs: return "ARMISD::VMULLs"; |
1351 | case ARMISD::VMULLu: return "ARMISD::VMULLu"; |
1352 | case ARMISD::UMAAL: return "ARMISD::UMAAL"; |
1353 | case ARMISD::UMLAL: return "ARMISD::UMLAL"; |
1354 | case ARMISD::SMLAL: return "ARMISD::SMLAL"; |
1355 | case ARMISD::SMLALBB: return "ARMISD::SMLALBB"; |
1356 | case ARMISD::SMLALBT: return "ARMISD::SMLALBT"; |
1357 | case ARMISD::SMLALTB: return "ARMISD::SMLALTB"; |
1358 | case ARMISD::SMLALTT: return "ARMISD::SMLALTT"; |
1359 | case ARMISD::SMULWB: return "ARMISD::SMULWB"; |
1360 | case ARMISD::SMULWT: return "ARMISD::SMULWT"; |
1361 | case ARMISD::SMLALD: return "ARMISD::SMLALD"; |
1362 | case ARMISD::SMLALDX: return "ARMISD::SMLALDX"; |
1363 | case ARMISD::SMLSLD: return "ARMISD::SMLSLD"; |
1364 | case ARMISD::SMLSLDX: return "ARMISD::SMLSLDX"; |
1365 | case ARMISD::SMMLAR: return "ARMISD::SMMLAR"; |
1366 | case ARMISD::SMMLSR: return "ARMISD::SMMLSR"; |
1367 | case ARMISD::BUILD_VECTOR: return "ARMISD::BUILD_VECTOR"; |
1368 | case ARMISD::BFI: return "ARMISD::BFI"; |
1369 | case ARMISD::VORRIMM: return "ARMISD::VORRIMM"; |
1370 | case ARMISD::VBICIMM: return "ARMISD::VBICIMM"; |
1371 | case ARMISD::VBSL: return "ARMISD::VBSL"; |
1372 | case ARMISD::MEMCPY: return "ARMISD::MEMCPY"; |
1373 | case ARMISD::VLD1DUP: return "ARMISD::VLD1DUP"; |
1374 | case ARMISD::VLD2DUP: return "ARMISD::VLD2DUP"; |
1375 | case ARMISD::VLD3DUP: return "ARMISD::VLD3DUP"; |
1376 | case ARMISD::VLD4DUP: return "ARMISD::VLD4DUP"; |
1377 | case ARMISD::VLD1_UPD: return "ARMISD::VLD1_UPD"; |
1378 | case ARMISD::VLD2_UPD: return "ARMISD::VLD2_UPD"; |
1379 | case ARMISD::VLD3_UPD: return "ARMISD::VLD3_UPD"; |
1380 | case ARMISD::VLD4_UPD: return "ARMISD::VLD4_UPD"; |
1381 | case ARMISD::VLD2LN_UPD: return "ARMISD::VLD2LN_UPD"; |
1382 | case ARMISD::VLD3LN_UPD: return "ARMISD::VLD3LN_UPD"; |
1383 | case ARMISD::VLD4LN_UPD: return "ARMISD::VLD4LN_UPD"; |
1384 | case ARMISD::VLD1DUP_UPD: return "ARMISD::VLD1DUP_UPD"; |
1385 | case ARMISD::VLD2DUP_UPD: return "ARMISD::VLD2DUP_UPD"; |
1386 | case ARMISD::VLD3DUP_UPD: return "ARMISD::VLD3DUP_UPD"; |
1387 | case ARMISD::VLD4DUP_UPD: return "ARMISD::VLD4DUP_UPD"; |
1388 | case ARMISD::VST1_UPD: return "ARMISD::VST1_UPD"; |
1389 | case ARMISD::VST2_UPD: return "ARMISD::VST2_UPD"; |
1390 | case ARMISD::VST3_UPD: return "ARMISD::VST3_UPD"; |
1391 | case ARMISD::VST4_UPD: return "ARMISD::VST4_UPD"; |
1392 | case ARMISD::VST2LN_UPD: return "ARMISD::VST2LN_UPD"; |
1393 | case ARMISD::VST3LN_UPD: return "ARMISD::VST3LN_UPD"; |
1394 | case ARMISD::VST4LN_UPD: return "ARMISD::VST4LN_UPD"; |
1395 | } |
1396 | return nullptr; |
1397 | } |
1398 | |
1399 | EVT ARMTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &, |
1400 | EVT VT) const { |
1401 | if (!VT.isVector()) |
1402 | return getPointerTy(DL); |
1403 | return VT.changeVectorElementTypeToInteger(); |
1404 | } |
1405 | |
1406 | /// getRegClassFor - Return the register class that should be used for the |
1407 | /// specified value type. |
1408 | const TargetRegisterClass *ARMTargetLowering::getRegClassFor(MVT VT) const { |
1409 | // Map v4i64 to QQ registers but do not make the type legal. Similarly map |
1410 | // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to |
1411 | // load / store 4 to 8 consecutive D registers. |
1412 | if (Subtarget->hasNEON()) { |
1413 | if (VT == MVT::v4i64) |
1414 | return &ARM::QQPRRegClass; |
1415 | if (VT == MVT::v8i64) |
1416 | return &ARM::QQQQPRRegClass; |
1417 | } |
1418 | return TargetLowering::getRegClassFor(VT); |
1419 | } |
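| |
| // Illustrative example (not in the original source): a NEON vld4 filling |
| // four consecutive D registers can be modeled as one REG_SEQUENCE of type |
| // v4i64 placed in QQPR, even though v4i64 itself is never a legal type. |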
1420 | |
1421 | // memcpy, and other memory intrinsics, typically try to use LDM/STM if the |
1422 | // source/dest is aligned and the copy size is large enough. We therefore want |
1423 | // to align such objects passed to memory intrinsics. |
1424 | bool ARMTargetLowering::shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize, |
1425 | unsigned &PrefAlign) const { |
1426 | if (!isa<MemIntrinsic>(CI)) |
1427 | return false; |
1428 | MinSize = 8; |
1429 | // On ARM11 onwards (excluding M class) 8-byte aligned LDM is typically 1 |
1430 | // cycle faster than 4-byte aligned LDM. |
1431 | PrefAlign = (Subtarget->hasV6Ops() && !Subtarget->isMClass() ? 8 : 4); |
1432 | return true; |
1433 | } |
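| |
| // Illustrative example (not in the original source): on a v6 A/R-class |
| // core, a memcpy whose operands are 8-byte aligned and at least MinSize |
| // bytes long can be expanded with LDM/STM pairs, so this hint asks the |
| // caller to raise such objects to 8-byte alignment. |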
1434 | |
1435 | // Create a fast isel object. |
1436 | FastISel * |
1437 | ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo, |
1438 | const TargetLibraryInfo *libInfo) const { |
1439 | return ARM::createFastISel(funcInfo, libInfo); |
1440 | } |
1441 | |
1442 | Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const { |
1443 | unsigned NumVals = N->getNumValues(); |
1444 | if (!NumVals) |
1445 | return Sched::RegPressure; |
1446 | |
1447 | for (unsigned i = 0; i != NumVals; ++i) { |
1448 | EVT VT = N->getValueType(i); |
1449 | if (VT == MVT::Glue || VT == MVT::Other) |
1450 | continue; |
1451 | if (VT.isFloatingPoint() || VT.isVector()) |
1452 | return Sched::ILP; |
1453 | } |
1454 | |
1455 | if (!N->isMachineOpcode()) |
1456 | return Sched::RegPressure; |
1457 | |
1458 | // Loads are scheduled for latency even if the instruction itinerary |
1459 | // is not available. |
1460 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
1461 | const MCInstrDesc &MCID = TII->get(N->getMachineOpcode()); |
1462 | |
1463 | if (MCID.getNumDefs() == 0) |
1464 | return Sched::RegPressure; |
1465 | if (!Itins->isEmpty() && |
1466 | Itins->getOperandCycle(MCID.getSchedClass(), 0) > 2) |
1467 | return Sched::ILP; |
1468 | |
1469 | return Sched::RegPressure; |
1470 | } |
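| |
| // Illustrative example (not in the original source): a node producing an |
| // f64 or vector result is scheduled for ILP above, while a node with only |
| // chain/glue results (or no defs at all) falls back to register-pressure |
| // scheduling. |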
1471 | |
1472 | //===----------------------------------------------------------------------===// |
1473 | // Lowering Code |
1474 | //===----------------------------------------------------------------------===// |
1475 | |
1476 | static bool isSRL16(const SDValue &Op) { |
1477 | if (Op.getOpcode() != ISD::SRL) |
1478 | return false; |
1479 | if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1))) |
1480 | return Const->getZExtValue() == 16; |
1481 | return false; |
1482 | } |
1483 | |
1484 | static bool isSRA16(const SDValue &Op) { |
1485 | if (Op.getOpcode() != ISD::SRA) |
1486 | return false; |
1487 | if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1))) |
1488 | return Const->getZExtValue() == 16; |
1489 | return false; |
1490 | } |
1491 | |
1492 | static bool isSHL16(const SDValue &Op) { |
1493 | if (Op.getOpcode() != ISD::SHL) |
1494 | return false; |
1495 | if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1))) |
1496 | return Const->getZExtValue() == 16; |
1497 | return false; |
1498 | } |
1499 | |
1500 | // Check for a signed 16-bit value. We special case SRA because it keeps |
1501 | // things simpler when also looking for SRAs that aren't sign extending a |
1502 | // smaller value. Without the check, we'd need to take extra care with |
1503 | // checking order for some operations. |
1504 | static bool isS16(const SDValue &Op, SelectionDAG &DAG) { |
1505 | if (isSRA16(Op)) |
1506 | return isSHL16(Op.getOperand(0)); |
1507 | return DAG.ComputeNumSignBits(Op) == 17; |
1508 | } |
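| |
| // Illustrative example (not in the original source): |
| //   (sra (shl X, 16), 16) |
| // sign-extends the low half of X and is accepted via the isSRA16 path; |
| // any other operand is accepted only when ComputeNumSignBits(Op) == 17, |
| // i.e. the top 17 bits are sign copies, leaving a signed 16-bit payload. |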
1509 | |
1510 | /// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC |
1511 | static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) { |
1512 | switch (CC) { |
1513 | default: llvm_unreachable("Unknown condition code!"); |
1514 | case ISD::SETNE: return ARMCC::NE; |
1515 | case ISD::SETEQ: return ARMCC::EQ; |
1516 | case ISD::SETGT: return ARMCC::GT; |
1517 | case ISD::SETGE: return ARMCC::GE; |
1518 | case ISD::SETLT: return ARMCC::LT; |
1519 | case ISD::SETLE: return ARMCC::LE; |
1520 | case ISD::SETUGT: return ARMCC::HI; |
1521 | case ISD::SETUGE: return ARMCC::HS; |
1522 | case ISD::SETULT: return ARMCC::LO; |
1523 | case ISD::SETULE: return ARMCC::LS; |
1524 | } |
1525 | } |
1526 | |
1527 | /// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC. |
1528 | static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode, |
1529 | ARMCC::CondCodes &CondCode2, bool &InvalidOnQNaN) { |
1530 | CondCode2 = ARMCC::AL; |
1531 | InvalidOnQNaN = true; |
1532 | switch (CC) { |
1533 | default: llvm_unreachable("Unknown FP condition!"); |
1534 | case ISD::SETEQ: |
1535 | case ISD::SETOEQ: |
1536 | CondCode = ARMCC::EQ; |
1537 | InvalidOnQNaN = false; |
1538 | break; |
1539 | case ISD::SETGT: |
1540 | case ISD::SETOGT: CondCode = ARMCC::GT; break; |
1541 | case ISD::SETGE: |
1542 | case ISD::SETOGE: CondCode = ARMCC::GE; break; |
1543 | case ISD::SETOLT: CondCode = ARMCC::MI; break; |
1544 | case ISD::SETOLE: CondCode = ARMCC::LS; break; |
1545 | case ISD::SETONE: |
1546 | CondCode = ARMCC::MI; |
1547 | CondCode2 = ARMCC::GT; |
1548 | InvalidOnQNaN = false; |
1549 | break; |
1550 | case ISD::SETO: CondCode = ARMCC::VC; break; |
1551 | case ISD::SETUO: CondCode = ARMCC::VS; break; |
1552 | case ISD::SETUEQ: |
1553 | CondCode = ARMCC::EQ; |
1554 | CondCode2 = ARMCC::VS; |
1555 | InvalidOnQNaN = false; |
1556 | break; |
1557 | case ISD::SETUGT: CondCode = ARMCC::HI; break; |
1558 | case ISD::SETUGE: CondCode = ARMCC::PL; break; |
1559 | case ISD::SETLT: |
1560 | case ISD::SETULT: CondCode = ARMCC::LT; break; |
1561 | case ISD::SETLE: |
1562 | case ISD::SETULE: CondCode = ARMCC::LE; break; |
1563 | case ISD::SETNE: |
1564 | case ISD::SETUNE: |
1565 | CondCode = ARMCC::NE; |
1566 | InvalidOnQNaN = false; |
1567 | break; |
1568 | } |
1569 | } |
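| |
| // Worked example (illustrative): for SETUEQ ("unordered or equal") this |
| // returns CondCode = EQ and CondCode2 = VS, so a select would be lowered |
| // to two predicated moves, e.g. |
| //   moveq r0, r1   @ taken when equal |
| //   movvs r0, r1   @ also taken when the compare was unordered |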
1570 | |
1571 | //===----------------------------------------------------------------------===// |
1572 | // Calling Convention Implementation |
1573 | //===----------------------------------------------------------------------===// |
1574 | |
1575 | #include "ARMGenCallingConv.inc" |
1576 | |
1577 | /// getEffectiveCallingConv - Get the effective calling convention, taking into |
1578 | /// account presence of floating point hardware and calling convention |
1579 | /// limitations, such as support for variadic functions. |
1580 | CallingConv::ID |
1581 | ARMTargetLowering::getEffectiveCallingConv(CallingConv::ID CC, |
1582 | bool isVarArg) const { |
1583 | switch (CC) { |
1584 | default: |
1585 | report_fatal_error("Unsupported calling convention"); |
1586 | case CallingConv::ARM_AAPCS: |
1587 | case CallingConv::ARM_APCS: |
1588 | case CallingConv::GHC: |
1589 | return CC; |
1590 | case CallingConv::PreserveMost: |
1591 | return CallingConv::PreserveMost; |
1592 | case CallingConv::ARM_AAPCS_VFP: |
1593 | case CallingConv::Swift: |
1594 | return isVarArg ? CallingConv::ARM_AAPCS : CallingConv::ARM_AAPCS_VFP; |
1595 | case CallingConv::C: |
1596 | if (!Subtarget->isAAPCS_ABI()) |
1597 | return CallingConv::ARM_APCS; |
1598 | else if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() && |
1599 | getTargetMachine().Options.FloatABIType == FloatABI::Hard && |
1600 | !isVarArg) |
1601 | return CallingConv::ARM_AAPCS_VFP; |
1602 | else |
1603 | return CallingConv::ARM_AAPCS; |
1604 | case CallingConv::Fast: |
1605 | case CallingConv::CXX_FAST_TLS: |
1606 | if (!Subtarget->isAAPCS_ABI()) { |
1607 | if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() && !isVarArg) |
1608 | return CallingConv::Fast; |
1609 | return CallingConv::ARM_APCS; |
1610 | } else if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() && !isVarArg) |
1611 | return CallingConv::ARM_AAPCS_VFP; |
1612 | else |
1613 | return CallingConv::ARM_AAPCS; |
1614 | } |
1615 | } |
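| |
| // Illustrative example (not in the original source): a CallingConv::C |
| // call on an AAPCS target with VFP2 (and not Thumb1), a hard-float ABI, |
| // and a non-variadic callee resolves to ARM_AAPCS_VFP, with FP arguments |
| // in s/d registers; a variadic callee drops back to plain ARM_AAPCS and |
| // core registers. |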
1616 | |
1617 | CCAssignFn *ARMTargetLowering::CCAssignFnForCall(CallingConv::ID CC, |
1618 | bool isVarArg) const { |
1619 | return CCAssignFnForNode(CC, false, isVarArg); |
1620 | } |
1621 | |
1622 | CCAssignFn *ARMTargetLowering::CCAssignFnForReturn(CallingConv::ID CC, |
1623 | bool isVarArg) const { |
1624 | return CCAssignFnForNode(CC, true, isVarArg); |
1625 | } |
1626 | |
1627 | /// CCAssignFnForNode - Selects the correct CCAssignFn for the given |
1628 | /// CallingConvention. |
1629 | CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC, |
1630 | bool Return, |
1631 | bool isVarArg) const { |
1632 | switch (getEffectiveCallingConv(CC, isVarArg)) { |
1633 | default: |
1634 | report_fatal_error("Unsupported calling convention"); |
1635 | case CallingConv::ARM_APCS: |
1636 | return (Return ? RetCC_ARM_APCS : CC_ARM_APCS); |
1637 | case CallingConv::ARM_AAPCS: |
1638 | return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS); |
1639 | case CallingConv::ARM_AAPCS_VFP: |
1640 | return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP); |
1641 | case CallingConv::Fast: |
1642 | return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS); |
1643 | case CallingConv::GHC: |
1644 | return (Return ? RetCC_ARM_APCS : CC_ARM_APCS_GHC); |
1645 | case CallingConv::PreserveMost: |
1646 | return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS); |
1647 | } |
1648 | } |
1649 | |
1650 | /// LowerCallResult - Lower the result values of a call into the |
1651 | /// appropriate copies out of appropriate physical registers. |
1652 | SDValue ARMTargetLowering::LowerCallResult( |
1653 | SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, |
1654 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, |
1655 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn, |
1656 | SDValue ThisVal) const { |
1657 | // Assign locations to each value returned by this call. |
1658 | SmallVector<CCValAssign, 16> RVLocs; |
1659 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, |
1660 | *DAG.getContext()); |
1661 | CCInfo.AnalyzeCallResult(Ins, CCAssignFnForReturn(CallConv, isVarArg)); |
1662 | |
1663 | // Copy all of the result registers out of their specified physreg. |
1664 | for (unsigned i = 0; i != RVLocs.size(); ++i) { |
1665 | CCValAssign VA = RVLocs[i]; |
1666 | |
1667 | // Pass 'this' value directly from the argument to return value, to avoid |
1668 | // reg unit interference |
1669 | if (i == 0 && isThisReturn) { |
1670 | assert(!VA.needsCustom() && VA.getLocVT() == MVT::i32 && |
1671 | "unexpected return calling convention register assignment"); |
1672 | InVals.push_back(ThisVal); |
1673 | continue; |
1674 | } |
1675 | |
1676 | SDValue Val; |
1677 | if (VA.needsCustom()) { |
1678 | // Handle f64 or half of a v2f64. |
1679 | SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, |
1680 | InFlag); |
1681 | Chain = Lo.getValue(1); |
1682 | InFlag = Lo.getValue(2); |
1683 | VA = RVLocs[++i]; // skip ahead to next loc |
1684 | SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, |
1685 | InFlag); |
1686 | Chain = Hi.getValue(1); |
1687 | InFlag = Hi.getValue(2); |
1688 | if (!Subtarget->isLittle()) |
1689 | std::swap(Lo, Hi); |
1690 | Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi); |
1691 | |
1692 | if (VA.getLocVT() == MVT::v2f64) { |
1693 | SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64); |
1694 | Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val, |
1695 | DAG.getConstant(0, dl, MVT::i32)); |
1696 | |
1697 | VA = RVLocs[++i]; // skip ahead to next loc |
1698 | Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag); |
1699 | Chain = Lo.getValue(1); |
1700 | InFlag = Lo.getValue(2); |
1701 | VA = RVLocs[++i]; // skip ahead to next loc |
1702 | Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag); |
1703 | Chain = Hi.getValue(1); |
1704 | InFlag = Hi.getValue(2); |
1705 | if (!Subtarget->isLittle()) |
1706 | std::swap(Lo, Hi); |
1707 | Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi); |
1708 | Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val, |
1709 | DAG.getConstant(1, dl, MVT::i32)); |
1710 | } |
1711 | } else { |
1712 | Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(), |
1713 | InFlag); |
1714 | Chain = Val.getValue(1); |
1715 | InFlag = Val.getValue(2); |
1716 | } |
1717 | |
1718 | switch (VA.getLocInfo()) { |
1719 | default: llvm_unreachable("Unknown loc info!"); |
1720 | case CCValAssign::Full: break; |
1721 | case CCValAssign::BCvt: |
1722 | Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val); |
1723 | break; |
1724 | } |
1725 | |
1726 | InVals.push_back(Val); |
1727 | } |
1728 | |
1729 | return Chain; |
1730 | } |
1731 | |
1732 | /// LowerMemOpCallTo - Store the argument to the stack. |
1733 | SDValue ARMTargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, |
1734 | SDValue Arg, const SDLoc &dl, |
1735 | SelectionDAG &DAG, |
1736 | const CCValAssign &VA, |
1737 | ISD::ArgFlagsTy Flags) const { |
1738 | unsigned LocMemOffset = VA.getLocMemOffset(); |
1739 | SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl); |
1740 | PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()), |
1741 | StackPtr, PtrOff); |
1742 | return DAG.getStore( |
1743 | Chain, dl, Arg, PtrOff, |
1744 | MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset)); |
1745 | } |
1746 | |
1747 | void ARMTargetLowering::PassF64ArgInRegs(const SDLoc &dl, SelectionDAG &DAG, |
1748 | SDValue Chain, SDValue &Arg, |
1749 | RegsToPassVector &RegsToPass, |
1750 | CCValAssign &VA, CCValAssign &NextVA, |
1751 | SDValue &StackPtr, |
1752 | SmallVectorImpl<SDValue> &MemOpChains, |
1753 | ISD::ArgFlagsTy Flags) const { |
1754 | SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl, |
1755 | DAG.getVTList(MVT::i32, MVT::i32), Arg); |
1756 | unsigned id = Subtarget->isLittle() ? 0 : 1; |
1757 | RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd.getValue(id))); |
1758 | |
1759 | if (NextVA.isRegLoc()) |
1760 | RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1-id))); |
1761 | else { |
1762 | assert(NextVA.isMemLoc()); |
1763 | if (!StackPtr.getNode()) |
1764 | StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, |
1765 | getPointerTy(DAG.getDataLayout())); |
1766 | |
1767 | MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1-id), |
1768 | dl, DAG, NextVA, |
1769 | Flags)); |
1770 | } |
1771 | } |
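| |
| // Illustrative example (not in the original source): an f64 argument |
| // assigned to r0/r1 is split by VMOVRRD into two i32 halves; on |
| // little-endian targets id == 0, so the low word goes to VA's register |
| // (r0) and the high word to NextVA's (r1), while big-endian swaps the two |
| // value numbers. |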
1772 | |
1773 | /// LowerCall - Lower a call into a callseq_start <- |
1774 | /// ARMISD::CALL <- callseq_end chain. Also add input and output parameter |
1775 | /// nodes. |
1776 | SDValue |
1777 | ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, |
1778 | SmallVectorImpl<SDValue> &InVals) const { |
1779 | SelectionDAG &DAG = CLI.DAG; |
1780 | SDLoc &dl = CLI.DL; |
1781 | SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; |
1782 | SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; |
1783 | SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; |
1784 | SDValue Chain = CLI.Chain; |
1785 | SDValue Callee = CLI.Callee; |
1786 | bool &isTailCall = CLI.IsTailCall; |
1787 | CallingConv::ID CallConv = CLI.CallConv; |
1788 | bool doesNotRet = CLI.DoesNotReturn; |
1789 | bool isVarArg = CLI.IsVarArg; |
1790 | |
1791 | MachineFunction &MF = DAG.getMachineFunction(); |
1792 | bool isStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet(); |
1793 | bool isThisReturn = false; |
1794 | bool isSibCall = false; |
1795 | auto Attr = MF.getFunction().getFnAttribute("disable-tail-calls"); |
1796 | |
1797 | // Disable tail calls if they're not supported. |
1798 | if (!Subtarget->supportsTailCall() || Attr.getValueAsString() == "true") |
1799 | isTailCall = false; |
1800 | |
1801 | if (isTailCall) { |
1802 | // Check if it's really possible to do a tail call. |
1803 | isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, |
1804 | isVarArg, isStructRet, MF.getFunction().hasStructRetAttr(), |
1805 | Outs, OutVals, Ins, DAG); |
1806 | if (!isTailCall && CLI.CS && CLI.CS.isMustTailCall()) |
1807 | report_fatal_error("failed to perform tail call elimination on a call " |
1808 | "site marked musttail"); |
1809 | // We don't support GuaranteedTailCallOpt for ARM, only automatically |
1810 | // detected sibcalls. |
1811 | if (isTailCall) { |
1812 | ++NumTailCalls; |
1813 | isSibCall = true; |
1814 | } |
1815 | } |
1816 | |
1817 | // Analyze operands of the call, assigning locations to each operand. |
1818 | SmallVector<CCValAssign, 16> ArgLocs; |
1819 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, |
1820 | *DAG.getContext()); |
1821 | CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CallConv, isVarArg)); |
1822 | |
1823 | // Get a count of how many bytes are to be pushed on the stack. |
1824 | unsigned NumBytes = CCInfo.getNextStackOffset(); |
1825 | |
1826 | // For tail calls, memory operands are available in our caller's stack. |
1827 | if (isSibCall) |
1828 | NumBytes = 0; |
1829 | |
1830 | // Adjust the stack pointer for the new arguments... |
1831 | // These operations are automatically eliminated by the prolog/epilog pass |
1832 | if (!isSibCall) |
1833 | Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl); |
1834 | |
1835 | SDValue StackPtr = |
1836 | DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy(DAG.getDataLayout())); |
1837 | |
1838 | RegsToPassVector RegsToPass; |
1839 | SmallVector<SDValue, 8> MemOpChains; |
1840 | |
1841 | // Walk the register/memloc assignments, inserting copies/loads. In the case |
1842 | // of tail call optimization, arguments are handled later. |
1843 | for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); |
1844 | i != e; |
1845 | ++i, ++realArgIdx) { |
1846 | CCValAssign &VA = ArgLocs[i]; |
1847 | SDValue Arg = OutVals[realArgIdx]; |
1848 | ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; |
1849 | bool isByVal = Flags.isByVal(); |
1850 | |
1851 | // Promote the value if needed. |
1852 | switch (VA.getLocInfo()) { |
1853 | default: llvm_unreachable("Unknown loc info!"); |
1854 | case CCValAssign::Full: break; |
1855 | case CCValAssign::SExt: |
1856 | Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); |
1857 | break; |
1858 | case CCValAssign::ZExt: |
1859 | Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg); |
1860 | break; |
1861 | case CCValAssign::AExt: |
1862 | Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); |
1863 | break; |
1864 | case CCValAssign::BCvt: |
1865 | Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); |
1866 | break; |
1867 | } |
1868 | |
1869 | // f64 and v2f64 might be passed in i32 pairs and must be split into pieces |
1870 | if (VA.needsCustom()) { |
1871 | if (VA.getLocVT() == MVT::v2f64) { |
1872 | SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, |
1873 | DAG.getConstant(0, dl, MVT::i32)); |
1874 | SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, |
1875 | DAG.getConstant(1, dl, MVT::i32)); |
1876 | |
1877 | PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass, |
1878 | VA, ArgLocs[++i], StackPtr, MemOpChains, Flags); |
1879 | |
1880 | VA = ArgLocs[++i]; // skip ahead to next loc |
1881 | if (VA.isRegLoc()) { |
1882 | PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass, |
1883 | VA, ArgLocs[++i], StackPtr, MemOpChains, Flags); |
1884 | } else { |
1885 | assert(VA.isMemLoc()); |
1886 | |
1887 | MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1, |
1888 | dl, DAG, VA, Flags)); |
1889 | } |
1890 | } else { |
1891 | PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i], |
1892 | StackPtr, MemOpChains, Flags); |
1893 | } |
1894 | } else if (VA.isRegLoc()) { |
1895 | if (realArgIdx == 0 && Flags.isReturned() && !Flags.isSwiftSelf() && |
1896 | Outs[0].VT == MVT::i32) { |
1897 | assert(VA.getLocVT() == MVT::i32 && |
1898 | "unexpected calling convention register assignment"); |
1899 | assert(!Ins.empty() && Ins[0].VT == MVT::i32 && |
1900 | "unexpected use of 'returned'"); |
1901 | isThisReturn = true; |
1902 | } |
1903 | RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); |
1904 | } else if (isByVal) { |
1905 | assert(VA.isMemLoc()); |
1906 | unsigned offset = 0; |
1907 | |
1908 | // True if this byval aggregate will be split between registers |
1909 | // and memory. |
1910 | unsigned ByValArgsCount = CCInfo.getInRegsParamsCount(); |
1911 | unsigned CurByValIdx = CCInfo.getInRegsParamsProcessed(); |
1912 | |
1913 | if (CurByValIdx < ByValArgsCount) { |
1914 | |
1915 | unsigned RegBegin, RegEnd; |
1916 | CCInfo.getInRegsParamInfo(CurByValIdx, RegBegin, RegEnd); |
1917 | |
1918 | EVT PtrVT = |
1919 | DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); |
1920 | unsigned int i, j; |
1921 | for (i = 0, j = RegBegin; j < RegEnd; i++, j++) { |
1922 | SDValue Const = DAG.getConstant(4*i, dl, MVT::i32); |
1923 | SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); |
1924 | SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg, |
1925 | MachinePointerInfo(), |
1926 | DAG.InferPtrAlignment(AddArg)); |
1927 | MemOpChains.push_back(Load.getValue(1)); |
1928 | RegsToPass.push_back(std::make_pair(j, Load)); |
1929 | } |
1930 | |
1931 | // If the parameter size extends beyond the register area, the "offset" |
1932 | // value helps us to calculate the stack slot for the remaining part. |
1933 | offset = RegEnd - RegBegin; |
1934 | |
1935 | CCInfo.nextInRegsParam(); |
1936 | } |
1937 | |
1938 | if (Flags.getByValSize() > 4*offset) { |
1939 | auto PtrVT = getPointerTy(DAG.getDataLayout()); |
1940 | unsigned LocMemOffset = VA.getLocMemOffset(); |
1941 | SDValue StkPtrOff = DAG.getIntPtrConstant(LocMemOffset, dl); |
1942 | SDValue Dst = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, StkPtrOff); |
1943 | SDValue SrcOffset = DAG.getIntPtrConstant(4*offset, dl); |
1944 | SDValue Src = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, SrcOffset); |
1945 | SDValue SizeNode = DAG.getConstant(Flags.getByValSize() - 4*offset, dl, |
1946 | MVT::i32); |
1947 | SDValue AlignNode = DAG.getConstant(Flags.getByValAlign(), dl, |
1948 | MVT::i32); |
1949 | |
1950 | SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue); |
1951 | SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode}; |
1952 | MemOpChains.push_back(DAG.getNode(ARMISD::COPY_STRUCT_BYVAL, dl, VTs, |
1953 | Ops)); |
1954 | } |
1955 | } else if (!isSibCall) { |
1956 | assert(VA.isMemLoc()); |
1957 | |
1958 | MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg, |
1959 | dl, DAG, VA, Flags)); |
1960 | } |
1961 | } |
1962 | |
1963 | if (!MemOpChains.empty()) |
1964 | Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); |
1965 | |
1966 | // Build a sequence of copy-to-reg nodes chained together with token chain |
1967 | // and flag operands which copy the outgoing args into the appropriate regs. |
1968 | SDValue InFlag; |
1969 | // Tail call byval lowering might overwrite argument registers so in case of |
1970 | // tail call optimization the copies to registers are lowered later. |
1971 | if (!isTailCall) |
1972 | for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { |
1973 | Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, |
1974 | RegsToPass[i].second, InFlag); |
1975 | InFlag = Chain.getValue(1); |
1976 | } |
1977 | |
1978 | // For tail calls lower the arguments to the 'real' stack slot. |
1979 | if (isTailCall) { |
1980 | // Force all the incoming stack arguments to be loaded from the stack |
1981 | // before any new outgoing arguments are stored to the stack, because the |
1982 | // outgoing stack slots may alias the incoming argument stack slots, and |
1983 | // the alias isn't otherwise explicit. This is slightly more conservative |
1984 | // than necessary, because it means that each store effectively depends |
1985 | // on every argument instead of just those arguments it would clobber. |
1986 | |
1987 | // Do not flag preceding copytoreg stuff together with the following stuff. |
1988 | InFlag = SDValue(); |
1989 | for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { |
1990 | Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, |
1991 | RegsToPass[i].second, InFlag); |
1992 | InFlag = Chain.getValue(1); |
1993 | } |
1994 | InFlag = SDValue(); |
1995 | } |
1996 | |
1997 | // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every |
1998 | // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol |
1999 | // node so that legalize doesn't hack it. |
2000 | bool isDirect = false; |
2001 | |
2002 | const TargetMachine &TM = getTargetMachine(); |
2003 | const Module *Mod = MF.getFunction().getParent(); |
2004 | const GlobalValue *GV = nullptr; |
2005 | if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) |
2006 | GV = G->getGlobal(); |
2007 | bool isStub = |
2008 | !TM.shouldAssumeDSOLocal(*Mod, GV) && Subtarget->isTargetMachO(); |
2009 | |
2010 | bool isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass()); |
2011 | bool isLocalARMFunc = false; |
2012 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
2013 | auto PtrVt = getPointerTy(DAG.getDataLayout()); |
2014 | |
2015 | if (Subtarget->genLongCalls()) { |
2016 | assert((!isPositionIndependent() || Subtarget->isTargetWindows()) && |
2017 | "long-calls codegen is not position independent!"); |
2018 | // Handle a global address or an external symbol. If it's not one of |
2019 | // those, the target's already in a register, so we don't need to do |
2020 | // anything extra. |
2021 | if (isa<GlobalAddressSDNode>(Callee)) { |
2022 | // Create a constant pool entry for the callee address |
2023 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); |
2024 | ARMConstantPoolValue *CPV = |
2025 | ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 0); |
2026 | |
2027 | // Get the address of the callee into a register |
2028 | SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, 4); |
2029 | CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); |
2030 | Callee = DAG.getLoad( |
2031 | PtrVt, dl, DAG.getEntryNode(), CPAddr, |
2032 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); |
2033 | } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { |
2034 | const char *Sym = S->getSymbol(); |
2035 | |
2036 | // Create a constant pool entry for the callee address |
2037 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); |
2038 | ARMConstantPoolValue *CPV = |
2039 | ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym, |
2040 | ARMPCLabelIndex, 0); |
2041 | // Get the address of the callee into a register |
2042 | SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, 4); |
2043 | CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); |
2044 | Callee = DAG.getLoad( |
2045 | PtrVt, dl, DAG.getEntryNode(), CPAddr, |
2046 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); |
2047 | } |
2048 | } else if (isa<GlobalAddressSDNode>(Callee)) { |
2049 | // If we're optimizing for minimum size and the function is called three or |
2050 | // more times in this block, we can improve codesize by calling indirectly |
2051 | // as BLXr has a 16-bit encoding. |
2052 | auto *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal(); |
2053 | auto *BB = CLI.CS.getParent(); |
2054 | bool PreferIndirect = |
2055 | Subtarget->isThumb() && MF.getFunction().optForMinSize() && |
2056 | count_if(GV->users(), [&BB](const User *U) { |
2057 | return isa<Instruction>(U) && cast<Instruction>(U)->getParent() == BB; |
2058 | }) > 2; |
2059 | |
2060 | if (!PreferIndirect) { |
2061 | isDirect = true; |
2062 | bool isDef = GV->isStrongDefinitionForLinker(); |
2063 | |
2064 | // ARM call to a local ARM function is predicable. |
2065 | isLocalARMFunc = !Subtarget->isThumb() && (isDef || !ARMInterworking); |
2066 | // tBX takes a register source operand. |
2067 | if (isStub && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { |
2068 | assert(Subtarget->isTargetMachO() && "WrapperPIC use on non-MachO?"); |
2069 | Callee = DAG.getNode( |
2070 | ARMISD::WrapperPIC, dl, PtrVt, |
2071 | DAG.getTargetGlobalAddress(GV, dl, PtrVt, 0, ARMII::MO_NONLAZY)); |
2072 | Callee = DAG.getLoad( |
2073 | PtrVt, dl, DAG.getEntryNode(), Callee, |
2074 | MachinePointerInfo::getGOT(DAG.getMachineFunction()), |
2075 | /* Alignment = */ 0, MachineMemOperand::MODereferenceable | |
2076 | MachineMemOperand::MOInvariant); |
2077 | } else if (Subtarget->isTargetCOFF()) { |
2078 | assert(Subtarget->isTargetWindows() && |
2079 | "Windows is the only supported COFF target"); |
2080 | unsigned TargetFlags = GV->hasDLLImportStorageClass() |
2081 | ? ARMII::MO_DLLIMPORT |
2082 | : ARMII::MO_NO_FLAG; |
2083 | Callee = DAG.getTargetGlobalAddress(GV, dl, PtrVt, /*Offset=*/0, |
2084 | TargetFlags); |
2085 | if (GV->hasDLLImportStorageClass()) |
2086 | Callee = |
2087 | DAG.getLoad(PtrVt, dl, DAG.getEntryNode(), |
2088 | DAG.getNode(ARMISD::Wrapper, dl, PtrVt, Callee), |
2089 | MachinePointerInfo::getGOT(DAG.getMachineFunction())); |
2090 | } else { |
2091 | Callee = DAG.getTargetGlobalAddress(GV, dl, PtrVt, 0, 0); |
2092 | } |
2093 | } |
2094 | } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { |
2095 | isDirect = true; |
2096 | // tBX takes a register source operand. |
2097 | const char *Sym = S->getSymbol(); |
2098 | if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { |
2099 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); |
2100 | ARMConstantPoolValue *CPV = |
2101 | ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym, |
2102 | ARMPCLabelIndex, 4); |
2103 | SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, 4); |
2104 | CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); |
2105 | Callee = DAG.getLoad( |
2106 | PtrVt, dl, DAG.getEntryNode(), CPAddr, |
2107 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); |
2108 | SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32); |
2109 | Callee = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVt, Callee, PICLabel); |
2110 | } else { |
2111 | Callee = DAG.getTargetExternalSymbol(Sym, PtrVt, 0); |
2112 | } |
2113 | } |
2114 | |
2115 | // FIXME: handle tail calls differently. |
2116 | unsigned CallOpc; |
2117 | if (Subtarget->isThumb()) { |
2118 | if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps()) |
2119 | CallOpc = ARMISD::CALL_NOLINK; |
2120 | else |
2121 | CallOpc = ARMISD::CALL; |
2122 | } else { |
2123 | if (!isDirect && !Subtarget->hasV5TOps()) |
2124 | CallOpc = ARMISD::CALL_NOLINK; |
2125 | else if (doesNotRet && isDirect && Subtarget->hasRetAddrStack() && |
2126 | // Emit regular call when code size is the priority |
2127 | !MF.getFunction().optForMinSize()) |
2128 | // "mov lr, pc; b _foo" to avoid confusing the RSP |
2129 | CallOpc = ARMISD::CALL_NOLINK; |
2130 | else |
2131 | CallOpc = isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL; |
2132 | } |
2133 | |
2134 | std::vector<SDValue> Ops; |
2135 | Ops.push_back(Chain); |
2136 | Ops.push_back(Callee); |
2137 | |
2138 | // Add argument registers to the end of the list so that they are known live |
2139 | // into the call. |
2140 | for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) |
2141 | Ops.push_back(DAG.getRegister(RegsToPass[i].first, |
2142 | RegsToPass[i].second.getValueType())); |
2143 | |
2144 | // Add a register mask operand representing the call-preserved registers. |
2145 | if (!isTailCall) { |
2146 | const uint32_t *Mask; |
2147 | const ARMBaseRegisterInfo *ARI = Subtarget->getRegisterInfo(); |
2148 | if (isThisReturn) { |
2149 | // For 'this' returns, use the R0-preserving mask if applicable |
2150 | Mask = ARI->getThisReturnPreservedMask(MF, CallConv); |
2151 | if (!Mask) { |
2152 | // Set isThisReturn to false if the calling convention is not one that |
2153 | // allows 'returned' to be modeled in this way, so LowerCallResult does |
2154 | // not try to pass 'this' straight through |
2155 | isThisReturn = false; |
2156 | Mask = ARI->getCallPreservedMask(MF, CallConv); |
2157 | } |
2158 | } else |
2159 | Mask = ARI->getCallPreservedMask(MF, CallConv); |
2160 | |
2161 | assert(Mask && "Missing call preserved mask for calling convention"); |
2162 | Ops.push_back(DAG.getRegisterMask(Mask)); |
2163 | } |
2164 | |
2165 | if (InFlag.getNode()) |
2166 | Ops.push_back(InFlag); |
2167 | |
2168 | SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); |
2169 | if (isTailCall) { |
2170 | MF.getFrameInfo().setHasTailCall(); |
2171 | return DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, Ops); |
2172 | } |
2173 | |
2174 | // Returns a chain and a flag for retval copy to use. |
2175 | Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops); |
2176 | InFlag = Chain.getValue(1); |
2177 | |
2178 | Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), |
2179 | DAG.getIntPtrConstant(0, dl, true), InFlag, dl); |
2180 | if (!Ins.empty()) |
2181 | InFlag = Chain.getValue(1); |
2182 | |
2183 | // Handle result values, copying them out of physregs into vregs that we |
2184 | // return. |
2185 | return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG, |
2186 | InVals, isThisReturn, |
2187 | isThisReturn ? OutVals[0] : SDValue()); |
2188 | } |
2189 | |
2190 | /// HandleByVal - Every parameter *after* a byval parameter is passed |
2191 | /// on the stack. Remember the next parameter register to allocate, |
2192 | /// and then confiscate the rest of the parameter registers to ensure |
2193 | /// this. |
2194 | void ARMTargetLowering::HandleByVal(CCState *State, unsigned &Size, |
2195 | unsigned Align) const { |
2196 | // Byval (as with any stack) slots are always at least 4 byte aligned. |
2197 | Align = std::max(Align, 4U); |
2198 | |
2199 | unsigned Reg = State->AllocateReg(GPRArgRegs); |
2200 | if (!Reg) |
2201 | return; |
2202 | |
2203 | unsigned AlignInRegs = Align / 4; |
2204 | unsigned Waste = (ARM::R4 - Reg) % AlignInRegs; |
2205 | for (unsigned i = 0; i < Waste; ++i) |
2206 | Reg = State->AllocateReg(GPRArgRegs); |
2207 | |
2208 | if (!Reg) |
2209 | return; |
2210 | |
2211 | unsigned Excess = 4 * (ARM::R4 - Reg); |
2212 | |
2213 | // Special case when NSAA != SP and the parameter size is greater than the |
2214 | // size of all remaining GPR regs. In that case we can't split the |
2215 | // parameter; we must send it to the stack. We also must set the NCRN to |
2216 | // R4, so all remaining registers are wasted. |
2217 | const unsigned NSAAOffset = State->getNextStackOffset(); |
2218 | if (NSAAOffset != 0 && Size > Excess) { |
2219 | while (State->AllocateReg(GPRArgRegs)) |
2220 | ; |
2221 | return; |
2222 | } |
2223 | |
2224 | // The first register for the byval parameter is the first register that |
2225 | // wasn't allocated before this method call, i.e. "Reg". |
2226 | // If the parameter is small enough to fit in the range [Reg, r4), then |
2227 | // the end (one past the last) register would be Reg + param-size-in-regs; |
2228 | // otherwise the parameter is split between registers and the stack, |
2229 | // and the end register would be r4 in that case. |
2230 | unsigned ByValRegBegin = Reg; |
2231 | unsigned ByValRegEnd = std::min<unsigned>(Reg + Size / 4, ARM::R4); |
2232 | State->addInRegsParamInfo(ByValRegBegin, ByValRegEnd); |
2233 | // Note that the first register was already allocated at the beginning of |
2234 | // this function; allocate the remaining registers we need. |
2235 | for (unsigned i = Reg + 1; i != ByValRegEnd; ++i) |
2236 | State->AllocateReg(GPRArgRegs); |
2237 | // A byval parameter that is split between registers and memory needs its |
2238 | // size truncated here. |
2239 | // In the case where the entire structure fits in registers, we set the |
2240 | // size in memory to zero. |
2241 | Size = std::max<int>(Size - Excess, 0); |
2242 | } |
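| |
| // Worked example (illustrative, assuming no stack arguments yet): a |
| // 20-byte byval with 4-byte alignment when r0 is already taken gets |
| // Reg = r1, so Excess = 4 * (r4 - r1) = 12 bytes fit in r1-r3. |
| // ByValRegBegin/End become r1/r4, and Size is truncated to 20 - 12 = 8 |
| // bytes that remain on the stack. |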
2243 | |
2244 | /// MatchingStackOffset - Return true if the given stack call argument is |
2245 | /// already available in the same position (relatively) of the caller's |
2246 | /// incoming argument stack. |
2247 | static |
2248 | bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, |
2249 | MachineFrameInfo &MFI, const MachineRegisterInfo *MRI, |
2250 | const TargetInstrInfo *TII) { |
2251 | unsigned Bytes = Arg.getValueSizeInBits() / 8; |
2252 | int FI = std::numeric_limits<int>::max(); |
2253 | if (Arg.getOpcode() == ISD::CopyFromReg) { |
2254 | unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg(); |
2255 | if (!TargetRegisterInfo::isVirtualRegister(VR)) |
2256 | return false; |
2257 | MachineInstr *Def = MRI->getVRegDef(VR); |
2258 | if (!Def) |
2259 | return false; |
2260 | if (!Flags.isByVal()) { |
2261 | if (!TII->isLoadFromStackSlot(*Def, FI)) |
2262 | return false; |
2263 | } else { |
2264 | return false; |
2265 | } |
2266 | } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) { |
2267 | if (Flags.isByVal()) |
2268 | // ByVal argument is passed in as a pointer but it's now being |
2269 | // dereferenced. e.g. |
2270 | // define @foo(%struct.X* %A) { |
2271 | // tail call @bar(%struct.X* byval %A) |
2272 | // } |
2273 | return false; |
2274 | SDValue Ptr = Ld->getBasePtr(); |
2275 | FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr); |
2276 | if (!FINode) |
2277 | return false; |
2278 | FI = FINode->getIndex(); |
2279 | } else |
2280 | return false; |
2281 | |
2282 | assert(FI != std::numeric_limits<int>::max()); |
2283 | if (!MFI.isFixedObjectIndex(FI)) |
2284 | return false; |
2285 | return Offset == MFI.getObjectOffset(FI) && Bytes == MFI.getObjectSize(FI); |
2286 | } |
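| |
| // Illustrative example (not in the original source): in |
| //   define void @f(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e) { |
| //     tail call void @g(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e) |
| //     ret void |
| //   } |
| // %e already sits in the caller's incoming stack slot at the offset and |
| // size @g expects, so the sibcall can reuse it without an extra store. |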
2287 | |
2288 | /// IsEligibleForTailCallOptimization - Check whether the call is eligible |
2289 | /// for tail call optimization. Targets which want to do tail call |
2290 | /// optimization should implement this function. |
2291 | bool |
2292 | ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, |
2293 | CallingConv::ID CalleeCC, |
2294 | bool isVarArg, |
2295 | bool isCalleeStructRet, |
2296 | bool isCallerStructRet, |
2297 | const SmallVectorImpl<ISD::OutputArg> &Outs, |
2298 | const SmallVectorImpl<SDValue> &OutVals, |
2299 | const SmallVectorImpl<ISD::InputArg> &Ins, |
2300 | SelectionDAG& DAG) const { |
2301 | MachineFunction &MF = DAG.getMachineFunction(); |
2302 | const Function &CallerF = MF.getFunction(); |
2303 | CallingConv::ID CallerCC = CallerF.getCallingConv(); |
2304 | |
2305 | assert(Subtarget->supportsTailCall());
2306 | |
2307 | // Tail calls to function pointers cannot be optimized for Thumb1 if the args |
2308 | // to the call take up r0-r3. The reason is that there are no legal registers |
2309 | // left to hold the pointer to the function to be called. |
2310 | if (Subtarget->isThumb1Only() && Outs.size() >= 4 && |
2311 | !isa<GlobalAddressSDNode>(Callee.getNode())) |
2312 | return false; |
2313 | |
2314 | // Look for obvious safe cases to perform tail call optimization that do not |
2315 | // require ABI changes. This is what gcc calls sibcall. |
2316 | |
2317 | // Exception-handling functions need a special set of instructions to indicate |
2318 | // a return to the hardware. Tail-calling another function would probably |
2319 | // break this. |
2320 | if (CallerF.hasFnAttribute("interrupt")) |
2321 | return false; |
2322 | |
2323 | // Also avoid sibcall optimization if either caller or callee uses struct |
2324 | // return semantics. |
2325 | if (isCalleeStructRet || isCallerStructRet) |
2326 | return false; |
2327 | |
2328 | // Externally-defined functions with weak linkage should not be |
2329 | // tail-called on ARM when the OS does not support dynamic |
2330 | // pre-emption of symbols, as the AAELF spec requires normal calls |
2331 | // to undefined weak functions to be replaced with a NOP or jump to the |
2332 | // next instruction. The behaviour of branch instructions in this |
2333 | // situation (as used for tail calls) is implementation-defined, so we |
2334 | // cannot rely on the linker replacing the tail call with a return. |
2335 | if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { |
2336 | const GlobalValue *GV = G->getGlobal(); |
2337 | const Triple &TT = getTargetMachine().getTargetTriple(); |
2338 | if (GV->hasExternalWeakLinkage() && |
2339 | (!TT.isOSWindows() || TT.isOSBinFormatELF() || TT.isOSBinFormatMachO())) |
2340 | return false; |
2341 | } |
2342 | |
2343 | // Check that the call results are passed in the same way. |
2344 | LLVMContext &C = *DAG.getContext(); |
2345 | if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins, |
2346 | CCAssignFnForReturn(CalleeCC, isVarArg), |
2347 | CCAssignFnForReturn(CallerCC, isVarArg))) |
2348 | return false; |
2349 | // The callee has to preserve all registers the caller needs to preserve. |
2350 | const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo(); |
2351 | const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC); |
2352 | if (CalleeCC != CallerCC) { |
2353 | const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC); |
2354 | if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved)) |
2355 | return false; |
2356 | } |
2357 | |
2358 | // If Caller's vararg or byval argument has been split between registers and |
2359 | // stack, do not perform tail call, since part of the argument is in caller's |
2360 | // local frame. |
2361 | const ARMFunctionInfo *AFI_Caller = MF.getInfo<ARMFunctionInfo>(); |
2362 | if (AFI_Caller->getArgRegsSaveSize()) |
2363 | return false; |
2364 | |
2365 | // If the callee takes no arguments then go on to check the results of the |
2366 | // call. |
2367 | if (!Outs.empty()) { |
2368 | // Check if stack adjustment is needed. For now, do not do this if any |
2369 | // argument is passed on the stack. |
2370 | SmallVector<CCValAssign, 16> ArgLocs; |
2371 | CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C); |
2372 | CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, isVarArg)); |
2373 | if (CCInfo.getNextStackOffset()) { |
2374 | // Check if the arguments are already laid out in the right way as |
2375 | // the caller's fixed stack objects. |
2376 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
2377 | const MachineRegisterInfo *MRI = &MF.getRegInfo(); |
2378 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
2379 | for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); |
2380 | i != e; |
2381 | ++i, ++realArgIdx) { |
2382 | CCValAssign &VA = ArgLocs[i]; |
2383 | EVT RegVT = VA.getLocVT(); |
2384 | SDValue Arg = OutVals[realArgIdx]; |
2385 | ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; |
2386 | if (VA.getLocInfo() == CCValAssign::Indirect) |
2387 | return false; |
2388 | if (VA.needsCustom()) { |
2389 | // f64 and vector types are split into multiple registers or |
2390 | // register/stack-slot combinations. The types will not match |
2391 | // the registers; give up on memory f64 refs until we figure |
2392 | // out what to do about this. |
2393 | if (!VA.isRegLoc()) |
2394 | return false; |
2395 | if (!ArgLocs[++i].isRegLoc()) |
2396 | return false; |
2397 | if (RegVT == MVT::v2f64) { |
2398 | if (!ArgLocs[++i].isRegLoc()) |
2399 | return false; |
2400 | if (!ArgLocs[++i].isRegLoc()) |
2401 | return false; |
2402 | } |
2403 | } else if (!VA.isRegLoc()) { |
2404 | if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, |
2405 | MFI, MRI, TII)) |
2406 | return false; |
2407 | } |
2408 | } |
2409 | } |
2410 | |
2411 | const MachineRegisterInfo &MRI = MF.getRegInfo(); |
2412 | if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals)) |
2413 | return false; |
2414 | } |
2415 | |
2416 | return true; |
2417 | } |
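// Illustrative example (a sketch): a call this predicate rejects because the
// callee uses struct-return semantics:
//
//   %s = alloca %struct.Big
//   tail call void @callee(%struct.Big* sret %s)  ; isCalleeStructRet -> reject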
2418 | |
2419 | bool |
2420 | ARMTargetLowering::CanLowerReturn(CallingConv::ID CallConv, |
2421 | MachineFunction &MF, bool isVarArg, |
2422 | const SmallVectorImpl<ISD::OutputArg> &Outs, |
2423 | LLVMContext &Context) const { |
2424 | SmallVector<CCValAssign, 16> RVLocs; |
2425 | CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context); |
2426 | return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg)); |
2427 | } |
2428 | |
2429 | static SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps, |
2430 | const SDLoc &DL, SelectionDAG &DAG) { |
2431 | const MachineFunction &MF = DAG.getMachineFunction(); |
2432 | const Function &F = MF.getFunction(); |
2433 | |
2434 | StringRef IntKind = F.getFnAttribute("interrupt").getValueAsString(); |
2435 | |
2436 | // See ARM ARM v7 B1.8.3. On exception entry LR is set to a possibly offset |
2437 | // version of the "preferred return address". These offsets affect the return |
2438 | // instruction if this is a return from PL1 without hypervisor extensions. |
2439 | // IRQ/FIQ: +4 "subs pc, lr, #4" |
2440 | // SWI: 0 "subs pc, lr, #0" |
2441 | // ABORT: +4 "subs pc, lr, #4" |
2442 | // UNDEF: +4/+2 "subs pc, lr, #0" |
2443 | // UNDEF varies depending on whether the exception came from ARM or Thumb
2444 | // mode. Alongside GCC, we throw our hands up in disgust and pretend it's 0.
2445 | |
2446 | int64_t LROffset; |
2447 | if (IntKind == "" || IntKind == "IRQ" || IntKind == "FIQ" || |
2448 | IntKind == "ABORT") |
2449 | LROffset = 4; |
2450 | else if (IntKind == "SWI" || IntKind == "UNDEF") |
2451 | LROffset = 0; |
2452 | else |
2453 | report_fatal_error("Unsupported interrupt attribute. If present, value " |
2454 | "must be one of: IRQ, FIQ, SWI, ABORT or UNDEF"); |
2455 | |
2456 | RetOps.insert(RetOps.begin() + 1, |
2457 | DAG.getConstant(LROffset, DL, MVT::i32, false)); |
2458 | |
2459 | return DAG.getNode(ARMISD::INTRET_FLAG, DL, MVT::Other, RetOps); |
2460 | } |
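// Illustrative sketch: for a handler such as
//
//   define void @irq_handler() #0 { ... }  ; attributes #0 = { "interrupt"="IRQ" }
//
// the return is emitted as "subs pc, lr, #4", undoing the +4 LR offset the
// hardware applied on IRQ entry.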
2461 | |
2462 | SDValue |
2463 | ARMTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, |
2464 | bool isVarArg, |
2465 | const SmallVectorImpl<ISD::OutputArg> &Outs, |
2466 | const SmallVectorImpl<SDValue> &OutVals, |
2467 | const SDLoc &dl, SelectionDAG &DAG) const { |
2468 | // CCValAssign - represent the assignment of the return value to a location. |
2469 | SmallVector<CCValAssign, 16> RVLocs; |
2470 | |
2471 | // CCState - Info about the registers and stack slots. |
2472 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, |
2473 | *DAG.getContext()); |
2474 | |
2475 | // Analyze outgoing return values. |
2476 | CCInfo.AnalyzeReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg)); |
2477 | |
2478 | SDValue Flag; |
2479 | SmallVector<SDValue, 4> RetOps; |
2480 | RetOps.push_back(Chain); // Operand #0 = Chain (updated below) |
2481 | bool isLittleEndian = Subtarget->isLittle(); |
2482 | |
2483 | MachineFunction &MF = DAG.getMachineFunction(); |
2484 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
2485 | AFI->setReturnRegsCount(RVLocs.size()); |
2486 | |
2487 | // Copy the result values into the output registers. |
2488 | for (unsigned i = 0, realRVLocIdx = 0; |
2489 | i != RVLocs.size(); |
2490 | ++i, ++realRVLocIdx) { |
2491 | CCValAssign &VA = RVLocs[i]; |
2492 | assert(VA.isRegLoc() && "Can only return in registers!");
2493 | |
2494 | SDValue Arg = OutVals[realRVLocIdx]; |
2495 | bool ReturnF16 = false; |
2496 | |
2497 | if (Subtarget->hasFullFP16() && Subtarget->isTargetHardFloat()) { |
2498 | // Half-precision return values can be returned like this: |
2499 | // |
2500 | // t11: f16 = fadd ...
2501 | // t12: i16 = bitcast t11 |
2502 | // t13: i32 = zero_extend t12 |
2503 | // t14: f32 = bitcast t13 <~~~~~~~ Arg |
2504 | // |
2505 | // to avoid code generation for bitcasts, we simply set Arg to the node |
2506 | // that produces the f16 value, t11 in this case. |
2507 | // |
2508 | if (Arg.getValueType() == MVT::f32 && Arg.getOpcode() == ISD::BITCAST) { |
2509 | SDValue ZE = Arg.getOperand(0); |
2510 | if (ZE.getOpcode() == ISD::ZERO_EXTEND && ZE.getValueType() == MVT::i32) { |
2511 | SDValue BC = ZE.getOperand(0); |
2512 | if (BC.getOpcode() == ISD::BITCAST && BC.getValueType() == MVT::i16) { |
2513 | Arg = BC.getOperand(0); |
2514 | ReturnF16 = true; |
2515 | } |
2516 | } |
2517 | } |
2518 | } |
2519 | |
2520 | switch (VA.getLocInfo()) { |
2521 | default: llvm_unreachable("Unknown loc info!");
2522 | case CCValAssign::Full: break; |
2523 | case CCValAssign::BCvt: |
2524 | if (!ReturnF16) |
2525 | Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); |
2526 | break; |
2527 | } |
2528 | |
2529 | if (VA.needsCustom()) { |
2530 | if (VA.getLocVT() == MVT::v2f64) { |
2531 | // Extract the first half and return it in two registers. |
2532 | SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, |
2533 | DAG.getConstant(0, dl, MVT::i32)); |
2534 | SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl, |
2535 | DAG.getVTList(MVT::i32, MVT::i32), Half); |
2536 | |
2537 | Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), |
2538 | HalfGPRs.getValue(isLittleEndian ? 0 : 1), |
2539 | Flag); |
2540 | Flag = Chain.getValue(1); |
2541 | RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); |
2542 | VA = RVLocs[++i]; // skip ahead to next loc |
2543 | Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), |
2544 | HalfGPRs.getValue(isLittleEndian ? 1 : 0), |
2545 | Flag); |
2546 | Flag = Chain.getValue(1); |
2547 | RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); |
2548 | VA = RVLocs[++i]; // skip ahead to next loc |
2549 | |
2550 | // Extract the 2nd half and fall through to handle it as an f64 value. |
2551 | Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, |
2552 | DAG.getConstant(1, dl, MVT::i32)); |
2553 | } |
2554 | // Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is |
2555 | // available. |
2556 | SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl, |
2557 | DAG.getVTList(MVT::i32, MVT::i32), Arg); |
2558 | Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), |
2559 | fmrrd.getValue(isLittleEndian ? 0 : 1), |
2560 | Flag); |
2561 | Flag = Chain.getValue(1); |
2562 | RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); |
2563 | VA = RVLocs[++i]; // skip ahead to next loc |
2564 | Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), |
2565 | fmrrd.getValue(isLittleEndian ? 1 : 0), |
2566 | Flag); |
2567 | } else |
2568 | Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag); |
2569 | |
2570 | // Glue all emitted copies together so that they stay adjacent and
2571 | // cannot be scheduled apart.
2572 | Flag = Chain.getValue(1); |
2573 | RetOps.push_back(DAG.getRegister(VA.getLocReg(), |
2574 | ReturnF16 ? MVT::f16 : VA.getLocVT())); |
2575 | } |
2576 | const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo(); |
2577 | const MCPhysReg *I = |
2578 | TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction()); |
2579 | if (I) { |
2580 | for (; *I; ++I) { |
2581 | if (ARM::GPRRegClass.contains(*I)) |
2582 | RetOps.push_back(DAG.getRegister(*I, MVT::i32)); |
2583 | else if (ARM::DPRRegClass.contains(*I)) |
2584 | RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64))); |
2585 | else |
2586 | llvm_unreachable("Unexpected register class in CSRsViaCopy!");
2587 | } |
2588 | } |
2589 | |
2590 | // Update chain and glue. |
2591 | RetOps[0] = Chain; |
2592 | if (Flag.getNode()) |
2593 | RetOps.push_back(Flag); |
2594 | |
2595 | // CPUs which aren't M-class use a special sequence to return from |
2596 | // exceptions (roughly, any instruction setting pc and cpsr simultaneously, |
2597 | // though we use "subs pc, lr, #N"). |
2598 | // |
2599 | // M-class CPUs actually use a normal return sequence with a special |
2600 | // (hardware-provided) value in LR, so the normal code path works. |
2601 | if (DAG.getMachineFunction().getFunction().hasFnAttribute("interrupt") && |
2602 | !Subtarget->isMClass()) { |
2603 | if (Subtarget->isThumb1Only()) |
2604 | report_fatal_error("interrupt attribute is not supported in Thumb1"); |
2605 | return LowerInterruptReturn(RetOps, dl, DAG); |
2606 | } |
2607 | |
2608 | return DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, RetOps); |
2609 | } |
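// Illustrative sketch (assuming a soft-float AAPCS target): returning an f64,
//
//   define double @retd(double %d) { ret double %d }
//
// takes the VA.needsCustom() path above: VMOVRRD splits the value into two
// i32 halves that are copied into r0 and r1 (order depending on endianness)
// before the RET_FLAG node is emitted.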
2610 | |
2611 | bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const { |
2612 | if (N->getNumValues() != 1) |
2613 | return false; |
2614 | if (!N->hasNUsesOfValue(1, 0)) |
2615 | return false; |
2616 | |
2617 | SDValue TCChain = Chain; |
2618 | SDNode *Copy = *N->use_begin(); |
2619 | if (Copy->getOpcode() == ISD::CopyToReg) { |
2620 | // If the copy has a glue operand, we conservatively assume it isn't safe to |
2621 | // perform a tail call. |
2622 | if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue) |
2623 | return false; |
2624 | TCChain = Copy->getOperand(0); |
2625 | } else if (Copy->getOpcode() == ARMISD::VMOVRRD) { |
2626 | SDNode *VMov = Copy; |
2627 | // f64 returned in a pair of GPRs. |
2628 | SmallPtrSet<SDNode*, 2> Copies; |
2629 | for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end(); |
2630 | UI != UE; ++UI) { |
2631 | if (UI->getOpcode() != ISD::CopyToReg) |
2632 | return false; |
2633 | Copies.insert(*UI); |
2634 | } |
2635 | if (Copies.size() > 2) |
2636 | return false; |
2637 | |
2638 | for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end(); |
2639 | UI != UE; ++UI) { |
2640 | SDValue UseChain = UI->getOperand(0); |
2641 | if (Copies.count(UseChain.getNode())) |
2642 | // Second CopyToReg |
2643 | Copy = *UI; |
2644 | else { |
2645 | // We are at the top of this chain. |
2646 | // If the copy has a glue operand, we conservatively assume it |
2647 | // isn't safe to perform a tail call. |
2648 | if (UI->getOperand(UI->getNumOperands()-1).getValueType() == MVT::Glue) |
2649 | return false; |
2650 | // First CopyToReg |
2651 | TCChain = UseChain; |
2652 | } |
2653 | } |
2654 | } else if (Copy->getOpcode() == ISD::BITCAST) { |
2655 | // f32 returned in a single GPR. |
2656 | if (!Copy->hasOneUse()) |
2657 | return false; |
2658 | Copy = *Copy->use_begin(); |
2659 | if (Copy->getOpcode() != ISD::CopyToReg || !Copy->hasNUsesOfValue(1, 0)) |
2660 | return false; |
2661 | // If the copy has a glue operand, we conservatively assume it isn't safe to |
2662 | // perform a tail call. |
2663 | if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue) |
2664 | return false; |
2665 | TCChain = Copy->getOperand(0); |
2666 | } else { |
2667 | return false; |
2668 | } |
2669 | |
2670 | bool HasRet = false; |
2671 | for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end(); |
2672 | UI != UE; ++UI) { |
2673 | if (UI->getOpcode() != ARMISD::RET_FLAG && |
2674 | UI->getOpcode() != ARMISD::INTRET_FLAG) |
2675 | return false; |
2676 | HasRet = true; |
2677 | } |
2678 | |
2679 | if (!HasRet) |
2680 | return false; |
2681 | |
2682 | Chain = TCChain; |
2683 | return true; |
2684 | } |
2685 | |
2686 | bool ARMTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { |
2687 | if (!Subtarget->supportsTailCall()) |
2688 | return false; |
2689 | |
2690 | auto Attr = |
2691 | CI->getParent()->getParent()->getFnAttribute("disable-tail-calls"); |
2692 | if (!CI->isTailCall() || Attr.getValueAsString() == "true") |
2693 | return false; |
2694 | |
2695 | return true; |
2696 | } |
2697 | |
2698 | // Writing a 64-bit value requires splitting it into two 32-bit values
2699 | // first, then passing the low and high parts through.
2700 | static SDValue LowerWRITE_REGISTER(SDValue Op, SelectionDAG &DAG) { |
2701 | SDLoc DL(Op); |
2702 | SDValue WriteValue = Op->getOperand(2); |
2703 | |
2704 | // This function is only supposed to be called for an i64 type argument.
2705 | assert(WriteValue.getValueType() == MVT::i64 &&
2706 |        "LowerWRITE_REGISTER called for non-i64 type argument.");
2707 | |
2708 | SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, WriteValue, |
2709 | DAG.getConstant(0, DL, MVT::i32)); |
2710 | SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, WriteValue, |
2711 | DAG.getConstant(1, DL, MVT::i32)); |
2712 | SDValue Ops[] = { Op->getOperand(0), Op->getOperand(1), Lo, Hi }; |
2713 | return DAG.getNode(ISD::WRITE_REGISTER, DL, MVT::Other, Ops); |
2714 | } |
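// Illustrative sketch (the register name below is hypothetical): an i64 write
//
//   call void @llvm.write_register.i64(metadata !{!"some_64bit_reg"}, i64 %v)
//
// is rebuilt as one ISD::WRITE_REGISTER node whose value operand has been
// split into the Lo/Hi i32 halves extracted above.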
2715 | |
2716 | // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as |
2717 | // their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is |
2718 | // one of the above mentioned nodes. It has to be wrapped because otherwise |
2719 | // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only |
2720 | // be used to form addressing mode. These wrapped nodes will be selected |
2721 | // into MOVi. |
2722 | SDValue ARMTargetLowering::LowerConstantPool(SDValue Op, |
2723 | SelectionDAG &DAG) const { |
2724 | EVT PtrVT = Op.getValueType(); |
2725 | // FIXME there is no actual debug info here |
2726 | SDLoc dl(Op); |
2727 | ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); |
2728 | SDValue Res; |
2729 | |
2730 | // When generating execute-only code Constant Pools must be promoted to the |
2731 | // global data section. It's a bit ugly that we can't share them across basic |
2732 | // blocks, but this way we guarantee that execute-only behaves correctly with
2733 | // position-independent addressing modes. |
2734 | if (Subtarget->genExecuteOnly()) { |
2735 | auto AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>(); |
2736 | auto T = const_cast<Type*>(CP->getType()); |
2737 | auto C = const_cast<Constant*>(CP->getConstVal()); |
2738 | auto M = const_cast<Module*>(DAG.getMachineFunction(). |
2739 | getFunction().getParent()); |
2740 | auto GV = new GlobalVariable( |
2741 | *M, T, /*isConst=*/true, GlobalVariable::InternalLinkage, C, |
2742 | Twine(DAG.getDataLayout().getPrivateGlobalPrefix()) + "CP" + |
2743 | Twine(DAG.getMachineFunction().getFunctionNumber()) + "_" + |
2744 | Twine(AFI->createPICLabelUId()) |
2745 | ); |
2746 | SDValue GA = DAG.getTargetGlobalAddress(dyn_cast<GlobalValue>(GV), |
2747 | dl, PtrVT); |
2748 | return LowerGlobalAddress(GA, DAG); |
2749 | } |
2750 | |
2751 | if (CP->isMachineConstantPoolEntry()) |
2752 | Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, |
2753 | CP->getAlignment()); |
2754 | else |
2755 | Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, |
2756 | CP->getAlignment()); |
2757 | return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res); |
2758 | } |
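// Illustrative note (a sketch derived from the naming code above): under
// execute-only, a constant pool entry in function #0 becomes an internal
// global named roughly ".LCP0_0" (private global prefix + "CP" + function
// number + "_" + PIC label id) and is then lowered via LowerGlobalAddress.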
2759 | |
2760 | unsigned ARMTargetLowering::getJumpTableEncoding() const { |
2761 | return MachineJumpTableInfo::EK_Inline; |
2762 | } |
2763 | |
2764 | SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op, |
2765 | SelectionDAG &DAG) const { |
2766 | MachineFunction &MF = DAG.getMachineFunction(); |
2767 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
2768 | unsigned ARMPCLabelIndex = 0; |
2769 | SDLoc DL(Op); |
2770 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
2771 | const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); |
2772 | SDValue CPAddr; |
2773 | bool IsPositionIndependent = isPositionIndependent() || Subtarget->isROPI(); |
2774 | if (!IsPositionIndependent) { |
2775 | CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4); |
2776 | } else { |
2777 | unsigned PCAdj = Subtarget->isThumb() ? 4 : 8; |
2778 | ARMPCLabelIndex = AFI->createPICLabelUId(); |
2779 | ARMConstantPoolValue *CPV = |
2780 | ARMConstantPoolConstant::Create(BA, ARMPCLabelIndex, |
2781 | ARMCP::CPBlockAddress, PCAdj); |
2782 | CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); |
2783 | } |
2784 | CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr); |
2785 | SDValue Result = DAG.getLoad( |
2786 | PtrVT, DL, DAG.getEntryNode(), CPAddr, |
2787 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); |
2788 | if (!IsPositionIndependent) |
2789 | return Result; |
2790 | SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, DL, MVT::i32); |
2791 | return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel); |
2792 | } |
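// Illustrative sketch (labels hypothetical, ARM mode, PCAdj = 8): the PIC
// path above typically materialises as
//
//   ldr  r0, .LCPI0_0        @ blockaddress - (.LPC0_0 + 8)
// .LPC0_0:
//   add  r0, pc, r0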
2793 | |
2794 | /// \brief Convert a TLS address reference into the correct sequence of loads |
2795 | /// and calls to compute the variable's address for Darwin, and return an |
2796 | /// SDValue containing the final node. |
2797 | |
2798 | /// Darwin only has one TLS scheme which must be capable of dealing with the |
2799 | /// fully general situation, in the worst case. This means: |
2800 | /// + "extern __thread" declaration. |
2801 | /// + Defined in a possibly unknown dynamic library. |
2802 | /// |
2803 | /// The general system is that each __thread variable has a [3 x i32] descriptor |
2804 | /// which contains information used by the runtime to calculate the address. The |
2805 | /// only part of this the compiler needs to know about is the first word, which |
2806 | /// contains a function pointer that must be called with the address of the |
2807 | /// entire descriptor in "r0". |
2808 | /// |
2809 | /// Since this descriptor may be in a different unit, in general access must |
2810 | /// proceed along the usual ARM rules. A common sequence to produce is: |
2811 | /// |
2812 | /// movw rT1, :lower16:_var$non_lazy_ptr |
2813 | /// movt rT1, :upper16:_var$non_lazy_ptr |
2814 | /// ldr r0, [rT1] |
2815 | /// ldr rT2, [r0] |
2816 | /// blx rT2 |
2817 | /// [...address now in r0...] |
2818 | SDValue |
2819 | ARMTargetLowering::LowerGlobalTLSAddressDarwin(SDValue Op, |
2820 | SelectionDAG &DAG) const { |
2821 | assert(Subtarget->isTargetDarwin() &&
2822 |        "This function expects a Darwin target");
2823 | SDLoc DL(Op); |
2824 | |
2825 | // The first step is to get the address of the actual global symbol. This is where
2826 | // the TLS descriptor lives. |
2827 | SDValue DescAddr = LowerGlobalAddressDarwin(Op, DAG); |
2828 | |
2829 | // The first entry in the descriptor is a function pointer that we must call |
2830 | // to obtain the address of the variable. |
2831 | SDValue Chain = DAG.getEntryNode(); |
2832 | SDValue FuncTLVGet = DAG.getLoad( |
2833 | MVT::i32, DL, Chain, DescAddr, |
2834 | MachinePointerInfo::getGOT(DAG.getMachineFunction()), |
2835 | /* Alignment = */ 4, |
2836 | MachineMemOperand::MONonTemporal | MachineMemOperand::MODereferenceable | |
2837 | MachineMemOperand::MOInvariant); |
2838 | Chain = FuncTLVGet.getValue(1); |
2839 | |
2840 | MachineFunction &F = DAG.getMachineFunction(); |
2841 | MachineFrameInfo &MFI = F.getFrameInfo(); |
2842 | MFI.setAdjustsStack(true); |
2843 | |
2844 | // TLS calls preserve all registers except those that absolutely must be |
2845 | // trashed: R0 (it takes an argument), LR (it's a call) and CPSR (let's not be |
2846 | // silly). |
2847 | auto TRI = |
2848 | getTargetMachine().getSubtargetImpl(F.getFunction())->getRegisterInfo(); |
2849 | auto ARI = static_cast<const ARMRegisterInfo *>(TRI); |
2850 | const uint32_t *Mask = ARI->getTLSCallPreservedMask(DAG.getMachineFunction()); |
2851 | |
2852 | // Finally, we can make the call. This is just a degenerate version of a |
2853 | // normal ARM call node: r0 takes the address of the descriptor, and
2854 | // returns the address of the variable in this thread. |
2855 | Chain = DAG.getCopyToReg(Chain, DL, ARM::R0, DescAddr, SDValue()); |
2856 | Chain = |
2857 | DAG.getNode(ARMISD::CALL, DL, DAG.getVTList(MVT::Other, MVT::Glue), |
2858 | Chain, FuncTLVGet, DAG.getRegister(ARM::R0, MVT::i32), |
2859 | DAG.getRegisterMask(Mask), Chain.getValue(1)); |
2860 | return DAG.getCopyFromReg(Chain, DL, ARM::R0, MVT::i32, Chain.getValue(1)); |
2861 | } |
2862 | |
2863 | SDValue |
2864 | ARMTargetLowering::LowerGlobalTLSAddressWindows(SDValue Op, |
2865 | SelectionDAG &DAG) const { |
2866 | assert(Subtarget->isTargetWindows() && "Windows specific TLS lowering");
2867 | |
2868 | SDValue Chain = DAG.getEntryNode(); |
2869 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
2870 | SDLoc DL(Op); |
2871 | |
2872 | // Load the current TEB (thread environment block) |
2873 | SDValue Ops[] = {Chain, |
2874 | DAG.getConstant(Intrinsic::arm_mrc, DL, MVT::i32), |
2875 | DAG.getConstant(15, DL, MVT::i32), |
2876 | DAG.getConstant(0, DL, MVT::i32), |
2877 | DAG.getConstant(13, DL, MVT::i32), |
2878 | DAG.getConstant(0, DL, MVT::i32), |
2879 | DAG.getConstant(2, DL, MVT::i32)}; |
2880 | SDValue CurrentTEB = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, |
2881 | DAG.getVTList(MVT::i32, MVT::Other), Ops); |
2882 | |
2883 | SDValue TEB = CurrentTEB.getValue(0); |
2884 | Chain = CurrentTEB.getValue(1); |
2885 | |
2886 | // Load the ThreadLocalStoragePointer from the TEB |
2887 | // A pointer to the TLS array is located at offset 0x2c from the TEB. |
2888 | SDValue TLSArray = |
2889 | DAG.getNode(ISD::ADD, DL, PtrVT, TEB, DAG.getIntPtrConstant(0x2c, DL)); |
2890 | TLSArray = DAG.getLoad(PtrVT, DL, Chain, TLSArray, MachinePointerInfo()); |
2891 | |
2892 | // The pointer to the thread's TLS data area is found at the TLS index,
2893 | // scaled by 4, into the TLSArray.
2894 | |
2895 | // Load the TLS index from the C runtime |
2896 | SDValue TLSIndex = |
2897 | DAG.getTargetExternalSymbol("_tls_index", PtrVT, ARMII::MO_NO_FLAG); |
2898 | TLSIndex = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, TLSIndex); |
2899 | TLSIndex = DAG.getLoad(PtrVT, DL, Chain, TLSIndex, MachinePointerInfo()); |
2900 | |
2901 | SDValue Slot = DAG.getNode(ISD::SHL, DL, PtrVT, TLSIndex, |
2902 | DAG.getConstant(2, DL, MVT::i32)); |
2903 | SDValue TLS = DAG.getLoad(PtrVT, DL, Chain, |
2904 | DAG.getNode(ISD::ADD, DL, PtrVT, TLSArray, Slot), |
2905 | MachinePointerInfo()); |
2906 | |
2907 | // Get the offset of the start of the .tls section (section base) |
2908 | const auto *GA = cast<GlobalAddressSDNode>(Op); |
2909 | auto *CPV = ARMConstantPoolConstant::Create(GA->getGlobal(), ARMCP::SECREL); |
2910 | SDValue Offset = DAG.getLoad( |
2911 | PtrVT, DL, Chain, DAG.getNode(ARMISD::Wrapper, DL, MVT::i32, |
2912 | DAG.getTargetConstantPool(CPV, PtrVT, 4)), |
2913 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); |
2914 | |
2915 | return DAG.getNode(ISD::ADD, DL, PtrVT, TLS, Offset); |
2916 | } |
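// Illustrative sketch of the sequence built above (registers hypothetical):
//
//   mrc  p15, #0, r0, c13, c0, #2   @ r0 = TEB
//   ldr  r0, [r0, #0x2c]            @ ThreadLocalStoragePointer
//   movw r1, :lower16:_tls_index
//   movt r1, :upper16:_tls_index
//   ldr  r1, [r1]                   @ TLS index for this module
//   ldr  r0, [r0, r1, lsl #2]       @ base of this module's TLS block
//   @ ...finally add the SECREL offset of the variable, loaded from the
//   @ constant pool.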
2917 | |
2918 | // Lower ISD::GlobalTLSAddress using the "general dynamic" model |
2919 | SDValue |
2920 | ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, |
2921 | SelectionDAG &DAG) const { |
2922 | SDLoc dl(GA); |
2923 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
2924 | unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; |
2925 | MachineFunction &MF = DAG.getMachineFunction(); |
2926 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
2927 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); |
2928 | ARMConstantPoolValue *CPV = |
2929 | ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex, |
2930 | ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true); |
2931 | SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4); |
2932 | Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument); |
2933 | Argument = DAG.getLoad( |
2934 | PtrVT, dl, DAG.getEntryNode(), Argument, |
2935 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); |
2936 | SDValue Chain = Argument.getValue(1); |
2937 | |
2938 | SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32); |
2939 | Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel); |
2940 | |
2941 | // call __tls_get_addr. |
2942 | ArgListTy Args; |
2943 | ArgListEntry Entry; |
2944 | Entry.Node = Argument; |
2945 | Entry.Ty = (Type *) Type::getInt32Ty(*DAG.getContext()); |
2946 | Args.push_back(Entry); |
2947 | |
2948 | // FIXME: is there useful debug info available here? |
2949 | TargetLowering::CallLoweringInfo CLI(DAG); |
2950 | CLI.setDebugLoc(dl).setChain(Chain).setLibCallee( |
2951 | CallingConv::C, Type::getInt32Ty(*DAG.getContext()), |
2952 | DAG.getExternalSymbol("__tls_get_addr", PtrVT), std::move(Args)); |
2953 | |
2954 | std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); |
2955 | return CallResult.first; |
2956 | } |
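// Illustrative sketch (labels hypothetical): the resulting general-dynamic
// sequence is a PIC load of the TLSGD descriptor offset plus a runtime call:
//
//   ldr  r0, .LCPI0_0        @ var(tlsgd) - (.LPC0_0 + 8)
// .LPC0_0:
//   add  r0, pc, r0
//   bl   __tls_get_addr      @ returns the address of var in r0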
2957 | |
2958 | // Lower ISD::GlobalTLSAddress using the "initial exec" or |
2959 | // "local exec" model. |
2960 | SDValue |
2961 | ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA, |
2962 | SelectionDAG &DAG, |
2963 | TLSModel::Model model) const { |
2964 | const GlobalValue *GV = GA->getGlobal(); |
2965 | SDLoc dl(GA); |
2966 | SDValue Offset; |
2967 | SDValue Chain = DAG.getEntryNode(); |
2968 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
2969 | // Get the Thread Pointer |
2970 | SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); |
2971 | |
2972 | if (model == TLSModel::InitialExec) { |
2973 | MachineFunction &MF = DAG.getMachineFunction(); |
2974 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
2975 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); |
2976 | // Initial exec model. |
2977 | unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; |
2978 | ARMConstantPoolValue *CPV = |
2979 | ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex, |
2980 | ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF, |
2981 | true); |
2982 | Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4); |
2983 | Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); |
2984 | Offset = DAG.getLoad( |
2985 | PtrVT, dl, Chain, Offset, |
2986 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); |
2987 | Chain = Offset.getValue(1); |
2988 | |
2989 | SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32); |
2990 | Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel); |
2991 | |
2992 | Offset = DAG.getLoad( |
2993 | PtrVT, dl, Chain, Offset, |
2994 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); |
2995 | } else { |
2996 | // local exec model |
2997 | assert(model == TLSModel::LocalExec);
2998 | ARMConstantPoolValue *CPV = |
2999 | ARMConstantPoolConstant::Create(GV, ARMCP::TPOFF); |
3000 | Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4); |
3001 | Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); |
3002 | Offset = DAG.getLoad( |
3003 | PtrVT, dl, Chain, Offset, |
3004 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); |
3005 | } |
3006 | |
3007 | // The address of the thread local variable is the add of the thread |
3008 | // pointer with the offset of the variable. |
3009 | return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset); |
3010 | } |
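// Illustrative sketch (ARM mode, registers hypothetical): for local-exec the
// result is roughly
//
//   mrc p15, #0, r0, c13, c0, #3   @ thread pointer (TPIDRURO)
//   ldr r1, .LCPI0_0               @ var(tpoff)
//   add r0, r0, r1
//
// initial-exec inserts one extra load to fetch the offset from the GOT.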
3011 | |
3012 | SDValue |
3013 | ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { |
3014 | GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); |
3015 | if (DAG.getTarget().useEmulatedTLS()) |
3016 | return LowerToTLSEmulatedModel(GA, DAG); |
3017 | |
3018 | if (Subtarget->isTargetDarwin()) |
3019 | return LowerGlobalTLSAddressDarwin(Op, DAG); |
3020 | |
3021 | if (Subtarget->isTargetWindows()) |
3022 | return LowerGlobalTLSAddressWindows(Op, DAG); |
3023 | |
3024 | // TODO: implement the "local dynamic" model |
3025 | assert(Subtarget->isTargetELF() && "Only ELF implemented here");
3026 | TLSModel::Model model = getTargetMachine().getTLSModel(GA->getGlobal()); |
3027 | |
3028 | switch (model) { |
3029 | case TLSModel::GeneralDynamic: |
3030 | case TLSModel::LocalDynamic: |
3031 | return LowerToTLSGeneralDynamicModel(GA, DAG); |
3032 | case TLSModel::InitialExec: |
3033 | case TLSModel::LocalExec: |
3034 | return LowerToTLSExecModels(GA, DAG, model); |
3035 | } |
3036 | llvm_unreachable("bogus TLS model");
3037 | } |
3038 | |
3039 | /// Return true if all users of V are within function F, looking through |
3040 | /// ConstantExprs. |
3041 | static bool allUsersAreInFunction(const Value *V, const Function *F) { |
3042 | SmallVector<const User*,4> Worklist; |
3043 | for (auto *U : V->users()) |
3044 | Worklist.push_back(U); |
3045 | while (!Worklist.empty()) { |
3046 | auto *U = Worklist.pop_back_val(); |
3047 | if (isa<ConstantExpr>(U)) { |
3048 | for (auto *UU : U->users()) |
3049 | Worklist.push_back(UU); |
3050 | continue; |
3051 | } |
3052 | |
3053 | auto *I = dyn_cast<Instruction>(U); |
3054 | if (!I || I->getParent()->getParent() != F) |
3055 | return false; |
3056 | } |
3057 | return true; |
3058 | } |
3059 | |
3060 | /// Return true if all users of V are within some (any) function, looking through
3061 | /// ConstantExprs; i.e. return false if V has any global constant users.
3062 | static bool allUsersAreInFunctions(const Value *V) { |
3063 | SmallVector<const User*,4> Worklist; |
3064 | for (auto *U : V->users()) |
3065 | Worklist.push_back(U); |
3066 | while (!Worklist.empty()) { |
3067 | auto *U = Worklist.pop_back_val(); |
3068 | if (isa<ConstantExpr>(U)) { |
3069 | for (auto *UU : U->users()) |
3070 | Worklist.push_back(UU); |
3071 | continue; |
3072 | } |
3073 | |
3074 | if (!isa<Instruction>(U)) |
3075 | return false; |
3076 | } |
3077 | return true; |
3078 | } |
3079 | |
3080 | // Return true if T is an integer, float or an array/vector of either. |
3081 | static bool isSimpleType(Type *T) { |
3082 | if (T->isIntegerTy() || T->isFloatingPointTy()) |
3083 | return true; |
3084 | Type *SubT = nullptr; |
3085 | if (T->isArrayTy()) |
3086 | SubT = T->getArrayElementType(); |
3087 | else if (T->isVectorTy()) |
3088 | SubT = T->getVectorElementType(); |
3089 | else |
3090 | return false; |
3091 | return SubT->isIntegerTy() || SubT->isFloatingPointTy(); |
3092 | } |
3093 | |
3094 | static SDValue promoteToConstantPool(const GlobalValue *GV, SelectionDAG &DAG, |
3095 | EVT PtrVT, const SDLoc &dl) { |
3096 | // If we're creating a pool entry for a constant global with unnamed address, |
3097 | // and the global is small enough, we can emit it inline into the constant pool |
3098 | // to save ourselves an indirection. |
3099 | // |
3100 | // This is a win if the constant is only used in one function (so it doesn't |
3101 | // need to be duplicated) or duplicating the constant wouldn't increase code |
3102 | // size (implying the constant is no larger than 4 bytes). |
3103 | const Function &F = DAG.getMachineFunction().getFunction(); |
3104 | |
3105 | // We rely on this decision to inline being idempotent and unrelated to the
3106 | // use-site. We know that if we inline a variable at one use site, we'll
3107 | // inline it elsewhere too (and reuse the constant pool entry). Fast-isel
3108 | // doesn't know about this optimization, so bail out if it's enabled;
3109 | // otherwise we could decide to inline here (and thus never emit the GV)
3110 | // while fast-isel generated code still requires the GV.
3111 | if (!EnableConstpoolPromotion || |
3112 | DAG.getMachineFunction().getTarget().Options.EnableFastISel) |
3113 | return SDValue(); |
3114 | |
3115 | auto *GVar = dyn_cast<GlobalVariable>(GV); |
3116 | if (!GVar || !GVar->hasInitializer() || |
3117 | !GVar->isConstant() || !GVar->hasGlobalUnnamedAddr() || |
3118 | !GVar->hasLocalLinkage()) |
3119 | return SDValue(); |
3120 | |
3121 | // Ensure that we don't try and inline any type that contains pointers. If |
3122 | // we inline a value that contains relocations, we move the relocations from |
3123 | // .data to .text which is not ideal. |
3124 | auto *Init = GVar->getInitializer(); |
3125 | if (!isSimpleType(Init->getType())) |
3126 | return SDValue(); |
3127 | |
3128 | // The constant islands pass can only really deal with alignment requests |
3129 | // <= 4 bytes and cannot pad constants itself. Therefore we cannot promote
3130 | // any type requiring alignment greater than 4 bytes. We also
3131 | // can only promote constants that are multiples of 4 bytes in size or |
3132 | // are paddable to a multiple of 4. Currently we only try and pad constants |
3133 | // that are strings for simplicity. |
3134 | auto *CDAInit = dyn_cast<ConstantDataArray>(Init); |
3135 | unsigned Size = DAG.getDataLayout().getTypeAllocSize(Init->getType()); |
3136 | unsigned Align = GVar->getAlignment(); |
3137 | unsigned RequiredPadding = 4 - (Size % 4); |
3138 | bool PaddingPossible = |
3139 | RequiredPadding == 4 || (CDAInit && CDAInit->isString()); |
3140 | if (!PaddingPossible || Align > 4 || Size > ConstpoolPromotionMaxSize || |
3141 | Size == 0) |
3142 | return SDValue(); |
3143 | |
3144 | unsigned PaddedSize = Size + ((RequiredPadding == 4) ? 0 : RequiredPadding); |
3145 | MachineFunction &MF = DAG.getMachineFunction(); |
3146 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
3147 | |
3148 | // We can't bloat the constant pool too much, else the ConstantIslands pass |
3149 | // may fail to converge. If we haven't promoted this global yet (it may have |
3150 | // multiple uses), and promoting it would increase the constant pool size (Sz |
3151 | // > 4), ensure we have space to do so up to MaxTotal. |
3152 | if (!AFI->getGlobalsPromotedToConstantPool().count(GVar) && Size > 4) |
3153 | if (AFI->getPromotedConstpoolIncrease() + PaddedSize - 4 >= |
3154 | ConstpoolPromotionMaxTotal) |
3155 | return SDValue(); |
3156 | |
3157 | // This is only valid if all users are in a single function OR it has users |
3158 | // in multiple functions but is no larger than a pointer. We also check if
3159 | // GVar has constant (non-ConstantExpr) users. If so, it essentially has its |
3160 | // address taken. |
3161 | if (!allUsersAreInFunction(GVar, &F) && |
3162 | !(Size <= 4 && allUsersAreInFunctions(GVar))) |
3163 | return SDValue(); |
3164 | |
3165 | // We're going to inline this global. Pad it out if needed. |
3166 | if (RequiredPadding != 4) { |
3167 | StringRef S = CDAInit->getAsString(); |
3168 | |
3169 | SmallVector<uint8_t,16> V(S.size()); |
3170 | std::copy(S.bytes_begin(), S.bytes_end(), V.begin()); |
3171 | while (RequiredPadding--) |
3172 | V.push_back(0); |
3173 | Init = ConstantDataArray::get(*DAG.getContext(), V); |
3174 | } |
3175 | |
3176 | auto CPVal = ARMConstantPoolConstant::Create(GVar, Init); |
3177 | SDValue CPAddr = |
3178 | DAG.getTargetConstantPool(CPVal, PtrVT, /*Align=*/4); |
3179 | if (!AFI->getGlobalsPromotedToConstantPool().count(GVar)) { |
3180 | AFI->markGlobalAsPromotedToConstantPool(GVar); |
3181 | AFI->setPromotedConstpoolIncrease(AFI->getPromotedConstpoolIncrease() + |
3182 | PaddedSize - 4); |
3183 | } |
3184 | ++NumConstpoolPromoted; |
3185 | return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); |
3186 | } |
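// Illustrative example (a sketch): a typical candidate for promotion is a
// small local constant such as
//
//   @.str = private unnamed_addr constant [3 x i8] c"ab\00"
//
// Size is 3, so RequiredPadding is 1 and a single zero byte is appended to
// round the initializer up to a 4-byte multiple before it is placed in the
// constant pool.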
3187 | |
3188 | bool ARMTargetLowering::isReadOnly(const GlobalValue *GV) const { |
3189 | if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV)) |
3190 | GV = GA->getBaseObject(); |
3191 | return (isa<GlobalVariable>(GV) && cast<GlobalVariable>(GV)->isConstant()) || |
3192 | isa<Function>(GV); |
3193 | } |
3194 | |
3195 | SDValue ARMTargetLowering::LowerGlobalAddress(SDValue Op, |
3196 | SelectionDAG &DAG) const { |
3197 | switch (Subtarget->getTargetTriple().getObjectFormat()) { |
3198 | default: llvm_unreachable("unknown object format");
3199 | case Triple::COFF: |
3200 | return LowerGlobalAddressWindows(Op, DAG); |
3201 | case Triple::ELF: |
3202 | return LowerGlobalAddressELF(Op, DAG); |
3203 | case Triple::MachO: |
3204 | return LowerGlobalAddressDarwin(Op, DAG); |
3205 | } |
3206 | } |
3207 | |
3208 | SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op, |
3209 | SelectionDAG &DAG) const { |
3210 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
3211 | SDLoc dl(Op); |
3212 | const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); |
3213 | const TargetMachine &TM = getTargetMachine(); |
3214 | bool IsRO = isReadOnly(GV); |
3215 | |
3216 | // Only try promoteToConstantPool when not generating an execute-only (XO) text section.
3217 | if (TM.shouldAssumeDSOLocal(*GV->getParent(), GV) && !Subtarget->genExecuteOnly()) |
3218 | if (SDValue V = promoteToConstantPool(GV, DAG, PtrVT, dl)) |
3219 | return V; |
3220 | |
3221 | if (isPositionIndependent()) { |
3222 | bool UseGOT_PREL = !TM.shouldAssumeDSOLocal(*GV->getParent(), GV); |
3223 | SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, |
3224 | UseGOT_PREL ? ARMII::MO_GOT : 0); |
3225 | SDValue Result = DAG.getNode(ARMISD::WrapperPIC, dl, PtrVT, G); |
3226 | if (UseGOT_PREL) |
3227 | Result = |
3228 | DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result, |
3229 | MachinePointerInfo::getGOT(DAG.getMachineFunction())); |
3230 | return Result; |
3231 | } else if (Subtarget->isROPI() && IsRO) { |
3232 | // PC-relative. |
3233 | SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT); |
3234 | SDValue Result = DAG.getNode(ARMISD::WrapperPIC, dl, PtrVT, G); |
3235 | return Result; |
3236 | } else if (Subtarget->isRWPI() && !IsRO) { |
3237 | // SB-relative. |
3238 | SDValue RelAddr; |
3239 | if (Subtarget->useMovt(DAG.getMachineFunction())) { |
3240 | ++NumMovwMovt; |
3241 | SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, ARMII::MO_SBREL); |
3242 | RelAddr = DAG.getNode(ARMISD::Wrapper, dl, PtrVT, G); |
3243 | } else { // use literal pool for address constant |
3244 | ARMConstantPoolValue *CPV = |
3245 | ARMConstantPoolConstant::Create(GV, ARMCP::SBREL); |
3246 | SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); |
3247 | CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); |
3248 | RelAddr = DAG.getLoad( |
3249 | PtrVT, dl, DAG.getEntryNode(), CPAddr, |
3250 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); |
3251 | } |
3252 | SDValue SB = DAG.getCopyFromReg(DAG.getEntryNode(), dl, ARM::R9, PtrVT); |
3253 | SDValue Result = DAG.getNode(ISD::ADD, dl, PtrVT, SB, RelAddr); |
3254 | return Result; |
3255 | } |
3256 | |
3257 | // If we have T2 ops, we can materialize the address directly via movt/movw |
3258 | // pair. This is always cheaper. |
3259 | if (Subtarget->useMovt(DAG.getMachineFunction())) { |
3260 | ++NumMovwMovt; |
3261 | // FIXME: Once remat is capable of dealing with instructions with register |
3262 | // operands, expand this into two nodes. |
3263 | return DAG.getNode(ARMISD::Wrapper, dl, PtrVT, |
3264 | DAG.getTargetGlobalAddress(GV, dl, PtrVT)); |
3265 | } else { |
3266 | SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4); |
3267 | CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); |
3268 | return DAG.getLoad( |
3269 | PtrVT, dl, DAG.getEntryNode(), CPAddr, |
3270 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); |
3271 | } |
3272 | } |
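// Illustrative sketch (symbol hypothetical): with movw/movt available, the
// static non-PIC path above is simply
//
//   movw r0, :lower16:g
//   movt r0, :upper16:g
//
// while the fallback is a literal-pool load of the address.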
3273 | |
3274 | SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op, |
3275 | SelectionDAG &DAG) const { |
3276 | assert(!Subtarget->isROPI() && !Subtarget->isRWPI() &&
3277 |        "ROPI/RWPI not currently supported for Darwin");
3278 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
3279 | SDLoc dl(Op); |
3280 | const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); |
3281 | |
3282 | if (Subtarget->useMovt(DAG.getMachineFunction())) |
3283 | ++NumMovwMovt; |
3284 | |
3285 | // FIXME: Once remat is capable of dealing with instructions with register |
3286 | // operands, expand this into multiple nodes |
3287 | unsigned Wrapper = |
3288 | isPositionIndependent() ? ARMISD::WrapperPIC : ARMISD::Wrapper; |
3289 | |
3290 | SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, ARMII::MO_NONLAZY); |
3291 | SDValue Result = DAG.getNode(Wrapper, dl, PtrVT, G); |
3292 | |
3293 | if (Subtarget->isGVIndirectSymbol(GV)) |
3294 | Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result, |
3295 | MachinePointerInfo::getGOT(DAG.getMachineFunction())); |
3296 | return Result; |
3297 | } |
3298 | |
3299 | SDValue ARMTargetLowering::LowerGlobalAddressWindows(SDValue Op, |
3300 | SelectionDAG &DAG) const { |
3301 | assert(Subtarget->isTargetWindows() && "non-Windows COFF is not supported");
3302 | assert(Subtarget->useMovt(DAG.getMachineFunction()) &&
3303 |        "Windows on ARM expects to use movw/movt");
3304 | assert(!Subtarget->isROPI() && !Subtarget->isRWPI() &&
3305 |        "ROPI/RWPI not currently supported for Windows");
3306 | |
3307 | const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); |
3308 | const ARMII::TOF TargetFlags = |
3309 | (GV->hasDLLImportStorageClass() ? ARMII::MO_DLLIMPORT : ARMII::MO_NO_FLAG); |
3310 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
3311 | SDValue Result; |
3312 | SDLoc DL(Op); |
3313 | |
3314 | ++NumMovwMovt; |
3315 | |
3316 | // FIXME: Once remat is capable of dealing with instructions with register |
3317 | // operands, expand this into two nodes. |
3318 | Result = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, |
3319 | DAG.getTargetGlobalAddress(GV, DL, PtrVT, /*Offset=*/0, |
3320 | TargetFlags)); |
3321 | if (GV->hasDLLImportStorageClass()) |
3322 | Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result, |
3323 | MachinePointerInfo::getGOT(DAG.getMachineFunction())); |
3324 | return Result; |
3325 | } |
3326 | |
3327 | SDValue |
3328 | ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const { |
3329 | SDLoc dl(Op); |
3330 | SDValue Val = DAG.getConstant(0, dl, MVT::i32); |
3331 | return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl, |
3332 | DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0), |
3333 | Op.getOperand(1), Val); |
3334 | } |
3335 | |
3336 | SDValue |
3337 | ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const { |
3338 | SDLoc dl(Op); |
3339 | return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0), |
3340 | Op.getOperand(1), DAG.getConstant(0, dl, MVT::i32)); |
3341 | } |
3342 | |
3343 | SDValue ARMTargetLowering::LowerEH_SJLJ_SETUP_DISPATCH(SDValue Op, |
3344 | SelectionDAG &DAG) const { |
3345 | SDLoc dl(Op); |
3346 | return DAG.getNode(ARMISD::EH_SJLJ_SETUP_DISPATCH, dl, MVT::Other, |
3347 | Op.getOperand(0)); |
3348 | } |
3349 | |
3350 | SDValue |
3351 | ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG, |
3352 | const ARMSubtarget *Subtarget) const { |
3353 | unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); |
3354 | SDLoc dl(Op); |
3355 | switch (IntNo) { |
3356 | default: return SDValue(); // Don't custom lower most intrinsics. |
3357 | case Intrinsic::thread_pointer: { |
3358 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
3359 | return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); |
3360 | } |
3361 | case Intrinsic::eh_sjlj_lsda: { |
3362 | MachineFunction &MF = DAG.getMachineFunction(); |
3363 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
3364 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); |
3365 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
3366 | SDValue CPAddr; |
3367 | bool IsPositionIndependent = isPositionIndependent(); |
3368 | unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0; |
3369 | ARMConstantPoolValue *CPV = |
3370 | ARMConstantPoolConstant::Create(&MF.getFunction(), ARMPCLabelIndex, |
3371 | ARMCP::CPLSDA, PCAdj); |
3372 | CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); |
3373 | CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); |
3374 | SDValue Result = DAG.getLoad( |
3375 | PtrVT, dl, DAG.getEntryNode(), CPAddr, |
3376 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); |
3377 | |
3378 | if (IsPositionIndependent) { |
3379 | SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32); |
3380 | Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); |
3381 | } |
3382 | return Result; |
3383 | } |
3384 | case Intrinsic::arm_neon_vabs: |
3385 | return DAG.getNode(ISD::ABS, SDLoc(Op), Op.getValueType(), |
3386 | Op.getOperand(1)); |
3387 | case Intrinsic::arm_neon_vmulls: |
3388 | case Intrinsic::arm_neon_vmullu: { |
3389 | unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls) |
3390 | ? ARMISD::VMULLs : ARMISD::VMULLu; |
3391 | return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(), |
3392 | Op.getOperand(1), Op.getOperand(2)); |
3393 | } |
3394 | case Intrinsic::arm_neon_vminnm: |
3395 | case Intrinsic::arm_neon_vmaxnm: { |
3396 | unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminnm) |
3397 | ? ISD::FMINNUM : ISD::FMAXNUM; |
3398 | return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(), |
3399 | Op.getOperand(1), Op.getOperand(2)); |
3400 | } |
3401 | case Intrinsic::arm_neon_vminu: |
3402 | case Intrinsic::arm_neon_vmaxu: { |
3403 | if (Op.getValueType().isFloatingPoint()) |
3404 | return SDValue(); |
3405 | unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminu) |
3406 | ? ISD::UMIN : ISD::UMAX; |
3407 | return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(), |
3408 | Op.getOperand(1), Op.getOperand(2)); |
3409 | } |
3410 | case Intrinsic::arm_neon_vmins: |
3411 | case Intrinsic::arm_neon_vmaxs: { |
3412 | // v{min,max}s is overloaded between signed integers and floats. |
3413 | if (!Op.getValueType().isFloatingPoint()) { |
3414 | unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins) |
3415 | ? ISD::SMIN : ISD::SMAX; |
3416 | return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(), |
3417 | Op.getOperand(1), Op.getOperand(2)); |
3418 | } |
3419 | unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins) |
3420 | ? ISD::FMINNAN : ISD::FMAXNAN; |
3421 | return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(), |
3422 | Op.getOperand(1), Op.getOperand(2)); |
3423 | } |
3424 | case Intrinsic::arm_neon_vtbl1: |
3425 | return DAG.getNode(ARMISD::VTBL1, SDLoc(Op), Op.getValueType(), |
3426 | Op.getOperand(1), Op.getOperand(2)); |
3427 | case Intrinsic::arm_neon_vtbl2: |
3428 | return DAG.getNode(ARMISD::VTBL2, SDLoc(Op), Op.getValueType(), |
3429 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); |
3430 | } |
3431 | } |
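     | // Illustrative sketch (not from the original source): a chain-free NEON |
     | // intrinsic such as |
     | //   %p = call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %a, <4 x i16> %b) |
     | // is rewritten above into a target node (ARMISD::VMULLs here), which the |
     | // instruction selector then typically matches to a single widening multiply: |
     | //   vmull.s16 q0, d0, d1 |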
3432 | |
3433 | static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG, |
3434 | const ARMSubtarget *Subtarget) { |
3435 | SDLoc dl(Op); |
3436 | ConstantSDNode *SSIDNode = cast<ConstantSDNode>(Op.getOperand(2)); |
3437 | auto SSID = static_cast<SyncScope::ID>(SSIDNode->getZExtValue()); |
3438 | if (SSID == SyncScope::SingleThread) |
3439 | return Op; |
3440 | |
3441 | if (!Subtarget->hasDataBarrier()) { |
3442 | // Some ARMv6 CPUs can support data barriers with an MCR instruction. |
3443 | // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get |
3444 | // here. |
3445 | assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() && |
3446 | "Unexpected ISD::ATOMIC_FENCE encountered. Should be libcall!"); |
3447 | return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0), |
3448 | DAG.getConstant(0, dl, MVT::i32)); |
3449 | } |
3450 | |
3451 | ConstantSDNode *OrdN = cast<ConstantSDNode>(Op.getOperand(1)); |
3452 | AtomicOrdering Ord = static_cast<AtomicOrdering>(OrdN->getZExtValue()); |
3453 | ARM_MB::MemBOpt Domain = ARM_MB::ISH; |
3454 | if (Subtarget->isMClass()) { |
3455 | // Only a full system barrier exists in the M-class architectures. |
3456 | Domain = ARM_MB::SY; |
3457 | } else if (Subtarget->preferISHSTBarriers() && |
3458 | Ord == AtomicOrdering::Release) { |
3459 | // Swift happens to implement ISHST barriers in a way that's compatible with |
3460 | // Release semantics but weaker than ISH so we'd be fools not to use |
3461 | // it. Beware: other processors probably don't! |
3462 | Domain = ARM_MB::ISHST; |
3463 | } |
3464 | |
3465 | return DAG.getNode(ISD::INTRINSIC_VOID, dl, MVT::Other, Op.getOperand(0), |
3466 | DAG.getConstant(Intrinsic::arm_dmb, dl, MVT::i32), |
3467 | DAG.getConstant(Domain, dl, MVT::i32)); |
3468 | } |
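     | // Illustrative sketch (assumed mapping, not from the original source): on an |
     | // A-class core with data barriers, the lowering above roughly yields |
     | //   fence seq_cst                           -> dmb ish |
     | //   fence release (on Swift)                -> dmb ishst |
     | //   fence syncscope("singlethread") seq_cst -> no barrier instruction |
     | // while M-class cores always fall back to the full-system "dmb sy". |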
3469 | |
3470 | static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG, |
3471 | const ARMSubtarget *Subtarget) { |
3472 | // ARM pre-v5TE and Thumb1 do not have preload instructions. |
3473 | if (!(Subtarget->isThumb2() || |
3474 | (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps()))) |
3475 | // Just preserve the chain. |
3476 | return Op.getOperand(0); |
3477 | |
3478 | SDLoc dl(Op); |
3479 | unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1; |
3480 | if (!isRead && |
3481 | (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension())) |
3482 | // ARMv7 with MP extension has PLDW. |
3483 | return Op.getOperand(0); |
3484 | |
3485 | unsigned isData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue(); |
3486 | if (Subtarget->isThumb()) { |
3487 | // Invert the bits. |
3488 | isRead = ~isRead & 1; |
3489 | isData = ~isData & 1; |
3490 | } |
3491 | |
3492 | return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0), |
3493 | Op.getOperand(1), DAG.getConstant(isRead, dl, MVT::i32), |
3494 | DAG.getConstant(isData, dl, MVT::i32)); |
3495 | } |
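     | // Illustrative sketch (assumed example, not from the original source): with |
     | // the typed-pointer syntax of this LLVM version, |
     | //   call void @llvm.prefetch(i8* %p, i32 0, i32 3, i32 1)   ; read, data |
     | // becomes an ARMISD::PRELOAD node and typically selects to "pld [r0]". A |
     | // write prefetch (rw = 1) selects "pldw [r0]" only on ARMv7 with the MP |
     | // extension; on older cores the node is dropped and only the chain is kept. |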
3496 | |
3497 | static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) { |
3498 | MachineFunction &MF = DAG.getMachineFunction(); |
3499 | ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>(); |
3500 | |
3501 | // vastart just stores the address of the VarArgsFrameIndex slot into the |
3502 | // memory location argument. |
3503 | SDLoc dl(Op); |
3504 | EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); |
3505 | SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); |
3506 | const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); |
3507 | return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), |
3508 | MachinePointerInfo(SV)); |
3509 | } |
3510 | |
3511 | SDValue ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, |
3512 | CCValAssign &NextVA, |
3513 | SDValue &Root, |
3514 | SelectionDAG &DAG, |
3515 | const SDLoc &dl) const { |
3516 | MachineFunction &MF = DAG.getMachineFunction(); |
3517 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
3518 | |
3519 | const TargetRegisterClass *RC; |
3520 | if (AFI->isThumb1OnlyFunction()) |
3521 | RC = &ARM::tGPRRegClass; |
3522 | else |
3523 | RC = &ARM::GPRRegClass; |
3524 | |
3525 | // Transform the arguments stored in physical registers into virtual ones. |
3526 | unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); |
3527 | SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32); |
3528 | |
3529 | SDValue ArgValue2; |
3530 | if (NextVA.isMemLoc()) { |
3531 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
3532 | int FI = MFI.CreateFixedObject(4, NextVA.getLocMemOffset(), true); |
3533 | |
3534 | // Create load node to retrieve arguments from the stack. |
3535 | SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout())); |
3536 | ArgValue2 = DAG.getLoad( |
3537 | MVT::i32, dl, Root, FIN, |
3538 | MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)); |
3539 | } else { |
3540 | Reg = MF.addLiveIn(NextVA.getLocReg(), RC); |
3541 | ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32); |
3542 | } |
3543 | if (!Subtarget->isLittle()) |
3544 | std::swap(ArgValue, ArgValue2); |
3545 | return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2); |
3546 | } |
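     | // Illustrative sketch (assumed ABI example, not from the original source): |
     | // for an f64 argument that the calling convention splits across GPRs, e.g. |
     | //   double f(double d);   // d arrives in r0/r1 under the base AAPCS |
     | // the two i32 halves copied in above are glued back together through |
     | // ARMISD::VMOVDRR, which typically selects to "vmov d0, r0, r1" (with the |
     | // halves swapped first on big-endian targets, as done above). |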
3547 | |
3548 | // The remaining GPRs hold either the beginning of variable-argument |
3549 | // data, or the beginning of an aggregate passed by value (usually |
3550 | // byval). Either way, we allocate stack slots adjacent to the data |
3551 | // provided by our caller, and store the unallocated registers there. |
3552 | // If this is a variadic function, the va_list pointer will begin with |
3553 | // these values; otherwise, this reassembles a (byval) structure that |
3554 | // was split between registers and memory. |
3555 | // Return: The frame index the registers were stored into. |
3556 | int ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG, |
3557 | const SDLoc &dl, SDValue &Chain, |
3558 | const Value *OrigArg, |
3559 | unsigned InRegsParamRecordIdx, |
3560 | int ArgOffset, unsigned ArgSize) const { |
3561 | // Currently, two use-cases are possible: |
3562 | // Case #1. Non-var-args function, and we meet the first byval parameter. |
3563 | // Set up the first unallocated register as the first byval register; |
3564 | // eat all remaining registers |
3565 | // (these two actions are performed by the HandleByVal method). |
3566 | // Then, here, we initialize the stack frame with |
3567 | // "store-reg" instructions. |
3568 | // Case #2. Var-args function that doesn't contain byval parameters. |
3569 | // The same: eat all remaining unallocated registers and |
3570 | // initialize the stack frame. |
3571 | |
3572 | MachineFunction &MF = DAG.getMachineFunction(); |
3573 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
3574 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
3575 | unsigned RBegin, REnd; |
3576 | if (InRegsParamRecordIdx < CCInfo.getInRegsParamsCount()) { |
3577 | CCInfo.getInRegsParamInfo(InRegsParamRecordIdx, RBegin, REnd); |
3578 | } else { |
3579 | unsigned RBeginIdx = CCInfo.getFirstUnallocated(GPRArgRegs); |
3580 | RBegin = RBeginIdx == 4 ? (unsigned)ARM::R4 : GPRArgRegs[RBeginIdx]; |
3581 | REnd = ARM::R4; |
3582 | } |
3583 | |
3584 | if (REnd != RBegin) |
3585 | ArgOffset = -4 * (ARM::R4 - RBegin); |
3586 | |
3587 | auto PtrVT = getPointerTy(DAG.getDataLayout()); |
3588 | int FrameIndex = MFI.CreateFixedObject(ArgSize, ArgOffset, false); |
3589 | SDValue FIN = DAG.getFrameIndex(FrameIndex, PtrVT); |
3590 | |
3591 | SmallVector<SDValue, 4> MemOps; |
3592 | const TargetRegisterClass *RC = |
3593 | AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass : &ARM::GPRRegClass; |
3594 | |
3595 | for (unsigned Reg = RBegin, i = 0; Reg < REnd; ++Reg, ++i) { |
3596 | unsigned VReg = MF.addLiveIn(Reg, RC); |
3597 | SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); |
3598 | SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, |
3599 | MachinePointerInfo(OrigArg, 4 * i)); |
3600 | MemOps.push_back(Store); |
3601 | FIN = DAG.getNode(ISD::ADD, dl, PtrVT, FIN, DAG.getConstant(4, dl, PtrVT)); |
3602 | } |
3603 | |
3604 | if (!MemOps.empty()) |
3605 | Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); |
3606 | return FrameIndex; |
3607 | } |
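     | // Illustrative sketch (assumed example, not from the original source): for a |
     | // variadic function such as |
     | //   int sum(int n, ...)   // n in r0; r1-r3 carry the first variadic words |
     | // the loop above spills r1-r3 into fixed stack slots placed directly below |
     | // the caller-provided stack arguments, so va_arg can later walk all of the |
     | // variadic data as one contiguous block starting at the returned frame index. |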
3608 | |
3609 | // Set up the stack frame that the va_list pointer will start from. |
3610 | void ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG, |
3611 | const SDLoc &dl, SDValue &Chain, |
3612 | unsigned ArgOffset, |
3613 | unsigned TotalArgRegsSaveSize, |
3614 | bool ForceMutable) const { |
3615 | MachineFunction &MF = DAG.getMachineFunction(); |
3616 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
3617 | |
3618 | // Try to store any remaining integer argument regs |
3619 | // to their spots on the stack so that they may be loaded by dereferencing |
3620 | // the result of va_next. |
3621 | // If there are no regs to be stored, just point the address after the last |
3622 | // argument passed via the stack. |
3623 | int FrameIndex = StoreByValRegs(CCInfo, DAG, dl, Chain, nullptr, |
3624 | CCInfo.getInRegsParamsCount(), |
3625 | CCInfo.getNextStackOffset(), 4); |
3626 | AFI->setVarArgsFrameIndex(FrameIndex); |
3627 | } |
3628 | |
3629 | SDValue ARMTargetLowering::LowerFormalArguments( |
3630 | SDValue Chain, CallingConv::ID CallConv, bool isVarArg, |
3631 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, |
3632 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { |
3633 | MachineFunction &MF = DAG.getMachineFunction(); |
3634 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
3635 | |
3636 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
3637 | |
3638 | // Assign locations to all of the incoming arguments. |
3639 | SmallVector<CCValAssign, 16> ArgLocs; |
3640 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, |
3641 | *DAG.getContext()); |
3642 | CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForCall(CallConv, isVarArg)); |
3643 | |
3644 | SmallVector<SDValue, 16> ArgValues; |
3645 | SDValue ArgValue; |
3646 | Function::const_arg_iterator CurOrigArg = MF.getFunction().arg_begin(); |
3647 | unsigned CurArgIdx = 0; |
3648 | |
3649 | // Initially ArgRegsSaveSize is zero. |
3650 | // Then we increase this value each time we meet a byval parameter. |
3651 | // We also increase this value in the case of a varargs function. |
3652 | AFI->setArgRegsSaveSize(0); |
3653 | |
3654 | // Calculate the amount of stack space that we need to allocate to store |
3655 | // byval and variadic arguments that are passed in registers. |
3656 | // We need to know this before we allocate the first byval or variadic |
3657 | // argument, as they will be allocated a stack slot below the CFA (Canonical |
3658 | // Frame Address, the stack pointer at entry to the function). |
3659 | unsigned ArgRegBegin = ARM::R4; |
3660 | for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { |
3661 | if (CCInfo.getInRegsParamsProcessed() >= CCInfo.getInRegsParamsCount()) |
3662 | break; |
3663 | |
3664 | CCValAssign &VA = ArgLocs[i]; |
3665 | unsigned Index = VA.getValNo(); |
3666 | ISD::ArgFlagsTy Flags = Ins[Index].Flags; |
3667 | if (!Flags.isByVal()) |
3668 | continue; |
3669 | |
3670 | assert(VA.isMemLoc() && "unexpected byval pointer in reg"); |
3671 | unsigned RBegin, REnd; |
3672 | CCInfo.getInRegsParamInfo(CCInfo.getInRegsParamsProcessed(), RBegin, REnd); |
3673 | ArgRegBegin = std::min(ArgRegBegin, RBegin); |
3674 | |
3675 | CCInfo.nextInRegsParam(); |
3676 | } |
3677 | CCInfo.rewindByValRegsInfo(); |
3678 | |
3679 | int lastInsIndex = -1; |
3680 | if (isVarArg && MFI.hasVAStart()) { |
3681 | unsigned RegIdx = CCInfo.getFirstUnallocated(GPRArgRegs); |
3682 | if (RegIdx != array_lengthof(GPRArgRegs)) |
3683 | ArgRegBegin = std::min(ArgRegBegin, (unsigned)GPRArgRegs[RegIdx]); |
3684 | } |
3685 | |
3686 | unsigned TotalArgRegsSaveSize = 4 * (ARM::R4 - ArgRegBegin); |
3687 | AFI->setArgRegsSaveSize(TotalArgRegsSaveSize); |
3688 | auto PtrVT = getPointerTy(DAG.getDataLayout()); |
3689 | |
3690 | for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { |
3691 | CCValAssign &VA = ArgLocs[i]; |
3692 | if (Ins[VA.getValNo()].isOrigArg()) { |
3693 | std::advance(CurOrigArg, |
3694 | Ins[VA.getValNo()].getOrigArgIndex() - CurArgIdx); |
3695 | CurArgIdx = Ins[VA.getValNo()].getOrigArgIndex(); |
3696 | } |
3697 | // Arguments stored in registers. |
3698 | if (VA.isRegLoc()) { |
3699 | EVT RegVT = VA.getLocVT(); |
3700 | |
3701 | if (VA.needsCustom()) { |
3702 | // f64 and vector types are split up into multiple registers or |
3703 | // combinations of registers and stack slots. |
3704 | if (VA.getLocVT() == MVT::v2f64) { |
3705 | SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i], |
3706 | Chain, DAG, dl); |
3707 | VA = ArgLocs[++i]; // skip ahead to next loc |
3708 | SDValue ArgValue2; |
3709 | if (VA.isMemLoc()) { |
3710 | int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), true); |
3711 | SDValue FIN = DAG.getFrameIndex(FI, PtrVT); |
3712 | ArgValue2 = DAG.getLoad(MVT::f64, dl, Chain, FIN, |
3713 | MachinePointerInfo::getFixedStack( |
3714 | DAG.getMachineFunction(), FI)); |
3715 | } else { |
3716 | ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i], |
3717 | Chain, DAG, dl); |
3718 | } |
3719 | ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64); |
3720 | ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, |
3721 | ArgValue, ArgValue1, |
3722 | DAG.getIntPtrConstant(0, dl)); |
3723 | ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, |
3724 | ArgValue, ArgValue2, |
3725 | DAG.getIntPtrConstant(1, dl)); |
3726 | } else |
3727 | ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl); |
3728 | } else { |
3729 | const TargetRegisterClass *RC; |
3730 | |
3732 | if (RegVT == MVT::f16) |
3733 | RC = &ARM::HPRRegClass; |
3734 | else if (RegVT == MVT::f32) |
3735 | RC = &ARM::SPRRegClass; |
3736 | else if (RegVT == MVT::f64 || RegVT == MVT::v4f16) |
3737 | RC = &ARM::DPRRegClass; |
3738 | else if (RegVT == MVT::v2f64 || RegVT == MVT::v8f16) |
3739 | RC = &ARM::QPRRegClass; |
3740 | else if (RegVT == MVT::i32) |
3741 | RC = AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass |
3742 | : &ARM::GPRRegClass; |
3743 | else |
3744 | llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering"); |
3745 | |
3746 | // Transform the arguments in physical registers into virtual ones. |
3747 | unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); |
3748 | ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT); |
3749 | } |
3750 | |
3751 | // If this is an 8 or 16-bit value, it is really passed promoted |
3752 | // to 32 bits. Insert an assert[sz]ext to capture this, then |
3753 | // truncate to the right size. |
3754 | switch (VA.getLocInfo()) { |
3755 | default: llvm_unreachable("Unknown loc info!"); |
3756 | case CCValAssign::Full: break; |
3757 | case CCValAssign::BCvt: |
3758 | ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue); |
3759 | break; |
3760 | case CCValAssign::SExt: |
3761 | ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue, |
3762 | DAG.getValueType(VA.getValVT())); |
3763 | ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); |
3764 | break; |
3765 | case CCValAssign::ZExt: |
3766 | ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue, |
3767 | DAG.getValueType(VA.getValVT())); |
3768 | ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); |
3769 | break; |
3770 | } |
3771 | |
3772 | InVals.push_back(ArgValue); |
3773 | } else { // VA.isRegLoc() |
3774 | // sanity check |
3775 | assert(VA.isMemLoc()); |
3776 | assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered"); |
3777 | |
3778 | int index = VA.getValNo(); |
3779 | |
3780 | // Some Ins[] entries become multiple ArgLoc[] entries. |
3781 | // Process them only once. |
3782 | if (index != lastInsIndex) |
3783 | { |
3784 | ISD::ArgFlagsTy Flags = Ins[index].Flags; |
3785 | // FIXME: For now, all byval parameter objects are marked mutable. |
3786 | // This can be changed with more analysis. |
3787 | // In case of tail call optimization mark all arguments mutable. |
3788 | // Since they could be overwritten by lowering of arguments in case of |
3789 | // a tail call. |
3790 | if (Flags.isByVal()) { |
3791 | assert(Ins[index].isOrigArg() && |
3792 | "Byval arguments cannot be implicit"); |
3793 | unsigned CurByValIndex = CCInfo.getInRegsParamsProcessed(); |
3794 | |
3795 | int FrameIndex = StoreByValRegs( |
3796 | CCInfo, DAG, dl, Chain, &*CurOrigArg, CurByValIndex, |
3797 | VA.getLocMemOffset(), Flags.getByValSize()); |
3798 | InVals.push_back(DAG.getFrameIndex(FrameIndex, PtrVT)); |
3799 | CCInfo.nextInRegsParam(); |
3800 | } else { |
3801 | unsigned FIOffset = VA.getLocMemOffset(); |
3802 | int FI = MFI.CreateFixedObject(VA.getLocVT().getSizeInBits()/8, |
3803 | FIOffset, true); |
3804 | |
3805 | // Create load nodes to retrieve arguments from the stack. |
3806 | SDValue FIN = DAG.getFrameIndex(FI, PtrVT); |
3807 | InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, |
3808 | MachinePointerInfo::getFixedStack( |
3809 | DAG.getMachineFunction(), FI))); |
3810 | } |
3811 | lastInsIndex = index; |
3812 | } |
3813 | } |
3814 | } |
3815 | |
3816 | // varargs |
3817 | if (isVarArg && MFI.hasVAStart()) |
3818 | VarArgStyleRegisters(CCInfo, DAG, dl, Chain, |
3819 | CCInfo.getNextStackOffset(), |
3820 | TotalArgRegsSaveSize); |
3821 | |
3822 | AFI->setArgumentStackSize(CCInfo.getNextStackOffset()); |
3823 | |
3824 | return Chain; |
3825 | } |
3826 | |
3827 | /// isFloatingPointZero - Return true if this is +0.0. |
3828 | static bool isFloatingPointZero(SDValue Op) { |
3829 | if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) |
3830 | return CFP->getValueAPF().isPosZero(); |
3831 | else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) { |
3832 | // Maybe this has already been legalized into the constant pool? |
3833 | if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) { |
3834 | SDValue WrapperOp = Op.getOperand(1).getOperand(0); |
3835 | if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp)) |
3836 | if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal())) |
3837 | return CFP->getValueAPF().isPosZero(); |
3838 | } |
3839 | } else if (Op->getOpcode() == ISD::BITCAST && |
3840 | Op->getValueType(0) == MVT::f64) { |
3841 | // Handle (ISD::BITCAST (ARMISD::VMOVIMM (ISD::TargetConstant 0)) MVT::f64) |
3842 | // created by LowerConstantFP(). |
3843 | SDValue BitcastOp = Op->getOperand(0); |
3844 | if (BitcastOp->getOpcode() == ARMISD::VMOVIMM && |
3845 | isNullConstant(BitcastOp->getOperand(0))) |
3846 | return true; |
3847 | } |
3848 | return false; |
3849 | } |
3850 | |
3851 | /// Returns an appropriate ARM CMP (cmp) and the corresponding condition code |
3852 | /// for the given operands. |
3853 | SDValue ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, |
3854 | SDValue &ARMcc, SelectionDAG &DAG, |
3855 | const SDLoc &dl) const { |
3856 | if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) { |
3857 | unsigned C = RHSC->getZExtValue(); |
3858 | if (!isLegalICmpImmediate(C)) { |
3859 | // Constant does not fit, try adjusting it by one? |
3860 | switch (CC) { |
3861 | default: break; |
3862 | case ISD::SETLT: |
3863 | case ISD::SETGE: |
3864 | if (C != 0x80000000 && isLegalICmpImmediate(C-1)) { |
3865 | CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT; |
3866 | RHS = DAG.getConstant(C - 1, dl, MVT::i32); |
3867 | } |
3868 | break; |
3869 | case ISD::SETULT: |
3870 | case ISD::SETUGE: |
3871 | if (C != 0 && isLegalICmpImmediate(C-1)) { |
3872 | CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT; |
3873 | RHS = DAG.getConstant(C - 1, dl, MVT::i32); |
3874 | } |
3875 | break; |
3876 | case ISD::SETLE: |
3877 | case ISD::SETGT: |
3878 | if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) { |
3879 | CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE; |
3880 | RHS = DAG.getConstant(C + 1, dl, MVT::i32); |
3881 | } |
3882 | break; |
3883 | case ISD::SETULE: |
3884 | case ISD::SETUGT: |
3885 | if (C != 0xffffffff && isLegalICmpImmediate(C+1)) { |
3886 | CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE; |
3887 | RHS = DAG.getConstant(C + 1, dl, MVT::i32); |
3888 | } |
3889 | break; |
3890 | } |
3891 | } |
3892 | } else if ((ARM_AM::getShiftOpcForNode(LHS.getOpcode()) != ARM_AM::no_shift) && |
3893 | (ARM_AM::getShiftOpcForNode(RHS.getOpcode()) == ARM_AM::no_shift)) { |
3894 | // In ARM and Thumb-2, the compare instructions can shift their second |
3895 | // operand. |
3896 | CC = ISD::getSetCCSwappedOperands(CC); |
3897 | std::swap(LHS, RHS); |
3898 | } |
3899 | |
3900 | ARMCC::CondCodes CondCode = IntCCToARMCC(CC); |
3901 | ARMISD::NodeType CompareType; |
3902 | switch (CondCode) { |
3903 | default: |
3904 | CompareType = ARMISD::CMP; |
3905 | break; |
3906 | case ARMCC::EQ: |
3907 | case ARMCC::NE: |
3908 | // Uses only Z Flag |
3909 | CompareType = ARMISD::CMPZ; |
3910 | break; |
3911 | } |
3912 | ARMcc = DAG.getConstant(CondCode, dl, MVT::i32); |
3913 | return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS); |
3914 | } |
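     | // Illustrative sketch (assumed example, not from the original source): 4097 |
     | // (0x1001) is not a valid ARM modified immediate, so |
     | //   x < 4097   (SETLT) |
     | // is rewritten above into the equivalent |
     | //   x <= 4096  (SETLE) |
     | // where 4096 is encodable, typically yielding "cmp r0, #4096" with an LE |
     | // condition instead of first materializing 4097 in a register. |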
3915 | |
3916 | /// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands. |
3917 | SDValue ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, |
3918 | SelectionDAG &DAG, const SDLoc &dl, |
3919 | bool InvalidOnQNaN) const { |
3920 | assert(!Subtarget->isFPOnlySP() || RHS.getValueType() != MVT::f64); |
3921 | SDValue Cmp; |
3922 | SDValue C = DAG.getConstant(InvalidOnQNaN, dl, MVT::i32); |
3923 | if (!isFloatingPointZero(RHS)) |
3924 | Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Glue, LHS, RHS, C); |
3925 | else |
3926 | Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Glue, LHS, C); |
3927 | return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp); |
3928 | } |
3929 | |
3930 | /// duplicateCmp - Glue values can have only one use, so this function |
3931 | /// duplicates a comparison node. |
3932 | SDValue |
3933 | ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const { |
3934 | unsigned Opc = Cmp.getOpcode(); |
3935 | SDLoc DL(Cmp); |
3936 | if (Opc == ARMISD::CMP || Opc == ARMISD::CMPZ) |
3937 | return DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0), Cmp.getOperand(1)); |
3938 | |
3939 | assert(Opc == ARMISD::FMSTAT && "unexpected comparison operation"); |
3940 | Cmp = Cmp.getOperand(0); |
3941 | Opc = Cmp.getOpcode(); |
3942 | if (Opc == ARMISD::CMPFP) |
3943 | Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0), |
3944 | Cmp.getOperand(1), Cmp.getOperand(2)); |
3945 | else { |
3946 | assert(Opc == ARMISD::CMPFPw0 && "unexpected operand of FMSTAT"); |
3947 | Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0), |
3948 | Cmp.getOperand(1)); |
3949 | } |
3950 | return DAG.getNode(ARMISD::FMSTAT, DL, MVT::Glue, Cmp); |
3951 | } |
3952 | |
3953 | // This function returns three things: the arithmetic computation itself |
3954 | // (Value), a comparison (OverflowCmp), and a condition code (ARMcc). The |
3955 | // comparison and the condition code define the case in which the arithmetic |
3956 | // computation *does not* overflow. |
3957 | std::pair<SDValue, SDValue> |
3958 | ARMTargetLowering::getARMXALUOOp(SDValue Op, SelectionDAG &DAG, |
3959 | SDValue &ARMcc) const { |
3960 | assert(Op.getValueType() == MVT::i32 && "Unsupported value type"); |
3961 | |
3962 | SDValue Value, OverflowCmp; |
3963 | SDValue LHS = Op.getOperand(0); |
3964 | SDValue RHS = Op.getOperand(1); |
3965 | SDLoc dl(Op); |
3966 | |
3967 | // FIXME: We are currently always generating CMPs because we don't support |
3968 | // generating CMN through the backend. This is not as good as the natural |
3969 | // CMP case because it causes a register dependency and cannot be folded |
3970 | // later. |
3971 | |
3972 | switch (Op.getOpcode()) { |
3973 | default: |
3974 | llvm_unreachable("Unknown overflow instruction!"); |
3975 | case ISD::SADDO: |
3976 | ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32); |
3977 | Value = DAG.getNode(ISD::ADD, dl, Op.getValueType(), LHS, RHS); |
3978 | OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS); |
3979 | break; |
3980 | case ISD::UADDO: |
3981 | ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32); |
3982 | // We use ADDC here to correspond to its use in LowerUnsignedALUO. |
3983 | // We do not use it in the USUBO case as Value may not be used. |
3984 | Value = DAG.getNode(ARMISD::ADDC, dl, |
3985 | DAG.getVTList(Op.getValueType(), MVT::i32), LHS, RHS) |
3986 | .getValue(0); |
3987 | OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS); |
3988 | break; |
3989 | case ISD::SSUBO: |
3990 | ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32); |
3991 | Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS); |
3992 | OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS); |
3993 | break; |
3994 | case ISD::USUBO: |
3995 | ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32); |
3996 | Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS); |
3997 | OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS); |
3998 | break; |
3999 | case ISD::UMULO: |
4000 | // We generate a UMUL_LOHI and then check if the high word is 0. |
4001 | ARMcc = DAG.getConstant(ARMCC::EQ, dl, MVT::i32); |
4002 | Value = DAG.getNode(ISD::UMUL_LOHI, dl, |
4003 | DAG.getVTList(Op.getValueType(), Op.getValueType()), |
4004 | LHS, RHS); |
4005 | OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value.getValue(1), |
4006 | DAG.getConstant(0, dl, MVT::i32)); |
4007 | Value = Value.getValue(0); // We only want the low 32 bits for the result. |
4008 | break; |
4009 | case ISD::SMULO: |
4010 | // We generate a SMUL_LOHI and then check if all the bits of the high word |
4011 | // are the same as the sign bit of the low word. |
4012 | ARMcc = DAG.getConstant(ARMCC::EQ, dl, MVT::i32); |
4013 | Value = DAG.getNode(ISD::SMUL_LOHI, dl, |
4014 | DAG.getVTList(Op.getValueType(), Op.getValueType()), |
4015 | LHS, RHS); |
4016 | OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value.getValue(1), |
4017 | DAG.getNode(ISD::SRA, dl, Op.getValueType(), |
4018 | Value.getValue(0), |
4019 | DAG.getConstant(31, dl, MVT::i32))); |
4020 | Value = Value.getValue(0); // We only want the low 32 bits for the result. |
4021 | break; |
4022 | } // switch (...) |
4023 | |
4024 | return std::make_pair(Value, OverflowCmp); |
4025 | } |
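     | // Illustrative sketch (assumed example, not from the original source): for |
     | //   %r = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b) |
     | // the code above returns Value = (add %a, %b), OverflowCmp = (CMP Value, %a), |
     | // and ARMcc = VC: subtracting %a back out of the wrapped sum sets the V flag |
     | // exactly when the original addition overflowed, so VC ("no overflow") holds |
     | // in the non-overflowing case. |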
4026 | |
4027 | SDValue |
4028 | ARMTargetLowering::LowerSignedALUO(SDValue Op, SelectionDAG &DAG) const { |
4029 | // Let legalize expand this if it isn't a legal type yet. |
4030 | if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType())) |
4031 | return SDValue(); |
4032 | |
4033 | SDValue Value, OverflowCmp; |
4034 | SDValue ARMcc; |
4035 | std::tie(Value, OverflowCmp) = getARMXALUOOp(Op, DAG, ARMcc); |
4036 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); |
4037 | SDLoc dl(Op); |
4038 | // We use 0 and 1 as false and true values. |
4039 | SDValue TVal = DAG.getConstant(1, dl, MVT::i32); |
4040 | SDValue FVal = DAG.getConstant(0, dl, MVT::i32); |
4041 | EVT VT = Op.getValueType(); |
4042 | |
4043 | SDValue Overflow = DAG.getNode(ARMISD::CMOV, dl, VT, TVal, FVal, |
4044 | ARMcc, CCR, OverflowCmp); |
4045 | |
4046 | SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32); |
4047 | return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow); |
4048 | } |
4049 | |
4050 | static SDValue ConvertBooleanCarryToCarryFlag(SDValue BoolCarry, |
4051 | SelectionDAG &DAG) { |
4052 | SDLoc DL(BoolCarry); |
4053 | EVT CarryVT = BoolCarry.getValueType(); |
4054 | |
4055 | // This converts the boolean value carry into the carry flag by doing |
4056 | // ARMISD::SUBC Carry, 1 |
4057 | return DAG.getNode(ARMISD::SUBC, DL, DAG.getVTList(CarryVT, MVT::i32), |
4058 | BoolCarry, DAG.getConstant(1, DL, CarryVT)); |
4059 | } |
4060 | |
4061 | static SDValue ConvertCarryFlagToBooleanCarry(SDValue Flags, EVT VT, |
4062 | SelectionDAG &DAG) { |
4063 | SDLoc DL(Flags); |
4064 | |
4065 | // Now convert the carry flag into a boolean carry. We do this |
4066 | // using ARMISD:ADDE 0, 0, Carry |
4067 | return DAG.getNode(ARMISD::ADDE, DL, DAG.getVTList(VT, MVT::i32), |
4068 | DAG.getConstant(0, DL, MVT::i32), |
4069 | DAG.getConstant(0, DL, MVT::i32), Flags); |
4070 | } |
4071 | |
4072 | SDValue ARMTargetLowering::LowerUnsignedALUO(SDValue Op, |
4073 | SelectionDAG &DAG) const { |
4074 | // Let legalize expand this if it isn't a legal type yet. |
4075 | if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType())) |
4076 | return SDValue(); |
4077 | |
4078 | SDValue LHS = Op.getOperand(0); |
4079 | SDValue RHS = Op.getOperand(1); |
4080 | SDLoc dl(Op); |
4081 | |
4082 | EVT VT = Op.getValueType(); |
4083 | SDVTList VTs = DAG.getVTList(VT, MVT::i32); |
4084 | SDValue Value; |
4085 | SDValue Overflow; |
4086 | switch (Op.getOpcode()) { |
4087 | default: |
4088 | llvm_unreachable("Unknown overflow instruction!"); |
4089 | case ISD::UADDO: |
4090 | Value = DAG.getNode(ARMISD::ADDC, dl, VTs, LHS, RHS); |
4091 | // Convert the carry flag into a boolean value. |
4092 | Overflow = ConvertCarryFlagToBooleanCarry(Value.getValue(1), VT, DAG); |
4093 | break; |
4094 | case ISD::USUBO: { |
4095 | Value = DAG.getNode(ARMISD::SUBC, dl, VTs, LHS, RHS); |
4096 | // Convert the carry flag into a boolean value. |
4097 | Overflow = ConvertCarryFlagToBooleanCarry(Value.getValue(1), VT, DAG); |
4098 | // ARMISD::SUBC returns 0 when we have to borrow, so make it an overflow |
4099 | // value. So compute 1 - C. |
4100 | Overflow = DAG.getNode(ISD::SUB, dl, MVT::i32, |
4101 | DAG.getConstant(1, dl, MVT::i32), Overflow); |
4102 | break; |
4103 | } |
4104 | } |
4105 | |
4106 | return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow); |
4107 | } |
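     | // Illustrative sketch (assumed output, not from the original source): for |
     | //   %r = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b) |
     | // the ADDC/ADDE pair built above typically becomes something like |
     | //   adds r0, r0, r1       @ sum; sets the carry flag |
     | //   mov  r2, #0 |
     | //   adc  r2, r2, #0       @ materializes the carry as a 0/1 overflow bit |
     | // For USUBO, ARM's SUBC sets carry to 1 when there is *no* borrow, hence the |
     | // extra "1 - C" computed above to obtain a conventional overflow flag. |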
4108 | |
4109 | SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { |
4110 | SDValue Cond = Op.getOperand(0); |
4111 | SDValue SelectTrue = Op.getOperand(1); |
4112 | SDValue SelectFalse = Op.getOperand(2); |
4113 | SDLoc dl(Op); |
4114 | unsigned Opc = Cond.getOpcode(); |
4115 | |
4116 | if (Cond.getResNo() == 1 && |
4117 | (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO || |
4118 | Opc == ISD::USUBO)) { |
4119 | if (!DAG.getTargetLoweringInfo().isTypeLegal(Cond->getValueType(0))) |
4120 | return SDValue(); |
4121 | |
4122 | SDValue Value, OverflowCmp; |
4123 | SDValue ARMcc; |
4124 | std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc); |
4125 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); |
4126 | EVT VT = Op.getValueType(); |
4127 | |
4128 | return getCMOV(dl, VT, SelectTrue, SelectFalse, ARMcc, CCR, |
4129 | OverflowCmp, DAG); |
4130 | } |
4131 | |
4132 | // Convert: |
4133 | // |
4134 | // (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond) |
4135 | // (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond) |
4136 | // |
4137 | if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) { |
4138 | const ConstantSDNode *CMOVTrue = |
4139 | dyn_cast<ConstantSDNode>(Cond.getOperand(0)); |
4140 | const ConstantSDNode *CMOVFalse = |
4141 | dyn_cast<ConstantSDNode>(Cond.getOperand(1)); |
4142 | |
4143 | if (CMOVTrue && CMOVFalse) { |
4144 | unsigned CMOVTrueVal = CMOVTrue->getZExtValue(); |
4145 | unsigned CMOVFalseVal = CMOVFalse->getZExtValue(); |
4146 | |
4147 | SDValue True; |
4148 | SDValue False; |
4149 | if (CMOVTrueVal == 1 && CMOVFalseVal == 0) { |
4150 | True = SelectTrue; |
4151 | False = SelectFalse; |
4152 | } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) { |
4153 | True = SelectFalse; |
4154 | False = SelectTrue; |
4155 | } |
4156 | |
4157 | if (True.getNode() && False.getNode()) { |
4158 | EVT VT = Op.getValueType(); |
4159 | SDValue ARMcc = Cond.getOperand(2); |
4160 | SDValue CCR = Cond.getOperand(3); |
4161 | SDValue Cmp = duplicateCmp(Cond.getOperand(4), DAG); |
4162 | assert(True.getValueType() == VT); |
4163 | return getCMOV(dl, VT, True, False, ARMcc, CCR, Cmp, DAG); |
4164 | } |
4165 | } |
4166 | } |
4167 | |
4168 | // ARM's BooleanContents value is UndefinedBooleanContent. Mask out the |
4169 | // undefined bits before doing a full-word comparison with zero. |
4170 | Cond = DAG.getNode(ISD::AND, dl, Cond.getValueType(), Cond, |
4171 | DAG.getConstant(1, dl, Cond.getValueType())); |
4172 | |
4173 | return DAG.getSelectCC(dl, Cond, |
4174 | DAG.getConstant(0, dl, Cond.getValueType()), |
4175 | SelectTrue, SelectFalse, ISD::SETNE); |
4176 | } |
4177 | |
4178 | static void checkVSELConstraints(ISD::CondCode CC, ARMCC::CondCodes &CondCode, |
4179 | bool &swpCmpOps, bool &swpVselOps) { |
4180 | // Start by selecting the GE condition code for opcodes that return true for |
4181 | // 'equality' |
4182 | if (CC == ISD::SETUGE || CC == ISD::SETOGE || CC == ISD::SETOLE || |
4183 | CC == ISD::SETULE) |
4184 | CondCode = ARMCC::GE; |
4185 | |
4186 | // and GT for opcodes that return false for 'equality'. |
4187 | else if (CC == ISD::SETUGT || CC == ISD::SETOGT || CC == ISD::SETOLT || |
4188 | CC == ISD::SETULT) |
4189 | CondCode = ARMCC::GT; |
4190 | |
4191 | // Since we are constrained to GE/GT, if the opcode contains 'less', we need |
4192 | // to swap the compare operands. |
4193 | if (CC == ISD::SETOLE || CC == ISD::SETULE || CC == ISD::SETOLT || |
4194 | CC == ISD::SETULT) |
4195 | swpCmpOps = true; |
4196 | |
4197 | // Both GT and GE are ordered comparisons, and return false for 'unordered'. |
4198 | // If we have an unordered opcode, we need to swap the operands to the VSEL |
4199 | // instruction (effectively negating the condition). |
4200 | // |
4201 | // This also has the effect of swapping which one of 'less' or 'greater' |
4202 | // returns true, so we also swap the compare operands. It also switches |
4203 | // whether we return true for 'equality', so we compensate by picking the |
4204 | // opposite condition code to our original choice. |
4205 | if (CC == ISD::SETULE || CC == ISD::SETULT || CC == ISD::SETUGE || |
4206 | CC == ISD::SETUGT) { |
4207 | swpCmpOps = !swpCmpOps; |
4208 | swpVselOps = !swpVselOps; |
4209 | CondCode = CondCode == ARMCC::GT ? ARMCC::GE : ARMCC::GT; |
4210 | } |
4211 | |
4212 | // 'ordered' is 'anything but unordered', so use the VS condition code and |
4213 | // swap the VSEL operands. |
4214 | if (CC == ISD::SETO) { |
4215 | CondCode = ARMCC::VS; |
4216 | swpVselOps = true; |
4217 | } |
4218 | |
4219 | // 'unordered or not equal' is 'anything but equal', so use the EQ condition |
4220 | // code and swap the VSEL operands. |
4221 | if (CC == ISD::SETUNE) { |
4222 | CondCode = ARMCC::EQ; |
4223 | swpVselOps = true; |
4224 | } |
4225 | } |
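     | // Worked example of the constraints above (illustrative, not from the |
     | // original source): for CC == SETULT ("unordered or less than") we first |
     | // pick GT and request swapped compare operands; the unordered adjustment |
     | // then un-swaps the compare, swaps the VSEL operands, and relaxes GT to GE. |
     | // The net effect for "select (setult a, b), t, f" is roughly |
     | //   vcmp       a, b |
     | //   vselge.f32 d, f, t   @ GE fails exactly when a < b or unordered -> t |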
4226 | |
4227 | SDValue ARMTargetLowering::getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal, |
4228 | SDValue TrueVal, SDValue ARMcc, SDValue CCR, |
4229 | SDValue Cmp, SelectionDAG &DAG) const { |
4230 | if (Subtarget->isFPOnlySP() && VT == MVT::f64) { |
4231 | FalseVal = DAG.getNode(ARMISD::VMOVRRD, dl, |
4232 | DAG.getVTList(MVT::i32, MVT::i32), FalseVal); |
4233 | TrueVal = DAG.getNode(ARMISD::VMOVRRD, dl, |
4234 | DAG.getVTList(MVT::i32, MVT::i32), TrueVal); |
4235 | |
4236 | SDValue TrueLow = TrueVal.getValue(0); |
4237 | SDValue TrueHigh = TrueVal.getValue(1); |
4238 | SDValue FalseLow = FalseVal.getValue(0); |
4239 | SDValue FalseHigh = FalseVal.getValue(1); |
4240 | |
4241 | SDValue Low = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseLow, TrueLow, |
4242 | ARMcc, CCR, Cmp); |
4243 | SDValue High = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseHigh, TrueHigh, |
4244 | ARMcc, CCR, duplicateCmp(Cmp, DAG)); |
4245 | |
4246 | return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Low, High); |
4247 | } else { |
4248 | return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR, |
4249 | Cmp); |
4250 | } |
4251 | } |
4252 | |
4253 | static bool isGTorGE(ISD::CondCode CC) { |
4254 | return CC == ISD::SETGT || CC == ISD::SETGE; |
4255 | } |
4256 | |
4257 | static bool isLTorLE(ISD::CondCode CC) { |
4258 | return CC == ISD::SETLT || CC == ISD::SETLE; |
4259 | } |
4260 | |
4261 | // See if a conditional (LHS CC RHS ? TrueVal : FalseVal) is lower-saturating. |
4262 | // All of these conditions (and their <= and >= counterparts) will do: |
4263 | // x < k ? k : x |
4264 | // x > k ? x : k |
4265 | // k < x ? x : k |
4266 | // k > x ? k : x |
4267 | static bool isLowerSaturate(const SDValue LHS, const SDValue RHS, |
4268 | const SDValue TrueVal, const SDValue FalseVal, |
4269 | const ISD::CondCode CC, const SDValue K) { |
4270 | return (isGTorGE(CC) && |
4271 | ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal))) || |
4272 | (isLTorLE(CC) && |
4273 | ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal))); |
4274 | } |
4275 | |
4276 | // Similar to isLowerSaturate(), but checks for upper-saturating conditions. |
4277 | static bool isUpperSaturate(const SDValue LHS, const SDValue RHS, |
4278 | const SDValue TrueVal, const SDValue FalseVal, |
4279 | const ISD::CondCode CC, const SDValue K) { |
4280 | return (isGTorGE(CC) && |
4281 | ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal))) || |
4282 | (isLTorLE(CC) && |
4283 | ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal))); |
4284 | } |
4285 | |
4286 | // Check if two chained conditionals could be converted into SSAT or USAT. |
4287 | // |
4288 | // SSAT can replace a set of two conditional selectors that bound a number to an |
4289 | // interval of type [k, ~k] when k + 1 is a power of 2. Here are some examples: |
4290 | // |
4291 | // x < -k ? -k : (x > k ? k : x) |
4292 | // x < -k ? -k : (x < k ? x : k) |
4293 | // x > -k ? (x > k ? k : x) : -k |
4294 | // x < k ? (x < -k ? -k : x) : k |
4295 | // etc. |
4296 | // |
4297 | // USAT works similarly to SSAT, but bounds the number to the interval [0, k], |
4298 | // where k + 1 is a power of 2. |
4299 | // |
4300 | // It returns true if the conversion can be done, false otherwise. |
4301 | // Additionally, the variable is returned in parameter V, the constant in K, |
4302 | // and usat is set to true if the conditional represents an unsigned saturation. |
4303 | static bool isSaturatingConditional(const SDValue &Op, SDValue &V, |
4304 | uint64_t &K, bool &usat) { |
4305 | SDValue LHS1 = Op.getOperand(0); |
4306 | SDValue RHS1 = Op.getOperand(1); |
4307 | SDValue TrueVal1 = Op.getOperand(2); |
4308 | SDValue FalseVal1 = Op.getOperand(3); |
4309 | ISD::CondCode CC1 = cast<CondCodeSDNode>(Op.getOperand(4))->get(); |
4310 | |
4311 | const SDValue Op2 = isa<ConstantSDNode>(TrueVal1) ? FalseVal1 : TrueVal1; |
4312 | if (Op2.getOpcode() != ISD::SELECT_CC) |
4313 | return false; |
4314 | |
4315 | SDValue LHS2 = Op2.getOperand(0); |
4316 | SDValue RHS2 = Op2.getOperand(1); |
4317 | SDValue TrueVal2 = Op2.getOperand(2); |
4318 | SDValue FalseVal2 = Op2.getOperand(3); |
4319 | ISD::CondCode CC2 = cast<CondCodeSDNode>(Op2.getOperand(4))->get(); |
4320 | |
4321 | // Find out which are the constants and which are the variables |
4322 | // in each conditional |
4323 | SDValue *K1 = isa<ConstantSDNode>(LHS1) ? &LHS1 : isa<ConstantSDNode>(RHS1) |
4324 | ? &RHS1 |
4325 | : nullptr; |
4326 | SDValue *K2 = isa<ConstantSDNode>(LHS2) ? &LHS2 : isa<ConstantSDNode>(RHS2) |
4327 | ? &RHS2 |
4328 | : nullptr; |
4329 | SDValue K2Tmp = isa<ConstantSDNode>(TrueVal2) ? TrueVal2 : FalseVal2; |
4330 | SDValue V1Tmp = (K1 && *K1 == LHS1) ? RHS1 : LHS1; |
4331 | SDValue V2Tmp = (K2 && *K2 == LHS2) ? RHS2 : LHS2; |
4332 | SDValue V2 = (K2Tmp == TrueVal2) ? FalseVal2 : TrueVal2; |
4333 | |
4334 | // We must detect cases where the original operations worked with 16- or |
4335 | // 8-bit values. In such case, V2Tmp != V2 because the comparison operations |
4336 | // must work with sign-extended values but the select operations return |
4337 | // the original non-extended value. |
4338 | SDValue V2TmpReg = V2Tmp; |
4339 | if (V2Tmp->getOpcode() == ISD::SIGN_EXTEND_INREG) |
4340 | V2TmpReg = V2Tmp->getOperand(0); |
4341 | |
4342 | // Check that the registers and the constants have the correct values |
4343 | // in both conditionals |
4344 | if (!K1 || !K2 || *K1 == Op2 || *K2 != K2Tmp || V1Tmp != V2Tmp || |
4345 | V2TmpReg != V2) |
4346 | return false; |
4347 | |
4348 | // Figure out which conditional is saturating the lower/upper bound. |
4349 | const SDValue *LowerCheckOp = |
4350 | isLowerSaturate(LHS1, RHS1, TrueVal1, FalseVal1, CC1, *K1) |
4351 | ? &Op |
4352 | : isLowerSaturate(LHS2, RHS2, TrueVal2, FalseVal2, CC2, *K2) |
4353 | ? &Op2 |
4354 | : nullptr; |
4355 | const SDValue *UpperCheckOp = |
4356 | isUpperSaturate(LHS1, RHS1, TrueVal1, FalseVal1, CC1, *K1) |
4357 | ? &Op |
4358 | : isUpperSaturate(LHS2, RHS2, TrueVal2, FalseVal2, CC2, *K2) |
4359 | ? &Op2 |
4360 | : nullptr; |
4361 | |
4362 | if (!UpperCheckOp || !LowerCheckOp || LowerCheckOp == UpperCheckOp) |
4363 | return false; |
4364 | |
4365 | // Check that the constant in the lower-bound check is |
4366 | // the opposite of the constant in the upper-bound check |
4367 | // in 1's complement. |
4368 | int64_t Val1 = cast<ConstantSDNode>(*K1)->getSExtValue(); |
4369 | int64_t Val2 = cast<ConstantSDNode>(*K2)->getSExtValue(); |
4370 | int64_t PosVal = std::max(Val1, Val2); |
4371 | int64_t NegVal = std::min(Val1, Val2); |
4372 | |
4373 | if (((Val1 > Val2 && UpperCheckOp == &Op) || |
4374 | (Val1 < Val2 && UpperCheckOp == &Op2)) && |
4375 | isPowerOf2_64(PosVal + 1)) { |
4376 | |
4377 | // Handle the difference between USAT (unsigned) and SSAT (signed) saturation |
4378 | if (Val1 == ~Val2) |
4379 | usat = false; |
4380 | else if (NegVal == 0) |
4381 | usat = true; |
4382 | else |
4383 | return false; |
4384 | |
4385 | V = V2; |
4386 | K = (uint64_t)PosVal; // At this point, PosVal is guaranteed to be positive |
4387 | |
4388 | return true; |
4389 | } |
4390 | |
4391 | return false; |
4392 | } |
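     | // Illustrative sketch (assumed example, not from the original source): the |
     | // chained selects recognized above correspond to C expressions such as |
     | //   x < -128 ? -128 : (x > 127 ? 127 : x)   // signed: 127 == ~(-128) |
     | //   x < 0    ? 0    : (x > 255 ? 255 : x)   // unsigned: NegVal == 0 |
     | // which LowerSELECT_CC below collapses into single saturating instructions, |
     | // typically "ssat r0, #8, r0" and "usat r0, #8, r0" respectively. |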
4393 | |
4394 | // Check if a condition of the type x < k ? k : x can be converted into a |
4395 | // bit operation instead of conditional moves. |
4396 | // Currently this is allowed given: |
4397 | // - The conditions and values match up |
4398 | // - k is 0 or -1 (all ones) |
4399 | // This function will not check the last condition; that's up to the caller. |
4400 | // It returns true if the transformation can be made, and in such case |
4401 | // returns x in V, and k in SatK. |
4402 | static bool isLowerSaturatingConditional(const SDValue &Op, SDValue &V, |
4403 | SDValue &SatK) |
4404 | { |
4405 | SDValue LHS = Op.getOperand(0); |
4406 | SDValue RHS = Op.getOperand(1); |
4407 | ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); |
4408 | SDValue TrueVal = Op.getOperand(2); |
4409 | SDValue FalseVal = Op.getOperand(3); |
4410 | |
4411 | SDValue *K = isa<ConstantSDNode>(LHS) ? &LHS : isa<ConstantSDNode>(RHS) |
4412 | ? &RHS |
4413 | : nullptr; |
4414 | |
4415 | // No constant operation in comparison, early out |
4416 | if (!K) |
4417 | return false; |
4418 | |
4419 | SDValue KTmp = isa<ConstantSDNode>(TrueVal) ? TrueVal : FalseVal; |
4420 | V = (KTmp == TrueVal) ? FalseVal : TrueVal; |
4421 | SDValue VTmp = (K && *K == LHS) ? RHS : LHS; |
4422 | |
4423 | // If the constant in the compare does not match the constant in the select, |
4424 | // or likewise for the variable, early out |
4425 | if (*K != KTmp || V != VTmp) |
4426 | return false; |
4427 | |
4428 | if (isLowerSaturate(LHS, RHS, TrueVal, FalseVal, CC, *K)) { |
4429 | SatK = *K; |
4430 | return true; |
4431 | } |
4432 | |
4433 | return false; |
4434 | } |
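     | // Illustrative sketch (assumed example, not from the original source): when |
     | // the pattern above matches with k == 0 or k == -1, LowerSELECT_CC below |
     | // replaces the conditional move with shift-and-mask tricks, e.g. |
     | //   x < 0  ? 0  : x   ->  x & ~(x >> 31)  ->  bic r0, r0, r0, asr #31 |
     | //   x > -1 ? x : -1   ->  x | (x >> 31)   ->  orr r0, r0, r0, asr #31 |
     | // i.e. a single flexible-operand-2 instruction on ARM and Thumb-2. |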
4435 | |
4436 | SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { |
4437 | EVT VT = Op.getValueType(); |
4438 | SDLoc dl(Op); |
4439 | |
4440 | // Try to convert two saturating conditional selects into a single SSAT |
4441 | SDValue SatValue; |
4442 | uint64_t SatConstant; |
4443 | bool SatUSat; |
4444 | if (((!Subtarget->isThumb() && Subtarget->hasV6Ops()) || Subtarget->isThumb2()) && |
4445 | isSaturatingConditional(Op, SatValue, SatConstant, SatUSat)) { |
4446 | if (SatUSat) |
4447 | return DAG.getNode(ARMISD::USAT, dl, VT, SatValue, |
4448 | DAG.getConstant(countTrailingOnes(SatConstant), dl, VT)); |
4449 | else |
4450 | return DAG.getNode(ARMISD::SSAT, dl, VT, SatValue, |
4451 | DAG.getConstant(countTrailingOnes(SatConstant), dl, VT)); |
4452 | } |
4453 | |
4454 | // Try to convert expressions of the form x < k ? k : x (and similar forms) |
4455 | // into more efficient bit operations, which is possible when k is 0 or -1 |
4456 | // On ARM and Thumb-2 which have flexible operand 2 this will result in |
4457 | // single instructions. On Thumb the shift and the bit operation will be two |
4458 | // instructions. |
4459 | // Only allow this transformation on full-width (32-bit) operations |
4460 | SDValue LowerSatConstant; |
4461 | if (VT == MVT::i32 && |
4462 | isLowerSaturatingConditional(Op, SatValue, LowerSatConstant)) { |
4463 | SDValue ShiftV = DAG.getNode(ISD::SRA, dl, VT, SatValue, |
4464 | DAG.getConstant(31, dl, VT)); |
4465 | if (isNullConstant(LowerSatConstant)) { |
4466 | SDValue NotShiftV = DAG.getNode(ISD::XOR, dl, VT, ShiftV, |
4467 | DAG.getAllOnesConstant(dl, VT)); |
4468 | return DAG.getNode(ISD::AND, dl, VT, SatValue, NotShiftV); |
4469 | } else if (isAllOnesConstant(LowerSatConstant)) |
4470 | return DAG.getNode(ISD::OR, dl, VT, SatValue, ShiftV); |
4471 | } |
4472 | |
4473 | SDValue LHS = Op.getOperand(0); |
4474 | SDValue RHS = Op.getOperand(1); |
4475 | ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); |
4476 | SDValue TrueVal = Op.getOperand(2); |
4477 | SDValue FalseVal = Op.getOperand(3); |
4478 | |
4479 | if (Subtarget->isFPOnlySP() && LHS.getValueType() == MVT::f64) { |
4480 | DAG.getTargetLoweringInfo().softenSetCCOperands(DAG, MVT::f64, LHS, RHS, CC, |
4481 | dl); |
4482 | |
4483 | // If softenSetCCOperands only returned one value, we should compare it to |
4484 | // zero. |
4485 | if (!RHS.getNode()) { |
4486 | RHS = DAG.getConstant(0, dl, LHS.getValueType()); |
4487 | CC = ISD::SETNE; |
4488 | } |
4489 | } |
4490 | |
4491 | if (LHS.getValueType() == MVT::i32) { |
4492 | // Try to generate VSEL on ARMv8. |
4493 | // The VSEL instruction can't use all the usual ARM condition |
4494 | // codes: it only has two bits to select the condition code, so it's |
4495 | // constrained to use only GE, GT, VS and EQ. |
4496 | // |
4497 | // To implement all the various ISD::SETXXX opcodes, we sometimes need to |
4498 | // swap the operands of the previous compare instruction (effectively |
4499 | // inverting the compare condition, swapping 'less' and 'greater') and |
4500 | // sometimes need to swap the operands to the VSEL (which inverts the |
4501 | // condition in the sense of firing whenever the previous condition didn't) |
4502 | if (Subtarget->hasFPARMv8() && (TrueVal.getValueType() == MVT::f32 || |
4503 | TrueVal.getValueType() == MVT::f64)) { |
4504 | ARMCC::CondCodes CondCode = IntCCToARMCC(CC); |
4505 | if (CondCode == ARMCC::LT || CondCode == ARMCC::LE || |
4506 | CondCode == ARMCC::VC || CondCode == ARMCC::NE) { |
4507 | CC = ISD::getSetCCInverse(CC, true); |
4508 | std::swap(TrueVal, FalseVal); |
4509 | } |
4510 | } |
4511 | |
4512 | SDValue ARMcc; |
4513 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); |
4514 | SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); |
4515 | return getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG); |
4516 | } |
4517 | |
4518 | ARMCC::CondCodes CondCode, CondCode2; |
4519 | bool InvalidOnQNaN; |
4520 | FPCCToARMCC(CC, CondCode, CondCode2, InvalidOnQNaN); |
4521 | |
4522 | // Normalize the fp compare. If RHS is zero we keep it there so we match |
4523 | // CMPFPw0 instead of CMPFP. |
4524 | if (Subtarget->hasFPARMv8() && !isFloatingPointZero(RHS) && |
4525 | (TrueVal.getValueType() == MVT::f32 || TrueVal.getValueType() == MVT::f64)) { |
4526 | bool swpCmpOps = false; |
4527 | bool swpVselOps = false; |
4528 | checkVSELConstraints(CC, CondCode, swpCmpOps, swpVselOps); |
4529 | |
4530 | if (CondCode == ARMCC::GT || CondCode == ARMCC::GE || |
4531 | CondCode == ARMCC::VS || CondCode == ARMCC::EQ) { |
4532 | if (swpCmpOps) |
4533 | std::swap(LHS, RHS); |
4534 | if (swpVselOps) |
4535 | std::swap(TrueVal, FalseVal); |
4536 | } |
4537 | } |
4538 | |
4539 | SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32); |
4540 | SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl, InvalidOnQNaN); |
4541 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); |
4542 | SDValue Result = getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG); |
4543 | if (CondCode2 != ARMCC::AL) { |
4544 | SDValue ARMcc2 = DAG.getConstant(CondCode2, dl, MVT::i32); |
4545 | // FIXME: Needs another CMP because flag can have but one use. |
4546 | SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl, InvalidOnQNaN); |
4547 | Result = getCMOV(dl, VT, Result, TrueVal, ARMcc2, CCR, Cmp2, DAG); |
4548 | } |
4549 | return Result; |
4550 | } |
4551 | |
4552 | /// canChangeToInt - Given the fp compare operand, return true if it is suitable |
4553 | /// to morph to an integer compare sequence. |
4554 | static bool canChangeToInt(SDValue Op, bool &SeenZero, |
4555 | const ARMSubtarget *Subtarget) { |
4556 | SDNode *N = Op.getNode(); |
4557 | if (!N->hasOneUse()) |
4558 | // Otherwise it requires moving the value from fp to integer registers. |
4559 | return false; |
4560 | if (!N->getNumValues()) |
4561 | return false; |
4562 | EVT VT = Op.getValueType(); |
4563 | if (VT != MVT::f32 && !Subtarget->isFPBrccSlow()) |
4564 | // f32 case is generally profitable. f64 case only makes sense when vcmpe + |
4565 | // vmrs are very slow, e.g. cortex-a8. |
4566 | return false; |
4567 | |
4568 | if (isFloatingPointZero(Op)) { |
4569 | SeenZero = true; |
4570 | return true; |
4571 | } |
4572 | return ISD::isNormalLoad(N); |
4573 | } |
4574 | |
4575 | static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) { |
4576 | if (isFloatingPointZero(Op)) |
4577 | return DAG.getConstant(0, SDLoc(Op), MVT::i32); |
4578 | |
4579 | if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) |
4580 | return DAG.getLoad(MVT::i32, SDLoc(Op), Ld->getChain(), Ld->getBasePtr(), |
4581 | Ld->getPointerInfo(), Ld->getAlignment(), |
4582 | Ld->getMemOperand()->getFlags()); |
4583 | |
4584 | llvm_unreachable("Unknown VFP cmp argument!"); |
4585 | } |
4586 | |
4587 | static void expandf64Toi32(SDValue Op, SelectionDAG &DAG, |
4588 | SDValue &RetVal1, SDValue &RetVal2) { |
4589 | SDLoc dl(Op); |
4590 | |
4591 | if (isFloatingPointZero(Op)) { |
4592 | RetVal1 = DAG.getConstant(0, dl, MVT::i32); |
4593 | RetVal2 = DAG.getConstant(0, dl, MVT::i32); |
4594 | return; |
4595 | } |
4596 | |
4597 | if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) { |
4598 | SDValue Ptr = Ld->getBasePtr(); |
4599 | RetVal1 = |
4600 | DAG.getLoad(MVT::i32, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(), |
4601 | Ld->getAlignment(), Ld->getMemOperand()->getFlags()); |
4602 | |
4603 | EVT PtrType = Ptr.getValueType(); |
4604 | unsigned NewAlign = MinAlign(Ld->getAlignment(), 4); |
4605 | SDValue NewPtr = DAG.getNode(ISD::ADD, dl, |
4606 | PtrType, Ptr, DAG.getConstant(4, dl, PtrType)); |
4607 | RetVal2 = DAG.getLoad(MVT::i32, dl, Ld->getChain(), NewPtr, |
4608 | Ld->getPointerInfo().getWithOffset(4), NewAlign, |
4609 | Ld->getMemOperand()->getFlags()); |
4610 | return; |
4611 | } |
4612 | |
4613 | llvm_unreachable("Unknown VFP cmp argument!"); |
4614 | } |
4615 | |
4616 | /// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some |
4617 | /// f32 and even f64 comparisons to integer ones. |
4618 | SDValue |
4619 | ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const { |
4620 | SDValue Chain = Op.getOperand(0); |
4621 | ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); |
4622 | SDValue LHS = Op.getOperand(2); |
4623 | SDValue RHS = Op.getOperand(3); |
4624 | SDValue Dest = Op.getOperand(4); |
4625 | SDLoc dl(Op); |
4626 | |
4627 | bool LHSSeenZero = false; |
4628 | bool LHSOk = canChangeToInt(LHS, LHSSeenZero, Subtarget); |
4629 | bool RHSSeenZero = false; |
4630 | bool RHSOk = canChangeToInt(RHS, RHSSeenZero, Subtarget); |
4631 | if (LHSOk && RHSOk && (LHSSeenZero || RHSSeenZero)) { |
4632 | // If unsafe fp math optimization is enabled and there are no other uses of |
4633 | // the CMP operands, and the condition code is EQ or NE, we can optimize it |
4634 | // to an integer comparison. |
4635 | if (CC == ISD::SETOEQ) |
4636 | CC = ISD::SETEQ; |
4637 | else if (CC == ISD::SETUNE) |
4638 | CC = ISD::SETNE; |
4639 | |
4640 | SDValue Mask = DAG.getConstant(0x7fffffff, dl, MVT::i32); |
4641 | SDValue ARMcc; |
4642 | if (LHS.getValueType() == MVT::f32) { |
4643 | LHS = DAG.getNode(ISD::AND, dl, MVT::i32, |
4644 | bitcastf32Toi32(LHS, DAG), Mask); |
4645 | RHS = DAG.getNode(ISD::AND, dl, MVT::i32, |
4646 | bitcastf32Toi32(RHS, DAG), Mask); |
4647 | SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); |
4648 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); |
4649 | return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, |
4650 | Chain, Dest, ARMcc, CCR, Cmp); |
4651 | } |
4652 | |
4653 | SDValue LHS1, LHS2; |
4654 | SDValue RHS1, RHS2; |
4655 | expandf64Toi32(LHS, DAG, LHS1, LHS2); |
4656 | expandf64Toi32(RHS, DAG, RHS1, RHS2); |
4657 | LHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, LHS2, Mask); |
4658 | RHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, RHS2, Mask); |
4659 | ARMCC::CondCodes CondCode = IntCCToARMCC(CC); |
4660 | ARMcc = DAG.getConstant(CondCode, dl, MVT::i32); |
4661 | SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue); |
4662 | SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest }; |
4663 | return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops); |
4664 | } |
4665 | |
4666 | return SDValue(); |
4667 | } |
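
// A hedged scalar sketch of the identity OptimizeVFPBrcond relies on, not
// part of the lowering itself: with unsafe FP math, when one operand is a
// floating-point zero, "a == b" can be decided on the integer bit images
// once the sign bit is masked off, so -0.0 still compares equal to +0.0.
// The helper name fpEqAgainstZero is hypothetical, for illustration only.
#include <cstdint>
#include <cstring>

static bool fpEqAgainstZero(float A, float B) {
  uint32_t IA, IB;
  std::memcpy(&IA, &A, sizeof(IA));   // bit-level image of A
  std::memcpy(&IB, &B, sizeof(IB));   // bit-level image of B
  const uint32_t Mask = 0x7fffffff;   // clear the sign bit, as the DAG code does
  return (IA & Mask) == (IB & Mask);  // valid because one side is +/-0.0
}
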
4668 | |
4669 | SDValue ARMTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { |
4670 | SDValue Chain = Op.getOperand(0); |
4671 | SDValue Cond = Op.getOperand(1); |
4672 | SDValue Dest = Op.getOperand(2); |
4673 | SDLoc dl(Op); |
4674 | |
4675 | // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch |
4676 | // instruction. |
4677 | unsigned Opc = Cond.getOpcode(); |
4678 | if (Cond.getResNo() == 1 && |
4679 | (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO || |
4680 | Opc == ISD::USUBO || Opc == ISD::SMULO || Opc == ISD::UMULO)) { |
4681 | // Only lower legal XALUO ops. |
4682 | if (!DAG.getTargetLoweringInfo().isTypeLegal(Cond->getValueType(0))) |
4683 | return SDValue(); |
4684 | |
4685 | // The actual operation with overflow check. |
4686 | SDValue Value, OverflowCmp; |
4687 | SDValue ARMcc; |
4688 | std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc); |
4689 | |
4690 | // Reverse the condition code. |
4691 | ARMCC::CondCodes CondCode = |
4692 | (ARMCC::CondCodes)cast<const ConstantSDNode>(ARMcc)->getZExtValue(); |
4693 | CondCode = ARMCC::getOppositeCondition(CondCode); |
4694 | ARMcc = DAG.getConstant(CondCode, SDLoc(ARMcc), MVT::i32); |
4695 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); |
4696 | |
4697 | return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, Chain, Dest, ARMcc, CCR, |
4698 | OverflowCmp); |
4699 | } |
4700 | |
4701 | return SDValue(); |
4702 | } |
4703 | |
4704 | SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { |
4705 | SDValue Chain = Op.getOperand(0); |
4706 | ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); |
4707 | SDValue LHS = Op.getOperand(2); |
4708 | SDValue RHS = Op.getOperand(3); |
4709 | SDValue Dest = Op.getOperand(4); |
4710 | SDLoc dl(Op); |
4711 | |
4712 | if (Subtarget->isFPOnlySP() && LHS.getValueType() == MVT::f64) { |
4713 | DAG.getTargetLoweringInfo().softenSetCCOperands(DAG, MVT::f64, LHS, RHS, CC, |
4714 | dl); |
4715 | |
4716 | // If softenSetCCOperands only returned one value, we should compare it to |
4717 | // zero. |
4718 | if (!RHS.getNode()) { |
4719 | RHS = DAG.getConstant(0, dl, LHS.getValueType()); |
4720 | CC = ISD::SETNE; |
4721 | } |
4722 | } |
4723 | |
4724 | // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch |
4725 | // instruction. |
4726 | unsigned Opc = LHS.getOpcode(); |
4727 | if (LHS.getResNo() == 1 && (isOneConstant(RHS) || isNullConstant(RHS)) && |
4728 | (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO || |
4729 | Opc == ISD::USUBO || Opc == ISD::SMULO || Opc == ISD::UMULO) && |
4730 | (CC == ISD::SETEQ || CC == ISD::SETNE)) { |
4731 | // Only lower legal XALUO ops. |
4732 | if (!DAG.getTargetLoweringInfo().isTypeLegal(LHS->getValueType(0))) |
4733 | return SDValue(); |
4734 | |
4735 | // The actual operation with overflow check. |
4736 | SDValue Value, OverflowCmp; |
4737 | SDValue ARMcc; |
4738 | std::tie(Value, OverflowCmp) = getARMXALUOOp(LHS.getValue(0), DAG, ARMcc); |
4739 | |
4740 | if ((CC == ISD::SETNE) != isOneConstant(RHS)) { |
4741 | // Reverse the condition code. |
4742 | ARMCC::CondCodes CondCode = |
4743 | (ARMCC::CondCodes)cast<const ConstantSDNode>(ARMcc)->getZExtValue(); |
4744 | CondCode = ARMCC::getOppositeCondition(CondCode); |
4745 | ARMcc = DAG.getConstant(CondCode, SDLoc(ARMcc), MVT::i32); |
4746 | } |
4747 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); |
4748 | |
4749 | return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, Chain, Dest, ARMcc, CCR, |
4750 | OverflowCmp); |
4751 | } |
4752 | |
4753 | if (LHS.getValueType() == MVT::i32) { |
4754 | SDValue ARMcc; |
4755 | SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); |
4756 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); |
4757 | return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, |
4758 | Chain, Dest, ARMcc, CCR, Cmp); |
4759 | } |
4760 | |
4761 | if (getTargetMachine().Options.UnsafeFPMath && |
4762 | (CC == ISD::SETEQ || CC == ISD::SETOEQ || |
4763 | CC == ISD::SETNE || CC == ISD::SETUNE)) { |
4764 | if (SDValue Result = OptimizeVFPBrcond(Op, DAG)) |
4765 | return Result; |
4766 | } |
4767 | |
4768 | ARMCC::CondCodes CondCode, CondCode2; |
4769 | bool InvalidOnQNaN; |
4770 | FPCCToARMCC(CC, CondCode, CondCode2, InvalidOnQNaN); |
4771 | |
4772 | SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32); |
4773 | SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl, InvalidOnQNaN); |
4774 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); |
4775 | SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue); |
4776 | SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp }; |
4777 | SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops); |
4778 | if (CondCode2 != ARMCC::AL) { |
4779 | ARMcc = DAG.getConstant(CondCode2, dl, MVT::i32); |
4780 | SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) }; |
4781 | Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops); |
4782 | } |
4783 | return Res; |
4784 | } |
4785 | |
4786 | SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const { |
4787 | SDValue Chain = Op.getOperand(0); |
4788 | SDValue Table = Op.getOperand(1); |
4789 | SDValue Index = Op.getOperand(2); |
4790 | SDLoc dl(Op); |
4791 | |
4792 | EVT PTy = getPointerTy(DAG.getDataLayout()); |
4793 | JumpTableSDNode *JT = cast<JumpTableSDNode>(Table); |
4794 | SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy); |
4795 | Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI); |
4796 | Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, dl, PTy)); |
4797 | SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Table, Index); |
4798 | if (Subtarget->isThumb2() || (Subtarget->hasV8MBaselineOps() && Subtarget->isThumb())) { |
4799 |     // Thumb2 and ARMv8-M use a two-level jump: the first branch jumps into
4800 |     // the jump table, which then jumps to the actual destination. This also
4801 |     // makes it easier to translate to TBB / TBH later (Thumb2 only).
4802 | // FIXME: This might not work if the function is extremely large. |
4803 | return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain, |
4804 | Addr, Op.getOperand(2), JTI); |
4805 | } |
4806 | if (isPositionIndependent() || Subtarget->isROPI()) { |
4807 | Addr = |
4808 | DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr, |
4809 | MachinePointerInfo::getJumpTable(DAG.getMachineFunction())); |
4810 | Chain = Addr.getValue(1); |
4811 | Addr = DAG.getNode(ISD::ADD, dl, PTy, Table, Addr); |
4812 | return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI); |
4813 | } else { |
4814 | Addr = |
4815 | DAG.getLoad(PTy, dl, Chain, Addr, |
4816 | MachinePointerInfo::getJumpTable(DAG.getMachineFunction())); |
4817 | Chain = Addr.getValue(1); |
4818 | return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI); |
4819 | } |
4820 | } |
4821 | |
4822 | static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) { |
4823 | EVT VT = Op.getValueType(); |
4824 | SDLoc dl(Op); |
4825 | |
4826 | if (Op.getValueType().getVectorElementType() == MVT::i32) { |
4827 | if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::f32) |
4828 | return Op; |
4829 | return DAG.UnrollVectorOp(Op.getNode()); |
4830 | } |
4831 | |
4832 |   assert(Op.getOperand(0).getValueType() == MVT::v4f32 &&
4833 |          "Invalid type for custom lowering!");
4834 | if (VT != MVT::v4i16) |
4835 | return DAG.UnrollVectorOp(Op.getNode()); |
4836 | |
4837 | Op = DAG.getNode(Op.getOpcode(), dl, MVT::v4i32, Op.getOperand(0)); |
4838 | return DAG.getNode(ISD::TRUNCATE, dl, VT, Op); |
4839 | } |
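
// A hedged illustration of the two-step path above, assuming a v4f32 input:
//   t1: v4i32 = fp_to_sint v4f32 %x   // legal vector convert
//   t2: v4i16 = truncate t1           // narrow to the requested type
// Any other illegal element type falls back to UnrollVectorOp, i.e. a
// scalarized conversion. LowerVectorINT_TO_FP below is the mirror image,
// extending v4i16 to v4i32 before converting.
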
4840 | |
4841 | SDValue ARMTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const { |
4842 | EVT VT = Op.getValueType(); |
4843 | if (VT.isVector()) |
4844 | return LowerVectorFP_TO_INT(Op, DAG); |
4845 | if (Subtarget->isFPOnlySP() && Op.getOperand(0).getValueType() == MVT::f64) { |
4846 | RTLIB::Libcall LC; |
4847 | if (Op.getOpcode() == ISD::FP_TO_SINT) |
4848 | LC = RTLIB::getFPTOSINT(Op.getOperand(0).getValueType(), |
4849 | Op.getValueType()); |
4850 | else |
4851 | LC = RTLIB::getFPTOUINT(Op.getOperand(0).getValueType(), |
4852 | Op.getValueType()); |
4853 | return makeLibCall(DAG, LC, Op.getValueType(), Op.getOperand(0), |
4854 | /*isSigned*/ false, SDLoc(Op)).first; |
4855 | } |
4856 | |
4857 | return Op; |
4858 | } |
4859 | |
4860 | static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) { |
4861 | EVT VT = Op.getValueType(); |
4862 | SDLoc dl(Op); |
4863 | |
4864 | if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i32) { |
4865 | if (VT.getVectorElementType() == MVT::f32) |
4866 | return Op; |
4867 | return DAG.UnrollVectorOp(Op.getNode()); |
4868 | } |
4869 | |
4870 |   assert(Op.getOperand(0).getValueType() == MVT::v4i16 &&
4871 |          "Invalid type for custom lowering!");
4872 | if (VT != MVT::v4f32) |
4873 | return DAG.UnrollVectorOp(Op.getNode()); |
4874 | |
4875 | unsigned CastOpc; |
4876 | unsigned Opc; |
4877 | switch (Op.getOpcode()) { |
4878 |   default: llvm_unreachable("Invalid opcode!");
4879 | case ISD::SINT_TO_FP: |
4880 | CastOpc = ISD::SIGN_EXTEND; |
4881 | Opc = ISD::SINT_TO_FP; |
4882 | break; |
4883 | case ISD::UINT_TO_FP: |
4884 | CastOpc = ISD::ZERO_EXTEND; |
4885 | Opc = ISD::UINT_TO_FP; |
4886 | break; |
4887 | } |
4888 | |
4889 | Op = DAG.getNode(CastOpc, dl, MVT::v4i32, Op.getOperand(0)); |
4890 | return DAG.getNode(Opc, dl, VT, Op); |
4891 | } |
4892 | |
4893 | SDValue ARMTargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const { |
4894 | EVT VT = Op.getValueType(); |
4895 | if (VT.isVector()) |
4896 | return LowerVectorINT_TO_FP(Op, DAG); |
4897 | if (Subtarget->isFPOnlySP() && Op.getValueType() == MVT::f64) { |
4898 | RTLIB::Libcall LC; |
4899 | if (Op.getOpcode() == ISD::SINT_TO_FP) |
4900 | LC = RTLIB::getSINTTOFP(Op.getOperand(0).getValueType(), |
4901 | Op.getValueType()); |
4902 | else |
4903 | LC = RTLIB::getUINTTOFP(Op.getOperand(0).getValueType(), |
4904 | Op.getValueType()); |
4905 | return makeLibCall(DAG, LC, Op.getValueType(), Op.getOperand(0), |
4906 | /*isSigned*/ false, SDLoc(Op)).first; |
4907 | } |
4908 | |
4909 | return Op; |
4910 | } |
4911 | |
4912 | SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { |
4913 | // Implement fcopysign with a fabs and a conditional fneg. |
4914 | SDValue Tmp0 = Op.getOperand(0); |
4915 | SDValue Tmp1 = Op.getOperand(1); |
4916 | SDLoc dl(Op); |
4917 | EVT VT = Op.getValueType(); |
4918 | EVT SrcVT = Tmp1.getValueType(); |
4919 | bool InGPR = Tmp0.getOpcode() == ISD::BITCAST || |
4920 | Tmp0.getOpcode() == ARMISD::VMOVDRR; |
4921 | bool UseNEON = !InGPR && Subtarget->hasNEON(); |
4922 | |
4923 | if (UseNEON) { |
4924 | // Use VBSL to copy the sign bit. |
4925 | unsigned EncodedVal = ARM_AM::createNEONModImm(0x6, 0x80); |
4926 | SDValue Mask = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v2i32, |
4927 | DAG.getTargetConstant(EncodedVal, dl, MVT::i32)); |
4928 | EVT OpVT = (VT == MVT::f32) ? MVT::v2i32 : MVT::v1i64; |
4929 | if (VT == MVT::f64) |
4930 | Mask = DAG.getNode(ARMISD::VSHL, dl, OpVT, |
4931 | DAG.getNode(ISD::BITCAST, dl, OpVT, Mask), |
4932 | DAG.getConstant(32, dl, MVT::i32)); |
4933 | else /*if (VT == MVT::f32)*/ |
4934 | Tmp0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp0); |
4935 | if (SrcVT == MVT::f32) { |
4936 | Tmp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp1); |
4937 | if (VT == MVT::f64) |
4938 | Tmp1 = DAG.getNode(ARMISD::VSHL, dl, OpVT, |
4939 | DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1), |
4940 | DAG.getConstant(32, dl, MVT::i32)); |
4941 | } else if (VT == MVT::f32) |
4942 | Tmp1 = DAG.getNode(ARMISD::VSHRu, dl, MVT::v1i64, |
4943 | DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Tmp1), |
4944 | DAG.getConstant(32, dl, MVT::i32)); |
4945 | Tmp0 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp0); |
4946 | Tmp1 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1); |
4947 | |
4948 | SDValue AllOnes = DAG.getTargetConstant(ARM_AM::createNEONModImm(0xe, 0xff), |
4949 | dl, MVT::i32); |
4950 | AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v8i8, AllOnes); |
4951 | SDValue MaskNot = DAG.getNode(ISD::XOR, dl, OpVT, Mask, |
4952 | DAG.getNode(ISD::BITCAST, dl, OpVT, AllOnes)); |
4953 | |
4954 | SDValue Res = DAG.getNode(ISD::OR, dl, OpVT, |
4955 | DAG.getNode(ISD::AND, dl, OpVT, Tmp1, Mask), |
4956 | DAG.getNode(ISD::AND, dl, OpVT, Tmp0, MaskNot)); |
4957 | if (VT == MVT::f32) { |
4958 | Res = DAG.getNode(ISD::BITCAST, dl, MVT::v2f32, Res); |
4959 | Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res, |
4960 | DAG.getConstant(0, dl, MVT::i32)); |
4961 | } else { |
4962 | Res = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Res); |
4963 | } |
4964 | |
4965 | return Res; |
4966 | } |
4967 | |
4968 | // Bitcast operand 1 to i32. |
4969 | if (SrcVT == MVT::f64) |
4970 | Tmp1 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32), |
4971 | Tmp1).getValue(1); |
4972 | Tmp1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp1); |
4973 | |
4974 | // Or in the signbit with integer operations. |
4975 | SDValue Mask1 = DAG.getConstant(0x80000000, dl, MVT::i32); |
4976 | SDValue Mask2 = DAG.getConstant(0x7fffffff, dl, MVT::i32); |
4977 | Tmp1 = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp1, Mask1); |
4978 | if (VT == MVT::f32) { |
4979 | Tmp0 = DAG.getNode(ISD::AND, dl, MVT::i32, |
4980 | DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp0), Mask2); |
4981 | return DAG.getNode(ISD::BITCAST, dl, MVT::f32, |
4982 | DAG.getNode(ISD::OR, dl, MVT::i32, Tmp0, Tmp1)); |
4983 | } |
4984 | |
4985 | // f64: Or the high part with signbit and then combine two parts. |
4986 | Tmp0 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32), |
4987 | Tmp0); |
4988 | SDValue Lo = Tmp0.getValue(0); |
4989 | SDValue Hi = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp0.getValue(1), Mask2); |
4990 | Hi = DAG.getNode(ISD::OR, dl, MVT::i32, Hi, Tmp1); |
4991 | return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi); |
4992 | } |
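
// A hedged scalar sketch of the integer (non-NEON) path above: copysign
// keeps the magnitude bits of the first operand and takes the sign bit of
// the second, using the same 0x7fffffff / 0x80000000 masks as the lowering.
// The helper name copySignF32 is hypothetical, for illustration only.
#include <cstdint>
#include <cstring>

static float copySignF32(float Mag, float Sgn) {
  uint32_t IMag, ISgn;
  std::memcpy(&IMag, &Mag, sizeof(IMag));
  std::memcpy(&ISgn, &Sgn, sizeof(ISgn));
  uint32_t Bits = (IMag & 0x7fffffffu)   // magnitude of the first operand
                | (ISgn & 0x80000000u);  // sign of the second operand
  float Res;
  std::memcpy(&Res, &Bits, sizeof(Res));
  return Res;
}
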
4993 | |
4994 | SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{ |
4995 | MachineFunction &MF = DAG.getMachineFunction(); |
4996 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
4997 | MFI.setReturnAddressIsTaken(true); |
4998 | |
4999 | if (verifyReturnAddressArgumentIsConstant(Op, DAG)) |
5000 | return SDValue(); |
5001 | |
5002 | EVT VT = Op.getValueType(); |
5003 | SDLoc dl(Op); |
5004 | unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); |
5005 | if (Depth) { |
5006 | SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); |
5007 | SDValue Offset = DAG.getConstant(4, dl, MVT::i32); |
5008 | return DAG.getLoad(VT, dl, DAG.getEntryNode(), |
5009 | DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset), |
5010 | MachinePointerInfo()); |
5011 | } |
5012 | |
5013 | // Return LR, which contains the return address. Mark it an implicit live-in. |
5014 | unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32)); |
5015 | return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT); |
5016 | } |
5017 | |
5018 | SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { |
5019 | const ARMBaseRegisterInfo &ARI = |
5020 | *static_cast<const ARMBaseRegisterInfo*>(RegInfo); |
5021 | MachineFunction &MF = DAG.getMachineFunction(); |
5022 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
5023 | MFI.setFrameAddressIsTaken(true); |
5024 | |
5025 | EVT VT = Op.getValueType(); |
5026 | SDLoc dl(Op); // FIXME probably not meaningful |
5027 | unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); |
5028 | unsigned FrameReg = ARI.getFrameRegister(MF); |
5029 | SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); |
5030 | while (Depth--) |
5031 | FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, |
5032 | MachinePointerInfo()); |
5033 | return FrameAddr; |
5034 | } |
5035 | |
5036 | // FIXME? Maybe this could be a TableGen attribute on some registers and |
5037 | // this table could be generated automatically from RegInfo. |
5038 | unsigned ARMTargetLowering::getRegisterByName(const char* RegName, EVT VT, |
5039 | SelectionDAG &DAG) const { |
5040 | unsigned Reg = StringSwitch<unsigned>(RegName) |
5041 | .Case("sp", ARM::SP) |
5042 | .Default(0); |
5043 | if (Reg) |
5044 | return Reg; |
5045 | report_fatal_error(Twine("Invalid register name \"" |
5046 | + StringRef(RegName) + "\".")); |
5047 | } |
5048 | |
5049 | // The result is a 64-bit value, so split it into two 32-bit values and
5050 | // return them as a pair of values.
5051 | static void ExpandREAD_REGISTER(SDNode *N, SmallVectorImpl<SDValue> &Results, |
5052 | SelectionDAG &DAG) { |
5053 | SDLoc DL(N); |
5054 | |
5055 | // This function is only supposed to be called for i64 type destination. |
5056 |   assert(N->getValueType(0) == MVT::i64
5057 |          && "ExpandREAD_REGISTER called for non-i64 type result.");
5058 | |
5059 | SDValue Read = DAG.getNode(ISD::READ_REGISTER, DL, |
5060 | DAG.getVTList(MVT::i32, MVT::i32, MVT::Other), |
5061 | N->getOperand(0), |
5062 | N->getOperand(1)); |
5063 | |
5064 | Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Read.getValue(0), |
5065 | Read.getValue(1))); |
5066 | Results.push_back(Read.getOperand(0)); |
5067 | } |
5068 | |
5069 | /// \p BC is a bitcast that is about to be turned into a VMOVDRR. |
5070 | /// When \p DstVT, the destination type of \p BC, is on the vector |
5071 | /// register bank and the source of bitcast, \p Op, operates on the same bank, |
5072 | /// it might be possible to combine them, such that everything stays on the |
5073 | /// vector register bank. |
5074 | /// \return The node that would replace \p BC, if the combine
5075 | /// is possible.
5076 | static SDValue CombineVMOVDRRCandidateWithVecOp(const SDNode *BC, |
5077 | SelectionDAG &DAG) { |
5078 | SDValue Op = BC->getOperand(0); |
5079 | EVT DstVT = BC->getValueType(0); |
5080 | |
5081 | // The only vector instruction that can produce a scalar (remember, |
5082 | // since the bitcast was about to be turned into VMOVDRR, the source |
5083 | // type is i64) from a vector is EXTRACT_VECTOR_ELT. |
5084 | // Moreover, we can do this combine only if there is one use. |
5085 | // Finally, if the destination type is not a vector, there is not |
5086 |   // much point in forcing everything onto the vector bank.
5087 | if (!DstVT.isVector() || Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT || |
5088 | !Op.hasOneUse()) |
5089 | return SDValue(); |
5090 | |
5091 | // If the index is not constant, we will introduce an additional |
5092 | // multiply that will stick. |
5093 | // Give up in that case. |
5094 | ConstantSDNode *Index = dyn_cast<ConstantSDNode>(Op.getOperand(1)); |
5095 | if (!Index) |
5096 | return SDValue(); |
5097 | unsigned DstNumElt = DstVT.getVectorNumElements(); |
5098 | |
5099 | // Compute the new index. |
5100 | const APInt &APIntIndex = Index->getAPIntValue(); |
5101 | APInt NewIndex(APIntIndex.getBitWidth(), DstNumElt); |
5102 | NewIndex *= APIntIndex; |
5103 | // Check if the new constant index fits into i32. |
5104 | if (NewIndex.getBitWidth() > 32) |
5105 | return SDValue(); |
5106 | |
5107 | // vMTy bitcast(i64 extractelt vNi64 src, i32 index) -> |
5108 | // vMTy extractsubvector vNxMTy (bitcast vNi64 src), i32 index*M) |
5109 | SDLoc dl(Op); |
5110 | SDValue ExtractSrc = Op.getOperand(0); |
5111 | EVT VecVT = EVT::getVectorVT( |
5112 | *DAG.getContext(), DstVT.getScalarType(), |
5113 | ExtractSrc.getValueType().getVectorNumElements() * DstNumElt); |
5114 | SDValue BitCast = DAG.getNode(ISD::BITCAST, dl, VecVT, ExtractSrc); |
5115 | return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DstVT, BitCast, |
5116 | DAG.getConstant(NewIndex.getZExtValue(), dl, MVT::i32)); |
5117 | } |
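
// A hedged worked instance of the pattern above:
//   v2f32 (bitcast (i64 (extractelt v2i64 %src, 1)))
// has DstNumElt = 2 and Index = 1, so NewIndex = 1 * 2 = 2 and
// VecVT = v4f32 (2 source elements * 2 destination elements), giving
//   v2f32 (extract_subvector (v4f32 (bitcast v2i64 %src)), 2)
// i.e. the upper half of %src is selected without leaving the vector bank.
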
5118 | |
5119 | /// ExpandBITCAST - If the target supports VFP, this function is called to |
5120 | /// expand a bit convert where either the source or destination type is i64 to |
5121 | /// use a VMOVDRR or VMOVRRD node. This should not be done when the non-i64 |
5122 | /// operand type is illegal (e.g., v2f32 for a target that doesn't support |
5123 | /// vectors), since the legalizer won't know what to do with that. |
5124 | static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG, |
5125 | const ARMSubtarget *Subtarget) { |
5126 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
5127 | SDLoc dl(N); |
5128 | SDValue Op = N->getOperand(0); |
5129 | |
5130 | // This function is only supposed to be called for i64 types, either as the |
5131 | // source or destination of the bit convert. |
5132 | EVT SrcVT = Op.getValueType(); |
5133 | EVT DstVT = N->getValueType(0); |
5134 | const bool HasFullFP16 = Subtarget->hasFullFP16(); |
5135 | |
5136 | if (SrcVT == MVT::f32 && DstVT == MVT::i32) { |
5137 | // FullFP16: half values are passed in S-registers, and we don't |
5138 |     // need any of the bitcasts and moves:
5139 | // |
5140 | // t2: f32,ch = CopyFromReg t0, Register:f32 %0 |
5141 | // t5: i32 = bitcast t2 |
5142 | // t18: f16 = ARMISD::VMOVhr t5 |
5143 | if (Op.getOpcode() != ISD::CopyFromReg || |
5144 | Op.getValueType() != MVT::f32) |
5145 | return SDValue(); |
5146 | |
5147 | auto Move = N->use_begin(); |
5148 | if (Move->getOpcode() != ARMISD::VMOVhr) |
5149 | return SDValue(); |
5150 | |
5151 | SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1) }; |
5152 | SDValue Copy = DAG.getNode(ISD::CopyFromReg, SDLoc(Op), MVT::f16, Ops); |
5153 | DAG.ReplaceAllUsesWith(*Move, &Copy); |
5154 | return Copy; |
5155 | } |
5156 | |
5157 | if (SrcVT == MVT::i16 && DstVT == MVT::f16) { |
5158 | if (!HasFullFP16) |
5159 | return SDValue(); |
5160 | // SoftFP: read half-precision arguments: |
5161 | // |
5162 | // t2: i32,ch = ... |
5163 | // t7: i16 = truncate t2 <~~~~ Op |
5164 | // t8: f16 = bitcast t7 <~~~~ N |
5165 | // |
5166 | if (Op.getOperand(0).getValueType() == MVT::i32) |
5167 | return DAG.getNode(ARMISD::VMOVhr, SDLoc(Op), |
5168 | MVT::f16, Op.getOperand(0)); |
5169 | |
5170 | return SDValue(); |
5171 | } |
5172 | |
5173 | // Half-precision return values |
5174 | if (SrcVT == MVT::f16 && DstVT == MVT::i16) { |
5175 | if (!HasFullFP16) |
5176 | return SDValue(); |
5177 | // |
5178 | // t11: f16 = fadd t8, t10 |
5179 | // t12: i16 = bitcast t11 <~~~ SDNode N |
5180 | // t13: i32 = zero_extend t12 |
5181 | // t16: ch,glue = CopyToReg t0, Register:i32 %r0, t13 |
5182 | // t17: ch = ARMISD::RET_FLAG t16, Register:i32 %r0, t16:1 |
5183 | // |
5184 | // transform this into: |
5185 | // |
5186 | // t20: i32 = ARMISD::VMOVrh t11 |
5187 | // t16: ch,glue = CopyToReg t0, Register:i32 %r0, t20 |
5188 | // |
5189 | auto ZeroExtend = N->use_begin(); |
5190 | if (N->use_size() != 1 || ZeroExtend->getOpcode() != ISD::ZERO_EXTEND || |
5191 | ZeroExtend->getValueType(0) != MVT::i32) |
5192 | return SDValue(); |
5193 | |
5194 | auto Copy = ZeroExtend->use_begin(); |
5195 | if (Copy->getOpcode() == ISD::CopyToReg && |
5196 | Copy->use_begin()->getOpcode() == ARMISD::RET_FLAG) { |
5197 | SDValue Cvt = DAG.getNode(ARMISD::VMOVrh, SDLoc(Op), MVT::i32, Op); |
5198 | DAG.ReplaceAllUsesWith(*ZeroExtend, &Cvt); |
5199 | return Cvt; |
5200 | } |
5201 | return SDValue(); |
5202 | } |
5203 | |
5204 | if (!(SrcVT == MVT::i64 || DstVT == MVT::i64)) |
5205 | return SDValue(); |
5206 | |
5207 | // Turn i64->f64 into VMOVDRR. |
5208 | if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) { |
5209 | // Do not force values to GPRs (this is what VMOVDRR does for the inputs) |
5210 | // if we can combine the bitcast with its source. |
5211 | if (SDValue Val = CombineVMOVDRRCandidateWithVecOp(N, DAG)) |
5212 | return Val; |
5213 | |
5214 | SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, |
5215 | DAG.getConstant(0, dl, MVT::i32)); |
5216 | SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, |
5217 | DAG.getConstant(1, dl, MVT::i32)); |
5218 | return DAG.getNode(ISD::BITCAST, dl, DstVT, |
5219 | DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi)); |
5220 | } |
5221 | |
5222 | // Turn f64->i64 into VMOVRRD. |
5223 | if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) { |
5224 | SDValue Cvt; |
5225 | if (DAG.getDataLayout().isBigEndian() && SrcVT.isVector() && |
5226 | SrcVT.getVectorNumElements() > 1) |
5227 | Cvt = DAG.getNode(ARMISD::VMOVRRD, dl, |
5228 | DAG.getVTList(MVT::i32, MVT::i32), |
5229 | DAG.getNode(ARMISD::VREV64, dl, SrcVT, Op)); |
5230 | else |
5231 | Cvt = DAG.getNode(ARMISD::VMOVRRD, dl, |
5232 | DAG.getVTList(MVT::i32, MVT::i32), Op); |
5233 | // Merge the pieces into a single i64 value. |
5234 | return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1)); |
5235 | } |
5236 | |
5237 | return SDValue(); |
5238 | } |
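
// A hedged illustration of the i64 <-> f64 directions handled above:
//   t2: f64 = bitcast t1:i64
// becomes
//   tLo: i32 = extract_element t1, 0
//   tHi: i32 = extract_element t1, 1
//   t2:  f64 = ARMISD::VMOVDRR tLo, tHi
// and f64 -> i64 is the mirror image via ARMISD::VMOVRRD plus BUILD_PAIR,
// with an extra VREV64 on big-endian multi-element vector sources.
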
5239 | |
5240 | /// getZeroVector - Returns a vector of specified type with all zero elements. |
5241 | /// Zero vectors are used to represent vector negation and in those cases |
5242 | /// will be implemented with the NEON VNEG instruction. However, VNEG does |
5243 | /// not support i64 elements, so sometimes the zero vectors will need to be |
5244 | /// explicitly constructed. Regardless, use a canonical VMOV to create the |
5245 | /// zero vector. |
5246 | static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) { |
5247 |   assert(VT.isVector() && "Expected a vector type");
5248 | // The canonical modified immediate encoding of a zero vector is....0! |
5249 | SDValue EncodedVal = DAG.getTargetConstant(0, dl, MVT::i32); |
5250 | EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32; |
5251 | SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal); |
5252 | return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); |
5253 | } |
5254 | |
5255 | /// LowerShiftRightParts - Lower SRA_PARTS and SRL_PARTS, which return two
5256 | /// i32 values and take a 2 x i32 value to shift plus a shift amount.
5257 | SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op, |
5258 | SelectionDAG &DAG) const { |
5259 |   assert(Op.getNumOperands() == 3 && "Not a double-shift!");
5260 | EVT VT = Op.getValueType(); |
5261 | unsigned VTBits = VT.getSizeInBits(); |
5262 | SDLoc dl(Op); |
5263 | SDValue ShOpLo = Op.getOperand(0); |
5264 | SDValue ShOpHi = Op.getOperand(1); |
5265 | SDValue ShAmt = Op.getOperand(2); |
5266 | SDValue ARMcc; |
5267 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); |
5268 | unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL; |
5269 | |
5270 |   assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);
5271 | |
5272 | SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, |
5273 | DAG.getConstant(VTBits, dl, MVT::i32), ShAmt); |
5274 | SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt); |
5275 | SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt, |
5276 | DAG.getConstant(VTBits, dl, MVT::i32)); |
5277 | SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt); |
5278 | SDValue LoSmallShift = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); |
5279 | SDValue LoBigShift = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt); |
5280 | SDValue CmpLo = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32), |
5281 | ISD::SETGE, ARMcc, DAG, dl); |
5282 | SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, LoSmallShift, LoBigShift, |
5283 | ARMcc, CCR, CmpLo); |
5284 | |
5285 | SDValue HiSmallShift = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt); |
5286 | SDValue HiBigShift = Opc == ISD::SRA |
5287 | ? DAG.getNode(Opc, dl, VT, ShOpHi, |
5288 | DAG.getConstant(VTBits - 1, dl, VT)) |
5289 | : DAG.getConstant(0, dl, VT); |
5290 | SDValue CmpHi = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32), |
5291 | ISD::SETGE, ARMcc, DAG, dl); |
5292 | SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, HiSmallShift, HiBigShift, |
5293 | ARMcc, CCR, CmpHi); |
5294 | |
5295 | SDValue Ops[2] = { Lo, Hi }; |
5296 | return DAG.getMergeValues(Ops, dl); |
5297 | } |
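
// A hedged scalar model of the selection above (the helper name
// shiftRightParts is hypothetical, and 0 <= Amt < 64 is assumed): for
// Amt < 32 the low word mixes both inputs; for Amt >= 32 the low word is
// the high word shifted by Amt - 32, and the high word is 0 (SRL) or a
// sign smear (SRA). The CMOVs above select between the two cases on the
// sign of ExtraShAmt. LowerShiftLeftParts below is the mirror image.
#include <cstdint>

static void shiftRightParts(uint32_t Lo, uint32_t Hi, unsigned Amt, bool Arith,
                            uint32_t &OutLo, uint32_t &OutHi) {
  if (Amt < 32) {
    OutLo = (Lo >> Amt) | (Amt ? Hi << (32 - Amt) : 0);  // guard the Amt==0 case
    OutHi = Arith ? (uint32_t)((int32_t)Hi >> Amt) : Hi >> Amt;
  } else {
    OutLo = Arith ? (uint32_t)((int32_t)Hi >> (Amt - 32)) : Hi >> (Amt - 32);
    OutHi = Arith ? (uint32_t)((int32_t)Hi >> 31) : 0;   // sign smear or zero
  }
}
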
5298 | |
5299 | /// LowerShiftLeftParts - Lower SHL_PARTS, which returns two
5300 | /// i32 values and takes a 2 x i32 value to shift plus a shift amount.
5301 | SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op, |
5302 | SelectionDAG &DAG) const { |
5303 |   assert(Op.getNumOperands() == 3 && "Not a double-shift!");
5304 | EVT VT = Op.getValueType(); |
5305 | unsigned VTBits = VT.getSizeInBits(); |
5306 | SDLoc dl(Op); |
5307 | SDValue ShOpLo = Op.getOperand(0); |
5308 | SDValue ShOpHi = Op.getOperand(1); |
5309 | SDValue ShAmt = Op.getOperand(2); |
5310 | SDValue ARMcc; |
5311 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); |
5312 | |
5313 |   assert(Op.getOpcode() == ISD::SHL_PARTS);
5314 | SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, |
5315 | DAG.getConstant(VTBits, dl, MVT::i32), ShAmt); |
5316 | SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt); |
5317 | SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt); |
5318 | SDValue HiSmallShift = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); |
5319 | |
5320 | SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt, |
5321 | DAG.getConstant(VTBits, dl, MVT::i32)); |
5322 | SDValue HiBigShift = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt); |
5323 | SDValue CmpHi = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32), |
5324 | ISD::SETGE, ARMcc, DAG, dl); |
5325 | SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, HiSmallShift, HiBigShift, |
5326 | ARMcc, CCR, CmpHi); |
5327 | |
5328 | SDValue CmpLo = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32), |
5329 | ISD::SETGE, ARMcc, DAG, dl); |
5330 | SDValue LoSmallShift = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt); |
5331 | SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, LoSmallShift, |
5332 | DAG.getConstant(0, dl, VT), ARMcc, CCR, CmpLo); |
5333 | |
5334 | SDValue Ops[2] = { Lo, Hi }; |
5335 | return DAG.getMergeValues(Ops, dl); |
5336 | } |
5337 | |
5338 | SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op, |
5339 | SelectionDAG &DAG) const { |
5340 | // The rounding mode is in bits 23:22 of the FPSCR. |
5341 | // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0 |
5342 |   // The formula we use to implement this is ((FPSCR + (1 << 22)) >> 22) & 3,
5343 |   // so that the shift and the AND get folded into a bitfield extract.
5344 | SDLoc dl(Op); |
5345 | SDValue Ops[] = { DAG.getEntryNode(), |
5346 | DAG.getConstant(Intrinsic::arm_get_fpscr, dl, MVT::i32) }; |
5347 | |
5348 | SDValue FPSCR = DAG.getNode(ISD::INTRINSIC_W_CHAIN, dl, MVT::i32, Ops); |
5349 | SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR, |
5350 | DAG.getConstant(1U << 22, dl, MVT::i32)); |
5351 | SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds, |
5352 | DAG.getConstant(22, dl, MVT::i32)); |
5353 | return DAG.getNode(ISD::AND, dl, MVT::i32, RMODE, |
5354 | DAG.getConstant(3, dl, MVT::i32)); |
5355 | } |
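
// A hedged worked check of the formula above. Writing Mode for FPSCR bits
// 23:22 (RN=0, RP=1, RM=2, RZ=3), the add of 1 << 22 cannot be disturbed by
// the lower bits, so after the shift and mask the result is (Mode + 1) & 3:
//   RN(0) -> 1, RP(1) -> 2, RM(2) -> 3, RZ(3) -> 0
// which is exactly the FLT_ROUNDS encoding (0 toward zero, 1 to nearest,
// 2 toward +infinity, 3 toward -infinity).
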
5356 | |
5357 | static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG, |
5358 | const ARMSubtarget *ST) { |
5359 | SDLoc dl(N); |
5360 | EVT VT = N->getValueType(0); |
5361 | if (VT.isVector()) { |
5362 |     assert(ST->hasNEON());
5363 | |
5364 | // Compute the least significant set bit: LSB = X & -X |
5365 | SDValue X = N->getOperand(0); |
5366 | SDValue NX = DAG.getNode(ISD::SUB, dl, VT, getZeroVector(VT, DAG, dl), X); |
5367 | SDValue LSB = DAG.getNode(ISD::AND, dl, VT, X, NX); |
5368 | |
5369 | EVT ElemTy = VT.getVectorElementType(); |
5370 | |
5371 | if (ElemTy == MVT::i8) { |
5372 | // Compute with: cttz(x) = ctpop(lsb - 1) |
5373 | SDValue One = DAG.getNode(ARMISD::VMOVIMM, dl, VT, |
5374 | DAG.getTargetConstant(1, dl, ElemTy)); |
5375 | SDValue Bits = DAG.getNode(ISD::SUB, dl, VT, LSB, One); |
5376 | return DAG.getNode(ISD::CTPOP, dl, VT, Bits); |
5377 | } |
5378 | |
5379 | if ((ElemTy == MVT::i16 || ElemTy == MVT::i32) && |
5380 | (N->getOpcode() == ISD::CTTZ_ZERO_UNDEF)) { |
5381 | // Compute with: cttz(x) = (width - 1) - ctlz(lsb), if x != 0 |
5382 | unsigned NumBits = ElemTy.getSizeInBits(); |
5383 | SDValue WidthMinus1 = |
5384 | DAG.getNode(ARMISD::VMOVIMM, dl, VT, |
5385 | DAG.getTargetConstant(NumBits - 1, dl, ElemTy)); |
5386 | SDValue CTLZ = DAG.getNode(ISD::CTLZ, dl, VT, LSB); |
5387 | return DAG.getNode(ISD::SUB, dl, VT, WidthMinus1, CTLZ); |
5388 | } |
5389 | |
5390 | // Compute with: cttz(x) = ctpop(lsb - 1) |
5391 | |
5392 | // Since we can only compute the number of bits in a byte with vcnt.8, we |
5393 | // have to gather the result with pairwise addition (vpaddl) for i16, i32, |
5394 | // and i64. |
5395 | |
5396 | // Compute LSB - 1. |
5397 | SDValue Bits; |
5398 | if (ElemTy == MVT::i64) { |
5399 | // Load constant 0xffff'ffff'ffff'ffff to register. |
5400 | SDValue FF = DAG.getNode(ARMISD::VMOVIMM, dl, VT, |
5401 | DAG.getTargetConstant(0x1eff, dl, MVT::i32)); |
5402 | Bits = DAG.getNode(ISD::ADD, dl, VT, LSB, FF); |
5403 | } else { |
5404 | SDValue One = DAG.getNode(ARMISD::VMOVIMM, dl, VT, |
5405 | DAG.getTargetConstant(1, dl, ElemTy)); |
5406 | Bits = DAG.getNode(ISD::SUB, dl, VT, LSB, One); |
5407 | } |
5408 | |
5409 | // Count #bits with vcnt.8. |
5410 | EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8; |
5411 | SDValue BitsVT8 = DAG.getNode(ISD::BITCAST, dl, VT8Bit, Bits); |
5412 | SDValue Cnt8 = DAG.getNode(ISD::CTPOP, dl, VT8Bit, BitsVT8); |
5413 | |
5414 | // Gather the #bits with vpaddl (pairwise add.) |
5415 | EVT VT16Bit = VT.is64BitVector() ? MVT::v4i16 : MVT::v8i16; |
5416 | SDValue Cnt16 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT16Bit, |
5417 | DAG.getTargetConstant(Intrinsic::arm_neon_vpaddlu, dl, MVT::i32), |
5418 | Cnt8); |
5419 | if (ElemTy == MVT::i16) |
5420 | return Cnt16; |
5421 | |
5422 | EVT VT32Bit = VT.is64BitVector() ? MVT::v2i32 : MVT::v4i32; |
5423 | SDValue Cnt32 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT32Bit, |
5424 | DAG.getTargetConstant(Intrinsic::arm_neon_vpaddlu, dl, MVT::i32), |
5425 | Cnt16); |
5426 | if (ElemTy == MVT::i32) |
5427 | return Cnt32; |
5428 | |
5429 |     assert(ElemTy == MVT::i64);
5430 | SDValue Cnt64 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, |
5431 | DAG.getTargetConstant(Intrinsic::arm_neon_vpaddlu, dl, MVT::i32), |
5432 | Cnt32); |
5433 | return Cnt64; |
5434 | } |
5435 | |
5436 | if (!ST->hasV6T2Ops()) |
5437 | return SDValue(); |
5438 | |
5439 | SDValue rbit = DAG.getNode(ISD::BITREVERSE, dl, VT, N->getOperand(0)); |
5440 | return DAG.getNode(ISD::CTLZ, dl, VT, rbit); |
5441 | } |
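
// A hedged worked instance of the cttz(x) = ctpop((x & -x) - 1) identity
// used above, for x = 0b0110'1000:
//   -x           = 0b1001'1000   (two's complement)
//   x & -x       = 0b0000'1000   (the least significant set bit)
//   (x & -x) - 1 = 0b0000'0111   (a mask of exactly the trailing zeros)
//   ctpop(0b0000'0111) = 3 = cttz(x)
// The (width - 1) - ctlz(lsb) variant locates the same bit from the other
// end, which is cheaper for i16/i32 elements where VCLZ is available.
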
5442 | |
5443 | /// getCTPOP16BitCounts - Returns a v8i8/v16i8 vector containing the bit-count |
5444 | /// for each 16-bit element from operand, repeated. The basic idea is to |
5445 | /// leverage vcnt to get the 8-bit counts, gather and add the results. |
5446 | /// |
5447 | /// Trace for v4i16: |
5448 | /// input = [v0 v1 v2 v3 ] (vi 16-bit element) |
5449 | /// cast: N0 = [w0 w1 w2 w3 w4 w5 w6 w7] (v0 = [w0 w1], wi 8-bit element) |
5450 | /// vcnt: N1 = [b0 b1 b2 b3 b4 b5 b6 b7] (bi = bit-count of 8-bit element wi) |
5451 | /// vrev: N2 = [b1 b0 b3 b2 b5 b4 b7 b6] |
5452 | /// [b0 b1 b2 b3 b4 b5 b6 b7] |
5453 | /// +[b1 b0 b3 b2 b5 b4 b7 b6] |
5454 | /// N3=N1+N2 = [k0 k0 k1 k1 k2 k2 k3 k3] (k0 = b0+b1 = bit-count of 16-bit v0, |
5455 | /// vuzp: = [k0 k1 k2 k3 k0 k1 k2 k3] each ki is 8-bits) |
5456 | static SDValue getCTPOP16BitCounts(SDNode *N, SelectionDAG &DAG) { |
5457 | EVT VT = N->getValueType(0); |
5458 | SDLoc DL(N); |
5459 | |
5460 | EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8; |
5461 | SDValue N0 = DAG.getNode(ISD::BITCAST, DL, VT8Bit, N->getOperand(0)); |
5462 | SDValue N1 = DAG.getNode(ISD::CTPOP, DL, VT8Bit, N0); |
5463 | SDValue N2 = DAG.getNode(ARMISD::VREV16, DL, VT8Bit, N1); |
5464 | SDValue N3 = DAG.getNode(ISD::ADD, DL, VT8Bit, N1, N2); |
5465 | return DAG.getNode(ARMISD::VUZP, DL, VT8Bit, N3, N3); |
5466 | } |
5467 | |
5468 | /// lowerCTPOP16BitElements - Returns a v4i16/v8i16 vector containing the |
5469 | /// bit-count for each 16-bit element from the operand. We need slightly |
5470 | /// different sequencing for v4i16 and v8i16 to stay within NEON's available |
5471 | /// 64/128-bit registers. |
5472 | /// |
5473 | /// Trace for v4i16: |
5474 | /// input = [v0 v1 v2 v3 ] (vi 16-bit element) |
5475 | /// v8i8: BitCounts = [k0 k1 k2 k3 k0 k1 k2 k3 ] (ki is the bit-count of vi) |
5476 | /// v8i16:Extended = [k0 k1 k2 k3 k0 k1 k2 k3 ] |
5477 | /// v4i16:Extracted = [k0 k1 k2 k3 ] |
5478 | static SDValue lowerCTPOP16BitElements(SDNode *N, SelectionDAG &DAG) { |
5479 | EVT VT = N->getValueType(0); |
5480 | SDLoc DL(N); |
5481 | |
5482 | SDValue BitCounts = getCTPOP16BitCounts(N, DAG); |
5483 | if (VT.is64BitVector()) { |
5484 | SDValue Extended = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, BitCounts); |
5485 | return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i16, Extended, |
5486 | DAG.getIntPtrConstant(0, DL)); |
5487 | } else { |
5488 | SDValue Extracted = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8, |
5489 | BitCounts, DAG.getIntPtrConstant(0, DL)); |
5490 | return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, Extracted); |
5491 | } |
5492 | } |
5493 | |
5494 | /// lowerCTPOP32BitElements - Returns a v2i32/v4i32 vector containing the |
5495 | /// bit-count for each 32-bit element from the operand. The idea here is |
5496 | /// to split the vector into 16-bit elements, leverage the 16-bit count |
5497 | /// routine, and then combine the results. |
5498 | /// |
5499 | /// Trace for v2i32 (v4i32 similar with Extracted/Extended exchanged): |
5500 | /// input = [v0 v1 ] (vi: 32-bit elements) |
5501 | /// Bitcast = [w0 w1 w2 w3 ] (wi: 16-bit elements, v0 = [w0 w1]) |
5502 | /// Counts16 = [k0 k1 k2 k3 ] (ki: 16-bit elements, bit-count of wi) |
5503 | /// vrev: N0 = [k1 k0 k3 k2 ] |
5504 | /// [k0 k1 k2 k3 ] |
5505 | /// N1 =+[k1 k0 k3 k2 ] |
5506 | /// [k0 k2 k1 k3 ] |
5507 | /// N2 =+[k1 k3 k0 k2 ] |
5508 | /// [k0 k2 k1 k3 ] |
5509 | /// Extended =+[k1 k3 k0 k2 ] |
5510 | /// [k0 k2 ] |
5511 | /// Extracted=+[k1 k3 ] |
5512 | /// |
5513 | static SDValue lowerCTPOP32BitElements(SDNode *N, SelectionDAG &DAG) { |
5514 | EVT VT = N->getValueType(0); |
5515 | SDLoc DL(N); |
5516 | |
5517 | EVT VT16Bit = VT.is64BitVector() ? MVT::v4i16 : MVT::v8i16; |
5518 | |
5519 | SDValue Bitcast = DAG.getNode(ISD::BITCAST, DL, VT16Bit, N->getOperand(0)); |
5520 | SDValue Counts16 = lowerCTPOP16BitElements(Bitcast.getNode(), DAG); |
5521 | SDValue N0 = DAG.getNode(ARMISD::VREV32, DL, VT16Bit, Counts16); |
5522 | SDValue N1 = DAG.getNode(ISD::ADD, DL, VT16Bit, Counts16, N0); |
5523 | SDValue N2 = DAG.getNode(ARMISD::VUZP, DL, VT16Bit, N1, N1); |
5524 | |
5525 | if (VT.is64BitVector()) { |
5526 | SDValue Extended = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v4i32, N2); |
5527 | return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i32, Extended, |
5528 | DAG.getIntPtrConstant(0, DL)); |
5529 | } else { |
5530 | SDValue Extracted = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i16, N2, |
5531 | DAG.getIntPtrConstant(0, DL)); |
5532 | return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v4i32, Extracted); |
5533 | } |
5534 | } |
5535 | |
5536 | static SDValue LowerCTPOP(SDNode *N, SelectionDAG &DAG, |
5537 | const ARMSubtarget *ST) { |
5538 | EVT VT = N->getValueType(0); |
5539 | |
5540 |   assert(ST->hasNEON() && "Custom ctpop lowering requires NEON.");
5541 |   assert((VT == MVT::v2i32 || VT == MVT::v4i32 ||
5542 |           VT == MVT::v4i16 || VT == MVT::v8i16) &&
5543 |          "Unexpected type for custom ctpop lowering");
5544 | |
5545 | if (VT.getVectorElementType() == MVT::i32) |
5546 | return lowerCTPOP32BitElements(N, DAG); |
5547 | else |
5548 | return lowerCTPOP16BitElements(N, DAG); |
5549 | } |
5550 | |
5551 | static SDValue LowerShift(SDNode *N, SelectionDAG &DAG, |
5552 | const ARMSubtarget *ST) { |
5553 | EVT VT = N->getValueType(0); |
5554 | SDLoc dl(N); |
5555 | |
5556 | if (!VT.isVector()) |
5557 | return SDValue(); |
5558 | |
5559 | // Lower vector shifts on NEON to use VSHL. |
5560 |   assert(ST->hasNEON() && "unexpected vector shift");
5561 | |
5562 | // Left shifts translate directly to the vshiftu intrinsic. |
5563 | if (N->getOpcode() == ISD::SHL) |
5564 | return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, |
5565 | DAG.getConstant(Intrinsic::arm_neon_vshiftu, dl, |
5566 | MVT::i32), |
5567 | N->getOperand(0), N->getOperand(1)); |
5568 | |
5569 |   assert((N->getOpcode() == ISD::SRA ||
5570 |           N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode");
5571 | |
5572 | // NEON uses the same intrinsics for both left and right shifts. For |
5573 | // right shifts, the shift amounts are negative, so negate the vector of |
5574 | // shift amounts. |
5575 | EVT ShiftVT = N->getOperand(1).getValueType(); |
5576 | SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT, |
5577 | getZeroVector(ShiftVT, DAG, dl), |
5578 | N->getOperand(1)); |
5579 | Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ? |
5580 | Intrinsic::arm_neon_vshifts : |
5581 | Intrinsic::arm_neon_vshiftu); |
5582 | return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, |
5583 | DAG.getConstant(vshiftInt, dl, MVT::i32), |
5584 | N->getOperand(0), NegatedCount); |
5585 | } |
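
// A hedged illustration of the right-shift transform above: for example
//   v4i32 = srl %x, <i32 2, i32 2, i32 2, i32 2>
// becomes the "left shift" intrinsic with negated per-lane amounts,
//   v4i32 = Intrinsic::arm_neon_vshiftu %x, <i32 -2, i32 -2, i32 -2, i32 -2>
// because NEON VSHL treats a negative shift count as a right shift.
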
5586 | |
5587 | static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG, |
5588 | const ARMSubtarget *ST) { |
5589 | EVT VT = N->getValueType(0); |
5590 | SDLoc dl(N); |
5591 | |
5592 | // We can get here for a node like i32 = ISD::SHL i32, i64 |
5593 | if (VT != MVT::i64) |
5594 | return SDValue(); |
5595 | |
5596 |   assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) &&
5597 |          "Unknown shift to lower!");
5598 | |
5599 |   // We only lower SRA and SRL by 1 here; all others use generic lowering.
5600 | if (!isOneConstant(N->getOperand(1))) |
5601 | return SDValue(); |
5602 | |
5603 |   // If we are in Thumb1 mode, we don't have RRX.
5604 | if (ST->isThumb1Only()) return SDValue(); |
5605 | |
5606 | // Okay, we have a 64-bit SRA or SRL of 1. Lower this to an RRX expr. |
5607 | SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), |
5608 | DAG.getConstant(0, dl, MVT::i32)); |
5609 | SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), |
5610 | DAG.getConstant(1, dl, MVT::i32)); |
5611 | |
5612 |   // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and
5613 |   // captures the shifted-out bit in the carry flag.
5614 | unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG; |
5615 | Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), Hi); |
5616 | |
5617 |   // The low part is an ARMISD::RRX operation, which shifts the carry in.
5618 | Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1)); |
5619 | |
5620 | // Merge the pieces into a single i64 value. |
5621 | return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); |
5622 | } |
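
// A hedged scalar model of the RRX lowering above (the helper name lsr64By1
// is hypothetical): the high word is shifted once, its dropped bit 0 becomes
// the carry, and RRX rotates that carry into bit 31 of the low word. The
// SRA_FLAG variant differs only in using an arithmetic shift on Hi.
#include <cstdint>

static uint64_t lsr64By1(uint32_t Lo, uint32_t Hi) {
  uint32_t Carry = Hi & 1u;                    // bit that SRL_FLAG leaves in CPSR
  uint32_t NewHi = Hi >> 1;                    // SRL_FLAG result
  uint32_t NewLo = (Lo >> 1) | (Carry << 31);  // RRX shifts the carry in on top
  return ((uint64_t)NewHi << 32) | NewLo;      // BUILD_PAIR of the two halves
}
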
5623 | |
5624 | static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) { |
5625 | SDValue TmpOp0, TmpOp1; |
5626 | bool Invert = false; |
5627 | bool Swap = false; |
5628 | unsigned Opc = 0; |
5629 | |
5630 | SDValue Op0 = Op.getOperand(0); |
5631 | SDValue Op1 = Op.getOperand(1); |
5632 | SDValue CC = Op.getOperand(2); |
5633 | EVT CmpVT = Op0.getValueType().changeVectorElementTypeToInteger(); |
5634 | EVT VT = Op.getValueType(); |
5635 | ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); |
5636 | SDLoc dl(Op); |
5637 | |
5638 | if (Op0.getValueType().getVectorElementType() == MVT::i64 && |
5639 | (SetCCOpcode == ISD::SETEQ || SetCCOpcode == ISD::SETNE)) { |
5640 | // Special-case integer 64-bit equality comparisons. They aren't legal, |
5641 | // but they can be lowered with a few vector instructions. |
5642 | unsigned CmpElements = CmpVT.getVectorNumElements() * 2; |
5643 | EVT SplitVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, CmpElements); |
5644 | SDValue CastOp0 = DAG.getNode(ISD::BITCAST, dl, SplitVT, Op0); |
5645 | SDValue CastOp1 = DAG.getNode(ISD::BITCAST, dl, SplitVT, Op1); |
5646 | SDValue Cmp = DAG.getNode(ISD::SETCC, dl, SplitVT, CastOp0, CastOp1, |
5647 | DAG.getCondCode(ISD::SETEQ)); |
5648 | SDValue Reversed = DAG.getNode(ARMISD::VREV64, dl, SplitVT, Cmp); |
5649 | SDValue Merged = DAG.getNode(ISD::AND, dl, SplitVT, Cmp, Reversed); |
5650 | Merged = DAG.getNode(ISD::BITCAST, dl, CmpVT, Merged); |
5651 | if (SetCCOpcode == ISD::SETNE) |
5652 | Merged = DAG.getNOT(dl, Merged, CmpVT); |
5653 | Merged = DAG.getSExtOrTrunc(Merged, dl, VT); |
5654 | return Merged; |
5655 | } |
5656 | |
5657 | if (CmpVT.getVectorElementType() == MVT::i64) |
5658 | // 64-bit comparisons are not legal in general. |
5659 | return SDValue(); |
5660 | |
5661 | if (Op1.getValueType().isFloatingPoint()) { |
5662 | switch (SetCCOpcode) { |
5663 |     default: llvm_unreachable("Illegal FP comparison");
5664 | case ISD::SETUNE: |
5665 |     case ISD::SETNE: Invert = true; LLVM_FALLTHROUGH;
5666 | case ISD::SETOEQ: |
5667 | case ISD::SETEQ: Opc = ARMISD::VCEQ; break; |
5668 | case ISD::SETOLT: |
5669 |     case ISD::SETLT: Swap = true; LLVM_FALLTHROUGH;
5670 | case ISD::SETOGT: |
5671 | case ISD::SETGT: Opc = ARMISD::VCGT; break; |
5672 | case ISD::SETOLE: |
5673 |     case ISD::SETLE: Swap = true; LLVM_FALLTHROUGH;
5674 | case ISD::SETOGE: |
5675 | case ISD::SETGE: Opc = ARMISD::VCGE; break; |
5676 |     case ISD::SETUGE: Swap = true; LLVM_FALLTHROUGH;
5677 | case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break; |
5678 |     case ISD::SETUGT: Swap = true; LLVM_FALLTHROUGH;
5679 | case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break; |
5680 |     case ISD::SETUEQ: Invert = true; LLVM_FALLTHROUGH;
5681 | case ISD::SETONE: |
5682 | // Expand this to (OLT | OGT). |
5683 | TmpOp0 = Op0; |
5684 | TmpOp1 = Op1; |
5685 | Opc = ISD::OR; |
5686 | Op0 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp1, TmpOp0); |
5687 | Op1 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp0, TmpOp1); |
5688 | break; |
5689 | case ISD::SETUO: |
5690 | Invert = true; |
5691 |       LLVM_FALLTHROUGH;
5692 | case ISD::SETO: |
5693 | // Expand this to (OLT | OGE). |
5694 | TmpOp0 = Op0; |
5695 | TmpOp1 = Op1; |
5696 | Opc = ISD::OR; |
5697 | Op0 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp1, TmpOp0); |
5698 | Op1 = DAG.getNode(ARMISD::VCGE, dl, CmpVT, TmpOp0, TmpOp1); |
5699 | break; |
5700 | } |
5701 | } else { |
5702 | // Integer comparisons. |
5703 | switch (SetCCOpcode) { |
5704 |     default: llvm_unreachable("Illegal integer comparison");
5705 |     case ISD::SETNE: Invert = true; LLVM_FALLTHROUGH;
5706 | case ISD::SETEQ: Opc = ARMISD::VCEQ; break; |
5707 |     case ISD::SETLT: Swap = true; LLVM_FALLTHROUGH;
5708 | case ISD::SETGT: Opc = ARMISD::VCGT; break; |
5709 |     case ISD::SETLE: Swap = true; LLVM_FALLTHROUGH;
5710 | case ISD::SETGE: Opc = ARMISD::VCGE; break; |
5711 |     case ISD::SETULT: Swap = true; LLVM_FALLTHROUGH;
5712 | case ISD::SETUGT: Opc = ARMISD::VCGTU; break; |
5713 |     case ISD::SETULE: Swap = true; LLVM_FALLTHROUGH;
5714 | case ISD::SETUGE: Opc = ARMISD::VCGEU; break; |
5715 | } |
5716 | |
5717 | // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero). |
5718 | if (Opc == ARMISD::VCEQ) { |
5719 | SDValue AndOp; |
5720 | if (ISD::isBuildVectorAllZeros(Op1.getNode())) |
5721 | AndOp = Op0; |
5722 | else if (ISD::isBuildVectorAllZeros(Op0.getNode())) |
5723 | AndOp = Op1; |
5724 | |
5725 | // Ignore bitconvert. |
5726 | if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST) |
5727 | AndOp = AndOp.getOperand(0); |
5728 | |
5729 | if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) { |
5730 | Opc = ARMISD::VTST; |
5731 | Op0 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(0)); |
5732 | Op1 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(1)); |
5733 | Invert = !Invert; |
5734 | } |
5735 | } |
5736 | } |
5737 | |
5738 | if (Swap) |
5739 | std::swap(Op0, Op1); |
5740 | |
5741 | // If one of the operands is a constant vector zero, attempt to fold the |
5742 | // comparison to a specialized compare-against-zero form. |
5743 | SDValue SingleOp; |
5744 | if (ISD::isBuildVectorAllZeros(Op1.getNode())) |
5745 | SingleOp = Op0; |
5746 | else if (ISD::isBuildVectorAllZeros(Op0.getNode())) { |
5747 | if (Opc == ARMISD::VCGE) |
5748 | Opc = ARMISD::VCLEZ; |
5749 | else if (Opc == ARMISD::VCGT) |
5750 | Opc = ARMISD::VCLTZ; |
5751 | SingleOp = Op1; |
5752 | } |
5753 | |
5754 | SDValue Result; |
5755 | if (SingleOp.getNode()) { |
5756 | switch (Opc) { |
5757 | case ARMISD::VCEQ: |
5758 | Result = DAG.getNode(ARMISD::VCEQZ, dl, CmpVT, SingleOp); break; |
5759 | case ARMISD::VCGE: |
5760 | Result = DAG.getNode(ARMISD::VCGEZ, dl, CmpVT, SingleOp); break; |
5761 | case ARMISD::VCLEZ: |
5762 | Result = DAG.getNode(ARMISD::VCLEZ, dl, CmpVT, SingleOp); break; |
5763 | case ARMISD::VCGT: |
5764 | Result = DAG.getNode(ARMISD::VCGTZ, dl, CmpVT, SingleOp); break; |
5765 | case ARMISD::VCLTZ: |
5766 | Result = DAG.getNode(ARMISD::VCLTZ, dl, CmpVT, SingleOp); break; |
5767 | default: |
5768 | Result = DAG.getNode(Opc, dl, CmpVT, Op0, Op1); |
5769 | } |
5770 | } else { |
5771 | Result = DAG.getNode(Opc, dl, CmpVT, Op0, Op1); |
5772 | } |
5773 | |
5774 | Result = DAG.getSExtOrTrunc(Result, dl, VT); |
5775 | |
5776 | if (Invert) |
5777 | Result = DAG.getNOT(dl, Result, VT); |
5778 | |
5779 | return Result; |
5780 | } |
5781 | |
5782 | static SDValue LowerSETCCE(SDValue Op, SelectionDAG &DAG) { |
5783 | SDValue LHS = Op.getOperand(0); |
5784 | SDValue RHS = Op.getOperand(1); |
5785 | SDValue Carry = Op.getOperand(2); |
5786 | SDValue Cond = Op.getOperand(3); |
5787 | SDLoc DL(Op); |
5788 | |
5789 | assert(LHS.getSimpleValueType().isInteger() && "SETCCE is integer only."); |
5790 | |
5791 | assert(Carry.getOpcode() != ISD::CARRY_FALSE); |
5792 | SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32); |
5793 | SDValue Cmp = DAG.getNode(ARMISD::SUBE, DL, VTs, LHS, RHS, Carry); |
5794 | |
5795 | SDValue FVal = DAG.getConstant(0, DL, MVT::i32); |
5796 | SDValue TVal = DAG.getConstant(1, DL, MVT::i32); |
5797 | SDValue ARMcc = DAG.getConstant( |
5798 | IntCCToARMCC(cast<CondCodeSDNode>(Cond)->get()), DL, MVT::i32); |
5799 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); |
5800 | SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), DL, ARM::CPSR, |
5801 | Cmp.getValue(1), SDValue()); |
5802 | return DAG.getNode(ARMISD::CMOV, DL, Op.getValueType(), FVal, TVal, ARMcc, |
5803 | CCR, Chain.getValue(1)); |
5804 | } |
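     | // Illustrative note, not from the original source: SETCCE is a setcc that |
     | // also consumes an incoming carry, used when wide (e.g. i64) comparisons |
     | // are legalized piecewise. The SUBE above computes LHS - RHS - !carry, and |
     | // the CMOV then materialises the i32 boolean (0 or 1) from the resulting |
     | // flags. |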
5805 | |
5806 | /// isNEONModifiedImm - Check if the specified splat value corresponds to a |
5807 | /// valid vector constant for a NEON instruction with a "modified immediate" |
5808 | /// operand (e.g., VMOV). If so, return the encoded value. |
5809 | static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef, |
5810 | unsigned SplatBitSize, SelectionDAG &DAG, |
5811 | const SDLoc &dl, EVT &VT, bool is128Bits, |
5812 | NEONModImmType type) { |
5813 | unsigned OpCmode, Imm; |
5814 | |
5815 | // SplatBitSize is set to the smallest size that splats the vector, so a |
5816 | // zero vector will always have SplatBitSize == 8. However, NEON modified |
5817 | // immediate instructions other than VMOV do not support the 8-bit encoding |
5818 | // of a zero vector, and the default encoding of zero is supposed to be the |
5819 | // 32-bit version. |
5820 | if (SplatBits == 0) |
5821 | SplatBitSize = 32; |
5822 | |
5823 | switch (SplatBitSize) { |
5824 | case 8: |
5825 | if (type != VMOVModImm) |
5826 | return SDValue(); |
5827 | // Any 1-byte value is OK. Op=0, Cmode=1110. |
5828 | assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big"); |
5829 | OpCmode = 0xe; |
5830 | Imm = SplatBits; |
5831 | VT = is128Bits ? MVT::v16i8 : MVT::v8i8; |
5832 | break; |
5833 | |
5834 | case 16: |
5835 | // NEON's 16-bit VMOV supports splat values where only one byte is nonzero. |
5836 | VT = is128Bits ? MVT::v8i16 : MVT::v4i16; |
5837 | if ((SplatBits & ~0xff) == 0) { |
5838 | // Value = 0x00nn: Op=x, Cmode=100x. |
5839 | OpCmode = 0x8; |
5840 | Imm = SplatBits; |
5841 | break; |
5842 | } |
5843 | if ((SplatBits & ~0xff00) == 0) { |
5844 | // Value = 0xnn00: Op=x, Cmode=101x. |
5845 | OpCmode = 0xa; |
5846 | Imm = SplatBits >> 8; |
5847 | break; |
5848 | } |
5849 | return SDValue(); |
5850 | |
5851 | case 32: |
5852 | // NEON's 32-bit VMOV supports splat values where: |
5853 | // * only one byte is nonzero, or |
5854 | // * the least significant byte is 0xff and the second byte is nonzero, or |
5855 | // * the least significant 2 bytes are 0xff and the third is nonzero. |
5856 | VT = is128Bits ? MVT::v4i32 : MVT::v2i32; |
5857 | if ((SplatBits & ~0xff) == 0) { |
5858 | // Value = 0x000000nn: Op=x, Cmode=000x. |
5859 | OpCmode = 0; |
5860 | Imm = SplatBits; |
5861 | break; |
5862 | } |
5863 | if ((SplatBits & ~0xff00) == 0) { |
5864 | // Value = 0x0000nn00: Op=x, Cmode=001x. |
5865 | OpCmode = 0x2; |
5866 | Imm = SplatBits >> 8; |
5867 | break; |
5868 | } |
5869 | if ((SplatBits & ~0xff0000) == 0) { |
5870 | // Value = 0x00nn0000: Op=x, Cmode=010x. |
5871 | OpCmode = 0x4; |
5872 | Imm = SplatBits >> 16; |
5873 | break; |
5874 | } |
5875 | if ((SplatBits & ~0xff000000) == 0) { |
5876 | // Value = 0xnn000000: Op=x, Cmode=011x. |
5877 | OpCmode = 0x6; |
5878 | Imm = SplatBits >> 24; |
5879 | break; |
5880 | } |
5881 | |
5882 | // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC |
5883 | if (type == OtherModImm) return SDValue(); |
5884 | |
5885 | if ((SplatBits & ~0xffff) == 0 && |
5886 | ((SplatBits | SplatUndef) & 0xff) == 0xff) { |
5887 | // Value = 0x0000nnff: Op=x, Cmode=1100. |
5888 | OpCmode = 0xc; |
5889 | Imm = SplatBits >> 8; |
5890 | break; |
5891 | } |
5892 | |
5893 | if ((SplatBits & ~0xffffff) == 0 && |
5894 | ((SplatBits | SplatUndef) & 0xffff) == 0xffff) { |
5895 | // Value = 0x00nnffff: Op=x, Cmode=1101. |
5896 | OpCmode = 0xd; |
5897 | Imm = SplatBits >> 16; |
5898 | break; |
5899 | } |
5900 | |
5901 | // Note: there are a few 32-bit splat values (specifically: 00ffff00, |
5902 | // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not |
5903 | // VMOV.I32. A (very) minor optimization would be to replicate the value |
5904 | // and fall through here to test for a valid 64-bit splat. But, then the |
5905 | // caller would also need to check and handle the change in size. |
5906 | return SDValue(); |
5907 | |
5908 | case 64: { |
5909 | if (type != VMOVModImm) |
5910 | return SDValue(); |
5911 | // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff. |
5912 | uint64_t BitMask = 0xff; |
5913 | uint64_t Val = 0; |
5914 | unsigned ImmMask = 1; |
5915 | Imm = 0; |
5916 | for (int ByteNum = 0; ByteNum < 8; ++ByteNum) { |
5917 | if (((SplatBits | SplatUndef) & BitMask) == BitMask) { |
5918 | Val |= BitMask; |
5919 | Imm |= ImmMask; |
5920 | } else if ((SplatBits & BitMask) != 0) { |
5921 | return SDValue(); |
5922 | } |
5923 | BitMask <<= 8; |
5924 | ImmMask <<= 1; |
5925 | } |
5926 | |
5927 | if (DAG.getDataLayout().isBigEndian()) |
5928 | // Swap the higher and lower 32-bit words. |
5929 | Imm = ((Imm & 0xf) << 4) | ((Imm & 0xf0) >> 4); |
5930 | |
5931 | // Op=1, Cmode=1110. |
5932 | OpCmode = 0x1e; |
5933 | VT = is128Bits ? MVT::v2i64 : MVT::v1i64; |
5934 | break; |
5935 | } |
5936 | |
5937 | default: |
5938 | llvm_unreachable("unexpected size for isNEONModifiedImm")::llvm::llvm_unreachable_internal("unexpected size for isNEONModifiedImm" , "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/ARM/ARMISelLowering.cpp" , 5938); |
5939 | } |
5940 | |
5941 | unsigned EncodedVal = ARM_AM::createNEONModImm(OpCmode, Imm); |
5942 | return DAG.getTargetConstant(EncodedVal, dl, MVT::i32); |
5943 | } |
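     | // Worked example, a sketch not present in the original source: a v4i32 |
     | // splat of 0x00005600 hits the Cmode=001x case above, so OpCmode = 0x2 and |
     | // Imm = 0x56. createNEONModImm packs these as (OpCmode << 8) | Imm, giving |
     | // the target constant 0x256, which later decodes back into |
     | // "vmov.i32 dN, #0x5600". |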
5944 | |
5945 | SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG, |
5946 | const ARMSubtarget *ST) const { |
5947 | EVT VT = Op.getValueType(); |
5948 | bool IsDouble = (VT == MVT::f64); |
5949 | ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Op); |
5950 | const APFloat &FPVal = CFP->getValueAPF(); |
5951 | |
5952 | // Prevent floating-point constants from using literal loads |
5953 | // when execute-only is enabled. |
5954 | if (ST->genExecuteOnly()) { |
5955 | // If we can represent the constant as an immediate, don't lower it |
5956 | if (isFPImmLegal(FPVal, VT)) |
5957 | return Op; |
5958 | // Otherwise, construct as integer, and move to float register |
5959 | APInt INTVal = FPVal.bitcastToAPInt(); |
5960 | SDLoc DL(CFP); |
5961 | switch (VT.getSimpleVT().SimpleTy) { |
5962 | default: |
5963 | llvm_unreachable("Unknown floating point type!")::llvm::llvm_unreachable_internal("Unknown floating point type!" , "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/ARM/ARMISelLowering.cpp" , 5963); |
5964 | break; |
5965 | case MVT::f64: { |
5966 | SDValue Lo = DAG.getConstant(INTVal.trunc(32), DL, MVT::i32); |
5967 | SDValue Hi = DAG.getConstant(INTVal.lshr(32).trunc(32), DL, MVT::i32); |
5968 | if (!ST->isLittle()) |
5969 | std::swap(Lo, Hi); |
5970 | return DAG.getNode(ARMISD::VMOVDRR, DL, MVT::f64, Lo, Hi); |
5971 | } |
5972 | case MVT::f32: |
5973 | return DAG.getNode(ARMISD::VMOVSR, DL, VT, |
5974 | DAG.getConstant(INTVal, DL, MVT::i32)); |
5975 | } |
5976 | } |
5977 | |
5978 | if (!ST->hasVFP3()) |
5979 | return SDValue(); |
5980 | |
5981 | // Use the default (constant pool) lowering for double constants when we have |
5982 | // an SP-only FPU |
5983 | if (IsDouble && Subtarget->isFPOnlySP()) |
5984 | return SDValue(); |
5985 | |
5986 | // Try splatting with a VMOV.f32... |
5987 | int ImmVal = IsDouble ? ARM_AM::getFP64Imm(FPVal) : ARM_AM::getFP32Imm(FPVal); |
5988 | |
5989 | if (ImmVal != -1) { |
5990 | if (IsDouble || !ST->useNEONForSinglePrecisionFP()) { |
5991 | // We have code in place to select a valid ConstantFP already, no need to |
5992 | // do any mangling. |
5993 | return Op; |
5994 | } |
5995 | |
5996 | // It's a float and we are trying to use NEON operations where |
5997 | // possible. Lower it to a splat followed by an extract. |
5998 | SDLoc DL(Op); |
5999 | SDValue NewVal = DAG.getTargetConstant(ImmVal, DL, MVT::i32); |
6000 | SDValue VecConstant = DAG.getNode(ARMISD::VMOVFPIMM, DL, MVT::v2f32, |
6001 | NewVal); |
6002 | return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecConstant, |
6003 | DAG.getConstant(0, DL, MVT::i32)); |
6004 | } |
6005 | |
6006 | // The rest of our options are NEON-only, so make sure that's allowed before |
6007 | // proceeding. |
6008 | if (!ST->hasNEON() || (!IsDouble && !ST->useNEONForSinglePrecisionFP())) |
6009 | return SDValue(); |
6010 | |
6011 | EVT VMovVT; |
6012 | uint64_t iVal = FPVal.bitcastToAPInt().getZExtValue(); |
6013 | |
6014 | // It wouldn't really be worth bothering for doubles except for one very |
6015 | // important value, which does happen to match: 0.0. So make sure we don't do |
6016 | // anything stupid. |
6017 | if (IsDouble && (iVal & 0xffffffff) != (iVal >> 32)) |
6018 | return SDValue(); |
6019 | |
6020 | // Try a VMOV.i32 (FIXME: i8, i16, or i64 could work too). |
6021 | SDValue NewVal = isNEONModifiedImm(iVal & 0xffffffffU, 0, 32, DAG, SDLoc(Op), |
6022 | VMovVT, false, VMOVModImm); |
6023 | if (NewVal != SDValue()) { |
6024 | SDLoc DL(Op); |
6025 | SDValue VecConstant = DAG.getNode(ARMISD::VMOVIMM, DL, VMovVT, |
6026 | NewVal); |
6027 | if (IsDouble) |
6028 | return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant); |
6029 | |
6030 | // It's a float: cast and extract a vector element. |
6031 | SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32, |
6032 | VecConstant); |
6033 | return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant, |
6034 | DAG.getConstant(0, DL, MVT::i32)); |
6035 | } |
6036 | |
6037 | // Finally, try a VMVN.i32 |
6038 | NewVal = isNEONModifiedImm(~iVal & 0xffffffffU, 0, 32, DAG, SDLoc(Op), VMovVT, |
6039 | false, VMVNModImm); |
6040 | if (NewVal != SDValue()) { |
6041 | SDLoc DL(Op); |
6042 | SDValue VecConstant = DAG.getNode(ARMISD::VMVNIMM, DL, VMovVT, NewVal); |
6043 | |
6044 | if (IsDouble) |
6045 | return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant); |
6046 | |
6047 | // It's a float: cast and extract a vector element. |
6048 | SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32, |
6049 | VecConstant); |
6050 | return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant, |
6051 | DAG.getConstant(0, DL, MVT::i32)); |
6052 | } |
6053 | |
6054 | return SDValue(); |
6055 | } |
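     | // Illustrative note, an assumption based on the VFP modified-immediate |
     | // format rather than anything stated in this file: getFP32Imm/getFP64Imm |
     | // succeed only for values of the form +/- n * 2^-r with 16 <= n <= 31 and |
     | // 0 <= r <= 7, so constants such as 1.0, 0.5 and -2.5 take the early VMOV |
     | // path above, while e.g. 0.1 falls through to the NEON VMOV/VMVN attempts |
     | // or, failing those, the constant pool. |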
6056 | |
6057 | // Check if a VEXT instruction can handle the shuffle mask when the |
6058 | // vector sources of the shuffle are the same. |
6059 | static bool isSingletonVEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) { |
6060 | unsigned NumElts = VT.getVectorNumElements(); |
6061 | |
6062 | // Assume that the first shuffle index is not UNDEF. Fail if it is. |
6063 | if (M[0] < 0) |
6064 | return false; |
6065 | |
6066 | Imm = M[0]; |
6067 | |
6068 | // If this is a VEXT shuffle, the immediate value is the index of the first |
6069 | // element. The other shuffle indices must be the successive elements after |
6070 | // the first one. |
6071 | unsigned ExpectedElt = Imm; |
6072 | for (unsigned i = 1; i < NumElts; ++i) { |
6073 | // Increment the expected index. If it wraps around, just follow it |
6074 | // back to index zero and keep going. |
6075 | ++ExpectedElt; |
6076 | if (ExpectedElt == NumElts) |
6077 | ExpectedElt = 0; |
6078 | |
6079 | if (M[i] < 0) continue; // ignore UNDEF indices |
6080 | if (ExpectedElt != static_cast<unsigned>(M[i])) |
6081 | return false; |
6082 | } |
6083 | |
6084 | return true; |
6085 | } |
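     | // Illustrative example, not in the original source: for a single-source |
     | // v8i8 shuffle, the mask <5, 6, 7, 0, 1, 2, 3, 4> passes the check above |
     | // with Imm = 5, i.e. "vext.8 d0, d1, d1, #5" rotating the vector by five |
     | // bytes. |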
6086 | |
6087 | static bool isVEXTMask(ArrayRef<int> M, EVT VT, |
6088 | bool &ReverseVEXT, unsigned &Imm) { |
6089 | unsigned NumElts = VT.getVectorNumElements(); |
6090 | ReverseVEXT = false; |
6091 | |
6092 | // Assume that the first shuffle index is not UNDEF. Fail if it is. |
6093 | if (M[0] < 0) |
6094 | return false; |
6095 | |
6096 | Imm = M[0]; |
6097 | |
6098 | // If this is a VEXT shuffle, the immediate value is the index of the first |
6099 | // element. The other shuffle indices must be the successive elements after |
6100 | // the first one. |
6101 | unsigned ExpectedElt = Imm; |
6102 | for (unsigned i = 1; i < NumElts; ++i) { |
6103 | // Increment the expected index. If it wraps around, it may still be |
6104 | // a VEXT but the source vectors must be swapped. |
6105 | ExpectedElt += 1; |
6106 | if (ExpectedElt == NumElts * 2) { |
6107 | ExpectedElt = 0; |
6108 | ReverseVEXT = true; |
6109 | } |
6110 | |
6111 | if (M[i] < 0) continue; // ignore UNDEF indices |
6112 | if (ExpectedElt != static_cast<unsigned>(M[i])) |
6113 | return false; |
6114 | } |
6115 | |
6116 | // Adjust the index value if the source operands will be swapped. |
6117 | if (ReverseVEXT) |
6118 | Imm -= NumElts; |
6119 | |
6120 | return true; |
6121 | } |
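     | // Illustrative example, not in the original source: on v4i32 the mask |
     | // <6, 7, 0, 1> wraps past 2*NumElts, so ReverseVEXT is set and Imm becomes |
     | // 6 - 4 = 2: the same extract with the two source operands swapped. |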
6122 | |
6123 | /// isVREVMask - Check if a vector shuffle corresponds to a VREV |
6124 | /// instruction with the specified blocksize. (The order of the elements |
6125 | /// within each block of the vector is reversed.) |
6126 | static bool isVREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) { |
6127 | assert((BlockSize==16 || BlockSize==32 || BlockSize==64) && |
6128 | "Only possible block sizes for VREV are: 16, 32, 64"); |
6129 | |
6130 | unsigned EltSz = VT.getScalarSizeInBits(); |
6131 | if (EltSz == 64) |
6132 | return false; |
6133 | |
6134 | unsigned NumElts = VT.getVectorNumElements(); |
6135 | unsigned BlockElts = M[0] + 1; |
6136 | // If the first shuffle index is UNDEF, be optimistic. |
6137 | if (M[0] < 0) |
6138 | BlockElts = BlockSize / EltSz; |
6139 | |
6140 | if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz) |
6141 | return false; |
6142 | |
6143 | for (unsigned i = 0; i < NumElts; ++i) { |
6144 | if (M[i] < 0) continue; // ignore UNDEF indices |
6145 | if ((unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts)) |
6146 | return false; |
6147 | } |
6148 | |
6149 | return true; |
6150 | } |
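     | // Illustrative example, not in the original source: for v8i16 with |
     | // BlockSize == 64, BlockElts = 64/16 = 4, and the mask |
     | // <3, 2, 1, 0, 7, 6, 5, 4> satisfies the per-block reversal formula above, |
     | // i.e. a VREV64.16. |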
6151 | |
6152 | static bool isVTBLMask(ArrayRef<int> M, EVT VT) { |
6153 | // We can handle <8 x i8> vector shuffles. If the index in the mask is out of |
6154 | // range, then 0 is placed into the resulting vector. So pretty much any mask |
6155 | // of 8 elements can work here. |
6156 | return VT == MVT::v8i8 && M.size() == 8; |
6157 | } |
6158 | |
6159 | static unsigned SelectPairHalf(unsigned Elements, ArrayRef<int> Mask, |
6160 | unsigned Index) { |
6161 | if (Mask.size() == Elements * 2) |
6162 | return Index / Elements; |
6163 | return Mask[Index] == 0 ? 0 : 1; |
6164 | } |
6165 | |
6166 | // Checks whether the shuffle mask represents a vector transpose (VTRN) by |
6167 | // checking that pairs of elements in the shuffle mask represent the same index |
6168 | // in each vector, incrementing the expected index by 2 at each step. |
6169 | // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 2, 6] |
6170 | // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,c,g} |
6171 | // v2={e,f,g,h} |
6172 | // WhichResult gives the offset for each element in the mask based on which |
6173 | // of the two results it belongs to. |
6174 | // |
6175 | // The transpose can be represented either as: |
6176 | // result1 = shufflevector v1, v2, result1_shuffle_mask |
6177 | // result2 = shufflevector v1, v2, result2_shuffle_mask |
6178 | // where v1/v2 and the shuffle masks have the same number of elements |
6179 | // (here WhichResult (see below) indicates which result is being checked) |
6180 | // |
6181 | // or as: |
6182 | // results = shufflevector v1, v2, shuffle_mask |
6183 | // where both results are returned in one vector and the shuffle mask has twice |
6184 | // as many elements as v1/v2 (here WhichResult will always be 0 if true); in |
6185 | // this case we check the low half and the high half of the shuffle mask as |
6186 | // if each were a mask of the first form. |
6187 | static bool isVTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { |
6188 | unsigned EltSz = VT.getScalarSizeInBits(); |
6189 | if (EltSz == 64) |
6190 | return false; |
6191 | |
6192 | unsigned NumElts = VT.getVectorNumElements(); |
6193 | if (M.size() != NumElts && M.size() != NumElts*2) |
6194 | return false; |
6195 | |
6196 | // If the mask is twice as long as the input vector then we need to check the |
6197 | // upper and lower parts of the mask with a matching value for WhichResult |
6198 | // FIXME: A mask with only even values will be rejected in case the first |
6199 | // element is undefined, e.g. [-1, 4, 2, 6] will be rejected, because only |
6200 | // M[0] is used to determine WhichResult |
6201 | for (unsigned i = 0; i < M.size(); i += NumElts) { |
6202 | WhichResult = SelectPairHalf(NumElts, M, i); |
6203 | for (unsigned j = 0; j < NumElts; j += 2) { |
6204 | if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) || |
6205 | (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + NumElts + WhichResult)) |
6206 | return false; |
6207 | } |
6208 | } |
6209 | |
6210 | if (M.size() == NumElts*2) |
6211 | WhichResult = 0; |
6212 | |
6213 | return true; |
6214 | } |
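     | // Illustrative example, not in the original source: on v4i32, |
     | // <0, 4, 2, 6> matches with WhichResult = 0 (the left VTRN result) and |
     | // <1, 5, 3, 7> with WhichResult = 1 (the right result); an 8-element mask |
     | // such as <0, 4, 2, 6, 1, 5, 3, 7> checks both halves and forces |
     | // WhichResult back to 0. |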
6215 | |
6216 | /// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of |
6217 | /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". |
6218 | /// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>. |
6219 | static bool isVTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ |
6220 | unsigned EltSz = VT.getScalarSizeInBits(); |
6221 | if (EltSz == 64) |
6222 | return false; |
6223 | |
6224 | unsigned NumElts = VT.getVectorNumElements(); |
6225 | if (M.size() != NumElts && M.size() != NumElts*2) |
6226 | return false; |
6227 | |
6228 | for (unsigned i = 0; i < M.size(); i += NumElts) { |
6229 | WhichResult = SelectPairHalf(NumElts, M, i); |
6230 | for (unsigned j = 0; j < NumElts; j += 2) { |
6231 | if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) || |
6232 | (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + WhichResult)) |
6233 | return false; |
6234 | } |
6235 | } |
6236 | |
6237 | if (M.size() == NumElts*2) |
6238 | WhichResult = 0; |
6239 | |
6240 | return true; |
6241 | } |
6242 | |
6243 | // Checks whether the shuffle mask represents a vector unzip (VUZP) by checking |
6244 | // that the mask elements are either all even and in steps of size 2 or all odd |
6245 | // and in steps of size 2. |
6246 | // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 2, 4, 6] |
6247 | // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,c,e,g} |
6248 | // v2={e,f,g,h} |
6249 | // Requires checks similar to those of isVTRNMask with respect to how the |
6250 | // results are returned. |
6251 | static bool isVUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { |
6252 | unsigned EltSz = VT.getScalarSizeInBits(); |
6253 | if (EltSz == 64) |
6254 | return false; |
6255 | |
6256 | unsigned NumElts = VT.getVectorNumElements(); |
6257 | if (M.size() != NumElts && M.size() != NumElts*2) |
6258 | return false; |
6259 | |
6260 | for (unsigned i = 0; i < M.size(); i += NumElts) { |
6261 | WhichResult = SelectPairHalf(NumElts, M, i); |
6262 | for (unsigned j = 0; j < NumElts; ++j) { |
6263 | if (M[i+j] >= 0 && (unsigned) M[i+j] != 2 * j + WhichResult) |
6264 | return false; |
6265 | } |
6266 | } |
6267 | |
6268 | if (M.size() == NumElts*2) |
6269 | WhichResult = 0; |
6270 | |
6271 | // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. |
6272 | if (VT.is64BitVector() && EltSz == 32) |
6273 | return false; |
6274 | |
6275 | return true; |
6276 | } |
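     | // Illustrative example, not in the original source: on v8i16, |
     | // <0, 2, 4, 6, 8, 10, 12, 14> matches with WhichResult = 0 (all even |
     | // lanes) and <1, 3, 5, 7, 9, 11, 13, 15> with WhichResult = 1 (all odd |
     | // lanes); the final check rejects the 2 x i32 case, where VUZP.32 is just |
     | // VTRN.32. |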
6277 | |
6278 | /// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of |
6279 | /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". |
6280 | /// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>. |
6281 | static bool isVUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ |
6282 | unsigned EltSz = VT.getScalarSizeInBits(); |
6283 | if (EltSz == 64) |
6284 | return false; |
6285 | |
6286 | unsigned NumElts = VT.getVectorNumElements(); |
6287 | if (M.size() != NumElts && M.size() != NumElts*2) |
6288 | return false; |
6289 | |
6290 | unsigned Half = NumElts / 2; |
6291 | for (unsigned i = 0; i < M.size(); i += NumElts) { |
6292 | WhichResult = SelectPairHalf(NumElts, M, i); |
6293 | for (unsigned j = 0; j < NumElts; j += Half) { |
6294 | unsigned Idx = WhichResult; |
6295 | for (unsigned k = 0; k < Half; ++k) { |
6296 | int MIdx = M[i + j + k]; |
6297 | if (MIdx >= 0 && (unsigned) MIdx != Idx) |
6298 | return false; |
6299 | Idx += 2; |
6300 | } |
6301 | } |
6302 | } |
6303 | |
6304 | if (M.size() == NumElts*2) |
6305 | WhichResult = 0; |
6306 | |
6307 | // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. |
6308 | if (VT.is64BitVector() && EltSz == 32) |
6309 | return false; |
6310 | |
6311 | return true; |
6312 | } |
6313 | |
6314 | // Checks whether the shuffle mask represents a vector zip (VZIP) by checking |
6315 | // that pairs of elements of the shufflemask represent the same index in each |
6316 | // vector incrementing sequentially through the vectors. |
6317 | // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 1, 5] |
6318 | // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,b,f} |
6319 | // v2={e,f,g,h} |
6320 | // Requires checks similar to those of isVTRNMask with respect to how the |
6321 | // results are returned. |
6322 | static bool isVZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { |
6323 | unsigned EltSz = VT.getScalarSizeInBits(); |
6324 | if (EltSz == 64) |
6325 | return false; |
6326 | |
6327 | unsigned NumElts = VT.getVectorNumElements(); |
6328 | if (M.size() != NumElts && M.size() != NumElts*2) |
6329 | return false; |
6330 | |
6331 | for (unsigned i = 0; i < M.size(); i += NumElts) { |
6332 | WhichResult = SelectPairHalf(NumElts, M, i); |
6333 | unsigned Idx = WhichResult * NumElts / 2; |
6334 | for (unsigned j = 0; j < NumElts; j += 2) { |
6335 | if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) || |
6336 | (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx + NumElts)) |
6337 | return false; |
6338 | Idx += 1; |
6339 | } |
6340 | } |
6341 | |
6342 | if (M.size() == NumElts*2) |
6343 | WhichResult = 0; |
6344 | |
6345 | // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. |
6346 | if (VT.is64BitVector() && EltSz == 32) |
6347 | return false; |
6348 | |
6349 | return true; |
6350 | } |
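     | // Illustrative example, not in the original source: on v8i8, |
     | // <0, 8, 1, 9, 2, 10, 3, 11> matches with WhichResult = 0 (zipping the |
     | // low halves) and <4, 12, 5, 13, 6, 14, 7, 15> with WhichResult = 1 |
     | // (zipping the high halves). |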
6351 | |
6352 | /// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of |
6353 | /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". |
6354 | /// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>. |
6355 | static bool isVZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ |
6356 | unsigned EltSz = VT.getScalarSizeInBits(); |
6357 | if (EltSz == 64) |
6358 | return false; |
6359 | |
6360 | unsigned NumElts = VT.getVectorNumElements(); |
6361 | if (M.size() != NumElts && M.size() != NumElts*2) |
6362 | return false; |
6363 | |
6364 | for (unsigned i = 0; i < M.size(); i += NumElts) { |
6365 | WhichResult = SelectPairHalf(NumElts, M, i); |
6366 | unsigned Idx = WhichResult * NumElts / 2; |
6367 | for (unsigned j = 0; j < NumElts; j += 2) { |
6368 | if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) || |
6369 | (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx)) |
6370 | return false; |
6371 | Idx += 1; |
6372 | } |
6373 | } |
6374 | |
6375 | if (M.size() == NumElts*2) |
6376 | WhichResult = 0; |
6377 | |
6378 | // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. |
6379 | if (VT.is64BitVector() && EltSz == 32) |
6380 | return false; |
6381 | |
6382 | return true; |
6383 | } |
6384 | |
6385 | /// Check if \p ShuffleMask is a NEON two-result shuffle (VZIP, VUZP, VTRN), |
6386 | /// and return the corresponding ARMISD opcode if it is, or 0 if it isn't. |
6387 | static unsigned isNEONTwoResultShuffleMask(ArrayRef<int> ShuffleMask, EVT VT, |
6388 | unsigned &WhichResult, |
6389 | bool &isV_UNDEF) { |
6390 | isV_UNDEF = false; |
6391 | if (isVTRNMask(ShuffleMask, VT, WhichResult)) |
6392 | return ARMISD::VTRN; |
6393 | if (isVUZPMask(ShuffleMask, VT, WhichResult)) |
6394 | return ARMISD::VUZP; |
6395 | if (isVZIPMask(ShuffleMask, VT, WhichResult)) |
6396 | return ARMISD::VZIP; |
6397 | |
6398 | isV_UNDEF = true; |
6399 | if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult)) |
6400 | return ARMISD::VTRN; |
6401 | if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult)) |
6402 | return ARMISD::VUZP; |
6403 | if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult)) |
6404 | return ARMISD::VZIP; |
6405 | |
6406 | return 0; |
6407 | } |
6408 | |
6409 | /// \return true if this is a reverse operation on a vector. |
6410 | static bool isReverseMask(ArrayRef<int> M, EVT VT) { |
6411 | unsigned NumElts = VT.getVectorNumElements(); |
6412 | // Make sure the mask has the right size. |
6413 | if (NumElts != M.size()) |
6414 | return false; |
6415 | |
6416 | // Look for <15, ..., 3, -1, 1, 0>. |
6417 | for (unsigned i = 0; i != NumElts; ++i) |
6418 | if (M[i] >= 0 && M[i] != (int) (NumElts - 1 - i)) |
6419 | return false; |
6420 | |
6421 | return true; |
6422 | } |
6423 | |
6424 | // If N is an integer constant that can be moved into a register in one |
6425 | // instruction, return an SDValue of such a constant (will become a MOV |
6426 | // instruction). Otherwise return null. |
6427 | static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG, |
6428 | const ARMSubtarget *ST, const SDLoc &dl) { |
6429 | uint64_t Val; |
6430 | if (!isa<ConstantSDNode>(N)) |
6431 | return SDValue(); |
6432 | Val = cast<ConstantSDNode>(N)->getZExtValue(); |
6433 | |
6434 | if (ST->isThumb1Only()) { |
6435 | if (Val <= 255 || ~Val <= 255) |
6436 | return DAG.getConstant(Val, dl, MVT::i32); |
6437 | } else { |
6438 | if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1) |
6439 | return DAG.getConstant(Val, dl, MVT::i32); |
6440 | } |
6441 | return SDValue(); |
6442 | } |
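     | // Illustrative examples, not in the original source: 0x000000ff is a |
     | // single-instruction constant in every mode; 0xffffff00 qualifies because |
     | // its complement fits in 8 bits (an MVN); and in ARM/Thumb2 mode |
     | // 0x00ab0000 qualifies because getSOImmVal can express 0xab under an even |
     | // rotation. |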
6443 | |
6444 | // If this is a case we can't handle, return null and let the default |
6445 | // expansion code take care of it. |
6446 | SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, |
6447 | const ARMSubtarget *ST) const { |
6448 | BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode()); |
6449 | SDLoc dl(Op); |
6450 | EVT VT = Op.getValueType(); |
6451 | |
6452 | APInt SplatBits, SplatUndef; |
6453 | unsigned SplatBitSize; |
6454 | bool HasAnyUndefs; |
6455 | if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { |
6456 | if (SplatUndef.isAllOnesValue()) |
6457 | return DAG.getUNDEF(VT); |
6458 | |
6459 | if (SplatBitSize <= 64) { |
6460 | // Check if an immediate VMOV works. |
6461 | EVT VmovVT; |
6462 | SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(), |
6463 | SplatUndef.getZExtValue(), SplatBitSize, |
6464 | DAG, dl, VmovVT, VT.is128BitVector(), |
6465 | VMOVModImm); |
6466 | if (Val.getNode()) { |
6467 | SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val); |
6468 | return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); |
6469 | } |
6470 | |
6471 | // Try an immediate VMVN. |
6472 | uint64_t NegatedImm = (~SplatBits).getZExtValue(); |
6473 | Val = isNEONModifiedImm(NegatedImm, |
6474 | SplatUndef.getZExtValue(), SplatBitSize, |
6475 | DAG, dl, VmovVT, VT.is128BitVector(), |
6476 | VMVNModImm); |
6477 | if (Val.getNode()) { |
6478 | SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val); |
6479 | return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); |
6480 | } |
6481 | |
6482 | // Use vmov.f32 to materialize other v2f32 and v4f32 splats. |
6483 | if ((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) { |
6484 | int ImmVal = ARM_AM::getFP32Imm(SplatBits); |
6485 | if (ImmVal != -1) { |
6486 | SDValue Val = DAG.getTargetConstant(ImmVal, dl, MVT::i32); |
6487 | return DAG.getNode(ARMISD::VMOVFPIMM, dl, VT, Val); |
6488 | } |
6489 | } |
6490 | } |
6491 | } |
6492 | |
6493 | // Scan through the operands to see if only one value is used. |
6494 | // |
6495 | // As an optimisation, even if more than one value is used it may be more |
6496 | // profitable to splat with one value then change some lanes. |
6497 | // |
6498 | // Heuristically we decide to do this if the vector has a "dominant" value, |
6499 | // defined as splatted to more than half of the lanes. |
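     | // Illustrative example, not in the original source: <a, a, a, b> has the |
     | // dominant value a, so it can be lowered as VDUP(a) followed by a single |
     | // INSERT_VECTOR_ELT for the b lane, instead of four element inserts. |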
6500 | unsigned NumElts = VT.getVectorNumElements(); |
6501 | bool isOnlyLowElement = true; |
6502 | bool usesOnlyOneValue = true; |
6503 | bool hasDominantValue = false; |
6504 | bool isConstant = true; |
6505 | |
6506 | // Map of the number of times a particular SDValue appears in the |
6507 | // element list. |
6508 | DenseMap<SDValue, unsigned> ValueCounts; |
6509 | SDValue Value; |
6510 | for (unsigned i = 0; i < NumElts; ++i) { |
6511 | SDValue V = Op.getOperand(i); |
6512 | if (V.isUndef()) |
6513 | continue; |
6514 | if (i > 0) |
6515 | isOnlyLowElement = false; |
6516 | if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V)) |
6517 | isConstant = false; |
6518 | |
6519 | ValueCounts.insert(std::make_pair(V, 0)); |
6520 | unsigned &Count = ValueCounts[V]; |
6521 | |
6522 | // Is this value dominant? (takes up more than half of the lanes) |
6523 | if (++Count > (NumElts / 2)) { |
6524 | hasDominantValue = true; |
6525 | Value = V; |
6526 | } |
6527 | } |
6528 | if (ValueCounts.size() != 1) |
6529 | usesOnlyOneValue = false; |
6530 | if (!Value.getNode() && !ValueCounts.empty()) |
6531 | Value = ValueCounts.begin()->first; |
6532 | |
6533 | if (ValueCounts.empty()) |
6534 | return DAG.getUNDEF(VT); |
6535 | |
6536 | // Loads are better lowered with insert_vector_elt/ARMISD::BUILD_VECTOR. |
6537 | // Keep going if we are hitting this case. |
6538 | if (isOnlyLowElement && !ISD::isNormalLoad(Value.getNode())) |
6539 | return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value); |
6540 | |
6541 | unsigned EltSize = VT.getScalarSizeInBits(); |
6542 | |
6543 | // Use VDUP for non-constant splats. For f32 constant splats, reduce to |
6544 | // i32 and try again. |
6545 | if (hasDominantValue && EltSize <= 32) { |
6546 | if (!isConstant) { |
6547 | SDValue N; |
6548 | |
6549 | // If we are VDUPing a value that comes directly from a vector, that will |
6550 | // cause an unnecessary move to and from a GPR, where instead we could |
6551 | // just use VDUPLANE. We can only do this if the lane being extracted |
6552 | // is at a constant index, as the VDUP from lane instructions only have |
6553 | // constant-index forms. |
6554 | ConstantSDNode *constIndex; |
6555 | if (Value->getOpcode() == ISD::EXTRACT_VECTOR_ELT && |
6556 | (constIndex = dyn_cast<ConstantSDNode>(Value->getOperand(1)))) { |
6557 | // We need to create a new undef vector to use for the VDUPLANE if the |
6558 | // size of the vector from which we get the value is different than the |
6559 | // size of the vector that we need to create. We will insert the element |
6560 | // such that the register coalescer will remove unnecessary copies. |
6561 | if (VT != Value->getOperand(0).getValueType()) { |
6562 | unsigned index = constIndex->getAPIntValue().getLimitedValue() % |
6563 | VT.getVectorNumElements(); |
6564 | N = DAG.getNode(ARMISD::VDUPLANE, dl, VT, |
6565 | DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DAG.getUNDEF(VT), |
6566 | Value, DAG.getConstant(index, dl, MVT::i32)), |
6567 | DAG.getConstant(index, dl, MVT::i32)); |
6568 | } else |
6569 | N = DAG.getNode(ARMISD::VDUPLANE, dl, VT, |
6570 | Value->getOperand(0), Value->getOperand(1)); |
6571 | } else |
6572 | N = DAG.getNode(ARMISD::VDUP, dl, VT, Value); |
6573 | |
6574 | if (!usesOnlyOneValue) { |
6575 | // The dominant value was splatted as 'N', but we now have to insert |
6576 | // all differing elements. |
6577 | for (unsigned I = 0; I < NumElts; ++I) { |
6578 | if (Op.getOperand(I) == Value) |
6579 | continue; |
6580 | SmallVector<SDValue, 3> Ops; |
6581 | Ops.push_back(N); |
6582 | Ops.push_back(Op.getOperand(I)); |
6583 | Ops.push_back(DAG.getConstant(I, dl, MVT::i32)); |
6584 | N = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ops); |
6585 | } |
6586 | } |
6587 | return N; |
6588 | } |
6589 | if (VT.getVectorElementType().isFloatingPoint()) { |
6590 | SmallVector<SDValue, 8> Ops; |
6591 | for (unsigned i = 0; i < NumElts; ++i) |
6592 | Ops.push_back(DAG.getNode(ISD::BITCAST, dl, MVT::i32, |
6593 | Op.getOperand(i))); |
6594 | EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts); |
6595 | SDValue Val = DAG.getBuildVector(VecVT, dl, Ops); |
6596 | Val = LowerBUILD_VECTOR(Val, DAG, ST); |
6597 | if (Val.getNode()) |
6598 | return DAG.getNode(ISD::BITCAST, dl, VT, Val); |
6599 | } |
6600 | if (usesOnlyOneValue) { |
6601 | SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl); |
6602 | if (isConstant && Val.getNode()) |
6603 | return DAG.getNode(ARMISD::VDUP, dl, VT, Val); |
6604 | } |
6605 | } |
6606 | |
6607 | // If all elements are constants and the case above didn't get hit, fall back |
6608 | // to the default expansion, which will generate a load from the constant |
6609 | // pool. |
6610 | if (isConstant) |
6611 | return SDValue(); |
6612 | |
6613 | // Empirical tests suggest this is rarely worth it for vectors of length <= 2. |
6614 | if (NumElts >= 4) { |
6615 | SDValue shuffle = ReconstructShuffle(Op, DAG); |
6616 | if (shuffle != SDValue()) |
6617 | return shuffle; |
6618 | } |
6619 | |
6620 | if (VT.is128BitVector() && VT != MVT::v2f64 && VT != MVT::v4f32) { |
6621 | // If we haven't found an efficient lowering, try splitting a 128-bit vector |
6622 | // into two 64-bit vectors; we might discover a better way to lower it. |
6623 | SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElts); |
6624 | EVT ExtVT = VT.getVectorElementType(); |
6625 | EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElts / 2); |
6626 | SDValue Lower = |
6627 | DAG.getBuildVector(HVT, dl, makeArrayRef(&Ops[0], NumElts / 2)); |
6628 | if (Lower.getOpcode() == ISD::BUILD_VECTOR) |
6629 | Lower = LowerBUILD_VECTOR(Lower, DAG, ST); |
6630 | SDValue Upper = DAG.getBuildVector( |
6631 | HVT, dl, makeArrayRef(&Ops[NumElts / 2], NumElts / 2)); |
6632 | if (Upper.getOpcode() == ISD::BUILD_VECTOR) |
6633 | Upper = LowerBUILD_VECTOR(Upper, DAG, ST); |
6634 | if (Lower && Upper) |
6635 | return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lower, Upper); |
6636 | } |
6637 | |
6638 | // Vectors with 32- or 64-bit elements can be built by directly assigning |
6639 | // the subregisters. Lower it to an ARMISD::BUILD_VECTOR so the operands |
6640 | // will be legalized. |
6641 | if (EltSize >= 32) { |
6642 | // Do the expansion with floating-point types, since that is what the VFP |
6643 | // registers are defined to use, and since i64 is not legal. |
6644 | EVT EltVT = EVT::getFloatingPointVT(EltSize); |
6645 | EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); |
6646 | SmallVector<SDValue, 8> Ops; |
6647 | for (unsigned i = 0; i < NumElts; ++i) |
6648 | Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i))); |
6649 | SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops); |
6650 | return DAG.getNode(ISD::BITCAST, dl, VT, Val); |
6651 | } |
6652 | |
6653 | // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we |
6654 | // know the default expansion would otherwise fall back on something even |
6655 | // worse. For a vector with one or two non-undef values, that's |
6656 | // scalar_to_vector for the elements followed by a shuffle (provided the |
6657 | // shuffle is valid for the target) and materialization element by element |
6658 | // on the stack followed by a load for everything else. |
6659 | if (!isConstant && !usesOnlyOneValue) { |
6660 | SDValue Vec = DAG.getUNDEF(VT); |
6661 | for (unsigned i = 0 ; i < NumElts; ++i) { |
6662 | SDValue V = Op.getOperand(i); |
6663 | if (V.isUndef()) |
6664 | continue; |
6665 | SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i32); |
6666 | Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx); |
6667 | } |
6668 | return Vec; |
6669 | } |
6670 | |
6671 | return SDValue(); |
6672 | } |
6673 | |
6674 | // Gather data to see if the operation can be modelled as a |
6675 | // shuffle in combination with VEXTs. |
6676 | SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op, |
6677 | SelectionDAG &DAG) const { |
6678 | assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!"); |
6679 | SDLoc dl(Op); |
6680 | EVT VT = Op.getValueType(); |
6681 | unsigned NumElts = VT.getVectorNumElements(); |
6682 | |
6683 | struct ShuffleSourceInfo { |
6684 | SDValue Vec; |
6685 | unsigned MinElt = std::numeric_limits<unsigned>::max(); |
6686 | unsigned MaxElt = 0; |
6687 | |
6688 | // We may insert some combination of BITCASTs and VEXT nodes to force Vec to |
6689 | // be compatible with the shuffle we intend to construct. As a result |
6690 | // ShuffleVec will be some sliding window into the original Vec. |
6691 | SDValue ShuffleVec; |
6692 | |
6693 | // Code should guarantee that element i in Vec starts at element |
6694 | // "WindowBase + i * WindowScale" in ShuffleVec. |
6695 | int WindowBase = 0; |
6696 | int WindowScale = 1; |
6697 | |
6698 | ShuffleSourceInfo(SDValue Vec) : Vec(Vec), ShuffleVec(Vec) {} |
6699 | |
6700 | bool operator ==(SDValue OtherVec) { return Vec == OtherVec; } |
6701 | }; |
6702 | |
6703 | // First gather all vectors used as an immediate source for this BUILD_VECTOR |
6704 | // node. |
6705 | SmallVector<ShuffleSourceInfo, 2> Sources; |
6706 | for (unsigned i = 0; i < NumElts; ++i) { |
6707 | SDValue V = Op.getOperand(i); |
6708 | if (V.isUndef()) |
6709 | continue; |
6710 | else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) { |
6711 | // A shuffle can only come from building a vector from various |
6712 | // elements of other vectors. |
6713 | return SDValue(); |
6714 | } else if (!isa<ConstantSDNode>(V.getOperand(1))) { |
6715 | // Furthermore, shuffles require a constant mask, whereas extractelts |
6716 | // accept variable indices. |
6717 | return SDValue(); |
6718 | } |
6719 | |
6720 | // Add this element source to the list if it's not already there. |
6721 | SDValue SourceVec = V.getOperand(0); |
6722 | auto Source = llvm::find(Sources, SourceVec); |
6723 | if (Source == Sources.end()) |
6724 | Source = Sources.insert(Sources.end(), ShuffleSourceInfo(SourceVec)); |
6725 | |
6726 | // Update the minimum and maximum lane number seen. |
6727 | unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue(); |
6728 | Source->MinElt = std::min(Source->MinElt, EltNo); |
6729 | Source->MaxElt = std::max(Source->MaxElt, EltNo); |
6730 | } |
6731 | |
6732 | // Currently only do something sane when at most two source vectors |
6733 | // are involved. |
6734 | if (Sources.size() > 2) |
6735 | return SDValue(); |
6736 | |
6737 | // Find out the smallest element size among result and two sources, and use |
6738 | // it as element size to build the shuffle_vector. |
6739 | EVT SmallestEltTy = VT.getVectorElementType(); |
6740 | for (auto &Source : Sources) { |
6741 | EVT SrcEltTy = Source.Vec.getValueType().getVectorElementType(); |
6742 | if (SrcEltTy.bitsLT(SmallestEltTy)) |
6743 | SmallestEltTy = SrcEltTy; |
6744 | } |
6745 | unsigned ResMultiplier = |
6746 | VT.getScalarSizeInBits() / SmallestEltTy.getSizeInBits(); |
6747 | NumElts = VT.getSizeInBits() / SmallestEltTy.getSizeInBits(); |
6748 | EVT ShuffleVT = EVT::getVectorVT(*DAG.getContext(), SmallestEltTy, NumElts); |
6749 | |
6750 | // If the source vector is too wide or too narrow, we may nevertheless be able |
6751 | // to construct a compatible shuffle either by concatenating it with UNDEF or |
6752 | // extracting a suitable range of elements. |
6753 | for (auto &Src : Sources) { |
6754 | EVT SrcVT = Src.ShuffleVec.getValueType(); |
6755 | |
6756 | if (SrcVT.getSizeInBits() == VT.getSizeInBits()) |
6757 | continue; |
6758 | |
6759 | // This stage of the search produces a source with the same element type as |
6760 | // the original, but with a total width matching the BUILD_VECTOR output. |
6761 | EVT EltVT = SrcVT.getVectorElementType(); |
6762 | unsigned NumSrcElts = VT.getSizeInBits() / EltVT.getSizeInBits(); |
6763 | EVT DestVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumSrcElts); |
6764 | |
6765 | if (SrcVT.getSizeInBits() < VT.getSizeInBits()) { |
6766 | if (2 * SrcVT.getSizeInBits() != VT.getSizeInBits()) |
6767 | return SDValue(); |
6768 | // We can pad out the smaller vector for free, so if it's part of a |
6769 | // shuffle... |
6770 | Src.ShuffleVec = |
6771 | DAG.getNode(ISD::CONCAT_VECTORS, dl, DestVT, Src.ShuffleVec, |
6772 | DAG.getUNDEF(Src.ShuffleVec.getValueType())); |
6773 | continue; |
6774 | } |
6775 | |
6776 | if (SrcVT.getSizeInBits() != 2 * VT.getSizeInBits()) |
6777 | return SDValue(); |
6778 | |
6779 | if (Src.MaxElt - Src.MinElt >= NumSrcElts) { |
6780 | // Span too large for a VEXT to cope |
6781 | return SDValue(); |
6782 | } |
6783 | |
6784 | if (Src.MinElt >= NumSrcElts) { |
6785 | // The extraction can just take the second half |
6786 | Src.ShuffleVec = |
6787 | DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec, |
6788 | DAG.getConstant(NumSrcElts, dl, MVT::i32)); |
6789 | Src.WindowBase = -NumSrcElts; |
6790 | } else if (Src.MaxElt < NumSrcElts) { |
6791 | // The extraction can just take the first half |
6792 | Src.ShuffleVec = |
6793 | DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec, |
6794 | DAG.getConstant(0, dl, MVT::i32)); |
6795 | } else { |
6796 | // An actual VEXT is needed |
6797 | SDValue VEXTSrc1 = |
6798 | DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec, |
6799 | DAG.getConstant(0, dl, MVT::i32)); |
6800 | SDValue VEXTSrc2 = |
6801 | DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec, |
6802 | DAG.getConstant(NumSrcElts, dl, MVT::i32)); |
6803 | |
6804 | Src.ShuffleVec = DAG.getNode(ARMISD::VEXT, dl, DestVT, VEXTSrc1, |
6805 | VEXTSrc2, |
6806 | DAG.getConstant(Src.MinElt, dl, MVT::i32)); |
6807 | Src.WindowBase = -Src.MinElt; |
6808 | } |
6809 | } |
6810 | |
6811 | // Another possible incompatibility occurs from the vector element types. We |
6812 | // can fix this by bitcasting the source vectors to the same type we intend |
6813 | // for the shuffle. |
6814 | for (auto &Src : Sources) { |
6815 | EVT SrcEltTy = Src.ShuffleVec.getValueType().getVectorElementType(); |
6816 | if (SrcEltTy == SmallestEltTy) |
6817 | continue; |
6818 | assert(ShuffleVT.getVectorElementType() == SmallestEltTy); |
6819 | Src.ShuffleVec = DAG.getNode(ISD::BITCAST, dl, ShuffleVT, Src.ShuffleVec); |
6820 | Src.WindowScale = SrcEltTy.getSizeInBits() / SmallestEltTy.getSizeInBits(); |
6821 | Src.WindowBase *= Src.WindowScale; |
6822 | } |
6823 | |
6824 | // Final sanity check before we try to actually produce a shuffle. |
6825 | DEBUG( |
6826 | for (auto Src : Sources) |
6827 | assert(Src.ShuffleVec.getValueType() == ShuffleVT); |
6828 | ); |
6829 | |
6830 | // The stars all align, our next step is to produce the mask for the shuffle. |
6831 | SmallVector<int, 8> Mask(ShuffleVT.getVectorNumElements(), -1); |
6832 | int BitsPerShuffleLane = ShuffleVT.getScalarSizeInBits(); |
6833 | for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) { |
6834 | SDValue Entry = Op.getOperand(i); |
6835 | if (Entry.isUndef()) |
6836 | continue; |
6837 | |
6838 | auto Src = llvm::find(Sources, Entry.getOperand(0)); |
6839 | int EltNo = cast<ConstantSDNode>(Entry.getOperand(1))->getSExtValue(); |
6840 | |
6841 | // EXTRACT_VECTOR_ELT performs an implicit any_ext; BUILD_VECTOR an implicit |
6842 | // trunc. So only std::min(SrcBits, DestBits) actually get defined in this |
6843 | // segment. |
6844 | EVT OrigEltTy = Entry.getOperand(0).getValueType().getVectorElementType(); |
6845 | int BitsDefined = std::min(OrigEltTy.getSizeInBits(), |
6846 | VT.getScalarSizeInBits()); |
6847 | int LanesDefined = BitsDefined / BitsPerShuffleLane; |
6848 | |
6849 | // This source is expected to fill ResMultiplier lanes of the final shuffle, |
6850 | // starting at the appropriate offset. |
6851 | int *LaneMask = &Mask[i * ResMultiplier]; |
6852 | |
6853 | int ExtractBase = EltNo * Src->WindowScale + Src->WindowBase; |
6854 | ExtractBase += NumElts * (Src - Sources.begin()); |
6855 | for (int j = 0; j < LanesDefined; ++j) |
6856 | LaneMask[j] = ExtractBase + j; |
6857 | } |
6858 | |
6859 | // Final check before we try to produce nonsense... |
6860 | if (!isShuffleMaskLegal(Mask, ShuffleVT)) |
6861 | return SDValue(); |
6862 | |
6863 | // We can't handle more than two sources. This should have already |
6864 | // been checked before this point. |
6865 | assert(Sources.size() <= 2 && "Too many sources!"); |
6866 | |
6867 | SDValue ShuffleOps[] = { DAG.getUNDEF(ShuffleVT), DAG.getUNDEF(ShuffleVT) }; |
6868 | for (unsigned i = 0; i < Sources.size(); ++i) |
6869 | ShuffleOps[i] = Sources[i].ShuffleVec; |
6870 | |
6871 | SDValue Shuffle = DAG.getVectorShuffle(ShuffleVT, dl, ShuffleOps[0], |
6872 | ShuffleOps[1], Mask); |
6873 | return DAG.getNode(ISD::BITCAST, dl, VT, Shuffle); |
6874 | } |
6875 | |
6876 | /// isShuffleMaskLegal - Targets can use this to indicate that they only |
6877 | /// support *some* VECTOR_SHUFFLE operations, those with specific masks. |
6878 | /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values |
6879 | /// are assumed to be legal. |
6880 | bool ARMTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const { |
6881 | if (VT.getVectorNumElements() == 4 && |
6882 | (VT.is128BitVector() || VT.is64BitVector())) { |
6883 | unsigned PFIndexes[4]; |
6884 | for (unsigned i = 0; i != 4; ++i) { |
6885 | if (M[i] < 0) |
6886 | PFIndexes[i] = 8; |
6887 | else |
6888 | PFIndexes[i] = M[i]; |
6889 | } |
6890 | |
6891 | // Compute the index in the perfect shuffle table. |
6892 | unsigned PFTableIndex = |
6893 | PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; |
6894 | unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; |
6895 | unsigned Cost = (PFEntry >> 30); |
6896 | |
6897 | if (Cost <= 4) |
6898 | return true; |
6899 | } |
6900 | |
6901 | bool ReverseVEXT, isV_UNDEF; |
6902 | unsigned Imm, WhichResult; |
6903 | |
6904 | unsigned EltSize = VT.getScalarSizeInBits(); |
6905 | return (EltSize >= 32 || |
6906 | ShuffleVectorSDNode::isSplatMask(&M[0], VT) || |
6907 | isVREVMask(M, VT, 64) || |
6908 | isVREVMask(M, VT, 32) || |
6909 | isVREVMask(M, VT, 16) || |
6910 | isVEXTMask(M, VT, ReverseVEXT, Imm) || |
6911 | isVTBLMask(M, VT) || |
6912 | isNEONTwoResultShuffleMask(M, VT, WhichResult, isV_UNDEF) || |
6913 | ((VT == MVT::v8i16 || VT == MVT::v16i8) && isReverseMask(M, VT))); |
6914 | } |
6915 | |
6916 | /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit |
6917 | /// the specified operations to build the shuffle. |
6918 | static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, |
6919 | SDValue RHS, SelectionDAG &DAG, |
6920 | const SDLoc &dl) { |
6921 | unsigned OpNum = (PFEntry >> 26) & 0x0F; |
6922 | unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); |
6923 | unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); |
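     | // Layout note, inferred from the decoding above and the cost check in |
     | // isShuffleMaskLegal rather than stated in the original: a PFEntry packs |
     | // [31:30] cost, [29:26] opcode, [25:13] LHS index, [12:0] RHS index, where |
     | // each 13-bit index is a base-9 encoding of a four-lane mask (digit 8 |
     | // meaning undef). |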
6924 | |
6925 | enum { |
6926 | OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> |
6927 | OP_VREV, |
6928 | OP_VDUP0, |
6929 | OP_VDUP1, |
6930 | OP_VDUP2, |
6931 | OP_VDUP3, |
6932 | OP_VEXT1, |
6933 | OP_VEXT2, |
6934 | OP_VEXT3, |
6935 | OP_VUZPL, // VUZP, left result |
6936 | OP_VUZPR, // VUZP, right result |
6937 | OP_VZIPL, // VZIP, left result |
6938 | OP_VZIPR, // VZIP, right result |
6939 | OP_VTRNL, // VTRN, left result |
6940 | OP_VTRNR // VTRN, right result |
6941 | }; |
6942 | |
6943 | if (OpNum == OP_COPY) { |
6944 | if (LHSID == (1*9+2)*9+3) return LHS; |
6945 | assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); |
6946 | return RHS; |
6947 | } |
6948 | |
6949 | SDValue OpLHS, OpRHS; |
6950 | OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); |
6951 | OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); |
6952 | EVT VT = OpLHS.getValueType(); |
6953 | |
6954 | switch (OpNum) { |
6955 | default: llvm_unreachable("Unknown shuffle opcode!")::llvm::llvm_unreachable_internal("Unknown shuffle opcode!", "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/ARM/ARMISelLowering.cpp" , 6955); |
6956 | case OP_VREV: |
6957 | // VREV divides the vector in half and swaps within the half. |
6958 | if (VT.getVectorElementType() == MVT::i32 || |
6959 | VT.getVectorElementType() == MVT::f32) |
6960 | return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS); |
6961 | // vrev <4 x i16> -> VREV32 |
6962 | if (VT.getVectorElementType() == MVT::i16) |
6963 | return DAG.getNode(ARMISD::VREV32, dl, VT, OpLHS); |
6964 | // vrev <4 x i8> -> VREV16 |
6965 | assert(VT.getVectorElementType() == MVT::i8); |
6966 | return DAG.getNode(ARMISD::VREV16, dl, VT, OpLHS); |
6967 | case OP_VDUP0: |
6968 | case OP_VDUP1: |
6969 | case OP_VDUP2: |
6970 | case OP_VDUP3: |
6971 | return DAG.getNode(ARMISD::VDUPLANE, dl, VT, |
6972 | OpLHS, DAG.getConstant(OpNum-OP_VDUP0, dl, MVT::i32)); |
6973 | case OP_VEXT1: |
6974 | case OP_VEXT2: |
6975 | case OP_VEXT3: |
6976 | return DAG.getNode(ARMISD::VEXT, dl, VT, |
6977 | OpLHS, OpRHS, |
6978 | DAG.getConstant(OpNum - OP_VEXT1 + 1, dl, MVT::i32)); |
6979 | case OP_VUZPL: |
6980 | case OP_VUZPR: |
6981 | return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), |
6982 | OpLHS, OpRHS).getValue(OpNum-OP_VUZPL); |
6983 | case OP_VZIPL: |
6984 | case OP_VZIPR: |
6985 | return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), |
6986 | OpLHS, OpRHS).getValue(OpNum-OP_VZIPL); |
6987 | case OP_VTRNL: |
6988 | case OP_VTRNR: |
6989 | return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), |
6990 | OpLHS, OpRHS).getValue(OpNum-OP_VTRNL); |
6991 | } |
6992 | } |
6993 | |
6994 | static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op, |
6995 | ArrayRef<int> ShuffleMask, |
6996 | SelectionDAG &DAG) { |
6997 | // Check to see if we can use the VTBL instruction. |
6998 | SDValue V1 = Op.getOperand(0); |
6999 | SDValue V2 = Op.getOperand(1); |
7000 | SDLoc DL(Op); |
7001 | |
7002 | SmallVector<SDValue, 8> VTBLMask; |
7003 | // Translate each shuffle-mask element into an i32 constant for the table.
7004 | for (int MaskElt : ShuffleMask)
7005 | VTBLMask.push_back(DAG.getConstant(MaskElt, DL, MVT::i32));
7006 | |
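// Illustrative (hypothetical) case: a mask of <3,2,1,0,7,6,5,4> with V2
// undef lowers to VTBL1 V1, build_vector(3,2,1,0,7,6,5,4) -- a single
// 8-byte table lookup. If V2 is live, {V1,V2} forms a 16-byte table and
// VTBL2 is used instead.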
7007 | if (V2.getNode()->isUndef()) |
7008 | return DAG.getNode(ARMISD::VTBL1, DL, MVT::v8i8, V1, |
7009 | DAG.getBuildVector(MVT::v8i8, DL, VTBLMask)); |
7010 | |
7011 | return DAG.getNode(ARMISD::VTBL2, DL, MVT::v8i8, V1, V2, |
7012 | DAG.getBuildVector(MVT::v8i8, DL, VTBLMask)); |
7013 | } |
7014 | |
7015 | static SDValue LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(SDValue Op, |
7016 | SelectionDAG &DAG) { |
7017 | SDLoc DL(Op); |
7018 | SDValue OpLHS = Op.getOperand(0); |
7019 | EVT VT = OpLHS.getValueType(); |
7020 | |
7021 | assert((VT == MVT::v8i16 || VT == MVT::v16i8) &&
7022 | "Expect a v8i16/v16i8 type");
7023 | OpLHS = DAG.getNode(ARMISD::VREV64, DL, VT, OpLHS); |
7024 | // For a v16i8 type: after the VREV64, we have <7, ..., 0, 15, ..., 8>. The
7025 | // VEXT then swaps the two double words, giving the fully reversed vector
7026 | // <15, ..., 0>. The v8i16 case is similar.
7027 | unsigned ExtractNum = (VT == MVT::v16i8) ? 8 : 4; |
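// Concrete trace for v8i16 (an illustrative input <0..7> with mask
// <7,6,5,4,3,2,1,0>): VREV64 yields <3,2,1,0,7,6,5,4>, and the VEXT with
// ExtractNum == 4 rotates by four halfwords to give <7,6,5,4,3,2,1,0>.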
7028 | return DAG.getNode(ARMISD::VEXT, DL, VT, OpLHS, OpLHS, |
7029 | DAG.getConstant(ExtractNum, DL, MVT::i32)); |
7030 | } |
7031 | |
7032 | static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) { |
7033 | SDValue V1 = Op.getOperand(0); |
7034 | SDValue V2 = Op.getOperand(1); |
7035 | SDLoc dl(Op); |
7036 | EVT VT = Op.getValueType(); |
7037 | ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode()); |
7038 | |
7039 | // Convert shuffles that are directly supported on NEON to target-specific |
7040 | // DAG nodes, instead of keeping them as shuffles and matching them again |
7041 | // during code selection. This is more efficient and avoids the possibility |
7042 | // of inconsistencies between legalization and selection. |
7043 | // FIXME: floating-point vectors should be canonicalized to integer vectors |
7044 | // of the same type so that they get CSEd properly.
7045 | ArrayRef<int> ShuffleMask = SVN->getMask(); |
7046 | |
7047 | unsigned EltSize = VT.getScalarSizeInBits(); |
7048 | if (EltSize <= 32) { |
7049 | if (SVN->isSplat()) { |
7050 | int Lane = SVN->getSplatIndex(); |
7051 | // If this is an undef splat, use lane 0 so it can still become a plain VDUP.
7052 | if (Lane == -1) Lane = 0; |
7053 | |
7054 | // Test if V1 is a SCALAR_TO_VECTOR. |
7055 | if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) { |
7056 | return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0)); |
7057 | } |
7058 | // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR |
7059 | // (and probably will turn into a SCALAR_TO_VECTOR once legalization |
7060 | // reaches it). |
7061 | if (Lane == 0 && V1.getOpcode() == ISD::BUILD_VECTOR && |
7062 | !isa<ConstantSDNode>(V1.getOperand(0))) { |
7063 | bool IsScalarToVector = true; |
7064 | for (unsigned i = 1, e = V1.getNumOperands(); i != e; ++i) |
7065 | if (!V1.getOperand(i).isUndef()) { |
7066 | IsScalarToVector = false; |
7067 | break; |
7068 | } |
7069 | if (IsScalarToVector) |
7070 | return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0)); |
7071 | } |
7072 | return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1, |
7073 | DAG.getConstant(Lane, dl, MVT::i32)); |
7074 | } |
7075 | |
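// The check below matches extraction masks. For example (a hypothetical
// v4i16 mask <2,3,4,5>): lanes 2..3 come from V1 and 4..5 from V2, so it
// lowers to VEXT(V1, V2, #2); a mask like <6,7,0,1> sets ReverseVEXT and
// the operands are swapped first.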
7076 | bool ReverseVEXT; |
7077 | unsigned Imm; |
7078 | if (isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) { |
7079 | if (ReverseVEXT) |
7080 | std::swap(V1, V2); |
7081 | return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2, |
7082 | DAG.getConstant(Imm, dl, MVT::i32)); |
7083 | } |
7084 | |
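// The VREV checks below match lane reversals within fixed-size chunks:
// e.g. a v8i16 mask of <1,0,3,2,5,4,7,6> swaps halfwords inside each
// 32-bit word and becomes VREV32.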
7085 | if (isVREVMask(ShuffleMask, VT, 64)) |
7086 | return DAG.getNode(ARMISD::VREV64, dl, VT, V1); |
7087 | if (isVREVMask(ShuffleMask, VT, 32)) |
7088 | return DAG.getNode(ARMISD::VREV32, dl, VT, V1); |
7089 | if (isVREVMask(ShuffleMask, VT, 16)) |
7090 | return DAG.getNode(ARMISD::VREV16, dl, VT, V1); |
7091 | |
7092 | if (V2->isUndef() && isSingletonVEXTMask(ShuffleMask, VT, Imm)) { |
7093 | return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V1, |
7094 | DAG.getConstant(Imm, dl, MVT::i32)); |
7095 | } |
7096 | |
7097 | // Check for Neon shuffles that modify both input vectors in place. |
7098 | // If both results are used, i.e., if there are two shuffles with the same |
7099 | // source operands and with masks corresponding to both results of one of |
7100 | // these operations, DAG memoization will ensure that a single node is |
7101 | // used for both shuffles. |
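// Illustrative pair: over the same two v4i16 operands, masks <0,2,4,6>
// and <1,3,5,7> both map to one VUZP node; one shuffle takes getValue(0),
// the other getValue(1), and CSE shares the node.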
7102 | unsigned WhichResult; |
7103 | bool isV_UNDEF; |
7104 | if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask( |
7105 | ShuffleMask, VT, WhichResult, isV_UNDEF)) { |
7106 | if (isV_UNDEF) |
7107 | V2 = V1; |
7108 | return DAG.getNode(ShuffleOpc, dl, DAG.getVTList(VT, VT), V1, V2) |
7109 | .getValue(WhichResult); |
7110 | } |
7111 | |
7112 | // Also check for these shuffles through CONCAT_VECTORS: we canonicalize |
7113 | // shuffles that produce a result larger than their operands with: |
7114 | // shuffle(concat(v1, undef), concat(v2, undef)) |
7115 | // -> |
7116 | // shuffle(concat(v1, v2), undef) |
7117 | // because we can access quad vectors (see PerformVECTOR_SHUFFLECombine). |
7118 | // |
7119 | // This is useful in the general case, but there are special cases where |
7120 | // native shuffles produce larger results: the two-result ops. |
7121 | // |
7122 | // Look through the concat when lowering them: |
7123 | // shuffle(concat(v1, v2), undef) |
7124 | // -> |
7125 | // concat(VZIP(v1, v2):0, :1) |
7126 | // |
7127 | if (V1->getOpcode() == ISD::CONCAT_VECTORS && V2->isUndef()) { |
7128 | SDValue SubV1 = V1->getOperand(0); |
7129 | SDValue SubV2 = V1->getOperand(1); |
7130 | EVT SubVT = SubV1.getValueType(); |
7131 | |
7132 | // We expect these to have been canonicalized to -1. |
7133 | assert(llvm::all_of(ShuffleMask, [&](int i) {
7134 | return i < (int)VT.getVectorNumElements();
7135 | }) && "Unexpected shuffle index into UNDEF operand!");
7136 | |
7137 | if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask( |
7138 | ShuffleMask, SubVT, WhichResult, isV_UNDEF)) { |
7139 | if (isV_UNDEF) |
7140 | SubV2 = SubV1; |
7141 | assert((WhichResult == 0) &&
7142 | "In-place shuffle of concat can only have one result!");
7143 | SDValue Res = DAG.getNode(ShuffleOpc, dl, DAG.getVTList(SubVT, SubVT), |
7144 | SubV1, SubV2); |
7145 | return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Res.getValue(0), |
7146 | Res.getValue(1)); |
7147 | } |
7148 | } |
7149 | } |
7150 | |
7151 | // If the shuffle is not directly supported and it has 4 elements, use |
7152 | // the PerfectShuffle-generated table to synthesize it from other shuffles. |
7153 | unsigned NumElts = VT.getVectorNumElements(); |
7154 | if (NumElts == 4) { |
7155 | unsigned PFIndexes[4]; |
7156 | for (unsigned i = 0; i != 4; ++i) { |
7157 | if (ShuffleMask[i] < 0) |
7158 | PFIndexes[i] = 8; |
7159 | else |
7160 | PFIndexes[i] = ShuffleMask[i]; |
7161 | } |
7162 | |
7163 | // Compute the index in the perfect shuffle table. |
7164 | unsigned PFTableIndex = |
7165 | PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; |
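// Worked example (hypothetical mask <0,4,1,5>): PFTableIndex =
// 0*729 + 4*81 + 1*9 + 5 = 338. The two high bits of each table entry
// encode the cost in operations, checked below before synthesizing.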
7166 | unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; |
7167 | unsigned Cost = (PFEntry >> 30); |
7168 | |
7169 | if (Cost <= 4) |
7170 | return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); |
7171 | } |
7172 | |
7173 | // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs. |
7174 | if (EltSize >= 32) { |
7175 | // Do the expansion with floating-point types, since that is what the VFP |
7176 | // registers are defined to use, and since i64 is not legal. |
7177 | EVT EltVT = EVT::getFloatingPointVT(EltSize); |
7178 | EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); |
7179 | V1 = DAG.getNode(ISD::BITCAST, dl, VecVT, V1); |
7180 | V2 = DAG.getNode(ISD::BITCAST, dl, VecVT, V2); |
7181 | SmallVector<SDValue, 8> Ops; |
7182 | for (unsigned i = 0; i < NumElts; ++i) { |
7183 | if (ShuffleMask[i] < 0) |
7184 | Ops.push_back(DAG.getUNDEF(EltVT)); |
7185 | else |
7186 | Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, |
7187 | ShuffleMask[i] < (int)NumElts ? V1 : V2, |
7188 | DAG.getConstant(ShuffleMask[i] & (NumElts-1), |
7189 | dl, MVT::i32))); |
7190 | } |
7191 | SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops); |
7192 | return DAG.getNode(ISD::BITCAST, dl, VT, Val); |
7193 | } |
7194 | |
7195 | if ((VT == MVT::v8i16 || VT == MVT::v16i8) && isReverseMask(ShuffleMask, VT)) |
7196 | return LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(Op, DAG); |
7197 | |
7198 | if (VT == MVT::v8i8) |
7199 | if (SDValue NewOp = LowerVECTOR_SHUFFLEv8i8(Op, ShuffleMask, DAG)) |
7200 | return NewOp; |
7201 | |
7202 | return SDValue(); |
7203 | } |
7204 | |
7205 | static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { |
7206 | // INSERT_VECTOR_ELT is legal only for immediate indexes. |
7207 | SDValue Lane = Op.getOperand(2); |
7208 | if (!isa<ConstantSDNode>(Lane)) |
7209 | return SDValue(); |
7210 | |
7211 | return Op; |
7212 | } |
7213 | |
7214 | static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { |
7215 | // EXTRACT_VECTOR_ELT is legal only for immediate indexes. |
7216 | SDValue Lane = Op.getOperand(1); |
7217 | if (!isa<ConstantSDNode>(Lane)) |
7218 | return SDValue(); |
7219 | |
7220 | SDValue Vec = Op.getOperand(0); |
7221 | if (Op.getValueType() == MVT::i32 && Vec.getScalarValueSizeInBits() < 32) { |
7222 | SDLoc dl(Op); |
7223 | return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane); |
7224 | } |
7225 | |
7226 | return Op; |
7227 | } |
7228 | |
7229 | static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { |
7230 | // The only time a CONCAT_VECTORS operation can have legal types is when |
7231 | // two 64-bit vectors are concatenated to a 128-bit vector. |
7232 | assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 &&
7233 | "unexpected CONCAT_VECTORS");
7234 | SDLoc dl(Op); |
7235 | SDValue Val = DAG.getUNDEF(MVT::v2f64); |
7236 | SDValue Op0 = Op.getOperand(0); |
7237 | SDValue Op1 = Op.getOperand(1); |
7238 | if (!Op0.isUndef()) |
7239 | Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, |
7240 | DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0), |
7241 | DAG.getIntPtrConstant(0, dl)); |
7242 | if (!Op1.isUndef()) |
7243 | Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, |
7244 | DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1), |
7245 | DAG.getIntPtrConstant(1, dl)); |
7246 | return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Val); |
7247 | } |
7248 | |
7249 | /// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each |
7250 | /// element has been zero/sign-extended, depending on the isSigned parameter, |
7251 | /// from an integer type half its size. |
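/// For example (an illustrative case), a v4i32 BUILD_VECTOR whose constants
/// all fit in 16 bits (signed or unsigned, per isSigned) qualifies: VMULL
/// can treat it as a widened v4i16 vector.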
7252 | static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG, |
7253 | bool isSigned) { |
7254 | // A v2i64 BUILD_VECTOR will have been legalized to a BITCAST from v4i32. |
7255 | EVT VT = N->getValueType(0); |
7256 | if (VT == MVT::v2i64 && N->getOpcode() == ISD::BITCAST) { |
7257 | SDNode *BVN = N->getOperand(0).getNode(); |
7258 | if (BVN->getValueType(0) != MVT::v4i32 || |
7259 | BVN->getOpcode() != ISD::BUILD_VECTOR) |
7260 | return false; |
7261 | unsigned LoElt = DAG.getDataLayout().isBigEndian() ? 1 : 0; |
7262 | unsigned HiElt = 1 - LoElt; |
7263 | ConstantSDNode *Lo0 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt)); |
7264 | ConstantSDNode *Hi0 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt)); |
7265 | ConstantSDNode *Lo1 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt+2)); |
7266 | ConstantSDNode *Hi1 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt+2)); |
7267 | if (!Lo0 || !Hi0 || !Lo1 || !Hi1) |
7268 | return false; |
7269 | if (isSigned) { |
7270 | if (Hi0->getSExtValue() == Lo0->getSExtValue() >> 32 && |
7271 | Hi1->getSExtValue() == Lo1->getSExtValue() >> 32) |
7272 | return true; |
7273 | } else { |
7274 | if (Hi0->isNullValue() && Hi1->isNullValue()) |
7275 | return true; |
7276 | } |
7277 | return false; |
7278 | } |
7279 | |
7280 | if (N->getOpcode() != ISD::BUILD_VECTOR) |
7281 | return false; |
7282 | |
7283 | for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { |
7284 | SDNode *Elt = N->getOperand(i).getNode(); |
7285 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) { |
7286 | unsigned EltSize = VT.getScalarSizeInBits(); |
7287 | unsigned HalfSize = EltSize / 2; |
7288 | if (isSigned) { |
7289 | if (!isIntN(HalfSize, C->getSExtValue())) |
7290 | return false; |
7291 | } else { |
7292 | if (!isUIntN(HalfSize, C->getZExtValue())) |
7293 | return false; |
7294 | } |
7295 | continue; |
7296 | } |
7297 | return false; |
7298 | } |
7299 | |
7300 | return true; |
7301 | } |
7302 | |
7303 | /// isSignExtended - Check if a node is a vector value that is sign-extended |
7304 | /// or a constant BUILD_VECTOR with sign-extended elements. |
7305 | static bool isSignExtended(SDNode *N, SelectionDAG &DAG) { |
7306 | if (N->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N)) |
7307 | return true; |
7308 | if (isExtendedBUILD_VECTOR(N, DAG, true)) |
7309 | return true; |
7310 | return false; |
7311 | } |
7312 | |
7313 | /// isZeroExtended - Check if a node is a vector value that is zero-extended |
7314 | /// or a constant BUILD_VECTOR with zero-extended elements. |
7315 | static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) { |
7316 | if (N->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N)) |
7317 | return true; |
7318 | if (isExtendedBUILD_VECTOR(N, DAG, false)) |
7319 | return true; |
7320 | return false; |
7321 | } |
7322 | |
7323 | static EVT getExtensionTo64Bits(const EVT &OrigVT) { |
7324 | if (OrigVT.getSizeInBits() >= 64) |
7325 | return OrigVT; |
7326 | |
7327 | assert(OrigVT.isSimple() && "Expecting a simple value type");
7328 | |
7329 | MVT::SimpleValueType OrigSimpleTy = OrigVT.getSimpleVT().SimpleTy; |
7330 | switch (OrigSimpleTy) { |
7331 | default: llvm_unreachable("Unexpected Vector Type");
7332 | case MVT::v2i8: |
7333 | case MVT::v2i16: |
7334 | return MVT::v2i32; |
7335 | case MVT::v4i8: |
7336 | return MVT::v4i16; |
7337 | } |
7338 | } |
7339 | |
7340 | /// AddRequiredExtensionForVMULL - Add a sign/zero extension to extend the total |
7341 | /// value size to 64 bits. We need a 64-bit D register as an operand to VMULL. |
7342 | /// We insert the required extension here to get the vector to fill a D register. |
7343 | static SDValue AddRequiredExtensionForVMULL(SDValue N, SelectionDAG &DAG, |
7344 | const EVT &OrigTy, |
7345 | const EVT &ExtTy, |
7346 | unsigned ExtOpcode) { |
7347 | // The vector originally had a size of OrigTy. It was then extended to ExtTy. |
7348 | // We expect the ExtTy to be 128-bits total. If the OrigTy is less than |
7349 | // 64-bits we need to insert a new extension so that it will be 64-bits. |
7350 | assert(ExtTy.is128BitVector() && "Unexpected extension size");
7351 | if (OrigTy.getSizeInBits() >= 64) |
7352 | return N; |
7353 | |
7354 | // Must extend size to at least 64 bits to be used as an operand for VMULL. |
7355 | EVT NewVT = getExtensionTo64Bits(OrigTy); |
7356 | |
7357 | return DAG.getNode(ExtOpcode, SDLoc(N), NewVT, N); |
7358 | } |
7359 | |
7360 | /// SkipLoadExtensionForVMULL - return a load of the original vector size that |
7361 | /// does not do any sign/zero extension. If the original vector is less |
7362 | /// than 64 bits, an appropriate extension will be added after the load to |
7363 | /// reach a total size of 64 bits. We have to add the extension separately |
7364 | /// because ARM does not have a sign/zero extending load for vectors. |
7365 | static SDValue SkipLoadExtensionForVMULL(LoadSDNode *LD, SelectionDAG& DAG) { |
7366 | EVT ExtendedTy = getExtensionTo64Bits(LD->getMemoryVT()); |
7367 | |
7368 | // The load already has the right type. |
7369 | if (ExtendedTy == LD->getMemoryVT()) |
7370 | return DAG.getLoad(LD->getMemoryVT(), SDLoc(LD), LD->getChain(), |
7371 | LD->getBasePtr(), LD->getPointerInfo(), |
7372 | LD->getAlignment(), LD->getMemOperand()->getFlags()); |
7373 | |
7374 | // We need to create a zextload/sextload. We cannot just create a load |
7375 | // followed by a zext/sext node because LowerMUL is also run during normal
7376 | // operation legalization where we can't create illegal types. |
7377 | return DAG.getExtLoad(LD->getExtensionType(), SDLoc(LD), ExtendedTy, |
7378 | LD->getChain(), LD->getBasePtr(), LD->getPointerInfo(), |
7379 | LD->getMemoryVT(), LD->getAlignment(), |
7380 | LD->getMemOperand()->getFlags()); |
7381 | } |
7382 | |
7383 | /// SkipExtensionForVMULL - For a node that is a SIGN_EXTEND, ZERO_EXTEND, |
7384 | /// extending load, or BUILD_VECTOR with extended elements, return the |
7385 | /// unextended value. The unextended vector should be 64 bits so that it can |
7386 | /// be used as an operand to a VMULL instruction. If the original vector size |
7387 | /// before extension is less than 64 bits, we add an extension to resize
7388 | /// the vector to 64 bits. |
7389 | static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG) { |
7390 | if (N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND) |
7391 | return AddRequiredExtensionForVMULL(N->getOperand(0), DAG, |
7392 | N->getOperand(0)->getValueType(0), |
7393 | N->getValueType(0), |
7394 | N->getOpcode()); |
7395 | |
7396 | if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { |
7397 | assert((ISD::isSEXTLoad(LD) || ISD::isZEXTLoad(LD)) &&
7398 | "Expected extending load");
7399 | |
7400 | SDValue newLoad = SkipLoadExtensionForVMULL(LD, DAG); |
7401 | DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), newLoad.getValue(1)); |
7402 | unsigned Opcode = ISD::isSEXTLoad(LD) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; |
7403 | SDValue extLoad = |
7404 | DAG.getNode(Opcode, SDLoc(newLoad), LD->getValueType(0), newLoad); |
7405 | DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 0), extLoad); |
7406 | |
7407 | return newLoad; |
7408 | } |
7409 | |
7410 | // Otherwise, the value must be a BUILD_VECTOR. For v2i64, it will |
7411 | // have been legalized as a BITCAST from v4i32. |
7412 | if (N->getOpcode() == ISD::BITCAST) { |
7413 | SDNode *BVN = N->getOperand(0).getNode(); |
7414 | assert(BVN->getOpcode() == ISD::BUILD_VECTOR &&
7415 | BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR");
7416 | unsigned LowElt = DAG.getDataLayout().isBigEndian() ? 1 : 0; |
7417 | return DAG.getBuildVector( |
7418 | MVT::v2i32, SDLoc(N), |
7419 | {BVN->getOperand(LowElt), BVN->getOperand(LowElt + 2)}); |
7420 | } |
7421 | // Construct a new BUILD_VECTOR with elements truncated to half the size. |
7422 | assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR");
7423 | EVT VT = N->getValueType(0); |
7424 | unsigned EltSize = VT.getScalarSizeInBits() / 2; |
7425 | unsigned NumElts = VT.getVectorNumElements(); |
7426 | MVT TruncVT = MVT::getIntegerVT(EltSize); |
7427 | SmallVector<SDValue, 8> Ops; |
7428 | SDLoc dl(N); |
7429 | for (unsigned i = 0; i != NumElts; ++i) { |
7430 | ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i)); |
7431 | const APInt &CInt = C->getAPIntValue(); |
7432 | // Element types smaller than 32 bits are not legal, so use i32 elements. |
7433 | // The values are implicitly truncated so sext vs. zext doesn't matter. |
7434 | Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), dl, MVT::i32)); |
7435 | } |
7436 | return DAG.getBuildVector(MVT::getVectorVT(TruncVT, NumElts), dl, Ops); |
7437 | } |
7438 | |
7439 | static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) { |
7440 | unsigned Opcode = N->getOpcode(); |
7441 | if (Opcode == ISD::ADD || Opcode == ISD::SUB) { |
7442 | SDNode *N0 = N->getOperand(0).getNode(); |
7443 | SDNode *N1 = N->getOperand(1).getNode(); |
7444 | return N0->hasOneUse() && N1->hasOneUse() && |
7445 | isSignExtended(N0, DAG) && isSignExtended(N1, DAG); |
7446 | } |
7447 | return false; |
7448 | } |
7449 | |
7450 | static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) { |
7451 | unsigned Opcode = N->getOpcode(); |
7452 | if (Opcode == ISD::ADD || Opcode == ISD::SUB) { |
7453 | SDNode *N0 = N->getOperand(0).getNode(); |
7454 | SDNode *N1 = N->getOperand(1).getNode(); |
7455 | return N0->hasOneUse() && N1->hasOneUse() && |
7456 | isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG); |
7457 | } |
7458 | return false; |
7459 | } |
7460 | |
7461 | static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) { |
7462 | // Multiplications are only custom-lowered for 128-bit vectors so that |
7463 | // VMULL can be detected. Otherwise v2i64 multiplications are not legal. |
7464 | EVT VT = Op.getValueType(); |
7465 | assert(VT.is128BitVector() && VT.isInteger() &&
7466 | "unexpected type for custom-lowering ISD::MUL");
7467 | SDNode *N0 = Op.getOperand(0).getNode(); |
7468 | SDNode *N1 = Op.getOperand(1).getNode(); |
7469 | unsigned NewOpc = 0; |
7470 | bool isMLA = false; |
7471 | bool isN0SExt = isSignExtended(N0, DAG); |
7472 | bool isN1SExt = isSignExtended(N1, DAG); |
7473 | if (isN0SExt && isN1SExt) |
7474 | NewOpc = ARMISD::VMULLs; |
7475 | else { |
7476 | bool isN0ZExt = isZeroExtended(N0, DAG); |
7477 | bool isN1ZExt = isZeroExtended(N1, DAG); |
7478 | if (isN0ZExt && isN1ZExt) |
7479 | NewOpc = ARMISD::VMULLu; |
7480 | else if (isN1SExt || isN1ZExt) { |
7481 | // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these |
7482 | // into (s/zext A * s/zext C) + (s/zext B * s/zext C) |
7483 | if (isN1SExt && isAddSubSExt(N0, DAG)) { |
7484 | NewOpc = ARMISD::VMULLs; |
7485 | isMLA = true; |
7486 | } else if (isN1ZExt && isAddSubZExt(N0, DAG)) { |
7487 | NewOpc = ARMISD::VMULLu; |
7488 | isMLA = true; |
7489 | } else if (isN0ZExt && isAddSubZExt(N1, DAG)) { |
7490 | std::swap(N0, N1); |
7491 | NewOpc = ARMISD::VMULLu; |
7492 | isMLA = true; |
7493 | } |
7494 | } |
7495 | |
7496 | if (!NewOpc) { |
7497 | if (VT == MVT::v2i64) |
7498 | // Fall through to expand this. It is not legal. |
7499 | return SDValue(); |
7500 | else |
7501 | // Other vector multiplications are legal. |
7502 | return Op; |
7503 | } |
7504 | } |
7505 | |
7506 | // Legalize to a VMULL instruction. |
7507 | SDLoc DL(Op); |
7508 | SDValue Op0; |
7509 | SDValue Op1 = SkipExtensionForVMULL(N1, DAG); |
7510 | if (!isMLA) { |
7511 | Op0 = SkipExtensionForVMULL(N0, DAG); |
7512 | assert(Op0.getValueType().is64BitVector() &&
7513 | Op1.getValueType().is64BitVector() &&
7514 | "unexpected types for extended operands to VMULL");
7515 | return DAG.getNode(NewOpc, DL, VT, Op0, Op1); |
7516 | } |
7517 | |
7518 | // Optimizing (zext A + zext B) * C, to (VMULL A, C) + (VMULL B, C) during |
7519 | // isel lowering to take advantage of no-stall back to back vmul + vmla. |
7520 | // vmull q0, d4, d6 |
7521 | // vmlal q0, d5, d6 |
7522 | // is faster than |
7523 | // vaddl q0, d4, d5 |
7524 | // vmovl q1, d6 |
7525 | // vmul q0, q0, q1 |
7526 | SDValue N00 = SkipExtensionForVMULL(N0->getOperand(0).getNode(), DAG); |
7527 | SDValue N01 = SkipExtensionForVMULL(N0->getOperand(1).getNode(), DAG); |
7528 | EVT Op1VT = Op1.getValueType(); |
7529 | return DAG.getNode(N0->getOpcode(), DL, VT, |
7530 | DAG.getNode(NewOpc, DL, VT, |
7531 | DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1), |
7532 | DAG.getNode(NewOpc, DL, VT, |
7533 | DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1)); |
7534 | } |
7535 | |
7536 | static SDValue LowerSDIV_v4i8(SDValue X, SDValue Y, const SDLoc &dl, |
7537 | SelectionDAG &DAG) { |
7538 | // TODO: Should this propagate fast-math-flags? |
7539 | |
7540 | // Convert to float |
7541 | // float4 xf = vcvt_f32_s32(vmovl_s16(a.lo)); |
7542 | // float4 yf = vcvt_f32_s32(vmovl_s16(b.lo)); |
7543 | X = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, X); |
7544 | Y = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Y); |
7545 | X = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, X); |
7546 | Y = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, Y); |
7547 | // Get reciprocal estimate. |
7548 | // float4 recip = vrecpeq_f32(yf); |
7549 | Y = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, |
7550 | DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32), |
7551 | Y); |
7552 | // Because char has a smaller range than uchar, we can actually get away |
7553 | // without any newton steps. This requires that we use a weird bias |
7554 | // of 0xb000, however (again, this has been exhaustively tested). |
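// (Adding 0xb000 to the raw bit pattern nudges a positive float up by
// 0xb000 representable values before the truncating convert, compensating
// for the reciprocal estimate reading slightly low.)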
7555 | // float4 result = as_float4(as_int4(xf*recip) + 0xb000); |
7556 | X = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, X, Y); |
7557 | X = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, X); |
7558 | Y = DAG.getConstant(0xb000, dl, MVT::v4i32); |
7559 | X = DAG.getNode(ISD::ADD, dl, MVT::v4i32, X, Y); |
7560 | X = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, X); |
7561 | // Convert back to short. |
7562 | X = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, X); |
7563 | X = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, X); |
7564 | return X; |
7565 | } |
7566 | |
7567 | static SDValue LowerSDIV_v4i16(SDValue N0, SDValue N1, const SDLoc &dl, |
7568 | SelectionDAG &DAG) { |
7569 | // TODO: Should this propagate fast-math-flags? |
7570 | |
7571 | SDValue N2; |
7572 | // Convert to float. |
7573 | // float4 yf = vcvt_f32_s32(vmovl_s16(y)); |
7574 | // float4 xf = vcvt_f32_s32(vmovl_s16(x)); |
7575 | N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N0); |
7576 | N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N1); |
7577 | N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0); |
7578 | N1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1); |
7579 | |
7580 | // Use reciprocal estimate and one refinement step. |
7581 | // float4 recip = vrecpeq_f32(yf); |
7582 | // recip *= vrecpsq_f32(yf, recip); |
7583 | N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, |
7584 | DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32), |
7585 | N1); |
7586 | N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, |
7587 | DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32), |
7588 | N1, N2); |
7589 | N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2); |
7590 | // Because short has a smaller range than ushort, we can actually get away |
7591 | // with only a single Newton step. This requires that we use a weird bias
7592 | // of 0x89, however (again, this has been exhaustively tested).
7593 | // float4 result = as_float4(as_int4(xf*recip) + 0x89); |
7594 | N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2); |
7595 | N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0); |
7596 | N1 = DAG.getConstant(0x89, dl, MVT::v4i32); |
7597 | N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1); |
7598 | N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0); |
7599 | // Convert back to integer and return. |
7600 | // return vmovn_s32(vcvt_s32_f32(result)); |
7601 | N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0); |
7602 | N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0); |
7603 | return N0; |
7604 | } |
7605 | |
7606 | static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG) { |
7607 | EVT VT = Op.getValueType(); |
7608 | assert((VT == MVT::v4i16 || VT == MVT::v8i8) &&
7609 | "unexpected type for custom-lowering ISD::SDIV");
7610 | |
7611 | SDLoc dl(Op); |
7612 | SDValue N0 = Op.getOperand(0); |
7613 | SDValue N1 = Op.getOperand(1); |
7614 | SDValue N2, N3; |
7615 | |
7616 | if (VT == MVT::v8i8) { |
7617 | N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N0); |
7618 | N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N1); |
7619 | |
7620 | N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, |
7621 | DAG.getIntPtrConstant(4, dl)); |
7622 | N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, |
7623 | DAG.getIntPtrConstant(4, dl)); |
7624 | N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, |
7625 | DAG.getIntPtrConstant(0, dl)); |
7626 | N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, |
7627 | DAG.getIntPtrConstant(0, dl)); |
7628 | |
7629 | N0 = LowerSDIV_v4i8(N0, N1, dl, DAG); // v4i16 |
7630 | N2 = LowerSDIV_v4i8(N2, N3, dl, DAG); // v4i16 |
7631 | |
7632 | N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2); |
7633 | N0 = LowerCONCAT_VECTORS(N0, DAG); |
7634 | |
7635 | N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v8i8, N0); |
7636 | return N0; |
7637 | } |
7638 | return LowerSDIV_v4i16(N0, N1, dl, DAG); |
7639 | } |
7640 | |
7641 | static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG) { |
7642 | // TODO: Should this propagate fast-math-flags? |
7643 | EVT VT = Op.getValueType(); |
7644 | assert((VT == MVT::v4i16 || VT == MVT::v8i8) &&
7645 | "unexpected type for custom-lowering ISD::UDIV");
7646 | |
7647 | SDLoc dl(Op); |
7648 | SDValue N0 = Op.getOperand(0); |
7649 | SDValue N1 = Op.getOperand(1); |
7650 | SDValue N2, N3; |
7651 | |
7652 | if (VT == MVT::v8i8) { |
7653 | N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N0); |
7654 | N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N1); |
7655 | |
7656 | N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, |
7657 | DAG.getIntPtrConstant(4, dl)); |
7658 | N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, |
7659 | DAG.getIntPtrConstant(4, dl)); |
7660 | N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, |
7661 | DAG.getIntPtrConstant(0, dl)); |
7662 | N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, |
7663 | DAG.getIntPtrConstant(0, dl)); |
7664 | |
7665 | N0 = LowerSDIV_v4i16(N0, N1, dl, DAG); // v4i16 |
7666 | N2 = LowerSDIV_v4i16(N2, N3, dl, DAG); // v4i16 |
7667 | |
7668 | N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2); |
7669 | N0 = LowerCONCAT_VECTORS(N0, DAG); |
7670 | |
7671 | N0 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v8i8, |
7672 | DAG.getConstant(Intrinsic::arm_neon_vqmovnsu, dl, |
7673 | MVT::i32), |
7674 | N0); |
7675 | return N0; |
7676 | } |
7677 | |
7678 | // v4i16 udiv ... Convert to float.
7679 | // float4 yf = vcvt_f32_s32(vmovl_u16(y)); |
7680 | // float4 xf = vcvt_f32_s32(vmovl_u16(x)); |
7681 | N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N0); |
7682 | N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N1); |
7683 | N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0); |
7684 | SDValue BN1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1); |
7685 | |
7686 | // Use reciprocal estimate and two refinement steps. |
7687 | // float4 recip = vrecpeq_f32(yf); |
7688 | // recip *= vrecpsq_f32(yf, recip); |
7689 | // recip *= vrecpsq_f32(yf, recip); |
7690 | N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, |
7691 | DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32), |
7692 | BN1); |
7693 | N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, |
7694 | DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32), |
7695 | BN1, N2); |
7696 | N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2); |
7697 | N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, |
7698 | DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32), |
7699 | BN1, N2); |
7700 | N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2); |
7701 | // Simply multiplying by the reciprocal estimate can leave us a few ulps |
7702 | // too low, so we add 2 ulps (exhaustive testing shows that this is enough, |
7703 | // and that it will never cause us to return an answer too large). |
7704 | // float4 result = as_float4(as_int4(xf*recip) + 2); |
7705 | N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2); |
7706 | N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0); |
7707 | N1 = DAG.getConstant(2, dl, MVT::v4i32); |
7708 | N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1); |
7709 | N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0); |
7710 | // Convert back to integer and return. |
7711 | // return vmovn_u32(vcvt_s32_f32(result)); |
7712 | N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0); |
7713 | N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0); |
7714 | return N0; |
7715 | } |
7716 | |
7717 | static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) { |
7718 | EVT VT = Op.getNode()->getValueType(0); |
7719 | SDVTList VTs = DAG.getVTList(VT, MVT::i32); |
7720 | |
7721 | unsigned Opc; |
7722 | bool ExtraOp = false; |
7723 | switch (Op.getOpcode()) { |
7724 | default: llvm_unreachable("Invalid code");
7725 | case ISD::ADDC: Opc = ARMISD::ADDC; break; |
7726 | case ISD::ADDE: Opc = ARMISD::ADDE; ExtraOp = true; break; |
7727 | case ISD::SUBC: Opc = ARMISD::SUBC; break; |
7728 | case ISD::SUBE: Opc = ARMISD::SUBE; ExtraOp = true; break; |
7729 | } |
7730 | |
7731 | if (!ExtraOp) |
7732 | return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0), |
7733 | Op.getOperand(1)); |
7734 | return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0), |
7735 | Op.getOperand(1), Op.getOperand(2)); |
7736 | } |
7737 | |
7738 | static SDValue LowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG) { |
7739 | SDNode *N = Op.getNode(); |
7740 | EVT VT = N->getValueType(0); |
7741 | SDVTList VTs = DAG.getVTList(VT, MVT::i32); |
7742 | |
7743 | SDValue Carry = Op.getOperand(2); |
7744 | |
7745 | SDLoc DL(Op); |
7746 | |
7747 | SDValue Result; |
7748 | if (Op.getOpcode() == ISD::ADDCARRY) { |
7749 | // This converts the boolean value carry into the carry flag. |
7750 | Carry = ConvertBooleanCarryToCarryFlag(Carry, DAG); |
7751 | |
7752 | // Do the addition proper using the carry flag we wanted. |
7753 | Result = DAG.getNode(ARMISD::ADDE, DL, VTs, Op.getOperand(0), |
7754 | Op.getOperand(1), Carry.getValue(1)); |
7755 | |
7756 | // Now convert the carry flag into a boolean value. |
7757 | Carry = ConvertCarryFlagToBooleanCarry(Result.getValue(1), VT, DAG); |
7758 | } else { |
7759 | // ARMISD::SUBE expects a carry, not a borrow as ISD::SUBCARRY provides,
7760 | // so we have to invert the carry first.
7761 | Carry = DAG.getNode(ISD::SUB, DL, MVT::i32, |
7762 | DAG.getConstant(1, DL, MVT::i32), Carry); |
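// e.g. an incoming ISD::SUBCARRY borrow of 1 becomes a carry of 0 for
// ARMISD::SUBE, matching the ARM convention that a clear C flag means
// "borrow occurred" during subtraction.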
7763 | // This converts the boolean value carry into the carry flag. |
7764 | Carry = ConvertBooleanCarryToCarryFlag(Carry, DAG); |
7765 | |
7766 | // Do the subtraction proper using the carry flag we wanted. |
7767 | Result = DAG.getNode(ARMISD::SUBE, DL, VTs, Op.getOperand(0), |
7768 | Op.getOperand(1), Carry.getValue(1)); |
7769 | |
7770 | // Now convert the carry flag into a boolean value. |
7771 | Carry = ConvertCarryFlagToBooleanCarry(Result.getValue(1), VT, DAG); |
7772 | // But the carry returned by ARMISD::SUBE is not a borrow as expected |
7773 | // by ISD::SUBCARRY, so compute 1 - C. |
7774 | Carry = DAG.getNode(ISD::SUB, DL, MVT::i32, |
7775 | DAG.getConstant(1, DL, MVT::i32), Carry); |
7776 | } |
7777 | |
7778 | // Return both values. |
7779 | return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, Carry); |
7780 | } |
7781 | |
7782 | SDValue ARMTargetLowering::LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const { |
7783 | assert(Subtarget->isTargetDarwin());
7784 | |
7785 | // For iOS, we want to call an alternative entry point: __sincos_stret, |
7786 | // whose return values are passed via sret.
7787 | SDLoc dl(Op); |
7788 | SDValue Arg = Op.getOperand(0); |
7789 | EVT ArgVT = Arg.getValueType(); |
7790 | Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); |
7791 | auto PtrVT = getPointerTy(DAG.getDataLayout()); |
7792 | |
7793 | MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); |
7794 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
7795 | |
7796 | // Pair of floats / doubles used to pass the result. |
7797 | Type *RetTy = StructType::get(ArgTy, ArgTy); |
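// For f32 this is { float, float } -- sin in the first field, cos in the
// second; the two loads at the end of this function pull them back out.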
7798 | auto &DL = DAG.getDataLayout(); |
7799 | |
7800 | ArgListTy Args; |
7801 | bool ShouldUseSRet = Subtarget->isAPCS_ABI(); |
7802 | SDValue SRet; |
7803 | if (ShouldUseSRet) { |
7804 | // Create stack object for sret. |
7805 | const uint64_t ByteSize = DL.getTypeAllocSize(RetTy); |
7806 | const unsigned StackAlign = DL.getPrefTypeAlignment(RetTy); |
7807 | int FrameIdx = MFI.CreateStackObject(ByteSize, StackAlign, false); |
7808 | SRet = DAG.getFrameIndex(FrameIdx, TLI.getPointerTy(DL)); |
7809 | |
7810 | ArgListEntry Entry; |
7811 | Entry.Node = SRet; |
7812 | Entry.Ty = RetTy->getPointerTo(); |
7813 | Entry.IsSExt = false; |
7814 | Entry.IsZExt = false; |
7815 | Entry.IsSRet = true; |
7816 | Args.push_back(Entry); |
7817 | RetTy = Type::getVoidTy(*DAG.getContext()); |
7818 | } |
7819 | |
7820 | ArgListEntry Entry; |
7821 | Entry.Node = Arg; |
7822 | Entry.Ty = ArgTy; |
7823 | Entry.IsSExt = false; |
7824 | Entry.IsZExt = false; |
7825 | Args.push_back(Entry); |
7826 | |
7827 | RTLIB::Libcall LC = |
7828 | (ArgVT == MVT::f64) ? RTLIB::SINCOS_STRET_F64 : RTLIB::SINCOS_STRET_F32; |
7829 | const char *LibcallName = getLibcallName(LC); |
7830 | CallingConv::ID CC = getLibcallCallingConv(LC); |
7831 | SDValue Callee = DAG.getExternalSymbol(LibcallName, getPointerTy(DL)); |
7832 | |
7833 | TargetLowering::CallLoweringInfo CLI(DAG); |
7834 | CLI.setDebugLoc(dl) |
7835 | .setChain(DAG.getEntryNode()) |
7836 | .setCallee(CC, RetTy, Callee, std::move(Args)) |
7837 | .setDiscardResult(ShouldUseSRet); |
7838 | std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); |
7839 | |
7840 | if (!ShouldUseSRet) |
7841 | return CallResult.first; |
7842 | |
7843 | SDValue LoadSin = |
7844 | DAG.getLoad(ArgVT, dl, CallResult.second, SRet, MachinePointerInfo()); |
7845 | |
7846 | // Address of cos field. |
7847 | SDValue Add = DAG.getNode(ISD::ADD, dl, PtrVT, SRet, |
7848 | DAG.getIntPtrConstant(ArgVT.getStoreSize(), dl)); |
7849 | SDValue LoadCos = |
7850 | DAG.getLoad(ArgVT, dl, LoadSin.getValue(1), Add, MachinePointerInfo()); |
7851 | |
7852 | SDVTList Tys = DAG.getVTList(ArgVT, ArgVT); |
7853 | return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, |
7854 | LoadSin.getValue(0), LoadCos.getValue(0)); |
7855 | } |
7856 | |
7857 | SDValue ARMTargetLowering::LowerWindowsDIVLibCall(SDValue Op, SelectionDAG &DAG, |
7858 | bool Signed, |
7859 | SDValue &Chain) const { |
7860 | EVT VT = Op.getValueType(); |
7861 | assert((VT == MVT::i32 || VT == MVT::i64) &&
7862 | "unexpected type for custom lowering DIV");
7863 | SDLoc dl(Op); |
7864 | |
7865 | const auto &DL = DAG.getDataLayout(); |
7866 | const auto &TLI = DAG.getTargetLoweringInfo(); |
7867 | |
7868 | const char *Name = nullptr; |
7869 | if (Signed) |
7870 | Name = (VT == MVT::i32) ? "__rt_sdiv" : "__rt_sdiv64"; |
7871 | else |
7872 | Name = (VT == MVT::i32) ? "__rt_udiv" : "__rt_udiv64"; |
7873 | |
7874 | SDValue ES = DAG.getExternalSymbol(Name, TLI.getPointerTy(DL)); |
7875 | |
7876 | ARMTargetLowering::ArgListTy Args; |
7877 | |
7878 | for (auto AI : {1, 0}) { |
7879 | ArgListEntry Arg; |
7880 | Arg.Node = Op.getOperand(AI); |
7881 | Arg.Ty = Arg.Node.getValueType().getTypeForEVT(*DAG.getContext()); |
7882 | Args.push_back(Arg); |
7883 | } |
7884 | |
7885 | CallLoweringInfo CLI(DAG); |
7886 | CLI.setDebugLoc(dl) |
7887 | .setChain(Chain) |
7888 | .setCallee(CallingConv::ARM_AAPCS_VFP, VT.getTypeForEVT(*DAG.getContext()), |
7889 | ES, std::move(Args)); |
7890 | |
7891 | return LowerCallTo(CLI).first; |
7892 | } |
7893 | |
7894 | SDValue ARMTargetLowering::LowerDIV_Windows(SDValue Op, SelectionDAG &DAG, |
7895 | bool Signed) const { |
7896 | assert(Op.getValueType() == MVT::i32 &&
7897 | "unexpected type for custom lowering DIV");
7898 | SDLoc dl(Op); |
7899 | |
7900 | SDValue DBZCHK = DAG.getNode(ARMISD::WIN__DBZCHK, dl, MVT::Other, |
7901 | DAG.getEntryNode(), Op.getOperand(1)); |
7902 | |
7903 | return LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK); |
7904 | } |
7905 | |
7906 | static SDValue WinDBZCheckDenominator(SelectionDAG &DAG, SDNode *N, SDValue InChain) { |
7907 | SDLoc DL(N); |
7908 | SDValue Op = N->getOperand(1); |
7909 | if (N->getValueType(0) == MVT::i32) |
7910 | return DAG.getNode(ARMISD::WIN__DBZCHK, DL, MVT::Other, InChain, Op); |
7911 | SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Op, |
7912 | DAG.getConstant(0, DL, MVT::i32)); |
7913 | SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Op, |
7914 | DAG.getConstant(1, DL, MVT::i32)); |
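// A 64-bit denominator is zero iff (Lo | Hi) == 0, so a single WIN__DBZCHK
// on the OR of the two halves covers the full i64 value.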
7915 | return DAG.getNode(ARMISD::WIN__DBZCHK, DL, MVT::Other, InChain, |
7916 | DAG.getNode(ISD::OR, DL, MVT::i32, Lo, Hi)); |
7917 | } |
7918 | |
7919 | void ARMTargetLowering::ExpandDIV_Windows( |
7920 | SDValue Op, SelectionDAG &DAG, bool Signed, |
7921 | SmallVectorImpl<SDValue> &Results) const { |
7922 | const auto &DL = DAG.getDataLayout(); |
7923 | const auto &TLI = DAG.getTargetLoweringInfo(); |
7924 | |
7925 | assert(Op.getValueType() == MVT::i64 &&
7926 | "unexpected type for custom lowering DIV");
7927 | SDLoc dl(Op); |
7928 | |
7929 | SDValue DBZCHK = WinDBZCheckDenominator(DAG, Op.getNode(), DAG.getEntryNode()); |
7930 | |
7931 | SDValue Result = LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK); |
7932 | |
7933 | SDValue Lower = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Result); |
7934 | SDValue Upper = DAG.getNode(ISD::SRL, dl, MVT::i64, Result, |
7935 | DAG.getConstant(32, dl, TLI.getPointerTy(DL))); |
7936 | Upper = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Upper); |
7937 | |
7938 | Results.push_back(Lower); |
7939 | Results.push_back(Upper); |
7940 | } |
7941 | |
7942 | static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) { |
7943 | if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getOrdering())) |
7944 | // Acquire/Release load/store is not legal for targets without a dmb or |
7945 | // equivalent available. |
7946 | return SDValue(); |
7947 | |
7948 | // Monotonic load/store is legal for all targets. |
7949 | return Op; |
7950 | } |
7951 | |
7952 | static void ReplaceREADCYCLECOUNTER(SDNode *N, |
7953 | SmallVectorImpl<SDValue> &Results, |
7954 | SelectionDAG &DAG, |
7955 | const ARMSubtarget *Subtarget) { |
7956 | SDLoc DL(N); |
7957 | // Under the Performance Monitors extension, the cycle-count is read via:
7958 | // mrc p15, #0, <Rt>, c9, c13, #0 |
7959 | SDValue Ops[] = { N->getOperand(0), // Chain |
7960 | DAG.getConstant(Intrinsic::arm_mrc, DL, MVT::i32), |
7961 | DAG.getConstant(15, DL, MVT::i32), |
7962 | DAG.getConstant(0, DL, MVT::i32), |
7963 | DAG.getConstant(9, DL, MVT::i32), |
7964 | DAG.getConstant(13, DL, MVT::i32), |
7965 | DAG.getConstant(0, DL, MVT::i32) |
7966 | }; |
7967 | |
7968 | SDValue Cycles32 = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, |
7969 | DAG.getVTList(MVT::i32, MVT::Other), Ops); |
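// PMCCNTR is only 32 bits wide, so pair the value with a zero high word to
// form the i64 result READCYCLECOUNTER promises.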
7970 | Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Cycles32, |
7971 | DAG.getConstant(0, DL, MVT::i32))); |
7972 | Results.push_back(Cycles32.getValue(1)); |
7973 | } |
7974 | |
7975 | static SDValue createGPRPairNode(SelectionDAG &DAG, SDValue V) { |
7976 | SDLoc dl(V.getNode()); |
7977 | SDValue VLo = DAG.getAnyExtOrTrunc(V, dl, MVT::i32); |
7978 | SDValue VHi = DAG.getAnyExtOrTrunc( |
7979 | DAG.getNode(ISD::SRL, dl, MVT::i64, V, DAG.getConstant(32, dl, MVT::i32)), |
7980 | dl, MVT::i32); |
7981 | bool isBigEndian = DAG.getDataLayout().isBigEndian(); |
7982 | if (isBigEndian) |
7983 | std::swap (VLo, VHi); |
7984 | SDValue RegClass = |
7985 | DAG.getTargetConstant(ARM::GPRPairRegClassID, dl, MVT::i32); |
7986 | SDValue SubReg0 = DAG.getTargetConstant(ARM::gsub_0, dl, MVT::i32); |
7987 | SDValue SubReg1 = DAG.getTargetConstant(ARM::gsub_1, dl, MVT::i32); |
7988 | const SDValue Ops[] = { RegClass, VLo, SubReg0, VHi, SubReg1 }; |
7989 | return SDValue( |
7990 | DAG.getMachineNode(TargetOpcode::REG_SEQUENCE, dl, MVT::Untyped, Ops), 0); |
7991 | } |
7992 | |
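// CMP_SWAP_64 works on GPR pairs: createGPRPairNode above packs the i64
// halves into a REG_SEQUENCE, and the subreg extracts below unpack the
// result with the correct endianness.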
7993 | static void ReplaceCMP_SWAP_64Results(SDNode *N, |
7994 | SmallVectorImpl<SDValue> & Results, |
7995 | SelectionDAG &DAG) { |
7996 | assert(N->getValueType(0) == MVT::i64 &&
7997 | "AtomicCmpSwap on types less than 64 should be legal");
7998 | SDValue Ops[] = {N->getOperand(1), |
7999 | createGPRPairNode(DAG, N->getOperand(2)), |
8000 | createGPRPairNode(DAG, N->getOperand(3)), |
8001 | N->getOperand(0)}; |
8002 | SDNode *CmpSwap = DAG.getMachineNode( |
8003 | ARM::CMP_SWAP_64, SDLoc(N), |
8004 | DAG.getVTList(MVT::Untyped, MVT::i32, MVT::Other), Ops); |
8005 | |
8006 | MachineFunction &MF = DAG.getMachineFunction(); |
8007 | MachineSDNode::mmo_iterator MemOp = MF.allocateMemRefsArray(1); |
8008 | MemOp[0] = cast<MemSDNode>(N)->getMemOperand(); |
8009 | cast<MachineSDNode>(CmpSwap)->setMemRefs(MemOp, MemOp + 1); |
8010 | |
8011 | bool isBigEndian = DAG.getDataLayout().isBigEndian(); |
8012 | |
8013 | Results.push_back( |
8014 | DAG.getTargetExtractSubreg(isBigEndian ? ARM::gsub_1 : ARM::gsub_0, |
8015 | SDLoc(N), MVT::i32, SDValue(CmpSwap, 0))); |
8016 | Results.push_back( |
8017 | DAG.getTargetExtractSubreg(isBigEndian ? ARM::gsub_0 : ARM::gsub_1, |
8018 | SDLoc(N), MVT::i32, SDValue(CmpSwap, 0))); |
8019 | Results.push_back(SDValue(CmpSwap, 2)); |
8020 | } |
8021 | |
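| /// LowerFPOWI - Lower FPOWI to a pow/powf libcall, converting the integer
| /// exponent to floating point so that no __powi* runtime call is needed
| /// (MSVCRT does not provide one, hence the MSVCRT-specific custom lowering).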
8022 | static SDValue LowerFPOWI(SDValue Op, const ARMSubtarget &Subtarget, |
8023 | SelectionDAG &DAG) { |
8024 | const auto &TLI = DAG.getTargetLoweringInfo(); |
8025 | |
8026 | assert(Subtarget.getTargetTriple().isOSMSVCRT() &&
8027 | "Custom lowering is MSVCRT specific!");
8028 | |
8029 | SDLoc dl(Op); |
8030 | SDValue Val = Op.getOperand(0); |
8031 | MVT Ty = Val->getSimpleValueType(0); |
8032 | SDValue Exponent = DAG.getNode(ISD::SINT_TO_FP, dl, Ty, Op.getOperand(1)); |
8033 | SDValue Callee = DAG.getExternalSymbol(Ty == MVT::f32 ? "powf" : "pow", |
8034 | TLI.getPointerTy(DAG.getDataLayout())); |
8035 | |
8036 | TargetLowering::ArgListTy Args; |
8037 | TargetLowering::ArgListEntry Entry; |
8038 | |
8039 | Entry.Node = Val; |
8040 | Entry.Ty = Val.getValueType().getTypeForEVT(*DAG.getContext()); |
8041 | Entry.IsZExt = true; |
8042 | Args.push_back(Entry); |
8043 | |
8044 | Entry.Node = Exponent; |
8045 | Entry.Ty = Exponent.getValueType().getTypeForEVT(*DAG.getContext()); |
8046 | Entry.IsZExt = true; |
8047 | Args.push_back(Entry); |
8048 | |
8049 | Type *LCRTy = Val.getValueType().getTypeForEVT(*DAG.getContext()); |
8050 | |
8051 | // The in-chain to the call is the entry node. If we are emitting a
8052 | // tailcall, the chain will be mutated if the node has a non-entry input
8053 | // chain.
8054 | SDValue InChain = DAG.getEntryNode(); |
8055 | SDValue TCChain = InChain; |
8056 | |
8057 | const Function &F = DAG.getMachineFunction().getFunction(); |
8058 | bool IsTC = TLI.isInTailCallPosition(DAG, Op.getNode(), TCChain) && |
8059 | F.getReturnType() == LCRTy; |
8060 | if (IsTC) |
8061 | InChain = TCChain; |
8062 | |
8063 | TargetLowering::CallLoweringInfo CLI(DAG); |
8064 | CLI.setDebugLoc(dl) |
8065 | .setChain(InChain) |
8066 | .setCallee(CallingConv::ARM_AAPCS_VFP, LCRTy, Callee, std::move(Args)) |
8067 | .setTailCall(IsTC); |
8068 | std::pair<SDValue, SDValue> CI = TLI.LowerCallTo(CLI); |
8069 | |
8070 | // Return the chain (the DAG root) if it is a tail call |
8071 | return !CI.second.getNode() ? DAG.getRoot() : CI.first; |
8072 | } |
8073 | |
8074 | SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { |
8075 | DEBUG(dbgs() << "Lowering node: "; Op.dump());
8076 | switch (Op.getOpcode()) { |
8077 | default: llvm_unreachable("Don't know how to custom lower this!");
8078 | case ISD::WRITE_REGISTER: return LowerWRITE_REGISTER(Op, DAG); |
8079 | case ISD::ConstantPool: return LowerConstantPool(Op, DAG); |
8080 | case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); |
8081 | case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); |
8082 | case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); |
8083 | case ISD::SELECT: return LowerSELECT(Op, DAG); |
8084 | case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); |
8085 | case ISD::BRCOND: return LowerBRCOND(Op, DAG); |
8086 | case ISD::BR_CC: return LowerBR_CC(Op, DAG); |
8087 | case ISD::BR_JT: return LowerBR_JT(Op, DAG); |
8088 | case ISD::VASTART: return LowerVASTART(Op, DAG); |
8089 | case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG, Subtarget); |
8090 | case ISD::PREFETCH: return LowerPREFETCH(Op, DAG, Subtarget); |
8091 | case ISD::SINT_TO_FP: |
8092 | case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG); |
8093 | case ISD::FP_TO_SINT: |
8094 | case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG); |
8095 | case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); |
8096 | case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); |
8097 | case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); |
8098 | case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG); |
8099 | case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG); |
8100 | case ISD::EH_SJLJ_SETUP_DISPATCH: return LowerEH_SJLJ_SETUP_DISPATCH(Op, DAG); |
8101 | case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG, |
8102 | Subtarget); |
8103 | case ISD::BITCAST: return ExpandBITCAST(Op.getNode(), DAG, Subtarget); |
8104 | case ISD::SHL: |
8105 | case ISD::SRL: |
8106 | case ISD::SRA: return LowerShift(Op.getNode(), DAG, Subtarget); |
8107 | case ISD::SREM: return LowerREM(Op.getNode(), DAG); |
8108 | case ISD::UREM: return LowerREM(Op.getNode(), DAG); |
8109 | case ISD::SHL_PARTS: return LowerShiftLeftParts(Op, DAG); |
8110 | case ISD::SRL_PARTS: |
8111 | case ISD::SRA_PARTS: return LowerShiftRightParts(Op, DAG); |
8112 | case ISD::CTTZ: |
8113 | case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(Op.getNode(), DAG, Subtarget); |
8114 | case ISD::CTPOP: return LowerCTPOP(Op.getNode(), DAG, Subtarget); |
8115 | case ISD::SETCC: return LowerVSETCC(Op, DAG); |
8116 | case ISD::SETCCE: return LowerSETCCE(Op, DAG); |
8117 | case ISD::ConstantFP: return LowerConstantFP(Op, DAG, Subtarget); |
8118 | case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG, Subtarget); |
8119 | case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); |
8120 | case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); |
8121 | case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); |
8122 | case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG); |
8123 | case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); |
8124 | case ISD::MUL: return LowerMUL(Op, DAG); |
8125 | case ISD::SDIV: |
8126 | if (Subtarget->isTargetWindows() && !Op.getValueType().isVector()) |
8127 | return LowerDIV_Windows(Op, DAG, /* Signed */ true); |
8128 | return LowerSDIV(Op, DAG); |
8129 | case ISD::UDIV: |
8130 | if (Subtarget->isTargetWindows() && !Op.getValueType().isVector()) |
8131 | return LowerDIV_Windows(Op, DAG, /* Signed */ false); |
8132 | return LowerUDIV(Op, DAG); |
8133 | case ISD::ADDC: |
8134 | case ISD::ADDE: |
8135 | case ISD::SUBC: |
8136 | case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG); |
8137 | case ISD::ADDCARRY: |
8138 | case ISD::SUBCARRY: return LowerADDSUBCARRY(Op, DAG); |
8139 | case ISD::SADDO: |
8140 | case ISD::SSUBO: |
8141 | return LowerSignedALUO(Op, DAG); |
8142 | case ISD::UADDO: |
8143 | case ISD::USUBO: |
8144 | return LowerUnsignedALUO(Op, DAG); |
8145 | case ISD::ATOMIC_LOAD: |
8146 | case ISD::ATOMIC_STORE: return LowerAtomicLoadStore(Op, DAG); |
8147 | case ISD::FSINCOS: return LowerFSINCOS(Op, DAG); |
8148 | case ISD::SDIVREM: |
8149 | case ISD::UDIVREM: return LowerDivRem(Op, DAG); |
8150 | case ISD::DYNAMIC_STACKALLOC: |
8151 | if (Subtarget->isTargetWindows()) |
8152 | return LowerDYNAMIC_STACKALLOC(Op, DAG); |
8153 | llvm_unreachable("Don't know how to custom lower this!");
8154 | case ISD::FP_ROUND: return LowerFP_ROUND(Op, DAG); |
8155 | case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG); |
8156 | case ISD::FPOWI: return LowerFPOWI(Op, *Subtarget, DAG); |
8157 | case ARMISD::WIN__DBZCHK: return SDValue(); |
8158 | } |
8159 | } |
8160 | |
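| /// ReplaceLongIntrinsic - Replace the arm.smlald/smlaldx/smlsld/smlsldx
| /// intrinsics, whose i64 accumulator is illegal here, with the corresponding
| /// ARMISD node taking the accumulator as two i32 halves.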
8161 | static void ReplaceLongIntrinsic(SDNode *N, SmallVectorImpl<SDValue> &Results, |
8162 | SelectionDAG &DAG) { |
8163 | unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); |
8164 | unsigned Opc = 0; |
8165 | if (IntNo == Intrinsic::arm_smlald) |
8166 | Opc = ARMISD::SMLALD; |
8167 | else if (IntNo == Intrinsic::arm_smlaldx) |
8168 | Opc = ARMISD::SMLALDX; |
8169 | else if (IntNo == Intrinsic::arm_smlsld) |
8170 | Opc = ARMISD::SMLSLD; |
8171 | else if (IntNo == Intrinsic::arm_smlsldx) |
8172 | Opc = ARMISD::SMLSLDX; |
8173 | else |
8174 | return; |
8175 | |
8176 | SDLoc dl(N); |
8177 | SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, |
8178 | N->getOperand(3), |
8179 | DAG.getConstant(0, dl, MVT::i32)); |
8180 | SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, |
8181 | N->getOperand(3), |
8182 | DAG.getConstant(1, dl, MVT::i32)); |
8183 | |
8184 | SDValue LongMul = DAG.getNode(Opc, dl, |
8185 | DAG.getVTList(MVT::i32, MVT::i32), |
8186 | N->getOperand(1), N->getOperand(2), |
8187 | Lo, Hi); |
8188 | Results.push_back(LongMul.getValue(0)); |
8189 | Results.push_back(LongMul.getValue(1)); |
8190 | } |
8191 | |
8192 | /// ReplaceNodeResults - Replace the results of a node with an illegal
8193 | /// result type with new values built out of custom code.
8194 | void ARMTargetLowering::ReplaceNodeResults(SDNode *N, |
8195 | SmallVectorImpl<SDValue> &Results, |
8196 | SelectionDAG &DAG) const { |
8197 | SDValue Res; |
8198 | switch (N->getOpcode()) { |
8199 | default: |
8200 | llvm_unreachable("Don't know how to custom expand this!");
8201 | case ISD::READ_REGISTER: |
8202 | ExpandREAD_REGISTER(N, Results, DAG); |
8203 | break; |
8204 | case ISD::BITCAST: |
8205 | Res = ExpandBITCAST(N, DAG, Subtarget); |
8206 | break; |
8207 | case ISD::SRL: |
8208 | case ISD::SRA: |
8209 | Res = Expand64BitShift(N, DAG, Subtarget); |
8210 | break; |
8211 | case ISD::SREM: |
8212 | case ISD::UREM: |
8213 | Res = LowerREM(N, DAG); |
8214 | break; |
8215 | case ISD::SDIVREM: |
8216 | case ISD::UDIVREM: |
8217 | Res = LowerDivRem(SDValue(N, 0), DAG); |
8218 | assert(Res.getNumOperands() == 2 && "DivRem needs two values");
8219 | Results.push_back(Res.getValue(0)); |
8220 | Results.push_back(Res.getValue(1)); |
8221 | return; |
8222 | case ISD::READCYCLECOUNTER: |
8223 | ReplaceREADCYCLECOUNTER(N, Results, DAG, Subtarget); |
8224 | return; |
8225 | case ISD::UDIV: |
8226 | case ISD::SDIV: |
8227 | assert(Subtarget->isTargetWindows() && "can only expand DIV on Windows");
8228 | return ExpandDIV_Windows(SDValue(N, 0), DAG, N->getOpcode() == ISD::SDIV, |
8229 | Results); |
8230 | case ISD::ATOMIC_CMP_SWAP: |
8231 | ReplaceCMP_SWAP_64Results(N, Results, DAG); |
8232 | return; |
8233 | case ISD::INTRINSIC_WO_CHAIN: |
8234 | return ReplaceLongIntrinsic(N, Results, DAG); |
8235 | } |
8236 | if (Res.getNode()) |
8237 | Results.push_back(Res); |
8238 | } |
8239 | |
8240 | //===----------------------------------------------------------------------===// |
8241 | // ARM Scheduler Hooks |
8242 | //===----------------------------------------------------------------------===// |
8243 | |
8244 | /// SetupEntryBlockForSjLj - Insert code into the entry block that creates and |
8245 | /// registers the function context. |
8246 | void ARMTargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI, |
8247 | MachineBasicBlock *MBB, |
8248 | MachineBasicBlock *DispatchBB, |
8249 | int FI) const { |
8250 | assert(!Subtarget->isROPI() && !Subtarget->isRWPI() &&
8251 | "ROPI/RWPI not currently supported with SjLj");
8252 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
8253 | DebugLoc dl = MI.getDebugLoc(); |
8254 | MachineFunction *MF = MBB->getParent(); |
8255 | MachineRegisterInfo *MRI = &MF->getRegInfo(); |
8256 | MachineConstantPool *MCP = MF->getConstantPool(); |
8257 | ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>(); |
8258 | const Function &F = MF->getFunction(); |
8259 | |
8260 | bool isThumb = Subtarget->isThumb(); |
8261 | bool isThumb2 = Subtarget->isThumb2(); |
8262 | |
8263 | unsigned PCLabelId = AFI->createPICLabelUId(); |
8264 | unsigned PCAdj = (isThumb || isThumb2) ? 4 : 8; |
8265 | ARMConstantPoolValue *CPV = |
8266 | ARMConstantPoolMBB::Create(F.getContext(), DispatchBB, PCLabelId, PCAdj); |
8267 | unsigned CPI = MCP->getConstantPoolIndex(CPV, 4); |
8268 | |
8269 | const TargetRegisterClass *TRC = isThumb ? &ARM::tGPRRegClass |
8270 | : &ARM::GPRRegClass; |
8271 | |
8272 | // Grab constant pool and fixed stack memory operands. |
8273 | MachineMemOperand *CPMMO = |
8274 | MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF), |
8275 | MachineMemOperand::MOLoad, 4, 4); |
8276 | |
8277 | MachineMemOperand *FIMMOSt = |
8278 | MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI), |
8279 | MachineMemOperand::MOStore, 4, 4); |
8280 | |
8281 | // Load the address of the dispatch MBB into the jump buffer. |
8282 | if (isThumb2) { |
8283 | // Incoming value: jbuf |
8284 | // ldr.n r5, LCPI1_1 |
8285 | // orr r5, r5, #1 |
8286 | // add r5, pc |
8287 | // str r5, [$jbuf, #+4] ; &jbuf[1] |
8288 | unsigned NewVReg1 = MRI->createVirtualRegister(TRC); |
8289 | BuildMI(*MBB, MI, dl, TII->get(ARM::t2LDRpci), NewVReg1) |
8290 | .addConstantPoolIndex(CPI) |
8291 | .addMemOperand(CPMMO) |
8292 | .add(predOps(ARMCC::AL)); |
8293 | // Set the low bit because of thumb mode. |
8294 | unsigned NewVReg2 = MRI->createVirtualRegister(TRC); |
8295 | BuildMI(*MBB, MI, dl, TII->get(ARM::t2ORRri), NewVReg2) |
8296 | .addReg(NewVReg1, RegState::Kill) |
8297 | .addImm(0x01) |
8298 | .add(predOps(ARMCC::AL)) |
8299 | .add(condCodeOp()); |
8300 | unsigned NewVReg3 = MRI->createVirtualRegister(TRC); |
8301 | BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg3) |
8302 | .addReg(NewVReg2, RegState::Kill) |
8303 | .addImm(PCLabelId); |
8304 | BuildMI(*MBB, MI, dl, TII->get(ARM::t2STRi12)) |
8305 | .addReg(NewVReg3, RegState::Kill) |
8306 | .addFrameIndex(FI) |
8307 | .addImm(36) // &jbuf[1] :: pc |
8308 | .addMemOperand(FIMMOSt) |
8309 | .add(predOps(ARMCC::AL)); |
8310 | } else if (isThumb) { |
8311 | // Incoming value: jbuf |
8312 | // ldr.n r1, LCPI1_4 |
8313 | // add r1, pc |
8314 | // mov r2, #1 |
8315 | // orrs r1, r2 |
8316 | // add r2, $jbuf, #+4 ; &jbuf[1] |
8317 | // str r1, [r2] |
8318 | unsigned NewVReg1 = MRI->createVirtualRegister(TRC); |
8319 | BuildMI(*MBB, MI, dl, TII->get(ARM::tLDRpci), NewVReg1) |
8320 | .addConstantPoolIndex(CPI) |
8321 | .addMemOperand(CPMMO) |
8322 | .add(predOps(ARMCC::AL)); |
8323 | unsigned NewVReg2 = MRI->createVirtualRegister(TRC); |
8324 | BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg2) |
8325 | .addReg(NewVReg1, RegState::Kill) |
8326 | .addImm(PCLabelId); |
8327 | // Set the low bit because of thumb mode. |
8328 | unsigned NewVReg3 = MRI->createVirtualRegister(TRC); |
8329 | BuildMI(*MBB, MI, dl, TII->get(ARM::tMOVi8), NewVReg3) |
8330 | .addReg(ARM::CPSR, RegState::Define) |
8331 | .addImm(1) |
8332 | .add(predOps(ARMCC::AL)); |
8333 | unsigned NewVReg4 = MRI->createVirtualRegister(TRC); |
8334 | BuildMI(*MBB, MI, dl, TII->get(ARM::tORR), NewVReg4) |
8335 | .addReg(ARM::CPSR, RegState::Define) |
8336 | .addReg(NewVReg2, RegState::Kill) |
8337 | .addReg(NewVReg3, RegState::Kill) |
8338 | .add(predOps(ARMCC::AL)); |
8339 | unsigned NewVReg5 = MRI->createVirtualRegister(TRC); |
8340 | BuildMI(*MBB, MI, dl, TII->get(ARM::tADDframe), NewVReg5) |
8341 | .addFrameIndex(FI) |
8342 | .addImm(36); // &jbuf[1] :: pc |
8343 | BuildMI(*MBB, MI, dl, TII->get(ARM::tSTRi)) |
8344 | .addReg(NewVReg4, RegState::Kill) |
8345 | .addReg(NewVReg5, RegState::Kill) |
8346 | .addImm(0) |
8347 | .addMemOperand(FIMMOSt) |
8348 | .add(predOps(ARMCC::AL)); |
8349 | } else { |
8350 | // Incoming value: jbuf |
8351 | // ldr r1, LCPI1_1 |
8352 | // add r1, pc, r1 |
8353 | // str r1, [$jbuf, #+4] ; &jbuf[1] |
8354 | unsigned NewVReg1 = MRI->createVirtualRegister(TRC); |
8355 | BuildMI(*MBB, MI, dl, TII->get(ARM::LDRi12), NewVReg1) |
8356 | .addConstantPoolIndex(CPI) |
8357 | .addImm(0) |
8358 | .addMemOperand(CPMMO) |
8359 | .add(predOps(ARMCC::AL)); |
8360 | unsigned NewVReg2 = MRI->createVirtualRegister(TRC); |
8361 | BuildMI(*MBB, MI, dl, TII->get(ARM::PICADD), NewVReg2) |
8362 | .addReg(NewVReg1, RegState::Kill) |
8363 | .addImm(PCLabelId) |
8364 | .add(predOps(ARMCC::AL)); |
8365 | BuildMI(*MBB, MI, dl, TII->get(ARM::STRi12)) |
8366 | .addReg(NewVReg2, RegState::Kill) |
8367 | .addFrameIndex(FI) |
8368 | .addImm(36) // &jbuf[1] :: pc |
8369 | .addMemOperand(FIMMOSt) |
8370 | .add(predOps(ARMCC::AL)); |
8371 | } |
8372 | } |
8373 | |
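| /// EmitSjLjDispatchBlock - Build the SjLj exception dispatch block: load the
| /// call site index from the function context, bounds-check it against the
| /// landing pad table (trapping when out of range), and branch through an
| /// inline jump table to the selected landing pad.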
8374 | void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI, |
8375 | MachineBasicBlock *MBB) const { |
8376 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
8377 | DebugLoc dl = MI.getDebugLoc(); |
8378 | MachineFunction *MF = MBB->getParent(); |
8379 | MachineRegisterInfo *MRI = &MF->getRegInfo(); |
8380 | MachineFrameInfo &MFI = MF->getFrameInfo(); |
8381 | int FI = MFI.getFunctionContextIndex(); |
8382 | |
8383 | const TargetRegisterClass *TRC = Subtarget->isThumb() ? &ARM::tGPRRegClass |
8384 | : &ARM::GPRnopcRegClass; |
8385 | |
8386 | // Get a mapping of the call site numbers to all of the landing pads they're |
8387 | // associated with. |
8388 | DenseMap<unsigned, SmallVector<MachineBasicBlock*, 2>> CallSiteNumToLPad; |
8389 | unsigned MaxCSNum = 0; |
8390 | for (MachineFunction::iterator BB = MF->begin(), E = MF->end(); BB != E; |
8391 | ++BB) { |
8392 | if (!BB->isEHPad()) continue; |
8393 | |
8394 | // FIXME: We should assert that the EH_LABEL is the first MI in the landing |
8395 | // pad. |
8396 | for (MachineBasicBlock::iterator |
8397 | II = BB->begin(), IE = BB->end(); II != IE; ++II) { |
8398 | if (!II->isEHLabel()) continue; |
8399 | |
8400 | MCSymbol *Sym = II->getOperand(0).getMCSymbol(); |
8401 | if (!MF->hasCallSiteLandingPad(Sym)) continue; |
8402 | |
8403 | SmallVectorImpl<unsigned> &CallSiteIdxs = MF->getCallSiteLandingPad(Sym); |
8404 | for (SmallVectorImpl<unsigned>::iterator |
8405 | CSI = CallSiteIdxs.begin(), CSE = CallSiteIdxs.end(); |
8406 | CSI != CSE; ++CSI) { |
8407 | CallSiteNumToLPad[*CSI].push_back(&*BB); |
8408 | MaxCSNum = std::max(MaxCSNum, *CSI); |
8409 | } |
8410 | break; |
8411 | } |
8412 | } |
8413 | |
8414 | // Get an ordered list of the machine basic blocks for the jump table. |
8415 | std::vector<MachineBasicBlock*> LPadList; |
8416 | SmallPtrSet<MachineBasicBlock*, 32> InvokeBBs; |
8417 | LPadList.reserve(CallSiteNumToLPad.size()); |
8418 | for (unsigned I = 1; I <= MaxCSNum; ++I) { |
8419 | SmallVectorImpl<MachineBasicBlock*> &MBBList = CallSiteNumToLPad[I]; |
8420 | for (SmallVectorImpl<MachineBasicBlock*>::iterator |
8421 | II = MBBList.begin(), IE = MBBList.end(); II != IE; ++II) { |
8422 | LPadList.push_back(*II); |
8423 | InvokeBBs.insert((*II)->pred_begin(), (*II)->pred_end()); |
8424 | } |
8425 | } |
8426 | |
8427 | assert(!LPadList.empty() &&
8428 | "No landing pad destinations for the dispatch jump table!");
8429 | |
8430 | // Create the jump table and associated information. |
8431 | MachineJumpTableInfo *JTI = |
8432 | MF->getOrCreateJumpTableInfo(MachineJumpTableInfo::EK_Inline); |
8433 | unsigned MJTI = JTI->createJumpTableIndex(LPadList); |
8434 | |
8435 | // Create the MBBs for the dispatch code. |
8436 | |
8437 | // Shove the dispatch's address into the return slot in the function context. |
8438 | MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock(); |
8439 | DispatchBB->setIsEHPad(); |
8440 | |
8441 | MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock(); |
8442 | unsigned trap_opcode; |
8443 | if (Subtarget->isThumb()) |
8444 | trap_opcode = ARM::tTRAP; |
8445 | else |
8446 | trap_opcode = Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP; |
8447 | |
8448 | BuildMI(TrapBB, dl, TII->get(trap_opcode)); |
8449 | DispatchBB->addSuccessor(TrapBB); |
8450 | |
8451 | MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock(); |
8452 | DispatchBB->addSuccessor(DispContBB); |
8453 | |
8454 | // Insert the MBBs into the function.
8455 | MF->insert(MF->end(), DispatchBB); |
8456 | MF->insert(MF->end(), DispContBB); |
8457 | MF->insert(MF->end(), TrapBB); |
8458 | |
8459 | // Insert code into the entry block that creates and registers the function |
8460 | // context. |
8461 | SetupEntryBlockForSjLj(MI, MBB, DispatchBB, FI); |
8462 | |
8463 | MachineMemOperand *FIMMOLd = MF->getMachineMemOperand( |
8464 | MachinePointerInfo::getFixedStack(*MF, FI), |
8465 | MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile, 4, 4); |
8466 | |
8467 | MachineInstrBuilder MIB; |
8468 | MIB = BuildMI(DispatchBB, dl, TII->get(ARM::Int_eh_sjlj_dispatchsetup)); |
8469 | |
8470 | const ARMBaseInstrInfo *AII = static_cast<const ARMBaseInstrInfo*>(TII); |
8471 | const ARMBaseRegisterInfo &RI = AII->getRegisterInfo(); |
8472 | |
8473 | // Add a register mask with no preserved registers. This results in all |
8474 | // registers being marked as clobbered. This can't work if the dispatch block |
8475 | // is in a Thumb1 function and is linked with ARM code which uses the FP |
8476 | // registers, as there is no way to preserve the FP registers in Thumb1 mode. |
8477 | MIB.addRegMask(RI.getSjLjDispatchPreservedMask(*MF)); |
8478 | |
8479 | bool IsPositionIndependent = isPositionIndependent(); |
8480 | unsigned NumLPads = LPadList.size(); |
8481 | if (Subtarget->isThumb2()) { |
8482 | unsigned NewVReg1 = MRI->createVirtualRegister(TRC); |
8483 | BuildMI(DispatchBB, dl, TII->get(ARM::t2LDRi12), NewVReg1) |
8484 | .addFrameIndex(FI) |
8485 | .addImm(4) |
8486 | .addMemOperand(FIMMOLd) |
8487 | .add(predOps(ARMCC::AL)); |
8488 | |
8489 | if (NumLPads < 256) { |
8490 | BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPri)) |
8491 | .addReg(NewVReg1) |
8492 | .addImm(LPadList.size()) |
8493 | .add(predOps(ARMCC::AL)); |
8494 | } else { |
8495 | unsigned VReg1 = MRI->createVirtualRegister(TRC); |
8496 | BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVi16), VReg1) |
8497 | .addImm(NumLPads & 0xFFFF) |
8498 | .add(predOps(ARMCC::AL)); |
8499 | |
8500 | unsigned VReg2 = VReg1; |
8501 | if ((NumLPads & 0xFFFF0000) != 0) { |
8502 | VReg2 = MRI->createVirtualRegister(TRC); |
8503 | BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVTi16), VReg2) |
8504 | .addReg(VReg1) |
8505 | .addImm(NumLPads >> 16) |
8506 | .add(predOps(ARMCC::AL)); |
8507 | } |
8508 | |
8509 | BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPrr)) |
8510 | .addReg(NewVReg1) |
8511 | .addReg(VReg2) |
8512 | .add(predOps(ARMCC::AL)); |
8513 | } |
8514 | |
8515 | BuildMI(DispatchBB, dl, TII->get(ARM::t2Bcc)) |
8516 | .addMBB(TrapBB) |
8517 | .addImm(ARMCC::HI) |
8518 | .addReg(ARM::CPSR); |
8519 | |
8520 | unsigned NewVReg3 = MRI->createVirtualRegister(TRC); |
8521 | BuildMI(DispContBB, dl, TII->get(ARM::t2LEApcrelJT), NewVReg3) |
8522 | .addJumpTableIndex(MJTI) |
8523 | .add(predOps(ARMCC::AL)); |
8524 | |
8525 | unsigned NewVReg4 = MRI->createVirtualRegister(TRC); |
8526 | BuildMI(DispContBB, dl, TII->get(ARM::t2ADDrs), NewVReg4) |
8527 | .addReg(NewVReg3, RegState::Kill) |
8528 | .addReg(NewVReg1) |
8529 | .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2)) |
8530 | .add(predOps(ARMCC::AL)) |
8531 | .add(condCodeOp()); |
8532 | |
8533 | BuildMI(DispContBB, dl, TII->get(ARM::t2BR_JT)) |
8534 | .addReg(NewVReg4, RegState::Kill) |
8535 | .addReg(NewVReg1) |
8536 | .addJumpTableIndex(MJTI); |
8537 | } else if (Subtarget->isThumb()) { |
8538 | unsigned NewVReg1 = MRI->createVirtualRegister(TRC); |
8539 | BuildMI(DispatchBB, dl, TII->get(ARM::tLDRspi), NewVReg1) |
8540 | .addFrameIndex(FI) |
8541 | .addImm(1) |
8542 | .addMemOperand(FIMMOLd) |
8543 | .add(predOps(ARMCC::AL)); |
8544 | |
8545 | if (NumLPads < 256) { |
8546 | BuildMI(DispatchBB, dl, TII->get(ARM::tCMPi8)) |
8547 | .addReg(NewVReg1) |
8548 | .addImm(NumLPads) |
8549 | .add(predOps(ARMCC::AL)); |
8550 | } else { |
8551 | MachineConstantPool *ConstantPool = MF->getConstantPool(); |
8552 | Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext()); |
8553 | const Constant *C = ConstantInt::get(Int32Ty, NumLPads); |
8554 | |
8555 | // MachineConstantPool wants an explicit alignment. |
8556 | unsigned Align = MF->getDataLayout().getPrefTypeAlignment(Int32Ty); |
8557 | if (Align == 0) |
8558 | Align = MF->getDataLayout().getTypeAllocSize(C->getType()); |
8559 | unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align); |
8560 | |
8561 | unsigned VReg1 = MRI->createVirtualRegister(TRC); |
8562 | BuildMI(DispatchBB, dl, TII->get(ARM::tLDRpci)) |
8563 | .addReg(VReg1, RegState::Define) |
8564 | .addConstantPoolIndex(Idx) |
8565 | .add(predOps(ARMCC::AL)); |
8566 | BuildMI(DispatchBB, dl, TII->get(ARM::tCMPr)) |
8567 | .addReg(NewVReg1) |
8568 | .addReg(VReg1) |
8569 | .add(predOps(ARMCC::AL)); |
8570 | } |
8571 | |
8572 | BuildMI(DispatchBB, dl, TII->get(ARM::tBcc)) |
8573 | .addMBB(TrapBB) |
8574 | .addImm(ARMCC::HI) |
8575 | .addReg(ARM::CPSR); |
8576 | |
8577 | unsigned NewVReg2 = MRI->createVirtualRegister(TRC); |
8578 | BuildMI(DispContBB, dl, TII->get(ARM::tLSLri), NewVReg2) |
8579 | .addReg(ARM::CPSR, RegState::Define) |
8580 | .addReg(NewVReg1) |
8581 | .addImm(2) |
8582 | .add(predOps(ARMCC::AL)); |
8583 | |
8584 | unsigned NewVReg3 = MRI->createVirtualRegister(TRC); |
8585 | BuildMI(DispContBB, dl, TII->get(ARM::tLEApcrelJT), NewVReg3) |
8586 | .addJumpTableIndex(MJTI) |
8587 | .add(predOps(ARMCC::AL)); |
8588 | |
8589 | unsigned NewVReg4 = MRI->createVirtualRegister(TRC); |
8590 | BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg4) |
8591 | .addReg(ARM::CPSR, RegState::Define) |
8592 | .addReg(NewVReg2, RegState::Kill) |
8593 | .addReg(NewVReg3) |
8594 | .add(predOps(ARMCC::AL)); |
8595 | |
8596 | MachineMemOperand *JTMMOLd = MF->getMachineMemOperand( |
8597 | MachinePointerInfo::getJumpTable(*MF), MachineMemOperand::MOLoad, 4, 4); |
8598 | |
8599 | unsigned NewVReg5 = MRI->createVirtualRegister(TRC); |
8600 | BuildMI(DispContBB, dl, TII->get(ARM::tLDRi), NewVReg5) |
8601 | .addReg(NewVReg4, RegState::Kill) |
8602 | .addImm(0) |
8603 | .addMemOperand(JTMMOLd) |
8604 | .add(predOps(ARMCC::AL)); |
8605 | |
8606 | unsigned NewVReg6 = NewVReg5; |
8607 | if (IsPositionIndependent) { |
8608 | NewVReg6 = MRI->createVirtualRegister(TRC); |
8609 | BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg6) |
8610 | .addReg(ARM::CPSR, RegState::Define) |
8611 | .addReg(NewVReg5, RegState::Kill) |
8612 | .addReg(NewVReg3) |
8613 | .add(predOps(ARMCC::AL)); |
8614 | } |
8615 | |
8616 | BuildMI(DispContBB, dl, TII->get(ARM::tBR_JTr)) |
8617 | .addReg(NewVReg6, RegState::Kill) |
8618 | .addJumpTableIndex(MJTI); |
8619 | } else { |
8620 | unsigned NewVReg1 = MRI->createVirtualRegister(TRC); |
8621 | BuildMI(DispatchBB, dl, TII->get(ARM::LDRi12), NewVReg1) |
8622 | .addFrameIndex(FI) |
8623 | .addImm(4) |
8624 | .addMemOperand(FIMMOLd) |
8625 | .add(predOps(ARMCC::AL)); |
8626 | |
8627 | if (NumLPads < 256) { |
8628 | BuildMI(DispatchBB, dl, TII->get(ARM::CMPri)) |
8629 | .addReg(NewVReg1) |
8630 | .addImm(NumLPads) |
8631 | .add(predOps(ARMCC::AL)); |
8632 | } else if (Subtarget->hasV6T2Ops() && isUInt<16>(NumLPads)) { |
8633 | unsigned VReg1 = MRI->createVirtualRegister(TRC); |
8634 | BuildMI(DispatchBB, dl, TII->get(ARM::MOVi16), VReg1) |
8635 | .addImm(NumLPads & 0xFFFF) |
8636 | .add(predOps(ARMCC::AL)); |
8637 | |
8638 | unsigned VReg2 = VReg1; |
8639 | if ((NumLPads & 0xFFFF0000) != 0) { |
8640 | VReg2 = MRI->createVirtualRegister(TRC); |
8641 | BuildMI(DispatchBB, dl, TII->get(ARM::MOVTi16), VReg2) |
8642 | .addReg(VReg1) |
8643 | .addImm(NumLPads >> 16) |
8644 | .add(predOps(ARMCC::AL)); |
8645 | } |
8646 | |
8647 | BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr)) |
8648 | .addReg(NewVReg1) |
8649 | .addReg(VReg2) |
8650 | .add(predOps(ARMCC::AL)); |
8651 | } else { |
8652 | MachineConstantPool *ConstantPool = MF->getConstantPool(); |
8653 | Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext()); |
8654 | const Constant *C = ConstantInt::get(Int32Ty, NumLPads); |
8655 | |
8656 | // MachineConstantPool wants an explicit alignment. |
8657 | unsigned Align = MF->getDataLayout().getPrefTypeAlignment(Int32Ty); |
8658 | if (Align == 0) |
8659 | Align = MF->getDataLayout().getTypeAllocSize(C->getType()); |
8660 | unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align); |
8661 | |
8662 | unsigned VReg1 = MRI->createVirtualRegister(TRC); |
8663 | BuildMI(DispatchBB, dl, TII->get(ARM::LDRcp)) |
8664 | .addReg(VReg1, RegState::Define) |
8665 | .addConstantPoolIndex(Idx) |
8666 | .addImm(0) |
8667 | .add(predOps(ARMCC::AL)); |
8668 | BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr)) |
8669 | .addReg(NewVReg1) |
8670 | .addReg(VReg1, RegState::Kill) |
8671 | .add(predOps(ARMCC::AL)); |
8672 | } |
8673 | |
8674 | BuildMI(DispatchBB, dl, TII->get(ARM::Bcc)) |
8675 | .addMBB(TrapBB) |
8676 | .addImm(ARMCC::HI) |
8677 | .addReg(ARM::CPSR); |
8678 | |
8679 | unsigned NewVReg3 = MRI->createVirtualRegister(TRC); |
8680 | BuildMI(DispContBB, dl, TII->get(ARM::MOVsi), NewVReg3) |
8681 | .addReg(NewVReg1) |
8682 | .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2)) |
8683 | .add(predOps(ARMCC::AL)) |
8684 | .add(condCodeOp()); |
8685 | unsigned NewVReg4 = MRI->createVirtualRegister(TRC); |
8686 | BuildMI(DispContBB, dl, TII->get(ARM::LEApcrelJT), NewVReg4) |
8687 | .addJumpTableIndex(MJTI) |
8688 | .add(predOps(ARMCC::AL)); |
8689 | |
8690 | MachineMemOperand *JTMMOLd = MF->getMachineMemOperand( |
8691 | MachinePointerInfo::getJumpTable(*MF), MachineMemOperand::MOLoad, 4, 4); |
8692 | unsigned NewVReg5 = MRI->createVirtualRegister(TRC); |
8693 | BuildMI(DispContBB, dl, TII->get(ARM::LDRrs), NewVReg5) |
8694 | .addReg(NewVReg3, RegState::Kill) |
8695 | .addReg(NewVReg4) |
8696 | .addImm(0) |
8697 | .addMemOperand(JTMMOLd) |
8698 | .add(predOps(ARMCC::AL)); |
8699 | |
8700 | if (IsPositionIndependent) { |
8701 | BuildMI(DispContBB, dl, TII->get(ARM::BR_JTadd)) |
8702 | .addReg(NewVReg5, RegState::Kill) |
8703 | .addReg(NewVReg4) |
8704 | .addJumpTableIndex(MJTI); |
8705 | } else { |
8706 | BuildMI(DispContBB, dl, TII->get(ARM::BR_JTr)) |
8707 | .addReg(NewVReg5, RegState::Kill) |
8708 | .addJumpTableIndex(MJTI); |
8709 | } |
8710 | } |
8711 | |
8712 | // Add the jump table entries as successors to the MBB. |
8713 | SmallPtrSet<MachineBasicBlock*, 8> SeenMBBs; |
8714 | for (std::vector<MachineBasicBlock*>::iterator |
8715 | I = LPadList.begin(), E = LPadList.end(); I != E; ++I) { |
8716 | MachineBasicBlock *CurMBB = *I; |
8717 | if (SeenMBBs.insert(CurMBB).second) |
8718 | DispContBB->addSuccessor(CurMBB); |
8719 | } |
8720 | |
8721 | // N.B. the order the invoke BBs are processed in doesn't matter here. |
8722 | const MCPhysReg *SavedRegs = RI.getCalleeSavedRegs(MF); |
8723 | SmallVector<MachineBasicBlock*, 64> MBBLPads; |
8724 | for (MachineBasicBlock *BB : InvokeBBs) { |
8725 | |
8726 | // Remove the landing pad successor from the invoke block and replace it |
8727 | // with the new dispatch block. |
8728 | SmallVector<MachineBasicBlock*, 4> Successors(BB->succ_begin(), |
8729 | BB->succ_end()); |
8730 | while (!Successors.empty()) { |
8731 | MachineBasicBlock *SMBB = Successors.pop_back_val(); |
8732 | if (SMBB->isEHPad()) { |
8733 | BB->removeSuccessor(SMBB); |
8734 | MBBLPads.push_back(SMBB); |
8735 | } |
8736 | } |
8737 | |
8738 | BB->addSuccessor(DispatchBB, BranchProbability::getZero()); |
8739 | BB->normalizeSuccProbs(); |
8740 | |
8741 | // Find the invoke call and mark all of the callee-saved registers as |
8742 | // 'implicit defined' so that they're spilled. This prevents code from |
8743 | // moving instructions to before the EH block, where they will never be |
8744 | // executed. |
8745 | for (MachineBasicBlock::reverse_iterator |
8746 | II = BB->rbegin(), IE = BB->rend(); II != IE; ++II) { |
8747 | if (!II->isCall()) continue; |
8748 | |
8749 | DenseMap<unsigned, bool> DefRegs; |
8750 | for (MachineInstr::mop_iterator |
8751 | OI = II->operands_begin(), OE = II->operands_end(); |
8752 | OI != OE; ++OI) { |
8753 | if (!OI->isReg()) continue; |
8754 | DefRegs[OI->getReg()] = true; |
8755 | } |
8756 | |
8757 | MachineInstrBuilder MIB(*MF, &*II); |
8758 | |
8759 | for (unsigned i = 0; SavedRegs[i] != 0; ++i) { |
8760 | unsigned Reg = SavedRegs[i]; |
8761 | if (Subtarget->isThumb2() && |
8762 | !ARM::tGPRRegClass.contains(Reg) && |
8763 | !ARM::hGPRRegClass.contains(Reg)) |
8764 | continue; |
8765 | if (Subtarget->isThumb1Only() && !ARM::tGPRRegClass.contains(Reg)) |
8766 | continue; |
8767 | if (!Subtarget->isThumb() && !ARM::GPRRegClass.contains(Reg)) |
8768 | continue; |
8769 | if (!DefRegs[Reg]) |
8770 | MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead); |
8771 | } |
8772 | |
8773 | break; |
8774 | } |
8775 | } |
8776 | |
8777 | // Mark all former landing pads as non-landing pads. The dispatch is the only |
8778 | // landing pad now. |
8779 | for (SmallVectorImpl<MachineBasicBlock*>::iterator |
8780 | I = MBBLPads.begin(), E = MBBLPads.end(); I != E; ++I) |
8781 | (*I)->setIsEHPad(false); |
8782 | |
8783 | // The instruction is gone now. |
8784 | MI.eraseFromParent(); |
8785 | } |
8786 | |
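| /// OtherSucc - Return the successor of MBB other than Succ; MBB is expected
| /// to have exactly two successors.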
8787 | static |
8788 | MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) { |
8789 | for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(), |
8790 | E = MBB->succ_end(); I != E; ++I) |
8791 | if (*I != Succ) |
8792 | return *I; |
8793 | llvm_unreachable("Expecting a BB with two successors!")::llvm::llvm_unreachable_internal("Expecting a BB with two successors!" , "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/ARM/ARMISelLowering.cpp" , 8793); |
8794 | } |
8795 | |
8796 | /// Return the load opcode for a given load size. For load sizes >= 8,
8797 | /// a NEON opcode is returned.
8798 | static unsigned getLdOpcode(unsigned LdSize, bool IsThumb1, bool IsThumb2) { |
8799 | if (LdSize >= 8) |
8800 | return LdSize == 16 ? ARM::VLD1q32wb_fixed |
8801 | : LdSize == 8 ? ARM::VLD1d32wb_fixed : 0; |
8802 | if (IsThumb1) |
8803 | return LdSize == 4 ? ARM::tLDRi |
8804 | : LdSize == 2 ? ARM::tLDRHi |
8805 | : LdSize == 1 ? ARM::tLDRBi : 0; |
8806 | if (IsThumb2) |
8807 | return LdSize == 4 ? ARM::t2LDR_POST |
8808 | : LdSize == 2 ? ARM::t2LDRH_POST |
8809 | : LdSize == 1 ? ARM::t2LDRB_POST : 0; |
8810 | return LdSize == 4 ? ARM::LDR_POST_IMM |
8811 | : LdSize == 2 ? ARM::LDRH_POST |
8812 | : LdSize == 1 ? ARM::LDRB_POST_IMM : 0; |
8813 | } |
8814 | |
8815 | /// Return the store opcode for a given store size. For store sizes >= 8,
8816 | /// a NEON opcode is returned.
8817 | static unsigned getStOpcode(unsigned StSize, bool IsThumb1, bool IsThumb2) { |
8818 | if (StSize >= 8) |
8819 | return StSize == 16 ? ARM::VST1q32wb_fixed |
8820 | : StSize == 8 ? ARM::VST1d32wb_fixed : 0; |
8821 | if (IsThumb1) |
8822 | return StSize == 4 ? ARM::tSTRi |
8823 | : StSize == 2 ? ARM::tSTRHi |
8824 | : StSize == 1 ? ARM::tSTRBi : 0; |
8825 | if (IsThumb2) |
8826 | return StSize == 4 ? ARM::t2STR_POST |
8827 | : StSize == 2 ? ARM::t2STRH_POST |
8828 | : StSize == 1 ? ARM::t2STRB_POST : 0; |
8829 | return StSize == 4 ? ARM::STR_POST_IMM |
8830 | : StSize == 2 ? ARM::STRH_POST |
8831 | : StSize == 1 ? ARM::STRB_POST_IMM : 0; |
8832 | } |
8833 | |
8834 | /// Emit a post-increment load operation with given size. The instructions |
8835 | /// will be added to BB at Pos. |
8836 | static void emitPostLd(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos, |
8837 | const TargetInstrInfo *TII, const DebugLoc &dl, |
8838 | unsigned LdSize, unsigned Data, unsigned AddrIn, |
8839 | unsigned AddrOut, bool IsThumb1, bool IsThumb2) { |
8840 | unsigned LdOpc = getLdOpcode(LdSize, IsThumb1, IsThumb2); |
8841 | assert(LdOpc != 0 && "Should have a load opcode");
8842 | if (LdSize >= 8) { |
8843 | BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data) |
8844 | .addReg(AddrOut, RegState::Define) |
8845 | .addReg(AddrIn) |
8846 | .addImm(0) |
8847 | .add(predOps(ARMCC::AL)); |
8848 | } else if (IsThumb1) { |
8849 | // load + update AddrIn |
8850 | BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data) |
8851 | .addReg(AddrIn) |
8852 | .addImm(0) |
8853 | .add(predOps(ARMCC::AL)); |
8854 | BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut) |
8855 | .add(t1CondCodeOp()) |
8856 | .addReg(AddrIn) |
8857 | .addImm(LdSize) |
8858 | .add(predOps(ARMCC::AL)); |
8859 | } else if (IsThumb2) { |
8860 | BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data) |
8861 | .addReg(AddrOut, RegState::Define) |
8862 | .addReg(AddrIn) |
8863 | .addImm(LdSize) |
8864 | .add(predOps(ARMCC::AL)); |
8865 | } else { // arm |
8866 | BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data) |
8867 | .addReg(AddrOut, RegState::Define) |
8868 | .addReg(AddrIn) |
8869 | .addReg(0) |
8870 | .addImm(LdSize) |
8871 | .add(predOps(ARMCC::AL)); |
8872 | } |
8873 | } |
8874 | |
8875 | /// Emit a post-increment store operation with given size. The instructions |
8876 | /// will be added to BB at Pos. |
8877 | static void emitPostSt(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos, |
8878 | const TargetInstrInfo *TII, const DebugLoc &dl, |
8879 | unsigned StSize, unsigned Data, unsigned AddrIn, |
8880 | unsigned AddrOut, bool IsThumb1, bool IsThumb2) { |
8881 | unsigned StOpc = getStOpcode(StSize, IsThumb1, IsThumb2); |
8882 | assert(StOpc != 0 && "Should have a store opcode");
8883 | if (StSize >= 8) { |
8884 | BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut) |
8885 | .addReg(AddrIn) |
8886 | .addImm(0) |
8887 | .addReg(Data) |
8888 | .add(predOps(ARMCC::AL)); |
8889 | } else if (IsThumb1) { |
8890 | // store + update AddrIn |
8891 | BuildMI(*BB, Pos, dl, TII->get(StOpc)) |
8892 | .addReg(Data) |
8893 | .addReg(AddrIn) |
8894 | .addImm(0) |
8895 | .add(predOps(ARMCC::AL)); |
8896 | BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut) |
8897 | .add(t1CondCodeOp()) |
8898 | .addReg(AddrIn) |
8899 | .addImm(StSize) |
8900 | .add(predOps(ARMCC::AL)); |
8901 | } else if (IsThumb2) { |
8902 | BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut) |
8903 | .addReg(Data) |
8904 | .addReg(AddrIn) |
8905 | .addImm(StSize) |
8906 | .add(predOps(ARMCC::AL)); |
8907 | } else { // arm |
8908 | BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut) |
8909 | .addReg(Data) |
8910 | .addReg(AddrIn) |
8911 | .addReg(0) |
8912 | .addImm(StSize) |
8913 | .add(predOps(ARMCC::AL)); |
8914 | } |
8915 | } |
8916 | |
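| /// EmitStructByval - Expand the struct-byval copy pseudo into either an
| /// unrolled sequence of post-increment loads/stores or a copy loop,
| /// depending on the copy size and alignment.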
8917 | MachineBasicBlock * |
8918 | ARMTargetLowering::EmitStructByval(MachineInstr &MI, |
8919 | MachineBasicBlock *BB) const { |
8920 | // This pseudo instruction has 4 operands: dst, src, size, alignment.
8921 | // We expand it to a loop if size > Subtarget->getMaxInlineSizeThreshold(). |
8922 | // Otherwise, we will generate unrolled scalar copies. |
8923 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
8924 | const BasicBlock *LLVM_BB = BB->getBasicBlock(); |
8925 | MachineFunction::iterator It = ++BB->getIterator(); |
8926 | |
8927 | unsigned dest = MI.getOperand(0).getReg(); |
8928 | unsigned src = MI.getOperand(1).getReg(); |
8929 | unsigned SizeVal = MI.getOperand(2).getImm(); |
8930 | unsigned Align = MI.getOperand(3).getImm(); |
8931 | DebugLoc dl = MI.getDebugLoc(); |
8932 | |
8933 | MachineFunction *MF = BB->getParent(); |
8934 | MachineRegisterInfo &MRI = MF->getRegInfo(); |
8935 | unsigned UnitSize = 0; |
8936 | const TargetRegisterClass *TRC = nullptr; |
8937 | const TargetRegisterClass *VecTRC = nullptr; |
8938 | |
8939 | bool IsThumb1 = Subtarget->isThumb1Only(); |
8940 | bool IsThumb2 = Subtarget->isThumb2(); |
8941 | bool IsThumb = Subtarget->isThumb(); |
8942 | |
8943 | if (Align & 1) { |
8944 | UnitSize = 1; |
8945 | } else if (Align & 2) { |
8946 | UnitSize = 2; |
8947 | } else { |
8948 | // Check whether we can use NEON instructions. |
8949 | if (!MF->getFunction().hasFnAttribute(Attribute::NoImplicitFloat) && |
8950 | Subtarget->hasNEON()) { |
8951 | if ((Align % 16 == 0) && SizeVal >= 16) |
8952 | UnitSize = 16; |
8953 | else if ((Align % 8 == 0) && SizeVal >= 8) |
8954 | UnitSize = 8; |
8955 | } |
8956 | // Can't use NEON instructions. |
8957 | if (UnitSize == 0) |
8958 | UnitSize = 4; |
8959 | } |
8960 | |
8961 | // Select the correct opcode and register class for unit size load/store |
8962 | bool IsNeon = UnitSize >= 8; |
8963 | TRC = IsThumb ? &ARM::tGPRRegClass : &ARM::GPRRegClass; |
8964 | if (IsNeon) |
8965 | VecTRC = UnitSize == 16 ? &ARM::DPairRegClass |
8966 | : UnitSize == 8 ? &ARM::DPRRegClass |
8967 | : nullptr; |
8968 | |
8969 | unsigned BytesLeft = SizeVal % UnitSize; |
8970 | unsigned LoopSize = SizeVal - BytesLeft; |
8971 | |
8972 | if (SizeVal <= Subtarget->getMaxInlineSizeThreshold()) { |
8973 | // Use LDR and STR to copy. |
8974 | // [scratch, srcOut] = LDR_POST(srcIn, UnitSize) |
8975 | // [destOut] = STR_POST(scratch, destIn, UnitSize) |
8976 | unsigned srcIn = src; |
8977 | unsigned destIn = dest; |
8978 | for (unsigned i = 0; i < LoopSize; i+=UnitSize) { |
8979 | unsigned srcOut = MRI.createVirtualRegister(TRC); |
8980 | unsigned destOut = MRI.createVirtualRegister(TRC); |
8981 | unsigned scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC); |
8982 | emitPostLd(BB, MI, TII, dl, UnitSize, scratch, srcIn, srcOut, |
8983 | IsThumb1, IsThumb2); |
8984 | emitPostSt(BB, MI, TII, dl, UnitSize, scratch, destIn, destOut, |
8985 | IsThumb1, IsThumb2); |
8986 | srcIn = srcOut; |
8987 | destIn = destOut; |
8988 | } |
8989 | |
8990 | // Handle the leftover bytes with LDRB and STRB. |
8991 | // [scratch, srcOut] = LDRB_POST(srcIn, 1) |
8992 | // [destOut] = STRB_POST(scratch, destIn, 1) |
8993 | for (unsigned i = 0; i < BytesLeft; i++) { |
8994 | unsigned srcOut = MRI.createVirtualRegister(TRC); |
8995 | unsigned destOut = MRI.createVirtualRegister(TRC); |
8996 | unsigned scratch = MRI.createVirtualRegister(TRC); |
8997 | emitPostLd(BB, MI, TII, dl, 1, scratch, srcIn, srcOut, |
8998 | IsThumb1, IsThumb2); |
8999 | emitPostSt(BB, MI, TII, dl, 1, scratch, destIn, destOut, |
9000 | IsThumb1, IsThumb2); |
9001 | srcIn = srcOut; |
9002 | destIn = destOut; |
9003 | } |
9004 | MI.eraseFromParent(); // The instruction is gone now. |
9005 | return BB; |
9006 | } |
9007 | |
9008 | // Expand the pseudo op to a loop. |
9009 | // thisMBB: |
9010 | // ... |
9011 | // movw varEnd, # --> with thumb2 |
9012 | // movt varEnd, # |
9013 | // ldrcp varEnd, idx --> without thumb2 |
9014 | // fallthrough --> loopMBB |
9015 | // loopMBB: |
9016 | // PHI varPhi, varEnd, varLoop |
9017 | // PHI srcPhi, src, srcLoop |
9018 | // PHI destPhi, dst, destLoop |
9019 | // [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize) |
9020 | // [destLoop] = STR_POST(scratch, destPhi, UnitSize) |
9021 | // subs varLoop, varPhi, #UnitSize |
9022 | // bne loopMBB |
9023 | // fallthrough --> exitMBB |
9024 | // exitMBB: |
9025 | // epilogue to handle left-over bytes |
9026 | // [scratch, srcOut] = LDRB_POST(srcLoop, 1) |
9027 | // [destOut] = STRB_POST(scratch, destLoop, 1) |
9028 | MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); |
9029 | MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); |
9030 | MF->insert(It, loopMBB); |
9031 | MF->insert(It, exitMBB); |
9032 | |
9033 | // Transfer the remainder of BB and its successor edges to exitMBB. |
9034 | exitMBB->splice(exitMBB->begin(), BB, |
9035 | std::next(MachineBasicBlock::iterator(MI)), BB->end()); |
9036 | exitMBB->transferSuccessorsAndUpdatePHIs(BB); |
9037 | |
9038 | // Load an immediate to varEnd. |
9039 | unsigned varEnd = MRI.createVirtualRegister(TRC); |
9040 | if (Subtarget->useMovt(*MF)) { |
9041 | unsigned Vtmp = varEnd; |
9042 | if ((LoopSize & 0xFFFF0000) != 0) |
9043 | Vtmp = MRI.createVirtualRegister(TRC); |
9044 | BuildMI(BB, dl, TII->get(IsThumb ? ARM::t2MOVi16 : ARM::MOVi16), Vtmp) |
9045 | .addImm(LoopSize & 0xFFFF) |
9046 | .add(predOps(ARMCC::AL)); |
9047 | |
9048 | if ((LoopSize & 0xFFFF0000) != 0) |
9049 | BuildMI(BB, dl, TII->get(IsThumb ? ARM::t2MOVTi16 : ARM::MOVTi16), varEnd) |
9050 | .addReg(Vtmp) |
9051 | .addImm(LoopSize >> 16) |
9052 | .add(predOps(ARMCC::AL)); |
9053 | } else { |
9054 | MachineConstantPool *ConstantPool = MF->getConstantPool(); |
9055 | Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext()); |
9056 | const Constant *C = ConstantInt::get(Int32Ty, LoopSize); |
9057 | |
9058 | // MachineConstantPool wants an explicit alignment. |
9059 | unsigned Align = MF->getDataLayout().getPrefTypeAlignment(Int32Ty); |
9060 | if (Align == 0) |
9061 | Align = MF->getDataLayout().getTypeAllocSize(C->getType()); |
9062 | unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align); |
9063 | |
9064 | if (IsThumb) |
9065 | BuildMI(*BB, MI, dl, TII->get(ARM::tLDRpci)) |
9066 | .addReg(varEnd, RegState::Define) |
9067 | .addConstantPoolIndex(Idx) |
9068 | .add(predOps(ARMCC::AL)); |
9069 | else |
9070 | BuildMI(*BB, MI, dl, TII->get(ARM::LDRcp)) |
9071 | .addReg(varEnd, RegState::Define) |
9072 | .addConstantPoolIndex(Idx) |
9073 | .addImm(0) |
9074 | .add(predOps(ARMCC::AL)); |
9075 | } |
9076 | BB->addSuccessor(loopMBB); |
9077 | |
9078 | // Generate the loop body: |
9079 | // varPhi = PHI(varLoop, varEnd) |
9080 | // srcPhi = PHI(srcLoop, src) |
9081 | // destPhi = PHI(destLoop, dst) |
9082 | MachineBasicBlock *entryBB = BB; |
9083 | BB = loopMBB; |
9084 | unsigned varLoop = MRI.createVirtualRegister(TRC); |
9085 | unsigned varPhi = MRI.createVirtualRegister(TRC); |
9086 | unsigned srcLoop = MRI.createVirtualRegister(TRC); |
9087 | unsigned srcPhi = MRI.createVirtualRegister(TRC); |
9088 | unsigned destLoop = MRI.createVirtualRegister(TRC); |
9089 | unsigned destPhi = MRI.createVirtualRegister(TRC); |
9090 | |
9091 | BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), varPhi) |
9092 | .addReg(varLoop).addMBB(loopMBB) |
9093 | .addReg(varEnd).addMBB(entryBB); |
9094 | BuildMI(BB, dl, TII->get(ARM::PHI), srcPhi) |
9095 | .addReg(srcLoop).addMBB(loopMBB) |
9096 | .addReg(src).addMBB(entryBB); |
9097 | BuildMI(BB, dl, TII->get(ARM::PHI), destPhi) |
9098 | .addReg(destLoop).addMBB(loopMBB) |
9099 | .addReg(dest).addMBB(entryBB); |
9100 | |
9101 | // [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize) |
9102 | // [destLoop] = STR_POST(scratch, destPhi, UnitSize)
9103 | unsigned scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC); |
9104 | emitPostLd(BB, BB->end(), TII, dl, UnitSize, scratch, srcPhi, srcLoop, |
9105 | IsThumb1, IsThumb2); |
9106 | emitPostSt(BB, BB->end(), TII, dl, UnitSize, scratch, destPhi, destLoop, |
9107 | IsThumb1, IsThumb2); |
9108 | |
9109 | // Decrement loop variable by UnitSize. |
9110 | if (IsThumb1) { |
9111 | BuildMI(*BB, BB->end(), dl, TII->get(ARM::tSUBi8), varLoop) |
9112 | .add(t1CondCodeOp()) |
9113 | .addReg(varPhi) |
9114 | .addImm(UnitSize) |
9115 | .add(predOps(ARMCC::AL)); |
9116 | } else { |
9117 | MachineInstrBuilder MIB = |
9118 | BuildMI(*BB, BB->end(), dl, |
9119 | TII->get(IsThumb2 ? ARM::t2SUBri : ARM::SUBri), varLoop); |
9120 | MIB.addReg(varPhi) |
9121 | .addImm(UnitSize) |
9122 | .add(predOps(ARMCC::AL)) |
9123 | .add(condCodeOp()); |
9124 | MIB->getOperand(5).setReg(ARM::CPSR); |
9125 | MIB->getOperand(5).setIsDef(true); |
9126 | } |
9127 | BuildMI(*BB, BB->end(), dl, |
9128 | TII->get(IsThumb1 ? ARM::tBcc : IsThumb2 ? ARM::t2Bcc : ARM::Bcc)) |
9129 | .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); |
9130 | |
9131 | // loopMBB can loop back to loopMBB or fall through to exitMBB. |
9132 | BB->addSuccessor(loopMBB); |
9133 | BB->addSuccessor(exitMBB); |
9134 | |
9135 | // Add epilogue to handle BytesLeft. |
9136 | BB = exitMBB; |
9137 | auto StartOfExit = exitMBB->begin(); |
9138 | |
9139 | // [scratch, srcOut] = LDRB_POST(srcLoop, 1) |
9140 | // [destOut] = STRB_POST(scratch, destLoop, 1) |
9141 | unsigned srcIn = srcLoop; |
9142 | unsigned destIn = destLoop; |
9143 | for (unsigned i = 0; i < BytesLeft; i++) { |
9144 | unsigned srcOut = MRI.createVirtualRegister(TRC); |
9145 | unsigned destOut = MRI.createVirtualRegister(TRC); |
9146 | unsigned scratch = MRI.createVirtualRegister(TRC); |
9147 | emitPostLd(BB, StartOfExit, TII, dl, 1, scratch, srcIn, srcOut, |
9148 | IsThumb1, IsThumb2); |
9149 | emitPostSt(BB, StartOfExit, TII, dl, 1, scratch, destIn, destOut, |
9150 | IsThumb1, IsThumb2); |
9151 | srcIn = srcOut; |
9152 | destIn = destOut; |
9153 | } |
9154 | |
9155 | MI.eraseFromParent(); // The instruction is gone now. |
9156 | return BB; |
9157 | } |
9158 | |
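| /// EmitLowered__chkstk - Expand the Windows stack probe: call __chkstk with
| /// the allocation size (in words) in R4, then subtract the byte count it
| /// returns in R4 from SP.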
9159 | MachineBasicBlock * |
9160 | ARMTargetLowering::EmitLowered__chkstk(MachineInstr &MI, |
9161 | MachineBasicBlock *MBB) const { |
9162 | const TargetMachine &TM = getTargetMachine(); |
9163 | const TargetInstrInfo &TII = *Subtarget->getInstrInfo(); |
9164 | DebugLoc DL = MI.getDebugLoc(); |
9165 | |
9166 | assert(Subtarget->isTargetWindows() &&
9167 | "__chkstk is only supported on Windows");
9168 | assert(Subtarget->isThumb2() && "Windows on ARM requires Thumb-2 mode");
9169 | |
9170 | // __chkstk takes the number of words to allocate on the stack in R4, and |
9171 | // returns the stack adjustment in number of bytes in R4. This will not |
9172 | // clobber any other registers (other than the obvious lr).
9173 | // |
9174 | // Although, technically, IP should be considered a register which may be |
9175 | // clobbered, the call itself will not touch it. Windows on ARM is a pure |
9176 | // thumb-2 environment, so there is no interworking required. As a result, we |
9177 | // do not expect a veneer to be emitted by the linker, clobbering IP. |
9178 | // |
9179 | // Each module receives its own copy of __chkstk, so no import thunk is |
9180 | // required, again, ensuring that IP is not clobbered. |
9181 | // |
9182 | // Finally, although some linkers may theoretically provide a trampoline for |
9183 | // out of range calls (which is quite common due to a 32M range limitation of |
9184 | // branches for Thumb), we can generate the long-call version via |
9185 | // -mcmodel=large, alleviating the need for the trampoline which may clobber |
9186 | // IP. |
9187 | |
9188 | switch (TM.getCodeModel()) { |
9189 | case CodeModel::Small: |
9190 | case CodeModel::Medium: |
9191 | case CodeModel::Kernel: |
9192 | BuildMI(*MBB, MI, DL, TII.get(ARM::tBL)) |
9193 | .add(predOps(ARMCC::AL)) |
9194 | .addExternalSymbol("__chkstk") |
9195 | .addReg(ARM::R4, RegState::Implicit | RegState::Kill) |
9196 | .addReg(ARM::R4, RegState::Implicit | RegState::Define) |
9197 | .addReg(ARM::R12, |
9198 | RegState::Implicit | RegState::Define | RegState::Dead) |
9199 | .addReg(ARM::CPSR, |
9200 | RegState::Implicit | RegState::Define | RegState::Dead); |
9201 | break; |
9202 | case CodeModel::Large: { |
9203 | MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); |
9204 | unsigned Reg = MRI.createVirtualRegister(&ARM::rGPRRegClass); |
9205 | |
9206 | BuildMI(*MBB, MI, DL, TII.get(ARM::t2MOVi32imm), Reg) |
9207 | .addExternalSymbol("__chkstk"); |
9208 | BuildMI(*MBB, MI, DL, TII.get(ARM::tBLXr)) |
9209 | .add(predOps(ARMCC::AL)) |
9210 | .addReg(Reg, RegState::Kill) |
9211 | .addReg(ARM::R4, RegState::Implicit | RegState::Kill) |
9212 | .addReg(ARM::R4, RegState::Implicit | RegState::Define) |
9213 | .addReg(ARM::R12, |
9214 | RegState::Implicit | RegState::Define | RegState::Dead) |
9215 | .addReg(ARM::CPSR, |
9216 | RegState::Implicit | RegState::Define | RegState::Dead); |
9217 | break; |
9218 | } |
9219 | } |
9220 | |
9221 | BuildMI(*MBB, MI, DL, TII.get(ARM::t2SUBrr), ARM::SP) |
9222 | .addReg(ARM::SP, RegState::Kill) |
9223 | .addReg(ARM::R4, RegState::Kill) |
9224 | .setMIFlags(MachineInstr::FrameSetup) |
9225 | .add(predOps(ARMCC::AL)) |
9226 | .add(condCodeOp()); |
9227 | |
9228 | MI.eraseFromParent(); |
9229 | return MBB; |
9230 | } |
9231 | |
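| /// EmitLowered__dbzchk - Expand the WIN__DBZCHK pseudo: compare the divisor
| /// against zero and branch to a __brkdiv0 trap block when it is zero,
| /// implementing the Windows integer division-by-zero check.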
9232 | MachineBasicBlock * |
9233 | ARMTargetLowering::EmitLowered__dbzchk(MachineInstr &MI, |
9234 | MachineBasicBlock *MBB) const { |
9235 | DebugLoc DL = MI.getDebugLoc(); |
9236 | MachineFunction *MF = MBB->getParent(); |
9237 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
9238 | |
9239 | MachineBasicBlock *ContBB = MF->CreateMachineBasicBlock(); |
9240 | MF->insert(++MBB->getIterator(), ContBB); |
9241 | ContBB->splice(ContBB->begin(), MBB, |
9242 | std::next(MachineBasicBlock::iterator(MI)), MBB->end()); |
9243 | ContBB->transferSuccessorsAndUpdatePHIs(MBB); |
9244 | MBB->addSuccessor(ContBB); |
9245 | |
9246 | MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock(); |
9247 | BuildMI(TrapBB, DL, TII->get(ARM::t__brkdiv0)); |
9248 | MF->push_back(TrapBB); |
9249 | MBB->addSuccessor(TrapBB); |
9250 | |
9251 | BuildMI(*MBB, MI, DL, TII->get(ARM::tCMPi8)) |
9252 | .addReg(MI.getOperand(0).getReg()) |
9253 | .addImm(0) |
9254 | .add(predOps(ARMCC::AL)); |
9255 | BuildMI(*MBB, MI, DL, TII->get(ARM::t2Bcc)) |
9256 | .addMBB(TrapBB) |
9257 | .addImm(ARMCC::EQ) |
9258 | .addReg(ARM::CPSR); |
9259 | |
9260 | MI.eraseFromParent(); |
9261 | return ContBB; |
9262 | } |
9263 | |
9264 | MachineBasicBlock * |
9265 | ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, |
9266 | MachineBasicBlock *BB) const { |
9267 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
9268 | DebugLoc dl = MI.getDebugLoc(); |
9269 | bool isThumb2 = Subtarget->isThumb2(); |
9270 | switch (MI.getOpcode()) { |
9271 | default: { |
9272 | MI.print(errs()); |
9273 | llvm_unreachable("Unexpected instr type to insert");
9274 | } |
9275 | |
9276 | // Thumb1 post-indexed loads are really just single-register LDMs. |
9277 | case ARM::tLDR_postidx: { |
9278 | MachineOperand Def(MI.getOperand(1)); |
9279 | BuildMI(*BB, MI, dl, TII->get(ARM::tLDMIA_UPD)) |
9280 | .add(Def) // Rn_wb |
9281 | .add(MI.getOperand(2)) // Rn |
9282 | .add(MI.getOperand(3)) // PredImm |
9283 | .add(MI.getOperand(4)) // PredReg |
9284 | .add(MI.getOperand(0)); // Rt |
9285 | MI.eraseFromParent(); |
9286 | return BB; |
9287 | } |
9288 | |
9289 | // The Thumb2 pre-indexed stores have the same MI operands; they just
9290 | // define them differently in the .td files from the isel patterns, so |
9291 | // they need pseudos. |
9292 | case ARM::t2STR_preidx: |
9293 | MI.setDesc(TII->get(ARM::t2STR_PRE)); |
9294 | return BB; |
9295 | case ARM::t2STRB_preidx: |
9296 | MI.setDesc(TII->get(ARM::t2STRB_PRE)); |
9297 | return BB; |
9298 | case ARM::t2STRH_preidx: |
9299 | MI.setDesc(TII->get(ARM::t2STRH_PRE)); |
9300 | return BB; |
9301 | |
9302 | case ARM::STRi_preidx: |
9303 | case ARM::STRBi_preidx: { |
9304 | unsigned NewOpc = MI.getOpcode() == ARM::STRi_preidx ? ARM::STR_PRE_IMM |
9305 | : ARM::STRB_PRE_IMM; |
9306 | // Decode the offset. |
9307 | unsigned Offset = MI.getOperand(4).getImm(); |
9308 | bool isSub = ARM_AM::getAM2Op(Offset) == ARM_AM::sub; |
9309 | Offset = ARM_AM::getAM2Offset(Offset); |
9310 | if (isSub) |
9311 | Offset = -Offset; |
9312 | |
9313 | MachineMemOperand *MMO = *MI.memoperands_begin(); |
9314 | BuildMI(*BB, MI, dl, TII->get(NewOpc)) |
9315 | .add(MI.getOperand(0)) // Rn_wb |
9316 | .add(MI.getOperand(1)) // Rt |
9317 | .add(MI.getOperand(2)) // Rn |
9318 | .addImm(Offset) // offset (skip GPR==zero_reg) |
9319 | .add(MI.getOperand(5)) // pred |
9320 | .add(MI.getOperand(6)) |
9321 | .addMemOperand(MMO); |
9322 | MI.eraseFromParent(); |
9323 | return BB; |
9324 | } |
9325 | case ARM::STRr_preidx: |
9326 | case ARM::STRBr_preidx: |
9327 | case ARM::STRH_preidx: { |
9328 | unsigned NewOpc; |
9329 | switch (MI.getOpcode()) { |
9330 | default: llvm_unreachable("unexpected opcode!");
9331 | case ARM::STRr_preidx: NewOpc = ARM::STR_PRE_REG; break; |
9332 | case ARM::STRBr_preidx: NewOpc = ARM::STRB_PRE_REG; break; |
9333 | case ARM::STRH_preidx: NewOpc = ARM::STRH_PRE; break; |
9334 | } |
9335 | MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(NewOpc)); |
9336 | for (unsigned i = 0; i < MI.getNumOperands(); ++i) |
9337 | MIB.add(MI.getOperand(i)); |
9338 | MI.eraseFromParent(); |
9339 | return BB; |
9340 | } |
9341 | |
9342 | case ARM::tMOVCCr_pseudo: { |
9343 | // To "insert" a SELECT_CC instruction, we actually have to insert the |
9344 | // diamond control-flow pattern. The incoming instruction knows the |
9345 | // destination vreg to set, the condition code register to branch on, the |
9346 | // true/false values to select between, and a branch opcode to use. |
9347 | const BasicBlock *LLVM_BB = BB->getBasicBlock(); |
9348 | MachineFunction::iterator It = ++BB->getIterator(); |
9349 | |
9350 | // thisMBB: |
9351 | // ... |
9352 | // TrueVal = ... |
9353 | // cmpTY ccX, r1, r2 |
9354 | // bCC copy1MBB |
9355 | // fallthrough --> copy0MBB |
9356 | MachineBasicBlock *thisMBB = BB; |
9357 | MachineFunction *F = BB->getParent(); |
9358 | MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); |
9359 | MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); |
9360 | F->insert(It, copy0MBB); |
9361 | F->insert(It, sinkMBB); |
9362 | |
9363 | // Transfer the remainder of BB and its successor edges to sinkMBB. |
9364 | sinkMBB->splice(sinkMBB->begin(), BB, |
9365 | std::next(MachineBasicBlock::iterator(MI)), BB->end()); |
9366 | sinkMBB->transferSuccessorsAndUpdatePHIs(BB); |
9367 | |
9368 | BB->addSuccessor(copy0MBB); |
9369 | BB->addSuccessor(sinkMBB); |
9370 | |
9371 | BuildMI(BB, dl, TII->get(ARM::tBcc)) |
9372 | .addMBB(sinkMBB) |
9373 | .addImm(MI.getOperand(3).getImm()) |
9374 | .addReg(MI.getOperand(4).getReg()); |
9375 | |
9376 | // copy0MBB: |
9377 | // %FalseValue = ... |
9378 | // # fallthrough to sinkMBB |
9379 | BB = copy0MBB; |
9380 | |
9381 | // Update machine-CFG edges |
9382 | BB->addSuccessor(sinkMBB); |
9383 | |
9384 | // sinkMBB: |
9385 | // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] |
9386 | // ... |
9387 | BB = sinkMBB; |
9388 | BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), MI.getOperand(0).getReg()) |
9389 | .addReg(MI.getOperand(1).getReg()) |
9390 | .addMBB(copy0MBB) |
9391 | .addReg(MI.getOperand(2).getReg()) |
9392 | .addMBB(thisMBB); |
9393 | |
9394 | MI.eraseFromParent(); // The pseudo instruction is gone now. |
9395 | return BB; |
9396 | } |
9397 | |
9398 | case ARM::BCCi64: |
9399 | case ARM::BCCZi64: { |
9400 | // If there is an unconditional branch to the other successor, remove it. |
9401 | BB->erase(std::next(MachineBasicBlock::iterator(MI)), BB->end()); |
9402 | |
9403 | // Compare both parts that make up the double comparison separately for |
9404 | // equality. |
9405 | bool RHSisZero = MI.getOpcode() == ARM::BCCZi64; |
9406 | |
9407 | unsigned LHS1 = MI.getOperand(1).getReg(); |
9408 | unsigned LHS2 = MI.getOperand(2).getReg(); |
9409 | if (RHSisZero) { |
9410 | BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) |
9411 | .addReg(LHS1) |
9412 | .addImm(0) |
9413 | .add(predOps(ARMCC::AL)); |
9414 | BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) |
9415 | .addReg(LHS2).addImm(0) |
9416 | .addImm(ARMCC::EQ).addReg(ARM::CPSR); |
9417 | } else { |
9418 | unsigned RHS1 = MI.getOperand(3).getReg(); |
9419 | unsigned RHS2 = MI.getOperand(4).getReg(); |
9420 | BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) |
9421 | .addReg(LHS1) |
9422 | .addReg(RHS1) |
9423 | .add(predOps(ARMCC::AL)); |
9424 | BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) |
9425 | .addReg(LHS2).addReg(RHS2) |
9426 | .addImm(ARMCC::EQ).addReg(ARM::CPSR); |
9427 | } |
9428 | |
9429 | MachineBasicBlock *destMBB = MI.getOperand(RHSisZero ? 3 : 5).getMBB(); |
9430 | MachineBasicBlock *exitMBB = OtherSucc(BB, destMBB); |
9431 | if (MI.getOperand(0).getImm() == ARMCC::NE) |
9432 | std::swap(destMBB, exitMBB); |
9433 | |
9434 | BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) |
9435 | .addMBB(destMBB).addImm(ARMCC::EQ).addReg(ARM::CPSR); |
9436 | if (isThumb2) |
9437 | BuildMI(BB, dl, TII->get(ARM::t2B)) |
9438 | .addMBB(exitMBB) |
9439 | .add(predOps(ARMCC::AL)); |
9440 | else |
9441 | BuildMI(BB, dl, TII->get(ARM::B)).addMBB(exitMBB);
9442 | |
9443 | MI.eraseFromParent(); // The pseudo instruction is gone now. |
9444 | return BB; |
9445 | } |
9446 | |
9447 | case ARM::Int_eh_sjlj_setjmp: |
9448 | case ARM::Int_eh_sjlj_setjmp_nofp: |
9449 | case ARM::tInt_eh_sjlj_setjmp: |
9450 | case ARM::t2Int_eh_sjlj_setjmp: |
9451 | case ARM::t2Int_eh_sjlj_setjmp_nofp: |
9452 | return BB; |
9453 | |
9454 | case ARM::Int_eh_sjlj_setup_dispatch: |
9455 | EmitSjLjDispatchBlock(MI, BB); |
9456 | return BB; |
9457 | |
9458 | case ARM::ABS: |
9459 | case ARM::t2ABS: { |
9460 | // To insert an ABS instruction, we have to insert the
9461 | // diamond control-flow pattern. For ABS, the incoming instruction
9462 | // knows only the source vreg to test against 0 and the destination
9463 | // vreg to set; the compare, branch, and negate are all synthesized
9464 | // here.
9465 | // It transforms |
9466 | // V1 = ABS V0 |
9467 | // into |
9468 | // V2 = MOVS V0 |
9469 | // BCC (branch to SinkBB if V0 >= 0) |
9470 | // RSBBB: V3 = RSBri V2, 0 (compute ABS if V2 < 0) |
9471 | // SinkBB: V1 = PHI(V2, V3) |
9472 | const BasicBlock *LLVM_BB = BB->getBasicBlock(); |
9473 | MachineFunction::iterator BBI = ++BB->getIterator(); |
9474 | MachineFunction *Fn = BB->getParent(); |
9475 | MachineBasicBlock *RSBBB = Fn->CreateMachineBasicBlock(LLVM_BB); |
9476 | MachineBasicBlock *SinkBB = Fn->CreateMachineBasicBlock(LLVM_BB); |
9477 | Fn->insert(BBI, RSBBB); |
9478 | Fn->insert(BBI, SinkBB); |
9479 | |
9480 | unsigned int ABSSrcReg = MI.getOperand(1).getReg(); |
9481 | unsigned int ABSDstReg = MI.getOperand(0).getReg(); |
9482 | bool ABSSrcKill = MI.getOperand(1).isKill();
9483 | bool isThumb2 = Subtarget->isThumb2(); |
9484 | MachineRegisterInfo &MRI = Fn->getRegInfo(); |
9485 | // In Thumb mode the S bit must not be specified if the source register is
9486 | // the SP or PC, or if the destination register is the SP; so restrict the register class.
9487 | unsigned NewRsbDstReg = |
9488 | MRI.createVirtualRegister(isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass); |
9489 | |
9490 | // Transfer the remainder of BB and its successor edges to sinkMBB. |
9491 | SinkBB->splice(SinkBB->begin(), BB, |
9492 | std::next(MachineBasicBlock::iterator(MI)), BB->end()); |
9493 | SinkBB->transferSuccessorsAndUpdatePHIs(BB); |
9494 | |
9495 | BB->addSuccessor(RSBBB); |
9496 | BB->addSuccessor(SinkBB); |
9497 | |
9498 | // RSBBB falls through to SinkBB
9499 | RSBBB->addSuccessor(SinkBB); |
9500 | |
9501 | // insert a cmp at the end of BB |
9502 | BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) |
9503 | .addReg(ABSSrcReg) |
9504 | .addImm(0) |
9505 | .add(predOps(ARMCC::AL)); |
9506 | |
9507 | // insert a bcc with opposite CC to ARMCC::MI at the end of BB |
9508 | BuildMI(BB, dl, |
9509 | TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)).addMBB(SinkBB) |
9510 | .addImm(ARMCC::getOppositeCondition(ARMCC::MI)).addReg(ARM::CPSR); |
9511 | |
9512 | // insert rsbri in RSBBB |
9513 | // Note: BCC and rsbri will be converted into predicated rsbmi |
9514 | // by the if-conversion pass
9515 | BuildMI(*RSBBB, RSBBB->begin(), dl, |
9516 | TII->get(isThumb2 ? ARM::t2RSBri : ARM::RSBri), NewRsbDstReg) |
9517 | .addReg(ABSSrcReg, ABSSrcKill ? RegState::Kill : 0)
9518 | .addImm(0) |
9519 | .add(predOps(ARMCC::AL)) |
9520 | .add(condCodeOp()); |
9521 | |
9522 | // insert PHI in SinkBB, |
9523 | // reusing ABSDstReg so that uses of the ABS instruction are unchanged
9524 | BuildMI(*SinkBB, SinkBB->begin(), dl, |
9525 | TII->get(ARM::PHI), ABSDstReg) |
9526 | .addReg(NewRsbDstReg).addMBB(RSBBB) |
9527 | .addReg(ABSSrcReg).addMBB(BB); |
9528 | |
9529 | // remove ABS instruction |
9530 | MI.eraseFromParent(); |
9531 | |
9532 | // return last added BB |
9533 | return SinkBB; |
9534 | } |
9535 | case ARM::COPY_STRUCT_BYVAL_I32: |
9536 | ++NumLoopByVals; |
9537 | return EmitStructByval(MI, BB); |
9538 | case ARM::WIN__CHKSTK: |
9539 | return EmitLowered__chkstk(MI, BB); |
9540 | case ARM::WIN__DBZCHK: |
9541 | return EmitLowered__dbzchk(MI, BB); |
9542 | } |
9543 | } |
9544 | |
9545 | /// \brief Attaches vregs to MEMCPY that it will use as scratch registers |
9546 | /// when it is expanded into LDM/STM. This is done as a post-isel lowering |
9547 | /// instead of as a custom inserter because we need the use list from the SDNode. |
9548 | static void attachMEMCPYScratchRegs(const ARMSubtarget *Subtarget, |
9549 | MachineInstr &MI, const SDNode *Node) { |
9550 | bool isThumb1 = Subtarget->isThumb1Only(); |
9551 | |
9552 | DebugLoc DL = MI.getDebugLoc(); |
9553 | MachineFunction *MF = MI.getParent()->getParent(); |
9554 | MachineRegisterInfo &MRI = MF->getRegInfo(); |
9555 | MachineInstrBuilder MIB(*MF, MI); |
9556 | |
9557 | // If the new dst/src is unused, mark it as dead.
9558 | if (!Node->hasAnyUseOfValue(0)) { |
9559 | MI.getOperand(0).setIsDead(true); |
9560 | } |
9561 | if (!Node->hasAnyUseOfValue(1)) { |
9562 | MI.getOperand(1).setIsDead(true); |
9563 | } |
9564 | |
9565 | // The MEMCPY both defines and kills the scratch registers. |
9566 | for (unsigned I = 0; I != MI.getOperand(4).getImm(); ++I) { |
9567 | unsigned TmpReg = MRI.createVirtualRegister(isThumb1 ? &ARM::tGPRRegClass |
9568 | : &ARM::GPRRegClass); |
9569 | MIB.addReg(TmpReg, RegState::Define|RegState::Dead); |
9570 | } |
9571 | } |
9572 | |
9573 | void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI, |
9574 | SDNode *Node) const { |
9575 | if (MI.getOpcode() == ARM::MEMCPY) { |
9576 | attachMEMCPYScratchRegs(Subtarget, MI, Node); |
9577 | return; |
9578 | } |
9579 | |
9580 | const MCInstrDesc *MCID = &MI.getDesc(); |
9581 | // Adjust instructions that potentially set the 's' bit (ADC, SBC, RSB, RSC)
9582 | // after isel. Coming out of isel, they have an implicit CPSR def, but the
9583 | // optional operand is still set to noreg. If needed, set the optional operand's
9584 | // register to CPSR, and remove the redundant implicit def.
9585 | // |
9586 | // e.g. ADCS (..., implicit-def CPSR) -> ADC (... opt:def CPSR). |
9587 | |
9588 | // Rename pseudo opcodes. |
9589 | unsigned NewOpc = convertAddSubFlagsOpcode(MI.getOpcode()); |
9590 | unsigned ccOutIdx; |
9591 | if (NewOpc) { |
9592 | const ARMBaseInstrInfo *TII = Subtarget->getInstrInfo(); |
9593 | MCID = &TII->get(NewOpc); |
9594 | |
9595 | assert(MCID->getNumOperands() ==
9596 | MI.getDesc().getNumOperands() + 5 - MI.getDesc().getSize()
9597 | && "converted opcode should be the same except for cc_out"
9598 | " (and, on Thumb1, pred)");
9599 | |
9600 | MI.setDesc(*MCID); |
9601 | |
9602 | // Add the optional cc_out operand |
9603 | MI.addOperand(MachineOperand::CreateReg(0, /*isDef=*/true)); |
9604 | |
9605 | // On Thumb1, move all input operands to the end, then add the predicate |
9606 | if (Subtarget->isThumb1Only()) { |
9607 | for (unsigned c = MCID->getNumOperands() - 4; c--;) { |
9608 | MI.addOperand(MI.getOperand(1)); |
9609 | MI.RemoveOperand(1); |
9610 | } |
9611 | |
9612 | // Restore the ties |
9613 | for (unsigned i = MI.getNumOperands(); i--;) { |
9614 | const MachineOperand& op = MI.getOperand(i); |
9615 | if (op.isReg() && op.isUse()) { |
9616 | int DefIdx = MCID->getOperandConstraint(i, MCOI::TIED_TO); |
9617 | if (DefIdx != -1) |
9618 | MI.tieOperands(DefIdx, i); |
9619 | } |
9620 | } |
9621 | |
9622 | MI.addOperand(MachineOperand::CreateImm(ARMCC::AL)); |
9623 | MI.addOperand(MachineOperand::CreateReg(0, /*isDef=*/false)); |
9624 | ccOutIdx = 1; |
9625 | } else |
9626 | ccOutIdx = MCID->getNumOperands() - 1; |
9627 | } else |
9628 | ccOutIdx = MCID->getNumOperands() - 1; |
9629 | |
9630 | // Any ARM instruction that sets the 's' bit should specify an optional |
9631 | // "cc_out" operand in the last operand position. |
9632 | if (!MI.hasOptionalDef() || !MCID->OpInfo[ccOutIdx].isOptionalDef()) { |
9633 | assert(!NewOpc && "Optional cc_out operand required");
9634 | return; |
9635 | } |
9636 | // Look for an implicit def of CPSR added by MachineInstr ctor. Remove it |
9637 | // since we already have an optional CPSR def. |
9638 | bool definesCPSR = false; |
9639 | bool deadCPSR = false; |
9640 | for (unsigned i = MCID->getNumOperands(), e = MI.getNumOperands(); i != e; |
9641 | ++i) { |
9642 | const MachineOperand &MO = MI.getOperand(i); |
9643 | if (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR) { |
9644 | definesCPSR = true; |
9645 | if (MO.isDead()) |
9646 | deadCPSR = true; |
9647 | MI.RemoveOperand(i); |
9648 | break; |
9649 | } |
9650 | } |
9651 | if (!definesCPSR) { |
9652 | assert(!NewOpc && "Optional cc_out operand required");
9653 | return; |
9654 | } |
9655 | assert(deadCPSR == !Node->hasAnyUseOfValue(1) && "inconsistent dead flag");
9656 | if (deadCPSR) { |
9657 | assert(!MI.getOperand(ccOutIdx).getReg() &&
9658 | "expect uninitialized optional cc_out operand");
9659 | // Thumb1 instructions must have the S bit even if the CPSR is dead. |
9660 | if (!Subtarget->isThumb1Only()) |
9661 | return; |
9662 | } |
9663 | |
9664 | // If this instruction was defined with an optional CPSR def and its dag node |
9665 | // had a live implicit CPSR def, then activate the optional CPSR def. |
9666 | MachineOperand &MO = MI.getOperand(ccOutIdx); |
9667 | MO.setReg(ARM::CPSR); |
9668 | MO.setIsDef(true); |
9669 | } |
9670 | |
9671 | //===----------------------------------------------------------------------===// |
9672 | // ARM Optimization Hooks |
9673 | //===----------------------------------------------------------------------===// |
9674 | |
9675 | // Helper function that checks if N is a null or all ones constant. |
9676 | static inline bool isZeroOrAllOnes(SDValue N, bool AllOnes) { |
9677 | return AllOnes ? isAllOnesConstant(N) : isNullConstant(N); |
9678 | } |
9679 | |
9680 | // Return true if N is conditionally 0 or all ones. |
9681 | // Detects these expressions where cc is an i1 value: |
9682 | // |
9683 | // (select cc 0, y) [AllOnes=0] |
9684 | // (select cc y, 0) [AllOnes=0] |
9685 | // (zext cc) [AllOnes=0] |
9686 | // (sext cc) [AllOnes=0/1] |
9687 | // (select cc -1, y) [AllOnes=1] |
9688 | // (select cc y, -1) [AllOnes=1] |
9689 | // |
9690 | // Invert is set when N is the null/all ones constant when CC is false. |
9691 | // OtherOp is set to the alternative value of N. |
9692 | static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes, |
9693 | SDValue &CC, bool &Invert, |
9694 | SDValue &OtherOp, |
9695 | SelectionDAG &DAG) { |
9696 | switch (N->getOpcode()) { |
9697 | default: return false; |
9698 | case ISD::SELECT: { |
9699 | CC = N->getOperand(0); |
9700 | SDValue N1 = N->getOperand(1); |
9701 | SDValue N2 = N->getOperand(2); |
9702 | if (isZeroOrAllOnes(N1, AllOnes)) { |
9703 | Invert = false; |
9704 | OtherOp = N2; |
9705 | return true; |
9706 | } |
9707 | if (isZeroOrAllOnes(N2, AllOnes)) { |
9708 | Invert = true; |
9709 | OtherOp = N1; |
9710 | return true; |
9711 | } |
9712 | return false; |
9713 | } |
9714 | case ISD::ZERO_EXTEND: |
9715 | // (zext cc) can never be the all ones value. |
9716 | if (AllOnes) |
9717 | return false; |
9718 | LLVM_FALLTHROUGH;
9719 | case ISD::SIGN_EXTEND: { |
9720 | SDLoc dl(N); |
9721 | EVT VT = N->getValueType(0); |
9722 | CC = N->getOperand(0); |
9723 | if (CC.getValueType() != MVT::i1 || CC.getOpcode() != ISD::SETCC) |
9724 | return false; |
9725 | Invert = !AllOnes; |
9726 | if (AllOnes) |
9727 | // When looking for an AllOnes constant, N is an sext, and the 'other' |
9728 | // value is 0. |
9729 | OtherOp = DAG.getConstant(0, dl, VT); |
9730 | else if (N->getOpcode() == ISD::ZERO_EXTEND) |
9731 | // When looking for a 0 constant, N can be zext or sext. |
9732 | OtherOp = DAG.getConstant(1, dl, VT); |
9733 | else |
9734 | OtherOp = DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), dl, |
9735 | VT); |
9736 | return true; |
9737 | } |
9738 | } |
9739 | } |
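     |
     | // A standalone sanity check of the i1-extension facts the zext/sext cases
     | // above rely on (hypothetical demo, not part of this file): zext of an i1
     | // is 0 or 1 and sext of an i1 is 0 or -1, which fixes the OtherOp choices.
     | //
     | //   #include <cassert>
     | //   #include <cstdint>
     | //   int main() {
     | //     for (int cc = 0; cc <= 1; ++cc) {
     | //       assert(uint32_t(cc) == (cc ? 1u : 0u)); // zext: other value is 1
     | //       assert(int32_t(-cc) == (cc ? -1 : 0));  // sext: other value is -1
     | //     }
     | //   }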
9740 | |
9741 | // Combine a constant select operand into its use: |
9742 | // |
9743 | // (add (select cc, 0, c), x) -> (select cc, x, (add x, c))
9744 | // (sub x, (select cc, 0, c)) -> (select cc, x, (sub x, c))
9745 | // (and (select cc, -1, c), x) -> (select cc, x, (and x, c)) [AllOnes=1]
9746 | // (or (select cc, 0, c), x) -> (select cc, x, (or x, c))
9747 | // (xor (select cc, 0, c), x) -> (select cc, x, (xor x, c))
9748 | // |
9749 | // The transform is rejected if the select doesn't have a constant operand that |
9750 | // is null, or all ones when AllOnes is set. |
9751 | // |
9752 | // Also recognize sext/zext from i1: |
9753 | // |
9754 | // (add (zext cc), x) -> (select cc (add x, 1), x) |
9755 | // (add (sext cc), x) -> (select cc (add x, -1), x) |
9756 | // |
9757 | // These transformations eventually create predicated instructions. |
9758 | // |
9759 | // @param N The node to transform. |
9760 | // @param Slct The N operand that is a select. |
9761 | // @param OtherOp The other N operand (x above). |
9762 | // @param DCI Context. |
9763 | // @param AllOnes Require the select constant to be all ones instead of null. |
9764 | // @returns The new node, or SDValue() on failure. |
9765 | static |
9766 | SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp, |
9767 | TargetLowering::DAGCombinerInfo &DCI, |
9768 | bool AllOnes = false) { |
9769 | SelectionDAG &DAG = DCI.DAG; |
9770 | EVT VT = N->getValueType(0); |
9771 | SDValue NonConstantVal; |
9772 | SDValue CCOp; |
9773 | bool SwapSelectOps; |
9774 | if (!isConditionalZeroOrAllOnes(Slct.getNode(), AllOnes, CCOp, SwapSelectOps, |
9775 | NonConstantVal, DAG)) |
9776 | return SDValue(); |
9777 | |
9778 | // Slct is now known to be the desired identity constant when CC is true.
9779 | SDValue TrueVal = OtherOp; |
9780 | SDValue FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT, |
9781 | OtherOp, NonConstantVal); |
9782 | // Unless SwapSelectOps says CC should be false. |
9783 | if (SwapSelectOps) |
9784 | std::swap(TrueVal, FalseVal); |
9785 | |
9786 | return DAG.getNode(ISD::SELECT, SDLoc(N), VT, |
9787 | CCOp, TrueVal, FalseVal); |
9788 | } |
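     |
     | // A standalone check of the rewrite performed above (hypothetical demo,
     | // not part of this file): (add (select cc, 0, c), x) and
     | // (select cc, x, (add x, c)) agree for all inputs.
     | //
     | //   #include <cassert>
     | //   int main() {
     | //     for (int cc = 0; cc <= 1; ++cc)
     | //       for (int c = -2; c <= 2; ++c)
     | //         for (int x = -2; x <= 2; ++x)
     | //           assert(((cc ? 0 : c) + x) == (cc ? x : x + c));
     | //   }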
9789 | |
9790 | // Attempt combineSelectAndUse on each operand of a commutative operator N. |
9791 | static |
9792 | SDValue combineSelectAndUseCommutative(SDNode *N, bool AllOnes, |
9793 | TargetLowering::DAGCombinerInfo &DCI) { |
9794 | SDValue N0 = N->getOperand(0); |
9795 | SDValue N1 = N->getOperand(1); |
9796 | if (N0.getNode()->hasOneUse()) |
9797 | if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI, AllOnes)) |
9798 | return Result; |
9799 | if (N1.getNode()->hasOneUse()) |
9800 | if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI, AllOnes)) |
9801 | return Result; |
9802 | return SDValue(); |
9803 | } |
9804 | |
9805 | static bool IsVUZPShuffleNode(SDNode *N) { |
9806 | // VUZP shuffle node. |
9807 | if (N->getOpcode() == ARMISD::VUZP) |
9808 | return true; |
9809 | |
9810 | // "VUZP" on i32 is an alias for VTRN. |
9811 | if (N->getOpcode() == ARMISD::VTRN && N->getValueType(0) == MVT::v2i32) |
9812 | return true; |
9813 | |
9814 | return false; |
9815 | } |
9816 | |
9817 | static SDValue AddCombineToVPADD(SDNode *N, SDValue N0, SDValue N1, |
9818 | TargetLowering::DAGCombinerInfo &DCI, |
9819 | const ARMSubtarget *Subtarget) { |
9820 | // Look for ADD(VUZP.0, VUZP.1). |
9821 | if (!IsVUZPShuffleNode(N0.getNode()) || N0.getNode() != N1.getNode() || |
9822 | N0 == N1) |
9823 | return SDValue(); |
9824 | |
9825 | // Make sure the ADD is a 64-bit add; there is no 128-bit VPADD. |
9826 | if (!N->getValueType(0).is64BitVector()) |
9827 | return SDValue(); |
9828 | |
9829 | // Generate vpadd. |
9830 | SelectionDAG &DAG = DCI.DAG; |
9831 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
9832 | SDLoc dl(N); |
9833 | SDNode *Unzip = N0.getNode(); |
9834 | EVT VT = N->getValueType(0); |
9835 | |
9836 | SmallVector<SDValue, 8> Ops; |
9837 | Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpadd, dl, |
9838 | TLI.getPointerTy(DAG.getDataLayout()))); |
9839 | Ops.push_back(Unzip->getOperand(0)); |
9840 | Ops.push_back(Unzip->getOperand(1)); |
9841 | |
9842 | return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, Ops); |
9843 | } |
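     |
     | // Why ADD(VUZP.0, VUZP.1) is a pairwise add, shown on a scalar model
     | // (hypothetical demo, not part of this file): adding the unzipped even
     | // and odd lanes reproduces vpadd on the two source vectors.
     | //
     | //   #include <array>
     | //   #include <cassert>
     | //   int main() {
     | //     std::array<int, 4> a{1, 2, 3, 4}, b{5, 6, 7, 8};
     | //     std::array<int, 4> ev{a[0], a[2], b[0], b[2]};   // vuzp result 0
     | //     std::array<int, 4> od{a[1], a[3], b[1], b[3]};   // vuzp result 1
     | //     std::array<int, 4> pa{a[0] + a[1], a[2] + a[3],
     | //                           b[0] + b[1], b[2] + b[3]}; // vpadd
     | //     for (int i = 0; i < 4; ++i)
     | //       assert(ev[i] + od[i] == pa[i]);
     | //   }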
9844 | |
9845 | static SDValue AddCombineVUZPToVPADDL(SDNode *N, SDValue N0, SDValue N1, |
9846 | TargetLowering::DAGCombinerInfo &DCI, |
9847 | const ARMSubtarget *Subtarget) { |
9848 | // Check for two extended operands. |
9849 | if (!(N0.getOpcode() == ISD::SIGN_EXTEND && |
9850 | N1.getOpcode() == ISD::SIGN_EXTEND) && |
9851 | !(N0.getOpcode() == ISD::ZERO_EXTEND && |
9852 | N1.getOpcode() == ISD::ZERO_EXTEND)) |
9853 | return SDValue(); |
9854 | |
9855 | SDValue N00 = N0.getOperand(0); |
9856 | SDValue N10 = N1.getOperand(0); |
9857 | |
9858 | // Look for ADD(SEXT(VUZP.0), SEXT(VUZP.1)) |
9859 | if (!IsVUZPShuffleNode(N00.getNode()) || N00.getNode() != N10.getNode() || |
9860 | N00 == N10) |
9861 | return SDValue(); |
9862 | |
9863 | // We only recognize Q register paddl here; this can't be reached until |
9864 | // after type legalization. |
9865 | if (!N00.getValueType().is64BitVector() || |
9866 | !N0.getValueType().is128BitVector()) |
9867 | return SDValue(); |
9868 | |
9869 | // Generate vpaddl. |
9870 | SelectionDAG &DAG = DCI.DAG; |
9871 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
9872 | SDLoc dl(N); |
9873 | EVT VT = N->getValueType(0); |
9874 | |
9875 | SmallVector<SDValue, 8> Ops; |
9876 | // Form vpaddl.sN or vpaddl.uN depending on the kind of extension. |
9877 | unsigned Opcode; |
9878 | if (N0.getOpcode() == ISD::SIGN_EXTEND) |
9879 | Opcode = Intrinsic::arm_neon_vpaddls; |
9880 | else |
9881 | Opcode = Intrinsic::arm_neon_vpaddlu; |
9882 | Ops.push_back(DAG.getConstant(Opcode, dl, |
9883 | TLI.getPointerTy(DAG.getDataLayout()))); |
9884 | EVT ElemTy = N00.getValueType().getVectorElementType(); |
9885 | unsigned NumElts = VT.getVectorNumElements(); |
9886 | EVT ConcatVT = EVT::getVectorVT(*DAG.getContext(), ElemTy, NumElts * 2); |
9887 | SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), ConcatVT, |
9888 | N00.getOperand(0), N00.getOperand(1)); |
9889 | Ops.push_back(Concat); |
9890 | |
9891 | return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, Ops); |
9892 | } |
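     |
     | // The widening matters: vpaddl keeps the full pairwise sum where a plain
     | // narrow add would wrap (hypothetical standalone demo, not part of this
     | // file):
     | //
     | //   #include <cassert>
     | //   #include <cstdint>
     | //   int main() {
     | //     int8_t x = 100, y = 100;
     | //     int16_t wide = int16_t(x) + int16_t(y); // vpaddl.s8 lane: 200
     | //     int8_t narrow = int8_t(x + y);          // narrow add lane: -56
     | //     assert(wide == 200 && narrow == -56);
     | //   }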
9893 | |
9894 | // FIXME: This function shouldn't be necessary; if we lower BUILD_VECTOR in |
9895 | // an appropriate manner, we end up with ADD(VUZP(ZEXT(N))), which is |
9896 | // much easier to match. |
9897 | static SDValue |
9898 | AddCombineBUILD_VECTORToVPADDL(SDNode *N, SDValue N0, SDValue N1, |
9899 | TargetLowering::DAGCombinerInfo &DCI, |
9900 | const ARMSubtarget *Subtarget) { |
9901 | // Only perform this optimization after legalization, and only if NEON is
9902 | // available. We also expect both operands to be BUILD_VECTORs.
9903 | if (DCI.isBeforeLegalize() || !Subtarget->hasNEON() |
9904 | || N0.getOpcode() != ISD::BUILD_VECTOR |
9905 | || N1.getOpcode() != ISD::BUILD_VECTOR) |
9906 | return SDValue(); |
9907 | |
9908 | // Check output type since VPADDL operand elements can only be 8, 16, or 32. |
9909 | EVT VT = N->getValueType(0); |
9910 | if (!VT.isInteger() || VT.getVectorElementType() == MVT::i64) |
9911 | return SDValue(); |
9912 | |
9913 | // Check that the vector operands are of the right form. |
9914 | // N0 and N1 are BUILD_VECTOR nodes with N number of EXTRACT_VECTOR |
9915 | // operands, where N is the size of the formed vector. |
9916 | // Each EXTRACT_VECTOR should have the same input vector and an odd or even
9917 | // index such that we have a pairwise add pattern.
9918 | |
9919 | // Grab the vector that all EXTRACT_VECTOR nodes should be referencing. |
9920 | if (N0->getOperand(0)->getOpcode() != ISD::EXTRACT_VECTOR_ELT) |
9921 | return SDValue(); |
9922 | SDValue Vec = N0->getOperand(0)->getOperand(0); |
9923 | SDNode *V = Vec.getNode(); |
9924 | unsigned nextIndex = 0; |
9925 | |
9926 | // For each operand of the ADD (both are BUILD_VECTORs here), check that
9927 | // each of its operands is an EXTRACT_VECTOR with the same input vector
9928 | // and the appropriate index.
9929 | for (unsigned i = 0, e = N0->getNumOperands(); i != e; ++i) { |
9930 | if (N0->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT |
9931 | && N1->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT) { |
9932 | |
9933 | SDValue ExtVec0 = N0->getOperand(i); |
9934 | SDValue ExtVec1 = N1->getOperand(i); |
9935 | |
9936 | // First operand is the vector; verify it's the same.
9937 | if (V != ExtVec0->getOperand(0).getNode() || |
9938 | V != ExtVec1->getOperand(0).getNode()) |
9939 | return SDValue(); |
9940 | |
9941 | // Second is the constant index; verify it's correct.
9942 | ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(ExtVec0->getOperand(1)); |
9943 | ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(ExtVec1->getOperand(1)); |
9944 | |
9945 | // For the constant, we want to see all the even or all the odd. |
9946 | if (!C0 || !C1 || C0->getZExtValue() != nextIndex |
9947 | || C1->getZExtValue() != nextIndex+1) |
9948 | return SDValue(); |
9949 | |
9950 | // Increment index. |
9951 | nextIndex+=2; |
9952 | } else |
9953 | return SDValue(); |
9954 | } |
9955 | |
9956 | // Don't generate vpaddl+vmovn; we'll match it to vpadd later. Also make sure |
9957 | // we're using the entire input vector, otherwise there's a size/legality |
9958 | // mismatch somewhere. |
9959 | if (nextIndex != Vec.getValueType().getVectorNumElements() || |
9960 | Vec.getValueType().getVectorElementType() == VT.getVectorElementType()) |
9961 | return SDValue(); |
9962 | |
9963 | // Create VPADDL node. |
9964 | SelectionDAG &DAG = DCI.DAG; |
9965 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
9966 | |
9967 | SDLoc dl(N); |
9968 | |
9969 | // Build operand list. |
9970 | SmallVector<SDValue, 8> Ops; |
9971 | Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddls, dl, |
9972 | TLI.getPointerTy(DAG.getDataLayout()))); |
9973 | |
9974 | // Input is the vector. |
9975 | Ops.push_back(Vec); |
9976 | |
9977 | // Get widened type and narrowed type. |
9978 | MVT widenType; |
9979 | unsigned numElem = VT.getVectorNumElements(); |
9980 | |
9981 | EVT inputLaneType = Vec.getValueType().getVectorElementType(); |
9982 | switch (inputLaneType.getSimpleVT().SimpleTy) { |
9983 | case MVT::i8: widenType = MVT::getVectorVT(MVT::i16, numElem); break; |
9984 | case MVT::i16: widenType = MVT::getVectorVT(MVT::i32, numElem); break; |
9985 | case MVT::i32: widenType = MVT::getVectorVT(MVT::i64, numElem); break; |
9986 | default: |
9987 | llvm_unreachable("Invalid vector element type for padd optimization.")::llvm::llvm_unreachable_internal("Invalid vector element type for padd optimization." , "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/ARM/ARMISelLowering.cpp" , 9987); |
9988 | } |
9989 | |
9990 | SDValue tmp = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, widenType, Ops); |
9991 | unsigned ExtOp = VT.bitsGT(tmp.getValueType()) ? ISD::ANY_EXTEND : ISD::TRUNCATE; |
9992 | return DAG.getNode(ExtOp, dl, VT, tmp); |
9993 | } |
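     |
     | // The shape matched above, for a hypothetical v2i32 ADD fed by extracts
     | // from a single v4i16 vector V:
     | //
     | //   N0 = BUILD_VECTOR (extractelt V, 0), (extractelt V, 2)
     | //   N1 = BUILD_VECTOR (extractelt V, 1), (extractelt V, 3)
     | //   ADD N0, N1   ==>   vpaddl.s16 V, then truncate/any-extend to VT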
9994 | |
9995 | static SDValue findMUL_LOHI(SDValue V) { |
9996 | if (V->getOpcode() == ISD::UMUL_LOHI || |
9997 | V->getOpcode() == ISD::SMUL_LOHI) |
9998 | return V; |
9999 | return SDValue(); |
10000 | } |
10001 | |
10002 | static SDValue AddCombineTo64BitSMLAL16(SDNode *AddcNode, SDNode *AddeNode, |
10003 | TargetLowering::DAGCombinerInfo &DCI, |
10004 | const ARMSubtarget *Subtarget) { |
10005 | if (Subtarget->isThumb()) { |
10006 | if (!Subtarget->hasDSP()) |
10007 | return SDValue(); |
10008 | } else if (!Subtarget->hasV5TEOps()) |
10009 | return SDValue(); |
10010 | |
10011 | // SMLALBB, SMLALBT, SMLALTB, SMLALTT multiply two 16-bit values and |
10012 | // accumulate the product into a 64-bit value. The 16-bit values will
10013 | // be sign extended somehow or SRA'd into 32-bit values |
10014 | // (addc (adde (mul 16bit, 16bit), lo), hi) |
10015 | SDValue Mul = AddcNode->getOperand(0); |
10016 | SDValue Lo = AddcNode->getOperand(1); |
10017 | if (Mul.getOpcode() != ISD::MUL) { |
10018 | Lo = AddcNode->getOperand(0); |
10019 | Mul = AddcNode->getOperand(1); |
10020 | if (Mul.getOpcode() != ISD::MUL) |
10021 | return SDValue(); |
10022 | } |
10023 | |
10024 | SDValue SRA = AddeNode->getOperand(0); |
10025 | SDValue Hi = AddeNode->getOperand(1); |
10026 | if (SRA.getOpcode() != ISD::SRA) { |
10027 | SRA = AddeNode->getOperand(1); |
10028 | Hi = AddeNode->getOperand(0); |
10029 | if (SRA.getOpcode() != ISD::SRA) |
10030 | return SDValue(); |
10031 | } |
10032 | if (auto Const = dyn_cast<ConstantSDNode>(SRA.getOperand(1))) { |
10033 | if (Const->getZExtValue() != 31) |
10034 | return SDValue(); |
10035 | } else |
10036 | return SDValue(); |
10037 | |
10038 | if (SRA.getOperand(0) != Mul) |
10039 | return SDValue(); |
10040 | |
10041 | SelectionDAG &DAG = DCI.DAG; |
10042 | SDLoc dl(AddcNode); |
10043 | unsigned Opcode = 0; |
10044 | SDValue Op0; |
10045 | SDValue Op1; |
10046 | |
10047 | if (isS16(Mul.getOperand(0), DAG) && isS16(Mul.getOperand(1), DAG)) { |
10048 | Opcode = ARMISD::SMLALBB; |
10049 | Op0 = Mul.getOperand(0); |
10050 | Op1 = Mul.getOperand(1); |
10051 | } else if (isS16(Mul.getOperand(0), DAG) && isSRA16(Mul.getOperand(1))) { |
10052 | Opcode = ARMISD::SMLALBT; |
10053 | Op0 = Mul.getOperand(0); |
10054 | Op1 = Mul.getOperand(1).getOperand(0); |
10055 | } else if (isSRA16(Mul.getOperand(0)) && isS16(Mul.getOperand(1), DAG)) { |
10056 | Opcode = ARMISD::SMLALTB; |
10057 | Op0 = Mul.getOperand(0).getOperand(0); |
10058 | Op1 = Mul.getOperand(1); |
10059 | } else if (isSRA16(Mul.getOperand(0)) && isSRA16(Mul.getOperand(1))) { |
10060 | Opcode = ARMISD::SMLALTT; |
10061 | Op0 = Mul->getOperand(0).getOperand(0); |
10062 | Op1 = Mul->getOperand(1).getOperand(0); |
10063 | } |
10064 | |
10065 | if (!Op0 || !Op1) |
10066 | return SDValue(); |
10067 | |
10068 | SDValue SMLAL = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32), |
10069 | Op0, Op1, Lo, Hi); |
10070 | // Replace the ADD nodes' uses with the MLAL node's values.
10071 | SDValue HiMLALResult(SMLAL.getNode(), 1); |
10072 | SDValue LoMLALResult(SMLAL.getNode(), 0); |
10073 | |
10074 | DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), LoMLALResult); |
10075 | DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), HiMLALResult); |
10076 | |
10077 | // Return original node to notify the driver to stop replacing. |
10078 | SDValue resNode(AddcNode, 0); |
10079 | return resNode; |
10080 | } |
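     |
     | // Scalar model of SMLALBB, the simplest of the four forms selected above
     | // (hypothetical demo, not part of this file): multiply two 16-bit values
     | // and accumulate the product into a 64-bit value.
     | //
     | //   #include <cassert>
     | //   #include <cstdint>
     | //   int64_t smlalbb(int64_t acc, int32_t a, int32_t b) {
     | //     return acc + int64_t(int16_t(a)) * int16_t(b);
     | //   }
     | //   int main() { assert(smlalbb(1, 0x7FFF, 3) == 1 + 32767 * 3); }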
10081 | |
10082 | static SDValue AddCombineTo64bitMLAL(SDNode *AddeSubeNode, |
10083 | TargetLowering::DAGCombinerInfo &DCI, |
10084 | const ARMSubtarget *Subtarget) { |
10085 | // Look for multiply add opportunities. |
10086 | // The pattern is an ISD::UMUL_LOHI followed by two add nodes, where
10087 | // each add node consumes a value from ISD::UMUL_LOHI and there is
10088 | // a glue link from the first add to the second add. |
10089 | // If we find this pattern, we can replace the U/SMUL_LOHI, ADDC, and ADDE by |
10090 | // a S/UMLAL instruction. |
10091 | //                  UMUL_LOHI
10092 | //                 / :lo    \ :hi
10093 | //                V          \          [no multiline comment]
10094 | //    loAdd ->  ADDC         |
10095 | //                 \ :carry /
10096 | //                  V      V
10097 | //                    ADDE <- hiAdd
10098 | // |
10099 | // In the special case where only the higher part of a signed result is used |
10100 | // and the add to the low part of the result of ISD::UMUL_LOHI adds or subtracts |
10101 | // a constant with the exact value of 0x80000000, we recognize we are dealing |
10102 | // with a "rounded multiply and add" (or subtract) and transform it into |
10103 | // either an ARMISD::SMMLAR or an ARMISD::SMMLSR, respectively.
10104 | |
10105 | assert((AddeSubeNode->getOpcode() == ARMISD::ADDE ||
10106 | AddeSubeNode->getOpcode() == ARMISD::SUBE) &&
10107 | "Expect an ADDE or SUBE");
10108 | |
10109 | assert(AddeSubeNode->getNumOperands() == 3 &&
10110 | AddeSubeNode->getOperand(2).getValueType() == MVT::i32 &&
10111 | "ADDE node has the wrong inputs");
10112 | |
10113 | // Check that we are chained to the right ADDC or SUBC node. |
10114 | SDNode *AddcSubcNode = AddeSubeNode->getOperand(2).getNode(); |
10115 | if ((AddeSubeNode->getOpcode() == ARMISD::ADDE && |
10116 | AddcSubcNode->getOpcode() != ARMISD::ADDC) || |
10117 | (AddeSubeNode->getOpcode() == ARMISD::SUBE && |
10118 | AddcSubcNode->getOpcode() != ARMISD::SUBC)) |
10119 | return SDValue(); |
10120 | |
10121 | SDValue AddcSubcOp0 = AddcSubcNode->getOperand(0); |
10122 | SDValue AddcSubcOp1 = AddcSubcNode->getOperand(1); |
10123 | |
10124 | // Check if the two operands are from the same mul_lohi node. |
10125 | if (AddcSubcOp0.getNode() == AddcSubcOp1.getNode()) |
10126 | return SDValue(); |
10127 | |
10128 | assert(AddcSubcNode->getNumValues() == 2 &&
10129 | AddcSubcNode->getValueType(0) == MVT::i32 &&
10130 | "Expect ADDC with two result values. First: i32");
10131 | |
10132 | // Check that the ADDC adds the low result of the S/UMUL_LOHI. If not, it |
10133 | // may be an SMLAL which multiplies two 16-bit values.
10134 | if (AddeSubeNode->getOpcode() == ARMISD::ADDE && |
10135 | AddcSubcOp0->getOpcode() != ISD::UMUL_LOHI && |
10136 | AddcSubcOp0->getOpcode() != ISD::SMUL_LOHI && |
10137 | AddcSubcOp1->getOpcode() != ISD::UMUL_LOHI && |
10138 | AddcSubcOp1->getOpcode() != ISD::SMUL_LOHI) |
10139 | return AddCombineTo64BitSMLAL16(AddcSubcNode, AddeSubeNode, DCI, Subtarget); |
10140 | |
10141 | // Check for the triangle shape. |
10142 | SDValue AddeSubeOp0 = AddeSubeNode->getOperand(0); |
10143 | SDValue AddeSubeOp1 = AddeSubeNode->getOperand(1); |
10144 | |
10145 | // Make sure that the ADDE/SUBE operands are not coming from the same node. |
10146 | if (AddeSubeOp0.getNode() == AddeSubeOp1.getNode()) |
10147 | return SDValue(); |
10148 | |
10149 | // Find the MUL_LOHI node walking up ADDE/SUBE's operands. |
10150 | bool IsLeftOperandMUL = false; |
10151 | SDValue MULOp = findMUL_LOHI(AddeSubeOp0); |
10152 | if (MULOp == SDValue()) |
10153 | MULOp = findMUL_LOHI(AddeSubeOp1); |
10154 | else |
10155 | IsLeftOperandMUL = true; |
10156 | if (MULOp == SDValue()) |
10157 | return SDValue(); |
10158 | |
10159 | // Figure out the right opcode. |
10160 | unsigned Opc = MULOp->getOpcode(); |
10161 | unsigned FinalOpc = (Opc == ISD::SMUL_LOHI) ? ARMISD::SMLAL : ARMISD::UMLAL; |
10162 | |
10163 | // Figure out the high and low input values to the MLAL node. |
10164 | SDValue *HiAddSub = nullptr; |
10165 | SDValue *LoMul = nullptr; |
10166 | SDValue *LowAddSub = nullptr; |
10167 | |
10168 | // Ensure that ADDE/SUBE is from high result of ISD::xMUL_LOHI. |
10169 | if ((AddeSubeOp0 != MULOp.getValue(1)) && (AddeSubeOp1 != MULOp.getValue(1))) |
10170 | return SDValue(); |
10171 | |
10172 | if (IsLeftOperandMUL) |
10173 | HiAddSub = &AddeSubeOp1; |
10174 | else |
10175 | HiAddSub = &AddeSubeOp0; |
10176 | |
10177 | // Ensure that LoMul and LowAddSub are taken from the correct ISD::SMUL_LOHI node
10178 | // whose low result is fed to the ADDC/SUBC we are checking. |
10179 | |
10180 | if (AddcSubcOp0 == MULOp.getValue(0)) { |
10181 | LoMul = &AddcSubcOp0; |
10182 | LowAddSub = &AddcSubcOp1; |
10183 | } |
10184 | if (AddcSubcOp1 == MULOp.getValue(0)) { |
10185 | LoMul = &AddcSubcOp1; |
10186 | LowAddSub = &AddcSubcOp0; |
10187 | } |
10188 | |
10189 | if (!LoMul) |
10190 | return SDValue(); |
10191 | |
10192 | // If HiAddSub is the same node as ADDC/SUBC, or is a predecessor of ADDC/SUBC,
10193 | // the replacement below will create a cycle. |
10194 | if (AddcSubcNode == HiAddSub->getNode() || |
10195 | AddcSubcNode->isPredecessorOf(HiAddSub->getNode())) |
10196 | return SDValue(); |
10197 | |
10198 | // Create the merged node. |
10199 | SelectionDAG &DAG = DCI.DAG; |
10200 | |
10201 | // Start building operand list. |
10202 | SmallVector<SDValue, 8> Ops; |
10203 | Ops.push_back(LoMul->getOperand(0)); |
10204 | Ops.push_back(LoMul->getOperand(1)); |
10205 | |
10206 | // Check whether we can use SMMLAR, SMMLSR or SMMULR instead. For this to be |
10207 | // the case, we must be doing signed multiplication and only use the higher |
10208 | // part of the result of the MLAL; furthermore, LowAddSub must be a constant
10209 | // addend or subtrahend with the exact value 0x80000000.
10210 | if (Subtarget->hasV6Ops() && Subtarget->hasDSP() && Subtarget->useMulOps() && |
10211 | FinalOpc == ARMISD::SMLAL && !AddeSubeNode->hasAnyUseOfValue(1) && |
10212 | LowAddSub->getNode()->getOpcode() == ISD::Constant && |
10213 | static_cast<ConstantSDNode *>(LowAddSub->getNode())->getZExtValue() == |
10214 | 0x80000000) { |
10215 | Ops.push_back(*HiAddSub); |
10216 | if (AddcSubcNode->getOpcode() == ARMISD::SUBC) { |
10217 | FinalOpc = ARMISD::SMMLSR; |
10218 | } else { |
10219 | FinalOpc = ARMISD::SMMLAR; |
10220 | } |
10221 | SDValue NewNode = DAG.getNode(FinalOpc, SDLoc(AddcSubcNode), MVT::i32, Ops); |
10222 | DAG.ReplaceAllUsesOfValueWith(SDValue(AddeSubeNode, 0), NewNode); |
10223 | |
10224 | return SDValue(AddeSubeNode, 0); |
10225 | } else if (AddcSubcNode->getOpcode() == ARMISD::SUBC) |
10226 | // SMMLS is generated during instruction selection and the rest of this |
10227 | // function cannot handle the case where AddcSubcNode is a SUBC.
10228 | return SDValue(); |
10229 | |
10230 | // Finish building the operand list for {U/S}MLAL |
10231 | Ops.push_back(*LowAddSub); |
10232 | Ops.push_back(*HiAddSub); |
10233 | |
10234 | SDValue MLALNode = DAG.getNode(FinalOpc, SDLoc(AddcSubcNode), |
10235 | DAG.getVTList(MVT::i32, MVT::i32), Ops); |
10236 | |
10237 | // Replace the ADD nodes' uses with the MLAL node's values.
10238 | SDValue HiMLALResult(MLALNode.getNode(), 1); |
10239 | DAG.ReplaceAllUsesOfValueWith(SDValue(AddeSubeNode, 0), HiMLALResult); |
10240 | |
10241 | SDValue LoMLALResult(MLALNode.getNode(), 0); |
10242 | DAG.ReplaceAllUsesOfValueWith(SDValue(AddcSubcNode, 0), LoMLALResult); |
10243 | |
10244 | // Return original node to notify the driver to stop replacing. |
10245 | return SDValue(AddeSubeNode, 0); |
10246 | } |
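     |
     | // Scalar model of the {U,S}MLAL replacement (hypothetical demo, not part
     | // of this file): the matched (ADDC lo, ADDE hi) pair is a 64-bit
     | // accumulate of a 32x32->64 multiply, i.e. exactly one MLAL.
     | //
     | //   #include <cassert>
     | //   #include <cstdint>
     | //   uint64_t umlal(uint64_t acc, uint32_t a, uint32_t b) {
     | //     return acc + uint64_t(a) * b;
     | //   }
     | //   int main() { assert(umlal(5, 0xFFFFFFFFu, 2) == 5 + 0x1FFFFFFFEull); }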
10247 | |
10248 | static SDValue AddCombineTo64bitUMAAL(SDNode *AddeNode, |
10249 | TargetLowering::DAGCombinerInfo &DCI, |
10250 | const ARMSubtarget *Subtarget) { |
10251 | // UMAAL is similar to UMLAL except that it adds two unsigned values. |
10252 | // While trying to combine for the other MLAL nodes, first search for the |
10253 | // chance to use UMAAL. Check if Addc uses a node which has already |
10254 | // been combined into a UMLAL. The other pattern is UMLAL using Addc/Adde |
10255 | // as the addend, and it's handled in PerformUMLALCombine. |
10256 | |
10257 | if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP()) |
10258 | return AddCombineTo64bitMLAL(AddeNode, DCI, Subtarget); |
10259 | |
10260 | // Check that we have a glued ADDC node. |
10261 | SDNode* AddcNode = AddeNode->getOperand(2).getNode(); |
10262 | if (AddcNode->getOpcode() != ARMISD::ADDC) |
10263 | return SDValue(); |
10264 | |
10265 | // Find the converted UMAAL or quit if it doesn't exist. |
10266 | SDNode *UmlalNode = nullptr; |
10267 | SDValue AddHi; |
10268 | if (AddcNode->getOperand(0).getOpcode() == ARMISD::UMLAL) { |
10269 | UmlalNode = AddcNode->getOperand(0).getNode(); |
10270 | AddHi = AddcNode->getOperand(1); |
10271 | } else if (AddcNode->getOperand(1).getOpcode() == ARMISD::UMLAL) { |
10272 | UmlalNode = AddcNode->getOperand(1).getNode(); |
10273 | AddHi = AddcNode->getOperand(0); |
10274 | } else { |
10275 | return AddCombineTo64bitMLAL(AddeNode, DCI, Subtarget); |
10276 | } |
10277 | |
10278 | // The original UMLAL must have a zero hi addend, and the glued ADDE must
10279 | // add zero to the hi half of that same UMLAL.
10280 | if (!isNullConstant(UmlalNode->getOperand(3))) |
10281 | return SDValue(); |
10282 | |
10283 | if ((isNullConstant(AddeNode->getOperand(0)) && |
10284 | AddeNode->getOperand(1).getNode() == UmlalNode) || |
10285 | (AddeNode->getOperand(0).getNode() == UmlalNode && |
10286 | isNullConstant(AddeNode->getOperand(1)))) { |
10287 | SelectionDAG &DAG = DCI.DAG; |
10288 | SDValue Ops[] = { UmlalNode->getOperand(0), UmlalNode->getOperand(1), |
10289 | UmlalNode->getOperand(2), AddHi }; |
10290 | SDValue UMAAL = DAG.getNode(ARMISD::UMAAL, SDLoc(AddcNode), |
10291 | DAG.getVTList(MVT::i32, MVT::i32), Ops); |
10292 | |
10293 | // Replace the ADD nodes' uses with the UMAAL node's values.
10294 | DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), SDValue(UMAAL.getNode(), 1)); |
10295 | DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), SDValue(UMAAL.getNode(), 0)); |
10296 | |
10297 | // Return original node to notify the driver to stop replacing. |
10298 | return SDValue(AddeNode, 0); |
10299 | } |
10300 | return SDValue(); |
10301 | } |
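     |
     | // Scalar model of UMAAL (hypothetical demo, not part of this file): one
     | // 32x32->64 multiply plus two independent 32-bit addends. This is why the
     | // UMLAL's own hi addend must be zero above: the total
     | // a*b + LoAddend + AddHi then maps onto a single UMAAL.
     | //
     | //   #include <cstdint>
     | //   uint64_t umaal(uint32_t a, uint32_t b, uint32_t lo, uint32_t hi) {
     | //     return uint64_t(a) * b + lo + hi;
     | //   }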
10302 | |
10303 | static SDValue PerformUMLALCombine(SDNode *N, SelectionDAG &DAG, |
10304 | const ARMSubtarget *Subtarget) { |
10305 | if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP()) |
10306 | return SDValue(); |
10307 | |
10308 | // Check that we have a pair of ADDC and ADDE as operands. |
10309 | // Both addends of the ADDE must be zero. |
10310 | SDNode* AddcNode = N->getOperand(2).getNode(); |
10311 | SDNode* AddeNode = N->getOperand(3).getNode(); |
10312 | if ((AddcNode->getOpcode() == ARMISD::ADDC) && |
10313 | (AddeNode->getOpcode() == ARMISD::ADDE) && |
10314 | isNullConstant(AddeNode->getOperand(0)) && |
10315 | isNullConstant(AddeNode->getOperand(1)) && |
10316 | (AddeNode->getOperand(2).getNode() == AddcNode)) |
10317 | return DAG.getNode(ARMISD::UMAAL, SDLoc(N), |
10318 | DAG.getVTList(MVT::i32, MVT::i32), |
10319 | {N->getOperand(0), N->getOperand(1), |
10320 | AddcNode->getOperand(0), AddcNode->getOperand(1)}); |
10321 | else |
10322 | return SDValue(); |
10323 | } |
10324 | |
10325 | static SDValue PerformAddcSubcCombine(SDNode *N, |
10326 | TargetLowering::DAGCombinerInfo &DCI, |
10327 | const ARMSubtarget *Subtarget) { |
10328 | SelectionDAG &DAG(DCI.DAG); |
10329 | |
10330 | if (N->getOpcode() == ARMISD::SUBC) { |
10331 | // (SUBC (ADDE 0, 0, C), 1) -> C |
10332 | SDValue LHS = N->getOperand(0); |
10333 | SDValue RHS = N->getOperand(1); |
10334 | if (LHS->getOpcode() == ARMISD::ADDE && |
10335 | isNullConstant(LHS->getOperand(0)) && |
10336 | isNullConstant(LHS->getOperand(1)) && isOneConstant(RHS)) { |
10337 | return DCI.CombineTo(N, SDValue(N, 0), LHS->getOperand(2)); |
10338 | } |
10339 | } |
10340 | |
10341 | if (Subtarget->isThumb1Only()) { |
10342 | SDValue RHS = N->getOperand(1); |
10343 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) { |
10344 | int32_t imm = C->getSExtValue(); |
10345 | if (imm < 0 && imm > std::numeric_limits<int>::min()) { |
10346 | SDLoc DL(N); |
10347 | RHS = DAG.getConstant(-imm, DL, MVT::i32); |
10348 | unsigned Opcode = (N->getOpcode() == ARMISD::ADDC) ? ARMISD::SUBC |
10349 | : ARMISD::ADDC; |
10350 | return DAG.getNode(Opcode, DL, N->getVTList(), N->getOperand(0), RHS); |
10351 | } |
10352 | } |
10353 | } |
10354 | return SDValue(); |
10355 | } |
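     |
     | // The negated-immediate rewrite above is an identity of modular
     | // arithmetic; a standalone check (hypothetical demo) for an imm strictly
     | // between INT_MIN and 0, as guarded above:
     | //
     | //   #include <cassert>
     | //   #include <cstdint>
     | //   int main() {
     | //     int32_t imm = -42;
     | //     uint32_t x = 100;
     | //     assert(x + uint32_t(imm) == x - uint32_t(-imm));
     | //   }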
10356 | |
10357 | static SDValue PerformAddeSubeCombine(SDNode *N, |
10358 | TargetLowering::DAGCombinerInfo &DCI, |
10359 | const ARMSubtarget *Subtarget) { |
10360 | if (Subtarget->isThumb1Only()) { |
10361 | SelectionDAG &DAG = DCI.DAG; |
10362 | SDValue RHS = N->getOperand(1); |
10363 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) { |
10364 | int64_t imm = C->getSExtValue(); |
10365 | if (imm < 0) { |
10366 | SDLoc DL(N); |
10367 | |
10368 | // The with-carry-in form matches bitwise not instead of the negation. |
10369 | // Effectively, the inverse interpretation of the carry flag already |
10370 | // accounts for part of the negation. |
10371 | RHS = DAG.getConstant(~imm, DL, MVT::i32); |
10372 | |
10373 | unsigned Opcode = (N->getOpcode() == ARMISD::ADDE) ? ARMISD::SUBE |
10374 | : ARMISD::ADDE; |
10375 | return DAG.getNode(Opcode, DL, N->getVTList(), |
10376 | N->getOperand(0), RHS, N->getOperand(2)); |
10377 | } |
10378 | } |
10379 | } else if (N->getOperand(1)->getOpcode() == ISD::SMUL_LOHI) { |
10380 | return AddCombineTo64bitMLAL(N, DCI, Subtarget); |
10381 | } |
10382 | return SDValue(); |
10383 | } |
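     |
     | // Why the with-carry form uses ~imm rather than -imm (hypothetical
     | // standalone check, not part of this file): ADDE computes x + imm + carry
     | // and ARM's SUBE computes x - y - (1 - carry); with y = ~imm = -imm - 1
     | // the two agree for both carry values.
     | //
     | //   #include <cassert>
     | //   #include <cstdint>
     | //   int main() {
     | //     int32_t imm = -7;
     | //     uint32_t x = 100, y = uint32_t(~imm); // y == 6
     | //     for (uint32_t c = 0; c <= 1; ++c)
     | //       assert(x + uint32_t(imm) + c == x - y - (1 - c));
     | //   }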
10384 | |
10385 | /// PerformADDECombine - Target-specific dag combine: transform
10386 | /// ARMISD::ADDC, ARMISD::ADDE and ISD::MUL_LOHI into MLAL, or
10387 | /// ARMISD::ADDC, ARMISD::ADDE and ARMISD::UMLAL into ARMISD::UMAAL.
10388 | static SDValue PerformADDECombine(SDNode *N, |
10389 | TargetLowering::DAGCombinerInfo &DCI, |
10390 | const ARMSubtarget *Subtarget) { |
10391 | // Only ARM and Thumb2 support UMLAL/SMLAL. |
10392 | if (Subtarget->isThumb1Only()) |
10393 | return PerformAddeSubeCombine(N, DCI, Subtarget); |
10394 | |
10395 | // Only perform the checks after legalize when the pattern is available. |
10396 | if (DCI.isBeforeLegalize()) return SDValue(); |
10397 | |
10398 | return AddCombineTo64bitUMAAL(N, DCI, Subtarget); |
10399 | } |
10400 | |
10401 | /// PerformADDCombineWithOperands - Try DAG combinations for an ADD with |
10402 | /// operands N0 and N1. This is a helper for PerformADDCombine that is |
10403 | /// called with the default operands, and if that fails, with commuted |
10404 | /// operands. |
10405 | static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, |
10406 | TargetLowering::DAGCombinerInfo &DCI, |
10407 | const ARMSubtarget *Subtarget){ |
10408 | // Attempt to create vpadd for this add. |
10409 | if (SDValue Result = AddCombineToVPADD(N, N0, N1, DCI, Subtarget)) |
10410 | return Result; |
10411 | |
10412 | // Attempt to create vpaddl for this add. |
10413 | if (SDValue Result = AddCombineVUZPToVPADDL(N, N0, N1, DCI, Subtarget)) |
10414 | return Result; |
10415 | if (SDValue Result = AddCombineBUILD_VECTORToVPADDL(N, N0, N1, DCI, |
10416 | Subtarget)) |
10417 | return Result; |
10418 | |
10419 | // fold (add (select cc, 0, c), x) -> (select cc, x, (add x, c))
10420 | if (N0.getNode()->hasOneUse()) |
10421 | if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI)) |
10422 | return Result; |
10423 | return SDValue(); |
10424 | } |
10425 | |
10426 | static SDValue PerformSHLSimplify(SDNode *N, |
10427 | TargetLowering::DAGCombinerInfo &DCI, |
10428 | const ARMSubtarget *ST) { |
10429 | // Allow the generic combiner to identify potential bswaps. |
10430 | if (DCI.isBeforeLegalize()) |
10431 | return SDValue(); |
10432 | |
10433 | // DAG combiner will fold: |
10434 | // (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2) |
10435 | // (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
10436 | // Other code patterns that can also be modified have the following form:
10437 | // b + ((a << 1) | 510) |
10438 | // b + ((a << 1) & 510) |
10439 | // b + ((a << 1) ^ 510) |
10440 | // b + ((a << 1) + 510) |
10441 | |
10442 | // Many instructions can perform the shift for free, but it requires both |
10443 | // the operands to be registers. If c1 << c2 is too large, a mov immediate |
10444 | // instruction will be needed. So, unfold back to the original pattern if:
10445 | // - c1 and c2 are small enough that they don't require mov imms.
10446 | // - the user(s) of the node can perform a shl.
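      | // For example, (or (shl a, 1), 510) can be unfolded back to
      | // (shl (or a, 255), 1): both 255 and the shift amount pass the
      | // encoding check below, and the trailing shl can then be folded
      | // into the user as a free shifted operand.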
10447 | |
10448 | // No shifted operands for 16-bit instructions. |
10449 | if (ST->isThumb() && ST->isThumb1Only()) |
10450 | return SDValue(); |
10451 | |
10452 | // Check that all the users could perform the shl themselves. |
10453 | for (auto U : N->uses()) { |
10454 | switch(U->getOpcode()) { |
10455 | default: |
10456 | return SDValue(); |
10457 | case ISD::SUB: |
10458 | case ISD::ADD: |
10459 | case ISD::AND: |
10460 | case ISD::OR: |
10461 | case ISD::XOR: |
10462 | case ISD::SETCC: |
10463 | case ARMISD::CMP: |
10464 | // Check that the user isn't already using a constant because there |
10465 | // aren't any instructions that support an immediate operand and a |
10466 | // shifted operand. |
10467 | if (isa<ConstantSDNode>(U->getOperand(0)) || |
10468 | isa<ConstantSDNode>(U->getOperand(1))) |
10469 | return SDValue(); |
10470 | |
10471 | // Check that it's not already using a shift. |
10472 | if (U->getOperand(0).getOpcode() == ISD::SHL || |
10473 | U->getOperand(1).getOpcode() == ISD::SHL) |
10474 | return SDValue(); |
10475 | break; |
10476 | } |
10477 | } |
10478 | |
10479 | if (N->getOpcode() != ISD::ADD && N->getOpcode() != ISD::OR && |
10480 | N->getOpcode() != ISD::XOR && N->getOpcode() != ISD::AND) |
10481 | return SDValue(); |
10482 | |
10483 | if (N->getOperand(0).getOpcode() != ISD::SHL) |
10484 | return SDValue(); |
10485 | |
10486 | SDValue SHL = N->getOperand(0); |
10487 | |
10488 | auto *C1ShlC2 = dyn_cast<ConstantSDNode>(N->getOperand(1)); |
10489 | auto *C2 = dyn_cast<ConstantSDNode>(SHL.getOperand(1)); |
10490 | if (!C1ShlC2 || !C2) |
10491 | return SDValue(); |
10492 | |
10493 | APInt C2Int = C2->getAPIntValue(); |
10494 | APInt C1Int = C1ShlC2->getAPIntValue(); |
10495 | |
10496 | // Check that performing a lshr will not lose any information. |
10497 | APInt Mask = APInt::getHighBitsSet(C2Int.getBitWidth(), |
10498 | C2Int.getBitWidth() - C2->getZExtValue()); |
10499 | if ((C1Int & Mask) != C1Int) |
10500 | return SDValue(); |
10501 | |
10502 | // Shift the first constant. |
10503 | C1Int.lshrInPlace(C2Int); |
10504 | |
10505 | // The immediates are encoded as an 8-bit value that can be rotated. |
10506 | auto LargeImm = [](const APInt &Imm) { |
10507 | unsigned Zeros = Imm.countLeadingZeros() + Imm.countTrailingZeros(); |
10508 | return Imm.getBitWidth() - Zeros > 8; |
10509 | }; |
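      | // E.g. 0xFF000000 spans 8 significant bits (0 leading ones lost,
      | // 24 trailing zeros) and is accepted; 0x101 spans 9 bits and is
      | // rejected.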
10510 | |
10511 | if (LargeImm(C1Int) || LargeImm(C2Int)) |
10512 | return SDValue(); |
10513 | |
10514 | SelectionDAG &DAG = DCI.DAG; |
10515 | SDLoc dl(N); |
10516 | SDValue X = SHL.getOperand(0); |
10517 | SDValue BinOp = DAG.getNode(N->getOpcode(), dl, MVT::i32, X, |
10518 | DAG.getConstant(C1Int, dl, MVT::i32)); |
10519 | // Shift left to compensate for the lshr of C1Int. |
10520 | SDValue Res = DAG.getNode(ISD::SHL, dl, MVT::i32, BinOp, SHL.getOperand(1)); |
10521 | |
10522 | DEBUG(dbgs() << "Simplify shl use:\n"; SHL.getOperand(0).dump(); SHL.dump();
10523 |       N->dump());
10524 | DEBUG(dbgs() << "Into:\n"; X.dump(); BinOp.dump(); Res.dump());
10525 | |
10526 | DAG.ReplaceAllUsesWith(SDValue(N, 0), Res); |
10527 | return SDValue(N, 0); |
10528 | } |
10529 | |
10530 | |
10531 | /// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD. |
10532 | /// |
10533 | static SDValue PerformADDCombine(SDNode *N, |
10534 | TargetLowering::DAGCombinerInfo &DCI, |
10535 | const ARMSubtarget *Subtarget) { |
10536 | SDValue N0 = N->getOperand(0); |
10537 | SDValue N1 = N->getOperand(1); |
10538 | |
10539 | // Only works one way, because it needs an immediate operand. |
10540 | if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget)) |
10541 | return Result; |
10542 | |
10543 | // First try with the default operand order. |
10544 | if (SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget)) |
10545 | return Result; |
10546 | |
10547 | // If that didn't work, try again with the operands commuted. |
10548 | return PerformADDCombineWithOperands(N, N1, N0, DCI, Subtarget); |
10549 | } |
10550 | |
10551 | /// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB. |
10552 | /// |
10553 | static SDValue PerformSUBCombine(SDNode *N, |
10554 | TargetLowering::DAGCombinerInfo &DCI) { |
10555 | SDValue N0 = N->getOperand(0); |
10556 | SDValue N1 = N->getOperand(1); |
10557 | |
10558 | // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub x, c))
10559 | if (N1.getNode()->hasOneUse()) |
10560 | if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI)) |
10561 | return Result; |
10562 | |
10563 | return SDValue(); |
10564 | } |
10565 | |
10566 | /// PerformVMULCombine |
10567 | /// Distribute (A + B) * C to (A * C) + (B * C) to take advantage of the |
10568 | /// special multiplier accumulator forwarding. |
10569 | /// vmul d3, d0, d2 |
10570 | /// vmla d3, d1, d2 |
10571 | /// is faster than |
10572 | /// vadd d3, d0, d1 |
10573 | /// vmul d3, d3, d2 |
10574 | // However, for (A + B) * (A + B), |
10575 | // vadd d2, d0, d1 |
10576 | // vmul d3, d0, d2 |
10577 | // vmla d3, d1, d2 |
10578 | // is slower than |
10579 | // vadd d2, d0, d1 |
10580 | // vmul d3, d2, d2 |
10581 | static SDValue PerformVMULCombine(SDNode *N, |
10582 | TargetLowering::DAGCombinerInfo &DCI, |
10583 | const ARMSubtarget *Subtarget) { |
10584 | if (!Subtarget->hasVMLxForwarding()) |
10585 | return SDValue(); |
10586 | |
10587 | SelectionDAG &DAG = DCI.DAG; |
10588 | SDValue N0 = N->getOperand(0); |
10589 | SDValue N1 = N->getOperand(1); |
10590 | unsigned Opcode = N0.getOpcode(); |
10591 | if (Opcode != ISD::ADD && Opcode != ISD::SUB && |
10592 | Opcode != ISD::FADD && Opcode != ISD::FSUB) { |
10593 | Opcode = N1.getOpcode(); |
10594 | if (Opcode != ISD::ADD && Opcode != ISD::SUB && |
10595 | Opcode != ISD::FADD && Opcode != ISD::FSUB) |
10596 | return SDValue(); |
10597 | std::swap(N0, N1); |
10598 | } |
10599 | |
10600 | if (N0 == N1) |
10601 | return SDValue(); |
10602 | |
10603 | EVT VT = N->getValueType(0); |
10604 | SDLoc DL(N); |
10605 | SDValue N00 = N0->getOperand(0); |
10606 | SDValue N01 = N0->getOperand(1); |
10607 | return DAG.getNode(Opcode, DL, VT, |
10608 | DAG.getNode(ISD::MUL, DL, VT, N00, N1), |
10609 | DAG.getNode(ISD::MUL, DL, VT, N01, N1)); |
10610 | } |
10611 | |
10612 | static SDValue PerformMULCombine(SDNode *N, |
10613 | TargetLowering::DAGCombinerInfo &DCI, |
10614 | const ARMSubtarget *Subtarget) { |
10615 | SelectionDAG &DAG = DCI.DAG; |
10616 | |
10617 | if (Subtarget->isThumb1Only()) |
10618 | return SDValue(); |
10619 | |
10620 | if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) |
10621 | return SDValue(); |
10622 | |
10623 | EVT VT = N->getValueType(0); |
10624 | if (VT.is64BitVector() || VT.is128BitVector()) |
10625 | return PerformVMULCombine(N, DCI, Subtarget); |
10626 | if (VT != MVT::i32) |
10627 | return SDValue(); |
10628 | |
10629 | ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1)); |
10630 | if (!C) |
10631 | return SDValue(); |
10632 | |
10633 | int64_t MulAmt = C->getSExtValue(); |
10634 | unsigned ShiftAmt = countTrailingZeros<uint64_t>(MulAmt); |
10635 | |
10636 | ShiftAmt = ShiftAmt & (32 - 1); |
10637 | SDValue V = N->getOperand(0); |
10638 | SDLoc DL(N); |
10639 | |
10640 | SDValue Res; |
10641 | MulAmt >>= ShiftAmt; |
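      | // Worked example: for (mul x, 20), ShiftAmt = 2 and MulAmt becomes
      | // 5 = 2^2 + 1, so Res = (add x, (shl x, 2)) and the final shl by
      | // ShiftAmt below reconstructs x * 20.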
10642 | |
10643 | if (MulAmt >= 0) { |
10644 | if (isPowerOf2_32(MulAmt - 1)) { |
10645 | // (mul x, 2^N + 1) => (add (shl x, N), x) |
10646 | Res = DAG.getNode(ISD::ADD, DL, VT, |
10647 | V, |
10648 | DAG.getNode(ISD::SHL, DL, VT, |
10649 | V, |
10650 | DAG.getConstant(Log2_32(MulAmt - 1), DL, |
10651 | MVT::i32))); |
10652 | } else if (isPowerOf2_32(MulAmt + 1)) { |
10653 | // (mul x, 2^N - 1) => (sub (shl x, N), x) |
10654 | Res = DAG.getNode(ISD::SUB, DL, VT, |
10655 | DAG.getNode(ISD::SHL, DL, VT, |
10656 | V, |
10657 | DAG.getConstant(Log2_32(MulAmt + 1), DL, |
10658 | MVT::i32)), |
10659 | V); |
10660 | } else |
10661 | return SDValue(); |
10662 | } else { |
10663 | uint64_t MulAmtAbs = -MulAmt; |
10664 | if (isPowerOf2_32(MulAmtAbs + 1)) { |
10665 | // (mul x, -(2^N - 1)) => (sub x, (shl x, N)) |
10666 | Res = DAG.getNode(ISD::SUB, DL, VT, |
10667 | V, |
10668 | DAG.getNode(ISD::SHL, DL, VT, |
10669 | V, |
10670 | DAG.getConstant(Log2_32(MulAmtAbs + 1), DL, |
10671 | MVT::i32))); |
10672 | } else if (isPowerOf2_32(MulAmtAbs - 1)) { |
10673 | // (mul x, -(2^N + 1)) => - (add (shl x, N), x) |
10674 | Res = DAG.getNode(ISD::ADD, DL, VT, |
10675 | V, |
10676 | DAG.getNode(ISD::SHL, DL, VT, |
10677 | V, |
10678 | DAG.getConstant(Log2_32(MulAmtAbs - 1), DL, |
10679 | MVT::i32))); |
10680 | Res = DAG.getNode(ISD::SUB, DL, VT, |
10681 | DAG.getConstant(0, DL, MVT::i32), Res); |
10682 | } else |
10683 | return SDValue(); |
10684 | } |
10685 | |
10686 | if (ShiftAmt != 0) |
10687 | Res = DAG.getNode(ISD::SHL, DL, VT, |
10688 | Res, DAG.getConstant(ShiftAmt, DL, MVT::i32)); |
10689 | |
10690 | // Do not add new nodes to DAG combiner worklist. |
10691 | DCI.CombineTo(N, Res, false); |
10692 | return SDValue(); |
10693 | } |
10694 | |
10695 | static SDValue PerformANDCombine(SDNode *N, |
10696 | TargetLowering::DAGCombinerInfo &DCI, |
10697 | const ARMSubtarget *Subtarget) { |
10698 | // Attempt to use immediate-form VBIC |
10699 | BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1)); |
10700 | SDLoc dl(N); |
10701 | EVT VT = N->getValueType(0); |
10702 | SelectionDAG &DAG = DCI.DAG; |
10703 | |
10704 | if(!DAG.getTargetLoweringInfo().isTypeLegal(VT)) |
10705 | return SDValue(); |
10706 | |
10707 | APInt SplatBits, SplatUndef; |
10708 | unsigned SplatBitSize; |
10709 | bool HasAnyUndefs; |
10710 | if (BVN && |
10711 | BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { |
10712 | if (SplatBitSize <= 64) { |
10713 | EVT VbicVT; |
10714 | SDValue Val = isNEONModifiedImm((~SplatBits).getZExtValue(), |
10715 | SplatUndef.getZExtValue(), SplatBitSize, |
10716 | DAG, dl, VbicVT, VT.is128BitVector(), |
10717 | OtherModImm); |
10718 | if (Val.getNode()) { |
10719 | SDValue Input = |
10720 | DAG.getNode(ISD::BITCAST, dl, VbicVT, N->getOperand(0)); |
10721 | SDValue Vbic = DAG.getNode(ARMISD::VBICIMM, dl, VbicVT, Input, Val); |
10722 | return DAG.getNode(ISD::BITCAST, dl, VT, Vbic); |
10723 | } |
10724 | } |
10725 | } |
10726 | |
10727 | if (!Subtarget->isThumb1Only()) { |
10728 | // fold (and (select cc, -1, c), x) -> (select cc, x, (and x, c))
10729 | if (SDValue Result = combineSelectAndUseCommutative(N, true, DCI)) |
10730 | return Result; |
10731 | |
10732 | if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget)) |
10733 | return Result; |
10734 | } |
10735 | |
10736 | return SDValue(); |
10737 | } |
10738 | |
10739 | // Try combining OR nodes to SMULWB, SMULWT. |
10740 | static SDValue PerformORCombineToSMULWBT(SDNode *OR, |
10741 | TargetLowering::DAGCombinerInfo &DCI, |
10742 | const ARMSubtarget *Subtarget) { |
10743 | if (!Subtarget->hasV6Ops() || |
10744 | (Subtarget->isThumb() && |
10745 | (!Subtarget->hasThumb2() || !Subtarget->hasDSP()))) |
10746 | return SDValue(); |
10747 | |
10748 | SDValue SRL = OR->getOperand(0); |
10749 | SDValue SHL = OR->getOperand(1); |
10750 | |
10751 | if (SRL.getOpcode() != ISD::SRL || SHL.getOpcode() != ISD::SHL) { |
10752 | SRL = OR->getOperand(1); |
10753 | SHL = OR->getOperand(0); |
10754 | } |
10755 | if (!isSRL16(SRL) || !isSHL16(SHL)) |
10756 | return SDValue(); |
10757 | |
10758 | // The first operands to the shifts need to be the two results from the |
10759 | // same smul_lohi node. |
10760 | if ((SRL.getOperand(0).getNode() != SHL.getOperand(0).getNode()) || |
10761 | SRL.getOperand(0).getOpcode() != ISD::SMUL_LOHI) |
10762 | return SDValue(); |
10763 | |
10764 | SDNode *SMULLOHI = SRL.getOperand(0).getNode(); |
10765 | if (SRL.getOperand(0) != SDValue(SMULLOHI, 0) || |
10766 | SHL.getOperand(0) != SDValue(SMULLOHI, 1)) |
10767 | return SDValue(); |
10768 | |
10769 | // Now we have: |
10770 | // (or (srl (smul_lohi ?, ?), 16), (shl (smul_lohi ?, ?), 16))) |
10771 | // For SMUL[B|T] smul_lohi will take a 32-bit and a 16-bit argument.
10772 | // For SMULWB the 16-bit value will be sign extended somehow.
10773 | // For SMULWT only the SRA is required. |
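      | // Put differently, the OR reassembles bits [47:16] of the 64-bit
      | // product, which is what SMULW{B,T} produces: the top 32 bits of the
      | // 48-bit product of a 32-bit and a sign-extended 16-bit operand.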
10774 | // Check both sides of SMUL_LOHI |
10775 | SDValue OpS16 = SMULLOHI->getOperand(0); |
10776 | SDValue OpS32 = SMULLOHI->getOperand(1); |
10777 | |
10778 | SelectionDAG &DAG = DCI.DAG; |
10779 | if (!isS16(OpS16, DAG) && !isSRA16(OpS16)) { |
10780 | OpS16 = OpS32; |
10781 | OpS32 = SMULLOHI->getOperand(0); |
10782 | } |
10783 | |
10784 | SDLoc dl(OR); |
10785 | unsigned Opcode = 0; |
10786 | if (isS16(OpS16, DAG)) |
10787 | Opcode = ARMISD::SMULWB; |
10788 | else if (isSRA16(OpS16)) { |
10789 | Opcode = ARMISD::SMULWT; |
10790 | OpS16 = OpS16->getOperand(0); |
10791 | } |
10792 | else |
10793 | return SDValue(); |
10794 | |
10795 | SDValue Res = DAG.getNode(Opcode, dl, MVT::i32, OpS32, OpS16); |
10796 | DAG.ReplaceAllUsesOfValueWith(SDValue(OR, 0), Res); |
10797 | return SDValue(OR, 0); |
10798 | } |
10799 | |
10800 | static SDValue PerformORCombineToBFI(SDNode *N, |
10801 | TargetLowering::DAGCombinerInfo &DCI, |
10802 | const ARMSubtarget *Subtarget) { |
10803 | // BFI is only available on V6T2+ |
10804 | if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops()) |
10805 | return SDValue(); |
10806 | |
10807 | EVT VT = N->getValueType(0); |
10808 | SDValue N0 = N->getOperand(0); |
10809 | SDValue N1 = N->getOperand(1); |
10810 | SelectionDAG &DAG = DCI.DAG; |
10811 | SDLoc DL(N); |
10812 | // 1) or (and A, mask), val => ARMbfi A, val, mask |
10813 | // iff (val & mask) == val |
10814 | // |
10815 | // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask |
10816 | // 2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2) |
10817 | // && mask == ~mask2 |
10818 | // 2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2) |
10819 | // && ~mask == mask2 |
10820 | // (i.e., copy a bitfield value into another bitfield of the same width) |
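      | // Worked example of (1): or (and A, 0xffff00ff), 0x4400 becomes
      | // (ARMbfi A, 0x44, 0xffff00ff): 0x4400 & ~0xffff00ff == 0x4400, and
      | // ~mask == 0xff00 is a contiguous bitfield starting at bit 8, so
      | // 0x44 is inserted into byte 1 of A.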
10821 | |
10822 | if (VT != MVT::i32) |
10823 | return SDValue(); |
10824 | |
10825 | SDValue N00 = N0.getOperand(0); |
10826 | |
10827 | // The value and the mask need to be constants so we can verify this is |
10828 | // actually a bitfield set. If the mask is 0xffff, we can do better |
10829 | // via a movt instruction, so don't use BFI in that case. |
10830 | SDValue MaskOp = N0.getOperand(1); |
10831 | ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(MaskOp); |
10832 | if (!MaskC) |
10833 | return SDValue(); |
10834 | unsigned Mask = MaskC->getZExtValue(); |
10835 | if (Mask == 0xffff) |
10836 | return SDValue(); |
10837 | SDValue Res; |
10838 | // Case (1): or (and A, mask), val => ARMbfi A, val, mask |
10839 | ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); |
10840 | if (N1C) { |
10841 | unsigned Val = N1C->getZExtValue(); |
10842 | if ((Val & ~Mask) != Val) |
10843 | return SDValue(); |
10844 | |
10845 | if (ARM::isBitFieldInvertedMask(Mask)) { |
10846 | Val >>= countTrailingZeros(~Mask); |
10847 | |
10848 | Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, |
10849 | DAG.getConstant(Val, DL, MVT::i32), |
10850 | DAG.getConstant(Mask, DL, MVT::i32)); |
10851 | |
10852 | DCI.CombineTo(N, Res, false); |
10853 | // Return value from the original node to inform the combiner that N is
10854 | // now dead. |
10855 | return SDValue(N, 0); |
10856 | } |
10857 | } else if (N1.getOpcode() == ISD::AND) { |
10858 | // case (2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask |
10859 | ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); |
10860 | if (!N11C) |
10861 | return SDValue(); |
10862 | unsigned Mask2 = N11C->getZExtValue(); |
10863 | |
10864 | // Mask and ~Mask2 (or reverse) must be equivalent for the BFI pattern
10865 | // to match as-is.
10866 | if (ARM::isBitFieldInvertedMask(Mask) && |
10867 | (Mask == ~Mask2)) { |
10868 | // The pack halfword instruction works better for masks that fit it, |
10869 | // so use that when it's available. |
10870 | if (Subtarget->hasDSP() && |
10871 | (Mask == 0xffff || Mask == 0xffff0000)) |
10872 | return SDValue(); |
10873 | // 2a |
10874 | unsigned amt = countTrailingZeros(Mask2); |
10875 | Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0), |
10876 | DAG.getConstant(amt, DL, MVT::i32)); |
10877 | Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, Res, |
10878 | DAG.getConstant(Mask, DL, MVT::i32)); |
10879 | DCI.CombineTo(N, Res, false); |
10880 | // Return value from the original node to inform the combiner that N is
10881 | // now dead. |
10882 | return SDValue(N, 0); |
10883 | } else if (ARM::isBitFieldInvertedMask(~Mask) && |
10884 | (~Mask == Mask2)) { |
10885 | // The pack halfword instruction works better for masks that fit it, |
10886 | // so use that when it's available. |
10887 | if (Subtarget->hasDSP() && |
10888 | (Mask2 == 0xffff || Mask2 == 0xffff0000)) |
10889 | return SDValue(); |
10890 | // 2b |
10891 | unsigned lsb = countTrailingZeros(Mask); |
10892 | Res = DAG.getNode(ISD::SRL, DL, VT, N00, |
10893 | DAG.getConstant(lsb, DL, MVT::i32)); |
10894 | Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res, |
10895 | DAG.getConstant(Mask2, DL, MVT::i32)); |
10896 | DCI.CombineTo(N, Res, false); |
10897 | // Return value from the original node to inform the combiner that N is
10898 | // now dead. |
10899 | return SDValue(N, 0); |
10900 | } |
10901 | } |
10902 | |
10903 | if (DAG.MaskedValueIsZero(N1, MaskC->getAPIntValue()) && |
10904 | N00.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N00.getOperand(1)) && |
10905 | ARM::isBitFieldInvertedMask(~Mask)) { |
10906 | // Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask |
10907 | // where lsb(mask) == #shamt and masked bits of B are known zero. |
10908 | SDValue ShAmt = N00.getOperand(1); |
10909 | unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue(); |
10910 | unsigned LSB = countTrailingZeros(Mask); |
10911 | if (ShAmtC != LSB) |
10912 | return SDValue(); |
10913 | |
10914 | Res = DAG.getNode(ARMISD::BFI, DL, VT, N1, N00.getOperand(0), |
10915 | DAG.getConstant(~Mask, DL, MVT::i32)); |
10916 | |
10917 | DCI.CombineTo(N, Res, false); |
10918 | // Return value from the original node to inform the combiner that N is
10919 | // now dead. |
10920 | return SDValue(N, 0); |
10921 | } |
10922 | |
10923 | return SDValue(); |
10924 | } |
10925 | |
10926 | /// PerformORCombine - Target-specific dag combine xforms for ISD::OR |
10927 | static SDValue PerformORCombine(SDNode *N, |
10928 | TargetLowering::DAGCombinerInfo &DCI, |
10929 | const ARMSubtarget *Subtarget) { |
10930 | // Attempt to use immediate-form VORR |
10931 | BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1)); |
10932 | SDLoc dl(N); |
10933 | EVT VT = N->getValueType(0); |
10934 | SelectionDAG &DAG = DCI.DAG; |
10935 | |
10936 | if(!DAG.getTargetLoweringInfo().isTypeLegal(VT)) |
10937 | return SDValue(); |
10938 | |
10939 | APInt SplatBits, SplatUndef; |
10940 | unsigned SplatBitSize; |
10941 | bool HasAnyUndefs; |
10942 | if (BVN && Subtarget->hasNEON() && |
10943 | BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { |
10944 | if (SplatBitSize <= 64) { |
10945 | EVT VorrVT; |
10946 | SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(), |
10947 | SplatUndef.getZExtValue(), SplatBitSize, |
10948 | DAG, dl, VorrVT, VT.is128BitVector(), |
10949 | OtherModImm); |
10950 | if (Val.getNode()) { |
10951 | SDValue Input = |
10952 | DAG.getNode(ISD::BITCAST, dl, VorrVT, N->getOperand(0)); |
10953 | SDValue Vorr = DAG.getNode(ARMISD::VORRIMM, dl, VorrVT, Input, Val); |
10954 | return DAG.getNode(ISD::BITCAST, dl, VT, Vorr); |
10955 | } |
10956 | } |
10957 | } |
10958 | |
10959 | if (!Subtarget->isThumb1Only()) { |
10960 | // fold (or (select cc, 0, c), x) -> (select cc, x, (or x, c))
10961 | if (SDValue Result = combineSelectAndUseCommutative(N, false, DCI)) |
10962 | return Result; |
10963 | if (SDValue Result = PerformORCombineToSMULWBT(N, DCI, Subtarget)) |
10964 | return Result; |
10965 | } |
10966 | |
10967 | SDValue N0 = N->getOperand(0); |
10968 | SDValue N1 = N->getOperand(1); |
10969 | |
10970 | // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant. |
10971 | if (Subtarget->hasNEON() && N1.getOpcode() == ISD::AND && VT.isVector() && |
10972 | DAG.getTargetLoweringInfo().isTypeLegal(VT)) { |
10973 | |
10974 | // The code below optimizes (or (and X, Y), Z). |
10975 | // The AND operand needs to have a single user to make these optimizations |
10976 | // profitable. |
10977 | if (N0.getOpcode() != ISD::AND || !N0.hasOneUse()) |
10978 | return SDValue(); |
10979 | |
10980 | APInt SplatUndef; |
10981 | unsigned SplatBitSize; |
10982 | bool HasAnyUndefs; |
10983 | |
10984 | APInt SplatBits0, SplatBits1; |
10985 | BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1)); |
10986 | BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1)); |
10987 | // Ensure that the second operand of both ands are constants |
10988 | if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize, |
10989 | HasAnyUndefs) && !HasAnyUndefs) { |
10990 | if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize, |
10991 | HasAnyUndefs) && !HasAnyUndefs) { |
10992 | // Ensure that the bit width of the constants are the same and that |
10993 | // the splat arguments are logical inverses as per the pattern we |
10994 | // are trying to simplify. |
10995 | if (SplatBits0.getBitWidth() == SplatBits1.getBitWidth() && |
10996 | SplatBits0 == ~SplatBits1) { |
10997 | // Canonicalize the vector type to make instruction selection |
10998 | // simpler. |
10999 | EVT CanonicalVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32; |
11000 | SDValue Result = DAG.getNode(ARMISD::VBSL, dl, CanonicalVT, |
11001 | N0->getOperand(1), |
11002 | N0->getOperand(0), |
11003 | N1->getOperand(0)); |
11004 | return DAG.getNode(ISD::BITCAST, dl, VT, Result); |
11005 | } |
11006 | } |
11007 | } |
11008 | } |
11009 | |
11010 | // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when |
11011 | // reasonable. |
11012 | if (N0.getOpcode() == ISD::AND && N0.hasOneUse()) { |
11013 | if (SDValue Res = PerformORCombineToBFI(N, DCI, Subtarget)) |
11014 | return Res; |
11015 | } |
11016 | |
11017 | if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget)) |
11018 | return Result; |
11019 | |
11020 | return SDValue(); |
11021 | } |
11022 | |
11023 | static SDValue PerformXORCombine(SDNode *N, |
11024 | TargetLowering::DAGCombinerInfo &DCI, |
11025 | const ARMSubtarget *Subtarget) { |
11026 | EVT VT = N->getValueType(0); |
11027 | SelectionDAG &DAG = DCI.DAG; |
11028 | |
11029 | if(!DAG.getTargetLoweringInfo().isTypeLegal(VT)) |
11030 | return SDValue(); |
11031 | |
11032 | if (!Subtarget->isThumb1Only()) { |
11033 | // fold (xor (select cc, 0, c), x) -> (select cc, x, (xor x, c))
11034 | if (SDValue Result = combineSelectAndUseCommutative(N, false, DCI)) |
11035 | return Result; |
11036 | |
11037 | if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget)) |
11038 | return Result; |
11039 | } |
11040 | |
11041 | return SDValue(); |
11042 | } |
11043 | |
11044 | // ParseBFI - given a BFI instruction in N, extract the "from" value (Rn)
11045 | // and return it, and fill in FromMask and ToMask with the (consecutive)
11046 | // bits in "from" to be extracted and their position in "to" (Rd).
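      | // For example, for (ARMISD::BFI d, (srl x, 8), 0xff00ffff) this
      | // returns x with ToMask = 0x00ff0000 and FromMask = 0x0000ff00,
      | // i.e. bits 15:8 of x are written into bits 23:16 of d.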
11047 | static SDValue ParseBFI(SDNode *N, APInt &ToMask, APInt &FromMask) { |
11048 | assert(N->getOpcode() == ARMISD::BFI);
11049 | |
11050 | SDValue From = N->getOperand(1); |
11051 | ToMask = ~cast<ConstantSDNode>(N->getOperand(2))->getAPIntValue(); |
11052 | FromMask = APInt::getLowBitsSet(ToMask.getBitWidth(), ToMask.countPopulation()); |
11053 | |
11054 | // If the Base came from a SHR #C, we can deduce that it is really testing bit |
11055 | // #C in the base of the SHR. |
11056 | if (From->getOpcode() == ISD::SRL && |
11057 | isa<ConstantSDNode>(From->getOperand(1))) { |
11058 | APInt Shift = cast<ConstantSDNode>(From->getOperand(1))->getAPIntValue(); |
11059 | assert(Shift.getLimitedValue() < 32 && "Shift too large!");
11060 | FromMask <<= Shift.getLimitedValue(31); |
11061 | From = From->getOperand(0); |
11062 | } |
11063 | |
11064 | return From; |
11065 | } |
11066 | |
11067 | // If A and B contain one contiguous set of bits, does A | B == A . B? |
11068 | // |
11069 | // Neither A nor B may be zero.
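      | // E.g. A = 0b1100 and B = 0b0011 concatenate properly (A's lowest
      | // set bit is one above B's highest), but A = 0b1100 and B = 0b0001
      | // do not, because bit 1 would be left unset in A | B.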
11070 | static bool BitsProperlyConcatenate(const APInt &A, const APInt &B) { |
11071 | unsigned LastActiveBitInA = A.countTrailingZeros(); |
11072 | unsigned FirstActiveBitInB = B.getBitWidth() - B.countLeadingZeros() - 1; |
11073 | return LastActiveBitInA - 1 == FirstActiveBitInB; |
11074 | } |
11075 | |
11076 | static SDValue FindBFIToCombineWith(SDNode *N) { |
11077 | // We have a BFI in N. Follow a possible chain of BFIs and find a BFI
11078 | // it can combine with, if one exists.
11079 | APInt ToMask, FromMask; |
11080 | SDValue From = ParseBFI(N, ToMask, FromMask); |
11081 | SDValue To = N->getOperand(0); |
11082 | |
11083 | // Now check for a compatible BFI to merge with. We can pass through BFIs that |
11084 | // aren't compatible, but not if they set the same bit in their destination as |
11085 | // we do (or that of any BFI we're going to combine with). |
11086 | SDValue V = To; |
11087 | APInt CombinedToMask = ToMask; |
11088 | while (V.getOpcode() == ARMISD::BFI) { |
11089 | APInt NewToMask, NewFromMask; |
11090 | SDValue NewFrom = ParseBFI(V.getNode(), NewToMask, NewFromMask); |
11091 | if (NewFrom != From) { |
11092 | // This BFI has a different base. Keep going. |
11093 | CombinedToMask |= NewToMask; |
11094 | V = V.getOperand(0); |
11095 | continue; |
11096 | } |
11097 | |
11098 | // Do the written bits conflict with any we've seen so far? |
11099 | if ((NewToMask & CombinedToMask).getBoolValue()) |
11100 | // Conflicting bits - bail out because going further is unsafe. |
11101 | return SDValue(); |
11102 | |
11103 | // Are the new bits contiguous when combined with the old bits? |
11104 | if (BitsProperlyConcatenate(ToMask, NewToMask) && |
11105 | BitsProperlyConcatenate(FromMask, NewFromMask)) |
11106 | return V; |
11107 | if (BitsProperlyConcatenate(NewToMask, ToMask) && |
11108 | BitsProperlyConcatenate(NewFromMask, FromMask)) |
11109 | return V; |
11110 | |
11111 | // We've seen a write to some bits, so track it. |
11112 | CombinedToMask |= NewToMask; |
11113 | // Keep going... |
11114 | V = V.getOperand(0); |
11115 | } |
11116 | |
11117 | return SDValue(); |
11118 | } |
11119 | |
11120 | static SDValue PerformBFICombine(SDNode *N, |
11121 | TargetLowering::DAGCombinerInfo &DCI) { |
11122 | SDValue N1 = N->getOperand(1); |
11123 | if (N1.getOpcode() == ISD::AND) { |
11124 | // (bfi A, (and B, Mask1), Mask2) -> (bfi A, B, Mask2) iff |
11125 | // the bits being cleared by the AND are not demanded by the BFI. |
11126 | ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); |
11127 | if (!N11C) |
11128 | return SDValue(); |
11129 | unsigned InvMask = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue(); |
11130 | unsigned LSB = countTrailingZeros(~InvMask); |
11131 | unsigned Width = (32 - countLeadingZeros(~InvMask)) - LSB; |
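      | // E.g. InvMask = 0xff00ffff writes an 8-bit field at bit 16:
      | // LSB = 16, Width = 8 and Mask = 0xff, so an AND whose constant
      | // covers 0xff (Mask & ~Mask2 == 0) is redundant and can be dropped.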
11132 | assert(Width <
11133 |        static_cast<unsigned>(std::numeric_limits<unsigned>::digits) &&
11134 |        "undefined behavior");
11135 | unsigned Mask = (1u << Width) - 1; |
11136 | unsigned Mask2 = N11C->getZExtValue(); |
11137 | if ((Mask & (~Mask2)) == 0) |
11138 | return DCI.DAG.getNode(ARMISD::BFI, SDLoc(N), N->getValueType(0), |
11139 | N->getOperand(0), N1.getOperand(0), |
11140 | N->getOperand(2)); |
11141 | } else if (N->getOperand(0).getOpcode() == ARMISD::BFI) { |
11142 | // We have a BFI of a BFI. Walk up the BFI chain to see how long it goes. |
11143 | // Keep track of any consecutive bits set that all come from the same base |
11144 | // value. We can combine these together into a single BFI. |
11145 | SDValue CombineBFI = FindBFIToCombineWith(N); |
11146 | if (CombineBFI == SDValue()) |
11147 | return SDValue(); |
11148 | |
11149 | // We've found a BFI. |
11150 | APInt ToMask1, FromMask1; |
11151 | SDValue From1 = ParseBFI(N, ToMask1, FromMask1); |
11152 | |
11153 | APInt ToMask2, FromMask2; |
11154 | SDValue From2 = ParseBFI(CombineBFI.getNode(), ToMask2, FromMask2); |
11155 | assert(From1 == From2);
11156 | (void)From2; |
11157 | |
11158 | // First, unlink CombineBFI. |
11159 | DCI.DAG.ReplaceAllUsesWith(CombineBFI, CombineBFI.getOperand(0)); |
11160 | // Then create a new BFI, combining the two together. |
11161 | APInt NewFromMask = FromMask1 | FromMask2; |
11162 | APInt NewToMask = ToMask1 | ToMask2; |
11163 | |
11164 | EVT VT = N->getValueType(0); |
11165 | SDLoc dl(N); |
11166 | |
11167 | if (NewFromMask[0] == 0) |
11168 | From1 = DCI.DAG.getNode( |
11169 | ISD::SRL, dl, VT, From1, |
11170 | DCI.DAG.getConstant(NewFromMask.countTrailingZeros(), dl, VT)); |
11171 | return DCI.DAG.getNode(ARMISD::BFI, dl, VT, N->getOperand(0), From1, |
11172 | DCI.DAG.getConstant(~NewToMask, dl, VT)); |
11173 | } |
11174 | return SDValue(); |
11175 | } |
11176 | |
11177 | /// PerformVMOVRRDCombine - Target-specific dag combine xforms for |
11178 | /// ARMISD::VMOVRRD. |
11179 | static SDValue PerformVMOVRRDCombine(SDNode *N, |
11180 | TargetLowering::DAGCombinerInfo &DCI, |
11181 | const ARMSubtarget *Subtarget) { |
11182 | // vmovrrd(vmovdrr x, y) -> x,y |
11183 | SDValue InDouble = N->getOperand(0); |
11184 | if (InDouble.getOpcode() == ARMISD::VMOVDRR && !Subtarget->isFPOnlySP()) |
11185 | return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1)); |
11186 | |
11187 | // vmovrrd(load f64) -> (load i32), (load i32) |
11188 | SDNode *InNode = InDouble.getNode(); |
11189 | if (ISD::isNormalLoad(InNode) && InNode->hasOneUse() && |
11190 | InNode->getValueType(0) == MVT::f64 && |
11191 | InNode->getOperand(1).getOpcode() == ISD::FrameIndex && |
11192 | !cast<LoadSDNode>(InNode)->isVolatile()) { |
11193 | // TODO: Should this be done for non-FrameIndex operands? |
11194 | LoadSDNode *LD = cast<LoadSDNode>(InNode); |
11195 | |
11196 | SelectionDAG &DAG = DCI.DAG; |
11197 | SDLoc DL(LD); |
11198 | SDValue BasePtr = LD->getBasePtr(); |
11199 | SDValue NewLD1 = |
11200 | DAG.getLoad(MVT::i32, DL, LD->getChain(), BasePtr, LD->getPointerInfo(), |
11201 | LD->getAlignment(), LD->getMemOperand()->getFlags()); |
11202 | |
11203 | SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr, |
11204 | DAG.getConstant(4, DL, MVT::i32)); |
11205 | SDValue NewLD2 = DAG.getLoad( |
11206 | MVT::i32, DL, NewLD1.getValue(1), OffsetPtr, LD->getPointerInfo(), |
11207 | std::min(4U, LD->getAlignment() / 2), LD->getMemOperand()->getFlags()); |
11208 | |
11209 | DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLD2.getValue(1)); |
11210 | if (DCI.DAG.getDataLayout().isBigEndian()) |
11211 | std::swap (NewLD1, NewLD2); |
11212 | SDValue Result = DCI.CombineTo(N, NewLD1, NewLD2); |
11213 | return Result; |
11214 | } |
11215 | |
11216 | return SDValue(); |
11217 | } |
11218 | |
11219 | /// PerformVMOVDRRCombine - Target-specific dag combine xforms for |
11220 | /// ARMISD::VMOVDRR. This is also used for BUILD_VECTORs with 2 operands. |
11221 | static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) { |
11222 | // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X) |
11223 | SDValue Op0 = N->getOperand(0); |
11224 | SDValue Op1 = N->getOperand(1); |
11225 | if (Op0.getOpcode() == ISD::BITCAST) |
11226 | Op0 = Op0.getOperand(0); |
11227 | if (Op1.getOpcode() == ISD::BITCAST) |
11228 | Op1 = Op1.getOperand(0); |
11229 | if (Op0.getOpcode() == ARMISD::VMOVRRD && |
11230 | Op0.getNode() == Op1.getNode() && |
11231 | Op0.getResNo() == 0 && Op1.getResNo() == 1) |
11232 | return DAG.getNode(ISD::BITCAST, SDLoc(N), |
11233 | N->getValueType(0), Op0.getOperand(0)); |
11234 | return SDValue(); |
11235 | } |
11236 | |
11237 | /// hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node |
11238 | /// are normal, non-volatile loads. If so, it is profitable to bitcast an |
11239 | /// i64 vector to have f64 elements, since the value can then be loaded |
11240 | /// directly into a VFP register. |
11241 | static bool hasNormalLoadOperand(SDNode *N) { |
11242 | unsigned NumElts = N->getValueType(0).getVectorNumElements(); |
11243 | for (unsigned i = 0; i < NumElts; ++i) { |
11244 | SDNode *Elt = N->getOperand(i).getNode(); |
11245 | if (ISD::isNormalLoad(Elt) && !cast<LoadSDNode>(Elt)->isVolatile()) |
11246 | return true; |
11247 | } |
11248 | return false; |
11249 | } |
11250 | |
11251 | /// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for |
11252 | /// ISD::BUILD_VECTOR. |
11253 | static SDValue PerformBUILD_VECTORCombine(SDNode *N, |
11254 | TargetLowering::DAGCombinerInfo &DCI, |
11255 | const ARMSubtarget *Subtarget) { |
11256 | // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X): |
11257 | // VMOVRRD is introduced when legalizing i64 types. It forces the i64 value |
11258 | // into a pair of GPRs, which is fine when the value is used as a scalar, |
11259 | // but if the i64 value is converted to a vector, we need to undo the VMOVRRD. |
11260 | SelectionDAG &DAG = DCI.DAG; |
11261 | if (N->getNumOperands() == 2) |
11262 | if (SDValue RV = PerformVMOVDRRCombine(N, DAG)) |
11263 | return RV; |
11264 | |
11265 | // Load i64 elements as f64 values so that type legalization does not split |
11266 | // them up into i32 values. |
11267 | EVT VT = N->getValueType(0); |
11268 | if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N)) |
11269 | return SDValue(); |
11270 | SDLoc dl(N); |
11271 | SmallVector<SDValue, 8> Ops; |
11272 | unsigned NumElts = VT.getVectorNumElements(); |
11273 | for (unsigned i = 0; i < NumElts; ++i) { |
11274 | SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(i)); |
11275 | Ops.push_back(V); |
11276 | // Make the DAGCombiner fold the bitcast. |
11277 | DCI.AddToWorklist(V.getNode()); |
11278 | } |
11279 | EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, NumElts); |
11280 | SDValue BV = DAG.getBuildVector(FloatVT, dl, Ops); |
11281 | return DAG.getNode(ISD::BITCAST, dl, VT, BV); |
11282 | } |
11283 | |
11284 | /// \brief Target-specific dag combine xforms for ARMISD::BUILD_VECTOR. |
11285 | static SDValue |
11286 | PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { |
11287 | // ARMISD::BUILD_VECTOR is introduced when legalizing ISD::BUILD_VECTOR. |
11288 | // At that time, we may have inserted bitcasts from integer to float. |
11289 | // If these bitcasts have survived DAGCombine, change the lowering of this |
11290 | // BUILD_VECTOR into something more vector friendly, i.e., one that
11291 | // does not force the use of floating point types.
11292 | |
11293 | // Make sure we can change the type of the vector. |
11294 | // This is possible iff: |
11295 | // 1. The vector is only used in a bitcast to an integer type. I.e.,
11296 | // 1.1. Vector is used only once. |
11297 | // 1.2. Use is a bit convert to an integer type. |
11298 | // 2. The size of its operands are 32-bits (64-bits are not legal). |
11299 | EVT VT = N->getValueType(0); |
11300 | EVT EltVT = VT.getVectorElementType(); |
11301 | |
11302 | // Check 1.1. and 2. |
11303 | if (EltVT.getSizeInBits() != 32 || !N->hasOneUse()) |
11304 | return SDValue(); |
11305 | |
11306 | // By construction, the input type must be float. |
11307 | assert(EltVT == MVT::f32 && "Unexpected type!");
11308 | |
11309 | // Check 1.2. |
11310 | SDNode *Use = *N->use_begin(); |
11311 | if (Use->getOpcode() != ISD::BITCAST || |
11312 | Use->getValueType(0).isFloatingPoint()) |
11313 | return SDValue(); |
11314 | |
11315 | // Check profitability. |
11316 | // Model is, if more than half of the relevant operands are bitcast from |
11317 | // i32, turn the build_vector into a sequence of insert_vector_elt. |
11318 | // Relevant operands are everything that is not statically |
11319 | // (i.e., at compile time) bitcasted. |
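      | // E.g. for a v4f32 BUILD_VECTOR with three operands bitcast from i32
      | // and one constant: NumOfRelevantElts = 3, NumOfBitCastedElts = 3,
      | // and 3 > 3 / 2, so the rewrite below is considered profitable.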
11320 | unsigned NumOfBitCastedElts = 0; |
11321 | unsigned NumElts = VT.getVectorNumElements(); |
11322 | unsigned NumOfRelevantElts = NumElts; |
11323 | for (unsigned Idx = 0; Idx < NumElts; ++Idx) { |
11324 | SDValue Elt = N->getOperand(Idx); |
11325 | if (Elt->getOpcode() == ISD::BITCAST) { |
11326 | // Assume only bit cast to i32 will go away. |
11327 | if (Elt->getOperand(0).getValueType() == MVT::i32) |
11328 | ++NumOfBitCastedElts; |
11329 | } else if (Elt.isUndef() || isa<ConstantSDNode>(Elt)) |
11330 | // Constants are statically cast, thus do not count them as
11331 | // relevant operands. |
11332 | --NumOfRelevantElts; |
11333 | } |
11334 | |
11335 | // Check if more than half of the elements require a non-free bitcast. |
11336 | if (NumOfBitCastedElts <= NumOfRelevantElts / 2) |
11337 | return SDValue(); |
11338 | |
11339 | SelectionDAG &DAG = DCI.DAG; |
11340 | // Create the new vector type. |
11341 | EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts); |
11342 | // Check if the type is legal. |
11343 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
11344 | if (!TLI.isTypeLegal(VecVT)) |
11345 | return SDValue(); |
11346 | |
11347 | // Combine: |
11348 | // ARMISD::BUILD_VECTOR E1, E2, ..., EN. |
11349 | // => BITCAST INSERT_VECTOR_ELT |
11350 | // (INSERT_VECTOR_ELT (...), (BITCAST EN-1), N-1), |
11351 | // (BITCAST EN), N. |
11352 | SDValue Vec = DAG.getUNDEF(VecVT); |
11353 | SDLoc dl(N); |
11354 | for (unsigned Idx = 0 ; Idx < NumElts; ++Idx) { |
11355 | SDValue V = N->getOperand(Idx); |
11356 | if (V.isUndef()) |
11357 | continue; |
11358 | if (V.getOpcode() == ISD::BITCAST && |
11359 | V->getOperand(0).getValueType() == MVT::i32) |
11360 | // Fold obvious case. |
11361 | V = V.getOperand(0); |
11362 | else { |
11363 | V = DAG.getNode(ISD::BITCAST, SDLoc(V), MVT::i32, V); |
11364 | // Make the DAGCombiner fold the bitcasts. |
11365 | DCI.AddToWorklist(V.getNode()); |
11366 | } |
11367 | SDValue LaneIdx = DAG.getConstant(Idx, dl, MVT::i32); |
11368 | Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecVT, Vec, V, LaneIdx); |
11369 | } |
11370 | Vec = DAG.getNode(ISD::BITCAST, dl, VT, Vec); |
11371 | // Make the DAGCombiner fold the bitcasts. |
11372 | DCI.AddToWorklist(Vec.getNode()); |
11373 | return Vec; |
11374 | } |
11375 | |
11376 | /// PerformInsertEltCombine - Target-specific dag combine xforms for |
11377 | /// ISD::INSERT_VECTOR_ELT. |
11378 | static SDValue PerformInsertEltCombine(SDNode *N, |
11379 | TargetLowering::DAGCombinerInfo &DCI) { |
11380 | // Bitcast an i64 load inserted into a vector to f64. |
11381 | // Otherwise, the i64 value will be legalized to a pair of i32 values. |
11382 | EVT VT = N->getValueType(0); |
11383 | SDNode *Elt = N->getOperand(1).getNode(); |
11384 | if (VT.getVectorElementType() != MVT::i64 || |
11385 | !ISD::isNormalLoad(Elt) || cast<LoadSDNode>(Elt)->isVolatile()) |
11386 | return SDValue(); |
11387 | |
11388 | SelectionDAG &DAG = DCI.DAG; |
11389 | SDLoc dl(N); |
11390 | EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, |
11391 | VT.getVectorNumElements()); |
11392 | SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, N->getOperand(0)); |
11393 | SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(1)); |
11394 | // Make the DAGCombiner fold the bitcasts. |
11395 | DCI.AddToWorklist(Vec.getNode()); |
11396 | DCI.AddToWorklist(V.getNode()); |
11397 | SDValue InsElt = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, FloatVT, |
11398 | Vec, V, N->getOperand(2)); |
11399 | return DAG.getNode(ISD::BITCAST, dl, VT, InsElt); |
11400 | } |
11401 | |
11402 | /// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for |
11403 | /// ISD::VECTOR_SHUFFLE. |
11404 | static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) { |
11405 | // The LLVM shufflevector instruction does not require the shuffle mask |
11406 | // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does |
11407 | // have that requirement. When translating to ISD::VECTOR_SHUFFLE, if the |
11408 | // operands do not match the mask length, they are extended by concatenating |
11409 | // them with undef vectors. That is probably the right thing for other |
11410 | // targets, but for NEON it is better to concatenate two double-register |
11411 | // size vector operands into a single quad-register size vector. Do that |
11412 | // transformation here: |
11413 | // shuffle(concat(v1, undef), concat(v2, undef)) -> |
11414 | // shuffle(concat(v1, v2), undef) |
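      | // E.g. with v4i16 inputs v1 and v2, a v8i16 shuffle mask
      | // <0,8,1,9,2,10,3,11> over concat(v1, undef) and concat(v2, undef)
      | // becomes <0,4,1,5,2,6,3,7> over concat(v1, v2) and undef.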
11415 | SDValue Op0 = N->getOperand(0); |
11416 | SDValue Op1 = N->getOperand(1); |
11417 | if (Op0.getOpcode() != ISD::CONCAT_VECTORS || |
11418 | Op1.getOpcode() != ISD::CONCAT_VECTORS || |
11419 | Op0.getNumOperands() != 2 || |
11420 | Op1.getNumOperands() != 2) |
11421 | return SDValue(); |
11422 | SDValue Concat0Op1 = Op0.getOperand(1); |
11423 | SDValue Concat1Op1 = Op1.getOperand(1); |
11424 | if (!Concat0Op1.isUndef() || !Concat1Op1.isUndef()) |
11425 | return SDValue(); |
11426 | // Skip the transformation if any of the types are illegal. |
11427 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
11428 | EVT VT = N->getValueType(0); |
11429 | if (!TLI.isTypeLegal(VT) || |
11430 | !TLI.isTypeLegal(Concat0Op1.getValueType()) || |
11431 | !TLI.isTypeLegal(Concat1Op1.getValueType())) |
11432 | return SDValue(); |
11433 | |
11434 | SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, |
11435 | Op0.getOperand(0), Op1.getOperand(0)); |
11436 | // Translate the shuffle mask. |
11437 | SmallVector<int, 16> NewMask; |
11438 | unsigned NumElts = VT.getVectorNumElements(); |
11439 | unsigned HalfElts = NumElts/2; |
11440 | ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N); |
11441 | for (unsigned n = 0; n < NumElts; ++n) { |
11442 | int MaskElt = SVN->getMaskElt(n); |
11443 | int NewElt = -1; |
11444 | if (MaskElt < (int)HalfElts) |
11445 | NewElt = MaskElt; |
11446 | else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts)) |
11447 | NewElt = HalfElts + MaskElt - NumElts; |
11448 | NewMask.push_back(NewElt); |
11449 | } |
11450 | return DAG.getVectorShuffle(VT, SDLoc(N), NewConcat, |
11451 | DAG.getUNDEF(VT), NewMask); |
11452 | } |
11453 | |
11454 | /// CombineBaseUpdate - Target-specific DAG combine function for VLDDUP, |
11455 | /// NEON load/store intrinsics, and generic vector load/stores, to merge |
11456 | /// base address updates. |
11457 | /// For generic load/stores, the memory type is assumed to be a vector. |
11458 | /// The caller is assumed to have checked legality. |
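      | /// E.g. a (vld1 [r0]) whose address also feeds a following
      | /// (add r0, #8) can become the post-incremented form
      | /// vld1 {d0}, [r0]!, folding the pointer update into the load.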
11459 | static SDValue CombineBaseUpdate(SDNode *N, |
11460 | TargetLowering::DAGCombinerInfo &DCI) { |
11461 | SelectionDAG &DAG = DCI.DAG; |
11462 | const bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID || |
11463 | N->getOpcode() == ISD::INTRINSIC_W_CHAIN); |
11464 | const bool isStore = N->getOpcode() == ISD::STORE; |
11465 | const unsigned AddrOpIdx = ((isIntrinsic || isStore) ? 2 : 1); |
11466 | SDValue Addr = N->getOperand(AddrOpIdx); |
11467 | MemSDNode *MemN = cast<MemSDNode>(N); |
11468 | SDLoc dl(N); |
11469 | |
11470 | // Search for a use of the address operand that is an increment. |
11471 | for (SDNode::use_iterator UI = Addr.getNode()->use_begin(), |
11472 | UE = Addr.getNode()->use_end(); UI != UE; ++UI) { |
11473 | SDNode *User = *UI; |
11474 | if (User->getOpcode() != ISD::ADD || |
11475 | UI.getUse().getResNo() != Addr.getResNo()) |
11476 | continue; |
11477 | |
11478 | // Check that the add is independent of the load/store. Otherwise, folding |
11479 | // it would create a cycle. |
11480 | if (User->isPredecessorOf(N) || N->isPredecessorOf(User)) |
11481 | continue; |
11482 | |
11483 | // Find the new opcode for the updating load/store. |
11484 | bool isLoadOp = true; |
11485 | bool isLaneOp = false; |
11486 | unsigned NewOpc = 0; |
11487 | unsigned NumVecs = 0; |
11488 | if (isIntrinsic) { |
11489 | unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); |
11490 | switch (IntNo) { |
11491 | default: llvm_unreachable("unexpected intrinsic for Neon base update");
11492 | case Intrinsic::arm_neon_vld1: NewOpc = ARMISD::VLD1_UPD; |
11493 | NumVecs = 1; break; |
11494 | case Intrinsic::arm_neon_vld2: NewOpc = ARMISD::VLD2_UPD; |
11495 | NumVecs = 2; break; |
11496 | case Intrinsic::arm_neon_vld3: NewOpc = ARMISD::VLD3_UPD; |
11497 | NumVecs = 3; break; |
11498 | case Intrinsic::arm_neon_vld4: NewOpc = ARMISD::VLD4_UPD; |
11499 | NumVecs = 4; break; |
11500 | case Intrinsic::arm_neon_vld2lane: NewOpc = ARMISD::VLD2LN_UPD; |
11501 | NumVecs = 2; isLaneOp = true; break; |
11502 | case Intrinsic::arm_neon_vld3lane: NewOpc = ARMISD::VLD3LN_UPD; |
11503 | NumVecs = 3; isLaneOp = true; break; |
11504 | case Intrinsic::arm_neon_vld4lane: NewOpc = ARMISD::VLD4LN_UPD; |
11505 | NumVecs = 4; isLaneOp = true; break; |
11506 | case Intrinsic::arm_neon_vst1: NewOpc = ARMISD::VST1_UPD; |
11507 | NumVecs = 1; isLoadOp = false; break; |
11508 | case Intrinsic::arm_neon_vst2: NewOpc = ARMISD::VST2_UPD; |
11509 | NumVecs = 2; isLoadOp = false; break; |
11510 | case Intrinsic::arm_neon_vst3: NewOpc = ARMISD::VST3_UPD; |
11511 | NumVecs = 3; isLoadOp = false; break; |
11512 | case Intrinsic::arm_neon_vst4: NewOpc = ARMISD::VST4_UPD; |
11513 | NumVecs = 4; isLoadOp = false; break; |
11514 | case Intrinsic::arm_neon_vst2lane: NewOpc = ARMISD::VST2LN_UPD; |
11515 | NumVecs = 2; isLoadOp = false; isLaneOp = true; break; |
11516 | case Intrinsic::arm_neon_vst3lane: NewOpc = ARMISD::VST3LN_UPD; |
11517 | NumVecs = 3; isLoadOp = false; isLaneOp = true; break; |
11518 | case Intrinsic::arm_neon_vst4lane: NewOpc = ARMISD::VST4LN_UPD; |
11519 | NumVecs = 4; isLoadOp = false; isLaneOp = true; break; |
11520 | } |
11521 | } else { |
11522 | isLaneOp = true; |
11523 | switch (N->getOpcode()) { |
11524 | default: llvm_unreachable("unexpected opcode for Neon base update");
11525 | case ARMISD::VLD1DUP: NewOpc = ARMISD::VLD1DUP_UPD; NumVecs = 1; break; |
11526 | case ARMISD::VLD2DUP: NewOpc = ARMISD::VLD2DUP_UPD; NumVecs = 2; break; |
11527 | case ARMISD::VLD3DUP: NewOpc = ARMISD::VLD3DUP_UPD; NumVecs = 3; break; |
11528 | case ARMISD::VLD4DUP: NewOpc = ARMISD::VLD4DUP_UPD; NumVecs = 4; break; |
11529 | case ISD::LOAD: NewOpc = ARMISD::VLD1_UPD; |
11530 | NumVecs = 1; isLaneOp = false; break; |
11531 | case ISD::STORE: NewOpc = ARMISD::VST1_UPD; |
11532 | NumVecs = 1; isLaneOp = false; isLoadOp = false; break; |
11533 | } |
11534 | } |
11535 | |
11536 | // Find the size of memory referenced by the load/store. |
11537 | EVT VecTy; |
11538 | if (isLoadOp) { |
11539 | VecTy = N->getValueType(0); |
11540 | } else if (isIntrinsic) { |
11541 | VecTy = N->getOperand(AddrOpIdx+1).getValueType(); |
11542 | } else { |
11543 | assert(isStore && "Node has to be a load, a store, or an intrinsic!");
11544 | VecTy = N->getOperand(1).getValueType(); |
11545 | } |
11546 | |
11547 | unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8; |
11548 | if (isLaneOp) |
11549 | NumBytes /= VecTy.getVectorNumElements(); |
11550 | |
11551 | // If the increment is a constant, it must match the memory ref size. |
11552 | SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0); |
11553 | ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode()); |
11554 | if (NumBytes >= 3 * 16 && (!CInc || CInc->getZExtValue() != NumBytes)) { |
11555 | // VLD3/4 and VST3/4 for 128-bit vectors are implemented with two |
11556 | // separate instructions that make it harder to use a non-constant update. |
11557 | continue; |
11558 | } |
11559 | |
11560 | // OK, we found an ADD we can fold into the base update. |
11561 | // Now, create a _UPD node, taking care of not breaking alignment. |
11562 | |
11563 | EVT AlignedVecTy = VecTy; |
11564 | unsigned Alignment = MemN->getAlignment(); |
11565 | |
11566 | // If this is a less-than-standard-aligned load/store, change the type to |
11567 | // match the standard alignment. |
11568 | // The alignment is overlooked when selecting _UPD variants; and it's |
11569 | // easier to introduce bitcasts here than fix that. |
11570 | // There are 3 ways to get to this base-update combine: |
11571 | // - intrinsics: they are assumed to be properly aligned (to the standard |
11572 | // alignment of the memory type), so we don't need to do anything. |
11573 | // - ARMISD::VLDx nodes: they are only generated from the aforementioned |
11574 | // intrinsics, so, likewise, there's nothing to do. |
11575 | // - generic load/store instructions: the alignment is specified as an |
11576 | // explicit operand, rather than implicitly as the standard alignment |
11577 | // of the memory type (like the intrinsics). We need to change the
11578 | // memory type to match the explicit alignment. That way, we don't |
11579 | // generate non-standard-aligned ARMISD::VLDx nodes. |
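      | // E.g. a generic v2i64 load (NumBytes = 16) with MMO alignment 4
      | // is retyped below to v4i32 (EltTy = i32, NumElts = 16 / 4), so the
      | // resulting VLD1_UPD stays within its standard alignment.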
11580 | if (isa<LSBaseSDNode>(N)) { |
11581 | if (Alignment == 0) |
11582 | Alignment = 1; |
11583 | if (Alignment < VecTy.getScalarSizeInBits() / 8) { |
11584 | MVT EltTy = MVT::getIntegerVT(Alignment * 8); |
11585 | assert(NumVecs == 1 && "Unexpected multi-element generic load/store.");
11586 | assert(!isLaneOp && "Unexpected generic load/store lane.");
11587 | unsigned NumElts = NumBytes / (EltTy.getSizeInBits() / 8); |
11588 | AlignedVecTy = MVT::getVectorVT(EltTy, NumElts); |
11589 | } |
11590 | // Don't set an explicit alignment on regular load/stores that we want |
11591 | // to transform to VLD/VST 1_UPD nodes. |
11592 | // This matches the behavior of regular load/stores, which only get an |
11593 | // explicit alignment if the MMO alignment is larger than the standard |
11594 | // alignment of the memory type. |
11595 | // Intrinsics, however, always get an explicit alignment, set to the |
11596 | // alignment of the MMO. |
11597 | Alignment = 1; |
11598 | } |
11599 | |
11600 | // Create the new updating load/store node. |
11601 | // First, create an SDVTList for the new updating node's results. |
11602 | EVT Tys[6]; |
11603 | unsigned NumResultVecs = (isLoadOp ? NumVecs : 0); |
11604 | unsigned n; |
11605 | for (n = 0; n < NumResultVecs; ++n) |
11606 | Tys[n] = AlignedVecTy; |
11607 | Tys[n++] = MVT::i32; |
11608 | Tys[n] = MVT::Other; |
11609 | SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumResultVecs+2)); |
11610 | |
11611 | // Then, gather the new node's operands. |
11612 | SmallVector<SDValue, 8> Ops; |
11613 | Ops.push_back(N->getOperand(0)); // incoming chain |
11614 | Ops.push_back(N->getOperand(AddrOpIdx)); |
11615 | Ops.push_back(Inc); |
11616 | |
11617 | if (StoreSDNode *StN = dyn_cast<StoreSDNode>(N)) { |
11618 | // Try to match the intrinsic's signature |
11619 | Ops.push_back(StN->getValue()); |
11620 | } else { |
11621 | // Loads (and of course intrinsics) match the intrinsics' signature, |
11622 | // so just add all but the alignment operand. |
11623 | for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands() - 1; ++i) |
11624 | Ops.push_back(N->getOperand(i)); |
11625 | } |
11626 | |
11627 | // For all node types, the alignment operand is always the last one. |
11628 | Ops.push_back(DAG.getConstant(Alignment, dl, MVT::i32)); |
11629 | |
11630 | // If this is a non-standard-aligned STORE, the penultimate operand is the |
11631 | // stored value. Bitcast it to the aligned type. |
11632 | if (AlignedVecTy != VecTy && N->getOpcode() == ISD::STORE) { |
11633 | SDValue &StVal = Ops[Ops.size()-2]; |
11634 | StVal = DAG.getNode(ISD::BITCAST, dl, AlignedVecTy, StVal); |
11635 | } |
11636 | |
11637 | EVT LoadVT = isLaneOp ? VecTy.getVectorElementType() : AlignedVecTy; |
11638 | SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, dl, SDTys, Ops, LoadVT, |
11639 | MemN->getMemOperand()); |
11640 | |
11641 | // Update the uses. |
11642 | SmallVector<SDValue, 5> NewResults; |
11643 | for (unsigned i = 0; i < NumResultVecs; ++i) |
11644 | NewResults.push_back(SDValue(UpdN.getNode(), i)); |
11645 | |
11646 | // If this is a non-standard-aligned LOAD, the first result is the loaded
11647 | // value. Bitcast it to the expected result type. |
11648 | if (AlignedVecTy != VecTy && N->getOpcode() == ISD::LOAD) { |
11649 | SDValue &LdVal = NewResults[0]; |
11650 | LdVal = DAG.getNode(ISD::BITCAST, dl, VecTy, LdVal); |
11651 | } |
11652 | |
11653 | NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs+1)); // chain |
11654 | DCI.CombineTo(N, NewResults); |
11655 | DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs)); |
11656 | |
11657 | break; |
11658 | } |
11659 | return SDValue(); |
11660 | } |
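// Sketch of the overall CombineBaseUpdate transform (illustrative, not
// verbatim DAG syntax):
//   t0: v4i32,ch = load t(ch), %ptr
//   t1: i32      = add %ptr, 16        // increment equals the access size
// becomes
//   t0: v4i32, i32, ch = VLD1_UPD t(ch), %ptr, 16, <align>
// and the add's uses are redirected to the new node's pointer result.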
11661 | |
11662 | static SDValue PerformVLDCombine(SDNode *N, |
11663 | TargetLowering::DAGCombinerInfo &DCI) { |
11664 | if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) |
11665 | return SDValue(); |
11666 | |
11667 | return CombineBaseUpdate(N, DCI); |
11668 | } |
11669 | |
11670 | /// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a |
11671 | /// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic |
11672 | /// are also VDUPLANEs. If so, combine them to a vldN-dup operation and |
11673 | /// return true. |
11674 | static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { |
11675 | SelectionDAG &DAG = DCI.DAG; |
11676 | EVT VT = N->getValueType(0); |
11677 | // vldN-dup instructions only support 64-bit vectors for N > 1. |
11678 | if (!VT.is64BitVector()) |
11679 | return false; |
11680 | |
11681 | // Check if the VDUPLANE operand is a vldN-dup intrinsic. |
11682 | SDNode *VLD = N->getOperand(0).getNode(); |
11683 | if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN) |
11684 | return false; |
11685 | unsigned NumVecs = 0; |
11686 | unsigned NewOpc = 0; |
11687 | unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue(); |
11688 | if (IntNo == Intrinsic::arm_neon_vld2lane) { |
11689 | NumVecs = 2; |
11690 | NewOpc = ARMISD::VLD2DUP; |
11691 | } else if (IntNo == Intrinsic::arm_neon_vld3lane) { |
11692 | NumVecs = 3; |
11693 | NewOpc = ARMISD::VLD3DUP; |
11694 | } else if (IntNo == Intrinsic::arm_neon_vld4lane) { |
11695 | NumVecs = 4; |
11696 | NewOpc = ARMISD::VLD4DUP; |
11697 | } else { |
11698 | return false; |
11699 | } |
11700 | |
11701 | // First check that all the vldN-lane uses are VDUPLANEs and that the lane |
11702 | // numbers match the load. |
11703 | unsigned VLDLaneNo = |
11704 | cast<ConstantSDNode>(VLD->getOperand(NumVecs+3))->getZExtValue(); |
11705 | for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); |
11706 | UI != UE; ++UI) { |
11707 | // Ignore uses of the chain result. |
11708 | if (UI.getUse().getResNo() == NumVecs) |
11709 | continue; |
11710 | SDNode *User = *UI; |
11711 | if (User->getOpcode() != ARMISD::VDUPLANE || |
11712 | VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue()) |
11713 | return false; |
11714 | } |
11715 | |
11716 | // Create the vldN-dup node. |
11717 | EVT Tys[5]; |
11718 | unsigned n; |
11719 | for (n = 0; n < NumVecs; ++n) |
11720 | Tys[n] = VT; |
11721 | Tys[n] = MVT::Other; |
11722 | SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumVecs+1)); |
11723 | SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) }; |
11724 | MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD); |
11725 | SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, SDLoc(VLD), SDTys, |
11726 | Ops, VLDMemInt->getMemoryVT(), |
11727 | VLDMemInt->getMemOperand()); |
11728 | |
11729 | // Update the uses. |
11730 | for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); |
11731 | UI != UE; ++UI) { |
11732 | unsigned ResNo = UI.getUse().getResNo(); |
11733 | // Ignore uses of the chain result. |
11734 | if (ResNo == NumVecs) |
11735 | continue; |
11736 | SDNode *User = *UI; |
11737 | DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo)); |
11738 | } |
11739 | |
11740 | // Now the vldN-lane intrinsic is dead except for its chain result. |
11741 | // Update uses of the chain. |
11742 | std::vector<SDValue> VLDDupResults; |
11743 | for (unsigned n = 0; n < NumVecs; ++n) |
11744 | VLDDupResults.push_back(SDValue(VLDDup.getNode(), n)); |
11745 | VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs)); |
11746 | DCI.CombineTo(VLD, VLDDupResults); |
11747 | |
11748 | return true; |
11749 | } |
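// Sketch of the transform (illustrative): if
//   %r = @llvm.arm.neon.vld2lane(..., lane 1)
// is used only by VDUPLANE(%r.0, 1) and VDUPLANE(%r.1, 1), the whole group
// is replaced by one ARMISD::VLD2DUP, which loads the two elements once and
// broadcasts them across all lanes of both result vectors.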
11750 | |
11751 | /// PerformVDUPLANECombine - Target-specific dag combine xforms for |
11752 | /// ARMISD::VDUPLANE. |
11753 | static SDValue PerformVDUPLANECombine(SDNode *N, |
11754 | TargetLowering::DAGCombinerInfo &DCI) { |
11755 | SDValue Op = N->getOperand(0); |
11756 | |
11757 | // If the source is a vldN-lane (N > 1) intrinsic, and all the other uses |
11758 | // of that intrinsic are also VDUPLANEs, combine them to a vldN-dup operation. |
11759 | if (CombineVLDDUP(N, DCI)) |
11760 | return SDValue(N, 0); |
11761 | |
11762 | // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is |
11763 | // redundant. Ignore bit_converts for now; element sizes are checked below. |
11764 | while (Op.getOpcode() == ISD::BITCAST) |
11765 | Op = Op.getOperand(0); |
11766 | if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM) |
11767 | return SDValue(); |
11768 | |
11769 | // Make sure the VMOV element size is not bigger than the VDUPLANE elements. |
11770 | unsigned EltSize = Op.getScalarValueSizeInBits(); |
11771 | // The canonical VMOV for a zero vector uses a 32-bit element size. |
11772 | unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); |
11773 | unsigned EltBits; |
11774 | if (ARM_AM::decodeNEONModImm(Imm, EltBits) == 0) |
11775 | EltSize = 8; |
11776 | EVT VT = N->getValueType(0); |
11777 | if (EltSize > VT.getScalarSizeInBits()) |
11778 | return SDValue(); |
11779 | |
11780 | return DCI.DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op); |
11781 | } |
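// Example (illustrative): VDUPLANE of a VMOVIMM all-zero splat is the same
// zero splat, so it folds to a plain BITCAST. The EltSize check above
// rejects cases where the VMOV immediate was encoded with elements wider
// than the VDUPLANE lanes, since then the lanes would not all be equal.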
11782 | |
11783 | /// PerformVDUPCombine - Target-specific dag combine xforms for ARMISD::VDUP. |
11784 | static SDValue PerformVDUPCombine(SDNode *N, |
11785 | TargetLowering::DAGCombinerInfo &DCI) { |
11786 | SelectionDAG &DAG = DCI.DAG; |
11787 | SDValue Op = N->getOperand(0); |
11788 | |
11789 | // Match VDUP(LOAD) -> VLD1DUP. |
11790 | // We match this pattern here rather than waiting for isel because the |
11791 | // transform is only legal for unindexed loads. |
11792 | LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode()); |
11793 | if (LD && Op.hasOneUse() && LD->isUnindexed() && |
11794 | LD->getMemoryVT() == N->getValueType(0).getVectorElementType()) { |
11795 | SDValue Ops[] = { LD->getOperand(0), LD->getOperand(1), |
11796 | DAG.getConstant(LD->getAlignment(), SDLoc(N), MVT::i32) }; |
11797 | SDVTList SDTys = DAG.getVTList(N->getValueType(0), MVT::Other); |
11798 | SDValue VLDDup = DAG.getMemIntrinsicNode(ARMISD::VLD1DUP, SDLoc(N), SDTys, |
11799 | Ops, LD->getMemoryVT(), |
11800 | LD->getMemOperand()); |
11801 | DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), VLDDup.getValue(1)); |
11802 | return VLDDup; |
11803 | } |
11804 | |
11805 | return SDValue(); |
11806 | } |
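// Example (illustrative): VDUP (i16 (load %p)), where the loaded type equals
// the vector element type, becomes an ARMISD::VLD1DUP (a vld1.16 {d0[]}-style
// load-and-broadcast), and the old load's chain users are rewired to the new
// node's chain result.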
11807 | |
11808 | static SDValue PerformLOADCombine(SDNode *N, |
11809 | TargetLowering::DAGCombinerInfo &DCI) { |
11810 | EVT VT = N->getValueType(0); |
11811 | |
11812 | // If this is a legal vector load, try to combine it into a VLD1_UPD. |
11813 | if (ISD::isNormalLoad(N) && VT.isVector() && |
11814 | DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT)) |
11815 | return CombineBaseUpdate(N, DCI); |
11816 | |
11817 | return SDValue(); |
11818 | } |
11819 | |
11820 | /// PerformSTORECombine - Target-specific dag combine xforms for |
11821 | /// ISD::STORE. |
11822 | static SDValue PerformSTORECombine(SDNode *N, |
11823 | TargetLowering::DAGCombinerInfo &DCI) { |
11824 | StoreSDNode *St = cast<StoreSDNode>(N); |
11825 | if (St->isVolatile()) |
11826 | return SDValue(); |
11827 | |
11828 | // Optimize trunc store (of multiple scalars) to shuffle and store. First, |
11829 | // pack all of the elements in one place. Next, store to memory in fewer |
11830 | // chunks. |
11831 | SDValue StVal = St->getValue(); |
11832 | EVT VT = StVal.getValueType(); |
11833 | if (St->isTruncatingStore() && VT.isVector()) { |
11834 | SelectionDAG &DAG = DCI.DAG; |
11835 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
11836 | EVT StVT = St->getMemoryVT(); |
11837 | unsigned NumElems = VT.getVectorNumElements(); |
11838 | assert(StVT != VT && "Cannot truncate to the same type");
11839 | unsigned FromEltSz = VT.getScalarSizeInBits(); |
11840 | unsigned ToEltSz = StVT.getScalarSizeInBits(); |
11841 | |
11842 | // The From and To sizes, as well as the element count, must be powers of two.
11843 | if (!isPowerOf2_32(NumElems * FromEltSz * ToEltSz)) return SDValue(); |
11844 | |
11845 | // We are going to use the original vector elt for storing. |
11846 | // Accumulated smaller vector elements must be a multiple of the store size. |
11847 | if (0 != (NumElems * FromEltSz) % ToEltSz) return SDValue(); |
11848 | |
11849 | unsigned SizeRatio = FromEltSz / ToEltSz; |
11850 | assert(SizeRatio * NumElems * ToEltSz == VT.getSizeInBits());
11851 | |
11852 | // Create a type on which we perform the shuffle. |
11853 | EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), StVT.getScalarType(), |
11854 | NumElems*SizeRatio); |
11855 | assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
11856 | |
11857 | SDLoc DL(St); |
11858 | SDValue WideVec = DAG.getNode(ISD::BITCAST, DL, WideVecVT, StVal); |
11859 | SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1); |
11860 | for (unsigned i = 0; i < NumElems; ++i) |
11861 | ShuffleVec[i] = DAG.getDataLayout().isBigEndian() |
11862 | ? (i + 1) * SizeRatio - 1 |
11863 | : i * SizeRatio; |
11864 | |
11865 | // Can't shuffle using an illegal type. |
11866 | if (!TLI.isTypeLegal(WideVecVT)) return SDValue(); |
11867 | |
11868 | SDValue Shuff = DAG.getVectorShuffle(WideVecVT, DL, WideVec, |
11869 | DAG.getUNDEF(WideVec.getValueType()), |
11870 | ShuffleVec); |
11871 | // At this point all of the data is stored at the bottom of the |
11872 | // register. We now need to save it to mem. |
11873 | |
11874 | // Find the largest store unit |
11875 | MVT StoreType = MVT::i8; |
11876 | for (MVT Tp : MVT::integer_valuetypes()) { |
11877 | if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToEltSz) |
11878 | StoreType = Tp; |
11879 | } |
11880 | // Didn't find a legal store type. |
11881 | if (!TLI.isTypeLegal(StoreType)) |
11882 | return SDValue(); |
11883 | |
11884 | // Bitcast the original vector into a vector of store-size units |
11885 | EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(), |
11886 | StoreType, VT.getSizeInBits()/EVT(StoreType).getSizeInBits()); |
11887 | assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
11888 | SDValue ShuffWide = DAG.getNode(ISD::BITCAST, DL, StoreVecVT, Shuff); |
11889 | SmallVector<SDValue, 8> Chains; |
11890 | SDValue Increment = DAG.getConstant(StoreType.getSizeInBits() / 8, DL, |
11891 | TLI.getPointerTy(DAG.getDataLayout())); |
11892 | SDValue BasePtr = St->getBasePtr(); |
11893 | |
11894 | // Perform one or more big stores into memory. |
11895 | unsigned E = (ToEltSz*NumElems)/StoreType.getSizeInBits(); |
11896 | for (unsigned I = 0; I < E; I++) { |
11897 | SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, |
11898 | StoreType, ShuffWide, |
11899 | DAG.getIntPtrConstant(I, DL)); |
11900 | SDValue Ch = DAG.getStore(St->getChain(), DL, SubVec, BasePtr, |
11901 | St->getPointerInfo(), St->getAlignment(), |
11902 | St->getMemOperand()->getFlags()); |
11903 | BasePtr = DAG.getNode(ISD::ADD, DL, BasePtr.getValueType(), BasePtr, |
11904 | Increment); |
11905 | Chains.push_back(Ch); |
11906 | } |
11907 | return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains); |
11908 | } |
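// Worked example of the truncating-store path (illustrative, assuming i32 is
// the widest legal integer store unit): storing v4i32 %x as v4i16 bitcasts
// %x to v8i16 (WideVecVT), shuffles the meaningful halfwords to indices
// {0,2,4,6} on little-endian so the data lands in the low 64 bits, bitcasts
// to v4i32, and emits two i32 stores instead of four scalar truncating
// stores.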
11909 | |
11910 | if (!ISD::isNormalStore(St)) |
11911 | return SDValue(); |
11912 | |
11913 | // Split a store of a VMOVDRR into two integer stores to avoid mixing NEON and |
11914 | // ARM stores of arguments in the same cache line. |
11915 | if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR && |
11916 | StVal.getNode()->hasOneUse()) { |
11917 | SelectionDAG &DAG = DCI.DAG; |
11918 | bool isBigEndian = DAG.getDataLayout().isBigEndian(); |
11919 | SDLoc DL(St); |
11920 | SDValue BasePtr = St->getBasePtr(); |
11921 | SDValue NewST1 = DAG.getStore( |
11922 | St->getChain(), DL, StVal.getNode()->getOperand(isBigEndian ? 1 : 0), |
11923 | BasePtr, St->getPointerInfo(), St->getAlignment(), |
11924 | St->getMemOperand()->getFlags()); |
11925 | |
11926 | SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr, |
11927 | DAG.getConstant(4, DL, MVT::i32)); |
11928 | return DAG.getStore(NewST1.getValue(0), DL, |
11929 | StVal.getNode()->getOperand(isBigEndian ? 0 : 1), |
11930 | OffsetPtr, St->getPointerInfo(), |
11931 | std::min(4U, St->getAlignment() / 2), |
11932 | St->getMemOperand()->getFlags()); |
11933 | } |
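// Example (illustrative): an 8-byte store of (VMOVDRR r0, r1) becomes two
// 4-byte integer stores at offsets 0 and 4 (operand order swapped on
// big-endian), keeping these argument stores in the integer pipeline rather
// than mixing NEON and ARM stores to the same cache line.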
11934 | |
11935 | if (StVal.getValueType() == MVT::i64 && |
11936 | StVal.getNode()->getOpcode() == ISD::EXTRACT_VECTOR_ELT) { |
11937 | |
11938 | // Bitcast an i64 store extracted from a vector to f64. |
11939 | // Otherwise, the i64 value will be legalized to a pair of i32 values. |
11940 | SelectionDAG &DAG = DCI.DAG; |
11941 | SDLoc dl(StVal); |
11942 | SDValue IntVec = StVal.getOperand(0); |
11943 | EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, |
11944 | IntVec.getValueType().getVectorNumElements()); |
11945 | SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, IntVec); |
11946 | SDValue ExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, |
11947 | Vec, StVal.getOperand(1)); |
11948 | dl = SDLoc(N); |
11949 | SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ExtElt); |
11950 | // Make the DAGCombiner fold the bitcasts. |
11951 | DCI.AddToWorklist(Vec.getNode()); |
11952 | DCI.AddToWorklist(ExtElt.getNode()); |
11953 | DCI.AddToWorklist(V.getNode()); |
11954 | return DAG.getStore(St->getChain(), dl, V, St->getBasePtr(), |
11955 | St->getPointerInfo(), St->getAlignment(), |
11956 | St->getMemOperand()->getFlags(), St->getAAInfo()); |
11957 | } |
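// Example (illustrative): store (i64 extractelement (v2i64 %v), 1) becomes
// store (i64 bitcast (f64 extractelement (v2f64 bitcast %v), 1)); the
// combiner then folds the bitcasts so the element can be stored directly
// from a D register instead of as two i32 halves.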
11958 | |
11959 | // If this is a legal vector store, try to combine it into a VST1_UPD. |
11960 | if (ISD::isNormalStore(N) && VT.isVector() && |
11961 | DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT)) |
11962 | return CombineBaseUpdate(N, DCI); |
11963 | |
11964 | return SDValue(); |
11965 | } |
11966 | |
11967 | /// PerformVCVTCombine - VCVT (floating-point to fixed-point, Advanced SIMD) |
11968 | /// can replace combinations of VMUL and VCVT (floating-point to integer) |
11969 | /// when the VMUL has a constant operand that is a power of 2. |
11970 | /// |
11971 | /// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>): |
11972 | /// vmul.f32 d16, d17, d16 |
11973 | /// vcvt.s32.f32 d16, d16 |
11974 | /// becomes: |
11975 | /// vcvt.s32.f32 d16, d16, #3 |
11976 | static SDValue PerformVCVTCombine(SDNode *N, SelectionDAG &DAG, |
11977 | const ARMSubtarget *Subtarget) { |
11978 | if (!Subtarget->hasNEON()) |
11979 | return SDValue(); |
11980 | |
11981 | SDValue Op = N->getOperand(0); |
11982 | if (!Op.getValueType().isVector() || !Op.getValueType().isSimple() || |
11983 | Op.getOpcode() != ISD::FMUL) |
11984 | return SDValue(); |
11985 | |
11986 | SDValue ConstVec = Op->getOperand(1); |
11987 | if (!isa<BuildVectorSDNode>(ConstVec)) |
11988 | return SDValue(); |
11989 | |
11990 | MVT FloatTy = Op.getSimpleValueType().getVectorElementType(); |
11991 | uint32_t FloatBits = FloatTy.getSizeInBits(); |
11992 | MVT IntTy = N->getSimpleValueType(0).getVectorElementType(); |
11993 | uint32_t IntBits = IntTy.getSizeInBits(); |
11994 | unsigned NumLanes = Op.getValueType().getVectorNumElements(); |
11995 | if (FloatBits != 32 || IntBits > 32 || NumLanes > 4) { |
11996 | // These instructions only exist converting from f32 to i32. We can handle |
11997 | // smaller integers by generating an extra truncate, but larger ones would |
11998 | // be lossy. We also can't handle more than 4 lanes, since these instructions
11999 | // only support v2i32/v4i32 types. |
12000 | return SDValue(); |
12001 | } |
12002 | |
12003 | BitVector UndefElements; |
12004 | BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec); |
12005 | int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, 33); |
12006 | if (C == -1 || C == 0 || C > 32) |
12007 | return SDValue(); |
12008 | |
12009 | SDLoc dl(N); |
12010 | bool isSigned = N->getOpcode() == ISD::FP_TO_SINT; |
12011 | unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs : |
12012 | Intrinsic::arm_neon_vcvtfp2fxu; |
12013 | SDValue FixConv = DAG.getNode( |
12014 | ISD::INTRINSIC_WO_CHAIN, dl, NumLanes == 2 ? MVT::v2i32 : MVT::v4i32, |
12015 | DAG.getConstant(IntrinsicOpcode, dl, MVT::i32), Op->getOperand(0), |
12016 | DAG.getConstant(C, dl, MVT::i32)); |
12017 | |
12018 | if (IntBits < FloatBits) |
12019 | FixConv = DAG.getNode(ISD::TRUNCATE, dl, N->getValueType(0), FixConv); |
12020 | |
12021 | return FixConv; |
12022 | } |
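// Worked example (illustrative): for <4 x float> %x,
//   fptosi (fmul %x, <8.0, 8.0, 8.0, 8.0>)
// has a splat multiplier 8.0 == 2^3, so C == 3 above and the pair folds to
//   @llvm.arm.neon.vcvtfp2fxs(%x, i32 3)
// i.e. a single "vcvt.s32.f32 q0, q0, #3".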
12023 | |
12024 | /// PerformVDIVCombine - VCVT (fixed-point to floating-point, Advanced SIMD) |
12025 | /// can replace combinations of VCVT (integer to floating-point) and VDIV |
12026 | /// when the VDIV has a constant operand that is a power of 2. |
12027 | /// |
12028 | /// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>): |
12029 | /// vcvt.f32.s32 d16, d16 |
12030 | /// vdiv.f32 d16, d17, d16 |
12031 | /// becomes: |
12032 | /// vcvt.f32.s32 d16, d16, #3 |
12033 | static SDValue PerformVDIVCombine(SDNode *N, SelectionDAG &DAG, |
12034 | const ARMSubtarget *Subtarget) { |
12035 | if (!Subtarget->hasNEON()) |
12036 | return SDValue(); |
12037 | |
12038 | SDValue Op = N->getOperand(0); |
12039 | unsigned OpOpcode = Op.getNode()->getOpcode(); |
12040 | if (!N->getValueType(0).isVector() || !N->getValueType(0).isSimple() || |
12041 | (OpOpcode != ISD::SINT_TO_FP && OpOpcode != ISD::UINT_TO_FP)) |
12042 | return SDValue(); |
12043 | |
12044 | SDValue ConstVec = N->getOperand(1); |
12045 | if (!isa<BuildVectorSDNode>(ConstVec)) |
12046 | return SDValue(); |
12047 | |
12048 | MVT FloatTy = N->getSimpleValueType(0).getVectorElementType(); |
12049 | uint32_t FloatBits = FloatTy.getSizeInBits(); |
12050 | MVT IntTy = Op.getOperand(0).getSimpleValueType().getVectorElementType(); |
12051 | uint32_t IntBits = IntTy.getSizeInBits(); |
12052 | unsigned NumLanes = Op.getValueType().getVectorNumElements(); |
12053 | if (FloatBits != 32 || IntBits > 32 || NumLanes > 4) { |
12054 | // These instructions only exist converting from i32 to f32. We can handle |
12055 | // smaller integers by generating an extra extend, but larger ones would |
12056 | // be lossy. We also can't handle more than 4 lanes, since these instructions
12057 | // only support v2i32/v4i32 types. |
12058 | return SDValue(); |
12059 | } |
12060 | |
12061 | BitVector UndefElements; |
12062 | BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec); |
12063 | int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, 33); |
12064 | if (C == -1 || C == 0 || C > 32) |
12065 | return SDValue(); |
12066 | |
12067 | SDLoc dl(N); |
12068 | bool isSigned = OpOpcode == ISD::SINT_TO_FP; |
12069 | SDValue ConvInput = Op.getOperand(0); |
12070 | if (IntBits < FloatBits) |
12071 | ConvInput = DAG.getNode(isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, |
12072 | dl, NumLanes == 2 ? MVT::v2i32 : MVT::v4i32, |
12073 | ConvInput); |
12074 | |
12075 | unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp : |
12076 | Intrinsic::arm_neon_vcvtfxu2fp; |
12077 | return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, |
12078 | Op.getValueType(), |
12079 | DAG.getConstant(IntrinsicOpcode, dl, MVT::i32), |
12080 | ConvInput, DAG.getConstant(C, dl, MVT::i32)); |
12081 | } |
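// Worked example (illustrative):
//   fdiv (sitofp <2 x i32> %x), <8.0, 8.0>
// divides by 8.0 == 2^3, so it folds to
//   @llvm.arm.neon.vcvtfxs2fp(%x, i32 3)
// i.e. a single "vcvt.f32.s32 d0, d0, #3".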
12082 | |
12083 | /// getVShiftImm - Check if this is a valid build_vector for the immediate
12084 | /// operand of a vector shift operation, where all the elements of the |
12085 | /// build_vector must have the same constant integer value. |
12086 | static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) { |
12087 | // Ignore bit_converts. |
12088 | while (Op.getOpcode() == ISD::BITCAST) |
12089 | Op = Op.getOperand(0); |
12090 | BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); |
12091 | APInt SplatBits, SplatUndef; |
12092 | unsigned SplatBitSize; |
12093 | bool HasAnyUndefs; |
12094 | if (!BVN || !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
12095 | HasAnyUndefs, ElementBits) || |
12096 | SplatBitSize > ElementBits) |
12097 | return false; |
12098 | Cnt = SplatBits.getSExtValue(); |
12099 | return true; |
12100 | } |
12101 | |
12102 | /// isVShiftLImm - Check if this is a valid build_vector for the immediate |
12103 | /// operand of a vector shift left operation. That value must be in the range: |
12104 | /// 0 <= Value < ElementBits for a left shift; or |
12105 | /// 0 <= Value <= ElementBits for a long left shift. |
12106 | static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) { |
12107 | assert(VT.isVector() && "vector shift count is not a vector type");
12108 | int64_t ElementBits = VT.getScalarSizeInBits(); |
12109 | if (!getVShiftImm(Op, ElementBits, Cnt))
12110 | return false; |
12111 | return (Cnt >= 0 && (isLong ? Cnt-1 : Cnt) < ElementBits); |
12112 | } |
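// For example (illustrative): with v8i16 elements, an ordinary left shift
// accepts counts 0..15, while a long shift (isLong) also accepts 16, the
// shift-by-element-size form used by vshll.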
12113 | |
12114 | /// isVShiftRImm - Check if this is a valid build_vector for the immediate |
12115 | /// operand of a vector shift right operation. For a shift opcode, the value |
12116 | /// is positive, but for an intrinsic the shift count must be negative. The
12117 | /// absolute value must be in the range: |
12118 | /// 1 <= |Value| <= ElementBits for a right shift; or |
12119 | /// 1 <= |Value| <= ElementBits/2 for a narrow right shift. |
12120 | static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic, |
12121 | int64_t &Cnt) { |
12122 | assert(VT.isVector() && "vector shift count is not a vector type");
12123 | int64_t ElementBits = VT.getScalarSizeInBits(); |
12124 | if (!getVShiftImm(Op, ElementBits, Cnt))
12125 | return false; |
12126 | if (!isIntrinsic) |
12127 | return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits)); |
12128 | if (Cnt >= -(isNarrow ? ElementBits/2 : ElementBits) && Cnt <= -1) { |
12129 | Cnt = -Cnt; |
12130 | return true; |
12131 | } |
12132 | return false; |
12133 | } |
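// For example (illustrative): with v4i32 elements, a right shift accepts
// counts 1..32 (1..16 when narrowing); the NEON shift intrinsics instead
// encode right shifts as negative left-shift counts, so -32..-1 is accepted
// and negated here.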
12134 | |
12135 | /// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics. |
12136 | static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) { |
12137 | unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); |
12138 | switch (IntNo) { |
12139 | default: |
12140 | // Don't do anything for most intrinsics. |
12141 | break; |
12142 | |
12143 | // Vector shifts: check for immediate versions and lower them. |
12144 | // Note: This is done during DAG combining instead of DAG legalizing because |
12145 | // the build_vectors for 64-bit vector element shift counts are generally |
12146 | // not legal, and it is hard to see their values after they get legalized to |
12147 | // loads from a constant pool. |
12148 | case Intrinsic::arm_neon_vshifts: |
12149 | case Intrinsic::arm_neon_vshiftu: |
12150 | case Intrinsic::arm_neon_vrshifts: |
12151 | case Intrinsic::arm_neon_vrshiftu: |
12152 | case Intrinsic::arm_neon_vrshiftn: |
12153 | case Intrinsic::arm_neon_vqshifts: |
12154 | case Intrinsic::arm_neon_vqshiftu: |
12155 | case Intrinsic::arm_neon_vqshiftsu: |
12156 | case Intrinsic::arm_neon_vqshiftns: |
12157 | case Intrinsic::arm_neon_vqshiftnu: |
12158 | case Intrinsic::arm_neon_vqshiftnsu: |
12159 | case Intrinsic::arm_neon_vqrshiftns: |
12160 | case Intrinsic::arm_neon_vqrshiftnu: |
12161 | case Intrinsic::arm_neon_vqrshiftnsu: { |
12162 | EVT VT = N->getOperand(1).getValueType(); |
12163 | int64_t Cnt; |
12164 | unsigned VShiftOpc = 0; |
12165 | |
12166 | switch (IntNo) { |
12167 | case Intrinsic::arm_neon_vshifts: |
12168 | case Intrinsic::arm_neon_vshiftu: |
12169 | if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) { |
12170 | VShiftOpc = ARMISD::VSHL; |
12171 | break; |
12172 | } |
12173 | if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) { |
12174 | VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ? |
12175 | ARMISD::VSHRs : ARMISD::VSHRu); |
12176 | break; |
12177 | } |
12178 | return SDValue(); |
12179 | |
12180 | case Intrinsic::arm_neon_vrshifts: |
12181 | case Intrinsic::arm_neon_vrshiftu: |
12182 | if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) |
12183 | break; |
12184 | return SDValue(); |
12185 | |
12186 | case Intrinsic::arm_neon_vqshifts: |
12187 | case Intrinsic::arm_neon_vqshiftu: |
12188 | if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) |
12189 | break; |
12190 | return SDValue(); |
12191 | |
12192 | case Intrinsic::arm_neon_vqshiftsu: |
12193 | if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) |
12194 | break; |
12195 | llvm_unreachable("invalid shift count for vqshlu intrinsic")::llvm::llvm_unreachable_internal("invalid shift count for vqshlu intrinsic" , "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/ARM/ARMISelLowering.cpp" , 12195); |
12196 | |
12197 | case Intrinsic::arm_neon_vrshiftn: |
12198 | case Intrinsic::arm_neon_vqshiftns: |
12199 | case Intrinsic::arm_neon_vqshiftnu: |
12200 | case Intrinsic::arm_neon_vqshiftnsu: |
12201 | case Intrinsic::arm_neon_vqrshiftns: |
12202 | case Intrinsic::arm_neon_vqrshiftnu: |
12203 | case Intrinsic::arm_neon_vqrshiftnsu: |
12204 | // Narrowing shifts require an immediate right shift. |
12205 | if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt)) |
12206 | break; |
12207 | llvm_unreachable("invalid shift count for narrowing vector shift "::llvm::llvm_unreachable_internal("invalid shift count for narrowing vector shift " "intrinsic", "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/ARM/ARMISelLowering.cpp" , 12208) |
12208 | "intrinsic")::llvm::llvm_unreachable_internal("invalid shift count for narrowing vector shift " "intrinsic", "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/ARM/ARMISelLowering.cpp" , 12208); |
12209 | |
12210 | default: |
12211 | llvm_unreachable("unhandled vector shift")::llvm::llvm_unreachable_internal("unhandled vector shift", "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/ARM/ARMISelLowering.cpp" , 12211); |
12212 | } |
12213 | |
12214 | switch (IntNo) { |
12215 | case Intrinsic::arm_neon_vshifts: |
12216 | case Intrinsic::arm_neon_vshiftu: |
12217 | // Opcode already set above. |
12218 | break; |
12219 | case Intrinsic::arm_neon_vrshifts: |
12220 | VShiftOpc = ARMISD::VRSHRs; break; |
12221 | case Intrinsic::arm_neon_vrshiftu: |
12222 | VShiftOpc = ARMISD::VRSHRu; break; |
12223 | case Intrinsic::arm_neon_vrshiftn: |
12224 | VShiftOpc = ARMISD::VRSHRN; break; |
12225 | case Intrinsic::arm_neon_vqshifts: |
12226 | VShiftOpc = ARMISD::VQSHLs; break; |
12227 | case Intrinsic::arm_neon_vqshiftu: |
12228 | VShiftOpc = ARMISD::VQSHLu; break; |
12229 | case Intrinsic::arm_neon_vqshiftsu: |
12230 | VShiftOpc = ARMISD::VQSHLsu; break; |
12231 | case Intrinsic::arm_neon_vqshiftns: |
12232 | VShiftOpc = ARMISD::VQSHRNs; break; |
12233 | case Intrinsic::arm_neon_vqshiftnu: |
12234 | VShiftOpc = ARMISD::VQSHRNu; break; |
12235 | case Intrinsic::arm_neon_vqshiftnsu: |
12236 | VShiftOpc = ARMISD::VQSHRNsu; break; |
12237 | case Intrinsic::arm_neon_vqrshiftns: |
12238 | VShiftOpc = ARMISD::VQRSHRNs; break; |
12239 | case Intrinsic::arm_neon_vqrshiftnu: |
12240 | VShiftOpc = ARMISD::VQRSHRNu; break; |
12241 | case Intrinsic::arm_neon_vqrshiftnsu: |
12242 | VShiftOpc = ARMISD::VQRSHRNsu; break; |
12243 | } |
12244 | |
12245 | SDLoc dl(N); |
12246 | return DAG.getNode(VShiftOpc, dl, N->getValueType(0), |
12247 | N->getOperand(1), DAG.getConstant(Cnt, dl, MVT::i32)); |
12248 | } |
12249 | |
12250 | case Intrinsic::arm_neon_vshiftins: { |
12251 | EVT VT = N->getOperand(1).getValueType(); |
12252 | int64_t Cnt; |
12253 | unsigned VShiftOpc = 0; |
12254 | |
12255 | if (isVShiftLImm(N->getOperand(3), VT, false, Cnt)) |
12256 | VShiftOpc = ARMISD::VSLI; |
12257 | else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt)) |
12258 | VShiftOpc = ARMISD::VSRI; |
12259 | else { |
12260 | llvm_unreachable("invalid shift count for vsli/vsri intrinsic")::llvm::llvm_unreachable_internal("invalid shift count for vsli/vsri intrinsic" , "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/ARM/ARMISelLowering.cpp" , 12260); |
12261 | } |
12262 | |
12263 | SDLoc dl(N); |
12264 | return DAG.getNode(VShiftOpc, dl, N->getValueType(0), |
12265 | N->getOperand(1), N->getOperand(2), |
12266 | DAG.getConstant(Cnt, dl, MVT::i32)); |
12267 | } |
12268 | |
12269 | case Intrinsic::arm_neon_vqrshifts: |
12270 | case Intrinsic::arm_neon_vqrshiftu: |
12271 | // No immediate versions of these to check for. |
12272 | break; |
12273 | } |
12274 | |
12275 | return SDValue(); |
12276 | } |
12277 | |
12278 | /// PerformShiftCombine - Checks for immediate versions of vector shifts and |
12279 | /// lowers them. As with the vector shift intrinsics, this is done during DAG |
12280 | /// combining instead of DAG legalizing because the build_vectors for 64-bit |
12281 | /// vector element shift counts are generally not legal, and it is hard to see |
12282 | /// their values after they get legalized to loads from a constant pool. |
12283 | static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG, |
12284 | const ARMSubtarget *ST) { |
12285 | EVT VT = N->getValueType(0); |
12286 | if (N->getOpcode() == ISD::SRL && VT == MVT::i32 && ST->hasV6Ops()) { |
12287 | // Canonicalize (srl (bswap x), 16) to (rotr (bswap x), 16) if the high |
12288 | // 16-bits of x is zero. This optimizes rev + lsr 16 to rev16. |
12289 | SDValue N1 = N->getOperand(1); |
12290 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) { |
12291 | SDValue N0 = N->getOperand(0); |
12292 | if (C->getZExtValue() == 16 && N0.getOpcode() == ISD::BSWAP && |
12293 | DAG.MaskedValueIsZero(N0.getOperand(0), |
12294 | APInt::getHighBitsSet(32, 16))) |
12295 | return DAG.getNode(ISD::ROTR, SDLoc(N), VT, N0, N1); |
12296 | } |
12297 | } |
12298 | |
12299 | // Nothing to be done for scalar shifts. |
12300 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
12301 | if (!VT.isVector() || !TLI.isTypeLegal(VT)) |
12302 | return SDValue(); |
12303 | |
12304 | assert(ST->hasNEON() && "unexpected vector shift");
12305 | int64_t Cnt; |
12306 | |
12307 | switch (N->getOpcode()) { |
12308 | default: llvm_unreachable("unexpected shift opcode")::llvm::llvm_unreachable_internal("unexpected shift opcode", "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/ARM/ARMISelLowering.cpp" , 12308); |
12309 | |
12310 | case ISD::SHL: |
12311 | if (isVShiftLImm(N->getOperand(1), VT, false, Cnt)) { |
12312 | SDLoc dl(N); |
12313 | return DAG.getNode(ARMISD::VSHL, dl, VT, N->getOperand(0), |
12314 | DAG.getConstant(Cnt, dl, MVT::i32)); |
12315 | } |
12316 | break; |
12317 | |
12318 | case ISD::SRA: |
12319 | case ISD::SRL: |
12320 | if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) { |
12321 | unsigned VShiftOpc = (N->getOpcode() == ISD::SRA ? |
12322 | ARMISD::VSHRs : ARMISD::VSHRu); |
12323 | SDLoc dl(N); |
12324 | return DAG.getNode(VShiftOpc, dl, VT, N->getOperand(0), |
12325 | DAG.getConstant(Cnt, dl, MVT::i32)); |
12326 | } |
12327 | } |
12328 | return SDValue(); |
12329 | } |
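// Example of the REV16 canonicalization above (illustrative): when the top
// 16 bits of %x are known zero,
//   (srl (bswap %x), 16) -> (rotr (bswap %x), 16)
// and the rotate form matches the single "rev16" instruction instead of
// "rev" followed by "lsr #16".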
12330 | |
12331 | /// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND, |
12332 | /// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND. |
12333 | static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG, |
12334 | const ARMSubtarget *ST) { |
12335 | SDValue N0 = N->getOperand(0); |
12336 | |
12337 | // Check for sign- and zero-extensions of vector extract operations of 8- |
12338 | // and 16-bit vector elements. NEON supports these directly. They are |
12339 | // handled during DAG combining because type legalization will promote them |
12340 | // to 32-bit types and it is messy to recognize the operations after that. |
12341 | if (ST->hasNEON() && N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) { |
12342 | SDValue Vec = N0.getOperand(0); |
12343 | SDValue Lane = N0.getOperand(1); |
12344 | EVT VT = N->getValueType(0); |
12345 | EVT EltVT = N0.getValueType(); |
12346 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
12347 | |
12348 | if (VT == MVT::i32 && |
12349 | (EltVT == MVT::i8 || EltVT == MVT::i16) && |
12350 | TLI.isTypeLegal(Vec.getValueType()) && |
12351 | isa<ConstantSDNode>(Lane)) { |
12352 | |
12353 | unsigned Opc = 0; |
12354 | switch (N->getOpcode()) { |
12355 | default: llvm_unreachable("unexpected opcode")::llvm::llvm_unreachable_internal("unexpected opcode", "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/ARM/ARMISelLowering.cpp" , 12355); |
12356 | case ISD::SIGN_EXTEND: |
12357 | Opc = ARMISD::VGETLANEs; |
12358 | break; |
12359 | case ISD::ZERO_EXTEND: |
12360 | case ISD::ANY_EXTEND: |
12361 | Opc = ARMISD::VGETLANEu; |
12362 | break; |
12363 | } |
12364 | return DAG.getNode(Opc, SDLoc(N), VT, Vec, Lane); |
12365 | } |
12366 | } |
12367 | |
12368 | return SDValue(); |
12369 | } |
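// Example (illustrative): (sext (i16 extractelement (v4i16 %v), 1)) to i32
// becomes ARMISD::VGETLANEs(%v, 1), i.e. "vmov.s16 r0, d0[1]", avoiding a
// separate extend after type legalization has promoted the element to i32.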
12370 | |
12371 | static const APInt *isPowerOf2Constant(SDValue V) { |
12372 | ConstantSDNode *C = dyn_cast<ConstantSDNode>(V); |
12373 | if (!C) |
12374 | return nullptr; |
12375 | const APInt *CV = &C->getAPIntValue(); |
12376 | return CV->isPowerOf2() ? CV : nullptr; |
12377 | } |
12378 | |
12379 | SDValue ARMTargetLowering::PerformCMOVToBFICombine(SDNode *CMOV, SelectionDAG &DAG) const { |
12380 | // If we have a CMOV, OR and AND combination such as: |
12381 | // if (x & CN) |
12382 | // y |= CM; |
12383 | // |
12384 | // And: |
12385 | // * CN is a single bit; |
12386 | // * All bits covered by CM are known zero in y |
12387 | // |
12388 | // Then we can convert this into a sequence of BFI instructions. This will |
12389 | // always be a win if CM is a single bit, will always be no worse than the |
12390 | // TST&OR sequence if CM is two bits, and for thumb will be no worse if CM is |
12391 | // three bits (due to the extra IT instruction). |
12392 | |
12393 | SDValue Op0 = CMOV->getOperand(0); |
12394 | SDValue Op1 = CMOV->getOperand(1); |
12395 | auto CCNode = cast<ConstantSDNode>(CMOV->getOperand(2)); |
12396 | auto CC = CCNode->getAPIntValue().getLimitedValue(); |
12397 | SDValue CmpZ = CMOV->getOperand(4); |
12398 | |
12399 | // The compare must be against zero. |
12400 | if (!isNullConstant(CmpZ->getOperand(1))) |
12401 | return SDValue(); |
12402 | |
12403 | assert(CmpZ->getOpcode() == ARMISD::CMPZ);
12404 | SDValue And = CmpZ->getOperand(0); |
12405 | if (And->getOpcode() != ISD::AND) |
12406 | return SDValue(); |
12407 | const APInt *AndC = isPowerOf2Constant(And->getOperand(1)); |
12408 | if (!AndC) |
12409 | return SDValue(); |
12410 | SDValue X = And->getOperand(0); |
12411 | |
12412 | if (CC == ARMCC::EQ) { |
12413 | // We're performing an "equal to zero" compare. Swap the operands so we |
12414 | // canonicalize on a "not equal to zero" compare. |
12415 | std::swap(Op0, Op1); |
12416 | } else { |
12417 | assert(CC == ARMCC::NE && "How can a CMPZ node not be EQ or NE?");
12418 | } |
12419 | |
12420 | if (Op1->getOpcode() != ISD::OR) |
12421 | return SDValue(); |
12422 | |
12423 | ConstantSDNode *OrC = dyn_cast<ConstantSDNode>(Op1->getOperand(1)); |
12424 | if (!OrC) |
12425 | return SDValue(); |
12426 | SDValue Y = Op1->getOperand(0); |
12427 | |
12428 | if (Op0 != Y) |
12429 | return SDValue(); |
12430 | |
12431 | // Now, is it profitable to continue? |
12432 | APInt OrCI = OrC->getAPIntValue(); |
12433 | unsigned Heuristic = Subtarget->isThumb() ? 3 : 2; |
12434 | if (OrCI.countPopulation() > Heuristic) |
12435 | return SDValue(); |
12436 | |
12437 | // Lastly, can we determine that the bits defined by OrCI |
12438 | // are zero in Y? |
12439 | KnownBits Known; |
12440 | DAG.computeKnownBits(Y, Known); |
12441 | if ((OrCI & Known.Zero) != OrCI) |
12442 | return SDValue(); |
12443 | |
12444 | // OK, we can do the combine. |
12445 | SDValue V = Y; |
12446 | SDLoc dl(X); |
12447 | EVT VT = X.getValueType(); |
12448 | unsigned BitInX = AndC->logBase2(); |
12449 | |
12450 | if (BitInX != 0) { |
12451 | // We must shift X first. |
12452 | X = DAG.getNode(ISD::SRL, dl, VT, X, |
12453 | DAG.getConstant(BitInX, dl, VT)); |
12454 | } |
12455 | |
12456 | for (unsigned BitInY = 0, NumActiveBits = OrCI.getActiveBits(); |
12457 | BitInY < NumActiveBits; ++BitInY) { |
12458 | if (OrCI[BitInY] == 0) |
12459 | continue; |
12460 | APInt Mask(VT.getSizeInBits(), 0); |
12461 | Mask.setBit(BitInY); |
12462 | V = DAG.getNode(ARMISD::BFI, dl, VT, V, X, |
12463 | // Confusingly, the operand is an *inverted* mask. |
12464 | DAG.getConstant(~Mask, dl, VT)); |
12465 | } |
12466 | |
12467 | return V; |
12468 | } |
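// Worked example (illustrative): for
//   if (x & 4) y |= 0x30;   // CN == 1 << 2, CM == 0x30, bits 4-5 zero in y
// BitInX == 2, so X is shifted right by 2 first; the loop then emits two BFI
// nodes inserting that bit at positions 4 and 5 of y (with inverted masks
// ~0x10 and ~0x20), replacing a TST/ORR/CMOV sequence.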
12469 | |
12470 | /// PerformBRCONDCombine - Target-specific DAG combining for ARMISD::BRCOND. |
12471 | SDValue |
12472 | ARMTargetLowering::PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const { |
12473 | SDValue Cmp = N->getOperand(4); |
12474 | if (Cmp.getOpcode() != ARMISD::CMPZ) |
12475 | // Only looking at NE cases. |
12476 | return SDValue(); |
12477 | |
12478 | EVT VT = N->getValueType(0); |
12479 | SDLoc dl(N); |
12480 | SDValue LHS = Cmp.getOperand(0); |
12481 | SDValue RHS = Cmp.getOperand(1); |
12482 | SDValue Chain = N->getOperand(0); |
12483 | SDValue BB = N->getOperand(1); |
12484 | SDValue ARMcc = N->getOperand(2); |
12485 | ARMCC::CondCodes CC = |
12486 | (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue(); |
12487 | |
12488 | // (brcond Chain BB ne CPSR (cmpz (and (cmov 0 1 CC CPSR Cmp) 1) 0)) |
12489 | // -> (brcond Chain BB CC CPSR Cmp) |
12490 | if (CC == ARMCC::NE && LHS.getOpcode() == ISD::AND && LHS->hasOneUse() && |
12491 | LHS->getOperand(0)->getOpcode() == ARMISD::CMOV && |
12492 | LHS->getOperand(0)->hasOneUse()) { |
12493 | auto *LHS00C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)->getOperand(0)); |
12494 | auto *LHS01C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)->getOperand(1)); |
12495 | auto *LHS1C = dyn_cast<ConstantSDNode>(LHS->getOperand(1)); |
12496 | auto *RHSC = dyn_cast<ConstantSDNode>(RHS); |
12497 | if ((LHS00C && LHS00C->getZExtValue() == 0) && |
12498 | (LHS01C && LHS01C->getZExtValue() == 1) && |
12499 | (LHS1C && LHS1C->getZExtValue() == 1) && |
12500 | (RHSC && RHSC->getZExtValue() == 0)) { |
12501 | return DAG.getNode( |
12502 | ARMISD::BRCOND, dl, VT, Chain, BB, LHS->getOperand(0)->getOperand(2), |
12503 | LHS->getOperand(0)->getOperand(3), LHS->getOperand(0)->getOperand(4)); |
12504 | } |
12505 | } |
12506 | |
12507 | return SDValue(); |
12508 | } |
12509 | |
12510 | /// PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV. |
12511 | SDValue |
12512 | ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const { |
12513 | SDValue Cmp = N->getOperand(4); |
12514 | if (Cmp.getOpcode() != ARMISD::CMPZ) |
12515 | // Only looking at EQ and NE cases. |
12516 | return SDValue(); |
12517 | |
12518 | EVT VT = N->getValueType(0); |
12519 | SDLoc dl(N); |
12520 | SDValue LHS = Cmp.getOperand(0); |
12521 | SDValue RHS = Cmp.getOperand(1); |
12522 | SDValue FalseVal = N->getOperand(0); |
12523 | SDValue TrueVal = N->getOperand(1); |
12524 | SDValue ARMcc = N->getOperand(2); |
12525 | ARMCC::CondCodes CC = |
12526 | (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue(); |
12527 | |
12528 | // BFI is only available on V6T2+. |
12529 | if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops()) { |
12530 | SDValue R = PerformCMOVToBFICombine(N, DAG); |
12531 | if (R) |
12532 | return R; |
12533 | } |
12534 | |
12535 | // Simplify |
12536 | // mov r1, r0 |
12537 | // cmp r1, x |
12538 | // mov r0, y |
12539 | // moveq r0, x |
12540 | // to |
12541 | // cmp r0, x |
12542 | // movne r0, y |
12543 | // |
12544 | // mov r1, r0 |
12545 | // cmp r1, x |
12546 | // mov r0, x |
12547 | // movne r0, y |
12548 | // to |
12549 | // cmp r0, x |
12550 | // movne r0, y |
12551 | /// FIXME: Turn this into a target neutral optimization? |
12552 | SDValue Res; |
12553 | if (CC == ARMCC::NE && FalseVal == RHS && FalseVal != LHS) { |
12554 | Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, TrueVal, ARMcc, |
12555 | N->getOperand(3), Cmp); |
12556 | } else if (CC == ARMCC::EQ && TrueVal == RHS) { |
12557 | SDValue ARMcc; |
12558 | SDValue NewCmp = getARMCmp(LHS, RHS, ISD::SETNE, ARMcc, DAG, dl); |
12559 | Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, FalseVal, ARMcc, |
12560 | N->getOperand(3), NewCmp); |
12561 | } |
12562 | |
12563 | // (cmov F T ne CPSR (cmpz (cmov 0 1 CC CPSR Cmp) 0)) |
12564 | // -> (cmov F T CC CPSR Cmp) |
12565 | if (CC == ARMCC::NE && LHS.getOpcode() == ARMISD::CMOV && LHS->hasOneUse()) { |
12566 | auto *LHS0C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)); |
12567 | auto *LHS1C = dyn_cast<ConstantSDNode>(LHS->getOperand(1)); |
12568 | auto *RHSC = dyn_cast<ConstantSDNode>(RHS); |
12569 | if ((LHS0C && LHS0C->getZExtValue() == 0) && |
12570 | (LHS1C && LHS1C->getZExtValue() == 1) && |
12571 | (RHSC && RHSC->getZExtValue() == 0)) { |
12572 | return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, |
12573 | LHS->getOperand(2), LHS->getOperand(3), |
12574 | LHS->getOperand(4)); |
12575 | } |
12576 | } |
12577 | |
12578 | if (!VT.isInteger()) |
12579 | return SDValue(); |
12580 | |
12581 | // Materialize a boolean comparison for integers so we can avoid branching. |
12582 | if (isNullConstant(FalseVal)) { |
12583 | if (CC == ARMCC::EQ && isOneConstant(TrueVal)) { |
12584 | if (!Subtarget->isThumb1Only() && Subtarget->hasV5TOps()) { |
12585 | // If x == y then x - y == 0, and ARM's CLZ will return 32; shifting that
12586 | // right by 5 bits yields 1. Otherwise the result is 0.
12587 | // CMOV 0, 1, ==, (CMPZ x, y) -> SRL (CTLZ (SUB x, y)), 5 |
12588 | SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, LHS, RHS); |
12589 | Res = DAG.getNode(ISD::SRL, dl, VT, DAG.getNode(ISD::CTLZ, dl, VT, Sub), |
12590 | DAG.getConstant(5, dl, MVT::i32)); |
12591 | } else { |
12592 | // CMOV 0, 1, ==, (CMPZ x, y) -> |
12593 | // (ADDCARRY (SUB x, y), t:0, t:1) |
12594 | // where t = (SUBCARRY 0, (SUB x, y), 0) |
12595 | // |
12596 | // The SUBCARRY computes 0 - (x - y) and this will give a borrow when |
12597 | // x != y. In other words, a carry C == 1 when x == y, C == 0 |
12598 | // otherwise. |
12599 | // The final ADDCARRY computes |
12600 | // x - y + (0 - (x - y)) + C == C |
12601 | SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, LHS, RHS); |
12602 | SDVTList VTs = DAG.getVTList(VT, MVT::i32); |
12603 | SDValue Neg = DAG.getNode(ISD::USUBO, dl, VTs, FalseVal, Sub); |
12604 | // ISD::USUBO returns a borrow, but what we actually want here is
12605 | // the carry.
12606 | SDValue Carry = |
12607 | DAG.getNode(ISD::SUB, dl, MVT::i32, |
12608 | DAG.getConstant(1, dl, MVT::i32), Neg.getValue(1)); |
12609 | Res = DAG.getNode(ISD::ADDCARRY, dl, VTs, Sub, Neg, Carry); |
12610 | } |
12611 | } else if (CC == ARMCC::NE && LHS != RHS && |
12612 | (!Subtarget->isThumb1Only() || isPowerOf2Constant(TrueVal))) { |
12613 | // This seems pointless but will allow us to combine it further below. |
12614 | // CMOV 0, z, !=, (CMPZ x, y) -> CMOV (SUB x, y), z, !=, (CMPZ x, y) |
12615 | SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, LHS, RHS); |
12616 | Res = DAG.getNode(ARMISD::CMOV, dl, VT, Sub, TrueVal, ARMcc, |
12617 | N->getOperand(3), Cmp); |
12618 | } |
12619 | } else if (isNullConstant(TrueVal)) { |
12620 | if (CC == ARMCC::EQ && LHS != RHS && |
12621 | (!Subtarget->isThumb1Only() || isPowerOf2Constant(FalseVal))) { |
12622 | // This seems pointless but will allow us to combine it further below |
12623 | // Note that we change == for != as this is the dual for the case above. |
12624 | // CMOV z, 0, ==, (CMPZ x, y) -> CMOV (SUB x, y), z, !=, (CMPZ x, y) |
12625 | SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, LHS, RHS); |
12626 | Res = DAG.getNode(ARMISD::CMOV, dl, VT, Sub, FalseVal, |
12627 | DAG.getConstant(ARMCC::NE, dl, MVT::i32), |
12628 | N->getOperand(3), Cmp); |
12629 | } |
12630 | } |
12631 | |
12632 | // On Thumb1, the DAG above may be further combined if z is a power of 2 |
12633 | // (z == 2 ^ K). |
12634 | // CMOV (SUB x, y), z, !=, (CMPZ x, y) -> |
12635 | // merge t3, t4 |
12636 | // where t1 = (SUBCARRY (SUB x, y), z, 0) |
12637 | // t2 = (SUBCARRY (SUB x, y), t1:0, t1:1) |
12638 | // t3 = if K != 0 then (SHL t2:0, K) else t2:0 |
12639 | // t4 = (SUB 1, t2:1) [ we want a carry, not a borrow ] |
12640 | const APInt *TrueConst; |
12641 | if (Subtarget->isThumb1Only() && CC == ARMCC::NE && |
12642 | (FalseVal.getOpcode() == ISD::SUB) && (FalseVal.getOperand(0) == LHS) && |
12643 | (FalseVal.getOperand(1) == RHS) && |
12644 | (TrueConst = isPowerOf2Constant(TrueVal))) { |
12645 | SDVTList VTs = DAG.getVTList(VT, MVT::i32); |
12646 | unsigned ShiftAmount = TrueConst->logBase2(); |
12647 | if (ShiftAmount) |
12648 | TrueVal = DAG.getConstant(1, dl, VT); |
12649 | SDValue Subc = DAG.getNode(ISD::USUBO, dl, VTs, FalseVal, TrueVal); |
12650 | Res = DAG.getNode(ISD::SUBCARRY, dl, VTs, FalseVal, Subc, Subc.getValue(1)); |
12651 | // Make it a carry, not a borrow. |
12652 | SDValue Carry = DAG.getNode( |
12653 | ISD::SUB, dl, VT, DAG.getConstant(1, dl, MVT::i32), Res.getValue(1)); |
12654 | Res = DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Res, Carry); |
12655 | |
12656 | if (ShiftAmount) |
12657 | Res = DAG.getNode(ISD::SHL, dl, VT, Res, |
12658 | DAG.getConstant(ShiftAmount, dl, MVT::i32)); |
12659 | } |
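// Worked example of the Thumb1 path above (illustrative): for
//   CMOV (SUB x, y), 4, !=, (CMPZ x, y)
// TrueConst == 4 gives ShiftAmount == 2, and TrueVal is reset to 1. The
// USUBO computes (x - y) - 1 with a borrow exactly when x == y; the
// SUBCARRY then forms (x - y) - ((x - y) - 1) - borrow, which is 1 when
// x != y and 0 when x == y. The final SHL by 2 rebuilds the constant 4
// without a branch.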
12660 | |
12661 | if (Res.getNode()) { |
12662 | KnownBits Known; |
12663 | DAG.computeKnownBits(SDValue(N,0), Known); |
12664 | // Capture demanded bits information that would be otherwise lost. |
12665 | if (Known.Zero == 0xfffffffe) |
12666 | Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, |
12667 | DAG.getValueType(MVT::i1)); |
12668 | else if (Known.Zero == 0xffffff00) |
12669 | Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, |
12670 | DAG.getValueType(MVT::i8)); |
12671 | else if (Known.Zero == 0xffff0000) |
12672 | Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, |
12673 | DAG.getValueType(MVT::i16)); |
12674 | } |
12675 | |
12676 | return Res; |
12677 | } |
12678 | |
12679 | SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N, |
12680 | DAGCombinerInfo &DCI) const { |
12681 | switch (N->getOpcode()) { |
12682 | default: break; |
12683 | case ARMISD::ADDE: return PerformADDECombine(N, DCI, Subtarget); |
12684 | case ARMISD::UMLAL: return PerformUMLALCombine(N, DCI.DAG, Subtarget); |
12685 | case ISD::ADD: return PerformADDCombine(N, DCI, Subtarget); |
12686 | case ISD::SUB: return PerformSUBCombine(N, DCI); |
12687 | case ISD::MUL: return PerformMULCombine(N, DCI, Subtarget); |
12688 | case ISD::OR: return PerformORCombine(N, DCI, Subtarget); |
12689 | case ISD::XOR: return PerformXORCombine(N, DCI, Subtarget); |
12690 | case ISD::AND: return PerformANDCombine(N, DCI, Subtarget); |
12691 | case ARMISD::ADDC: |
12692 | case ARMISD::SUBC: return PerformAddcSubcCombine(N, DCI, Subtarget); |
12693 | case ARMISD::SUBE: return PerformAddeSubeCombine(N, DCI, Subtarget); |
12694 | case ARMISD::BFI: return PerformBFICombine(N, DCI); |
12695 | case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI, Subtarget); |
12696 | case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG); |
12697 | case ISD::STORE: return PerformSTORECombine(N, DCI); |
12698 | case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI, Subtarget); |
12699 | case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI); |
12700 | case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG); |
12701 | case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI); |
12702 | case ARMISD::VDUP: return PerformVDUPCombine(N, DCI); |
12703 | case ISD::FP_TO_SINT: |
12704 | case ISD::FP_TO_UINT: |
12705 | return PerformVCVTCombine(N, DCI.DAG, Subtarget); |
12706 | case ISD::FDIV: |
12707 | return PerformVDIVCombine(N, DCI.DAG, Subtarget); |
12708 | case ISD::INTRINSIC_WO_CHAIN: return PerformIntrinsicCombine(N, DCI.DAG); |
12709 | case ISD::SHL: |
12710 | case ISD::SRA: |
12711 | case ISD::SRL: return PerformShiftCombine(N, DCI.DAG, Subtarget); |
12712 | case ISD::SIGN_EXTEND: |
12713 | case ISD::ZERO_EXTEND: |
12714 | case ISD::ANY_EXTEND: return PerformExtendCombine(N, DCI.DAG, Subtarget); |
12715 | case ARMISD::CMOV: return PerformCMOVCombine(N, DCI.DAG); |
12716 | case ARMISD::BRCOND: return PerformBRCONDCombine(N, DCI.DAG); |
12717 | case ISD::LOAD: return PerformLOADCombine(N, DCI); |
12718 | case ARMISD::VLD1DUP: |
12719 | case ARMISD::VLD2DUP: |
12720 | case ARMISD::VLD3DUP: |
12721 | case ARMISD::VLD4DUP: |
12722 | return PerformVLDCombine(N, DCI); |
12723 | case ARMISD::BUILD_VECTOR: |
12724 | return PerformARMBUILD_VECTORCombine(N, DCI); |
12725 | case ARMISD::SMULWB: { |
12726 | unsigned BitWidth = N->getValueType(0).getSizeInBits(); |
12727 | APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16); |
12728 | if (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI)) |
12729 | return SDValue(); |
12730 | break; |
12731 | } |
12732 | case ARMISD::SMULWT: { |
12733 | unsigned BitWidth = N->getValueType(0).getSizeInBits(); |
12734 | APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 16); |
12735 | if (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI)) |
12736 | return SDValue(); |
12737 | break; |
12738 | } |
12739 | case ARMISD::SMLALBB: { |
12740 | unsigned BitWidth = N->getValueType(0).getSizeInBits(); |
12741 | APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16); |
12742 | if ((SimplifyDemandedBits(N->getOperand(0), DemandedMask, DCI)) || |
12743 | (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI))) |
12744 | return SDValue(); |
12745 | break; |
12746 | } |
12747 | case ARMISD::SMLALBT: { |
12748 | unsigned LowWidth = N->getOperand(0).getValueType().getSizeInBits(); |
12749 | APInt LowMask = APInt::getLowBitsSet(LowWidth, 16); |
12750 | unsigned HighWidth = N->getOperand(1).getValueType().getSizeInBits(); |
12751 | APInt HighMask = APInt::getHighBitsSet(HighWidth, 16); |
12752 | if ((SimplifyDemandedBits(N->getOperand(0), LowMask, DCI)) || |
12753 | (SimplifyDemandedBits(N->getOperand(1), HighMask, DCI))) |
12754 | return SDValue(); |
12755 | break; |
12756 | } |
12757 | case ARMISD::SMLALTB: { |
12758 | unsigned HighWidth = N->getOperand(0).getValueType().getSizeInBits(); |
12759 | APInt HighMask = APInt::getHighBitsSet(HighWidth, 16); |
12760 | unsigned LowWidth = N->getOperand(1).getValueType().getSizeInBits(); |
12761 | APInt LowMask = APInt::getLowBitsSet(LowWidth, 16); |
12762 | if ((SimplifyDemandedBits(N->getOperand(0), HighMask, DCI)) || |
12763 | (SimplifyDemandedBits(N->getOperand(1), LowMask, DCI))) |
12764 | return SDValue(); |
12765 | break; |
12766 | } |
12767 | case ARMISD::SMLALTT: { |
12768 | unsigned BitWidth = N->getValueType(0).getSizeInBits(); |
12769 | APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 16); |
12770 | if ((SimplifyDemandedBits(N->getOperand(0), DemandedMask, DCI)) || |
12771 | (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI))) |
12772 | return SDValue(); |
12773 | break; |
12774 | } |
12775 | case ISD::INTRINSIC_VOID: |
12776 | case ISD::INTRINSIC_W_CHAIN: |
12777 | switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { |
12778 | case Intrinsic::arm_neon_vld1: |
12779 | case Intrinsic::arm_neon_vld2: |
12780 | case Intrinsic::arm_neon_vld3: |
12781 | case Intrinsic::arm_neon_vld4: |
12782 | case Intrinsic::arm_neon_vld2lane: |
12783 | case Intrinsic::arm_neon_vld3lane: |
12784 | case Intrinsic::arm_neon_vld4lane: |
12785 | case Intrinsic::arm_neon_vst1: |
12786 | case Intrinsic::arm_neon_vst2: |
12787 | case Intrinsic::arm_neon_vst3: |
12788 | case Intrinsic::arm_neon_vst4: |
12789 | case Intrinsic::arm_neon_vst2lane: |
12790 | case Intrinsic::arm_neon_vst3lane: |
12791 | case Intrinsic::arm_neon_vst4lane: |
12792 | return PerformVLDCombine(N, DCI); |
12793 | default: break; |
12794 | } |
12795 | break; |
12796 | } |
12797 | return SDValue(); |
12798 | } |
12799 | |
12800 | bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc, |
12801 | EVT VT) const { |
12802 | return (VT == MVT::f32) && (Opc == ISD::LOAD || Opc == ISD::STORE); |
12803 | } |
12804 | |
12805 | bool ARMTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, |
12806 | unsigned, |
12807 | unsigned, |
12808 | bool *Fast) const { |
12809 | // The AllowsUnaligned flag models the SCTLR.A setting in ARM CPUs.
12810 | bool AllowsUnaligned = Subtarget->allowsUnalignedMem(); |
12811 | |
12812 | switch (VT.getSimpleVT().SimpleTy) { |
12813 | default: |
12814 | return false; |
12815 | case MVT::i8: |
12816 | case MVT::i16: |
12817 | case MVT::i32: { |
12818 | // Unaligned access can use (for example) LDRB, LDRH, LDR
12819 | if (AllowsUnaligned) { |
12820 | if (Fast) |
12821 | *Fast = Subtarget->hasV7Ops(); |
12822 | return true; |
12823 | } |
12824 | return false; |
12825 | } |
12826 | case MVT::f64: |
12827 | case MVT::v2f64: { |
12828 | // For any little-endian targets with neon, we can support unaligned ld/st |
12829 | // of D and Q (e.g. {D0,D1}) registers by using vld1.i8/vst1.i8. |
12830 | // A big-endian target may also explicitly support unaligned accesses |
12831 | if (Subtarget->hasNEON() && (AllowsUnaligned || Subtarget->isLittle())) { |
12832 | if (Fast) |
12833 | *Fast = true; |
12834 | return true; |
12835 | } |
12836 | return false; |
12837 | } |
12838 | } |
12839 | } |
12840 | |
12841 | static bool memOpAlign(unsigned DstAlign, unsigned SrcAlign, |
12842 | unsigned AlignCheck) { |
12843 | return ((SrcAlign == 0 || SrcAlign % AlignCheck == 0) && |
12844 | (DstAlign == 0 || DstAlign % AlignCheck == 0)); |
12845 | } |
12846 | |
12847 | EVT ARMTargetLowering::getOptimalMemOpType(uint64_t Size, |
12848 | unsigned DstAlign, unsigned SrcAlign, |
12849 | bool IsMemset, bool ZeroMemset, |
12850 | bool MemcpyStrSrc, |
12851 | MachineFunction &MF) const { |
12852 | const Function &F = MF.getFunction(); |
12853 | |
12854 | // See if we can use NEON instructions for this... |
12855 | if ((!IsMemset || ZeroMemset) && Subtarget->hasNEON() && |
12856 | !F.hasFnAttribute(Attribute::NoImplicitFloat)) { |
12857 | bool Fast; |
12858 | if (Size >= 16 && |
12859 | (memOpAlign(SrcAlign, DstAlign, 16) || |
12860 | (allowsMisalignedMemoryAccesses(MVT::v2f64, 0, 1, &Fast) && Fast))) { |
12861 | return MVT::v2f64; |
12862 | } else if (Size >= 8 && |
12863 | (memOpAlign(SrcAlign, DstAlign, 8) || |
12864 | (allowsMisalignedMemoryAccesses(MVT::f64, 0, 1, &Fast) && |
12865 | Fast))) { |
12866 | return MVT::f64; |
12867 | } |
12868 | } |
12869 | |
12870 | // Let the target-independent logic figure it out. |
12871 | return MVT::Other; |
12872 | } |
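      | // Worked example (illustrative, derived from the checks above): a
      | // 32-byte memcpy whose source and destination are both 16-byte
      | // aligned on a NEON target returns MVT::v2f64 here, so the copy is
      | // emitted as two 128-bit load/store pairs rather than eight 32-bit
      | // integer ones.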
12873 | |
12874 | // 64-bit integers are split into their high and low parts and held in two |
12875 | // different registers, so the trunc is free since the low register can just |
12876 | // be used. |
12877 | bool ARMTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const { |
12878 | if (!SrcTy->isIntegerTy() || !DstTy->isIntegerTy()) |
12879 | return false; |
12880 | unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); |
12881 | unsigned DestBits = DstTy->getPrimitiveSizeInBits(); |
12882 | return (SrcBits == 64 && DestBits == 32); |
12883 | } |
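      | // Illustration: for IR such as
      | //   %lo = trunc i64 %x to i32
      | // the i64 already occupies a {lo, hi} register pair, so the trunc
      | // reuses the low register and emits no instruction.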
12884 | |
12885 | bool ARMTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const { |
12886 | if (SrcVT.isVector() || DstVT.isVector() || !SrcVT.isInteger() || |
12887 | !DstVT.isInteger()) |
12888 | return false; |
12889 | unsigned SrcBits = SrcVT.getSizeInBits(); |
12890 | unsigned DestBits = DstVT.getSizeInBits(); |
12891 | return (SrcBits == 64 && DestBits == 32); |
12892 | } |
12893 | |
12894 | bool ARMTargetLowering::isZExtFree(SDValue Val, EVT VT2) const { |
12895 | if (Val.getOpcode() != ISD::LOAD) |
12896 | return false; |
12897 | |
12898 | EVT VT1 = Val.getValueType(); |
12899 | if (!VT1.isSimple() || !VT1.isInteger() || |
12900 | !VT2.isSimple() || !VT2.isInteger()) |
12901 | return false; |
12902 | |
12903 | switch (VT1.getSimpleVT().SimpleTy) { |
12904 | default: break; |
12905 | case MVT::i1: |
12906 | case MVT::i8: |
12907 | case MVT::i16: |
12908 | // 8-bit and 16-bit loads implicitly zero-extend to 32 bits.
12909 | return true; |
12910 | } |
12911 | |
12912 | return false; |
12913 | } |
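      | // Illustration: LDRB/LDRH already zero-extend into the full 32-bit
      | // register, so a zero-extending i8/i16 load needs no extra UXTB/UXTH
      | // after it.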
12914 | |
12915 | bool ARMTargetLowering::isFNegFree(EVT VT) const { |
12916 | if (!VT.isSimple()) |
12917 | return false; |
12918 | |
12919 | // There are quite a few FP16 instructions (e.g. VNMLA, VNMLS, etc.) that
12920 | // can negate values directly (fneg is free). So we don't want the DAG
12921 | // combiner to rewrite fneg into xors and other such instructions. For f16
12922 | // and FullFP16 argument passing, some bitcast nodes may be introduced,
12923 | // triggering this DAG combine rewrite, so we avoid that here.
12924 | switch (VT.getSimpleVT().SimpleTy) { |
12925 | default: break; |
12926 | case MVT::f16: |
12927 | return Subtarget->hasFullFP16(); |
12928 | } |
12929 | |
12930 | return false; |
12931 | } |
12932 | |
12933 | bool ARMTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const { |
12934 | EVT VT = ExtVal.getValueType(); |
12935 | |
12936 | if (!isTypeLegal(VT)) |
12937 | return false; |
12938 | |
12939 | // Don't create a loadext if we can fold the extension into a wide/long |
12940 | // instruction. |
12941 | // If there's more than one user instruction, the loadext is desirable no |
12942 | // matter what. There can be two uses by the same instruction. |
12943 | if (ExtVal->use_empty() || |
12944 | !ExtVal->use_begin()->isOnlyUserOf(ExtVal.getNode())) |
12945 | return true; |
12946 | |
12947 | SDNode *U = *ExtVal->use_begin(); |
12948 | if ((U->getOpcode() == ISD::ADD || U->getOpcode() == ISD::SUB || |
12949 | U->getOpcode() == ISD::SHL || U->getOpcode() == ARMISD::VSHL)) |
12950 | return false; |
12951 | |
12952 | return true; |
12953 | } |
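      | // Illustration: a sext/zext of a load whose only user is an ADD can
      | // later match a long instruction such as VADDL, which performs the
      | // widening itself, so a separate extending load would be redundant.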
12954 | |
12955 | bool ARMTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const { |
12956 | if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) |
12957 | return false; |
12958 | |
12959 | if (!isTypeLegal(EVT::getEVT(Ty1))) |
12960 | return false; |
12961 | |
12962 | assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
12963 | |
12964 | // Assuming the caller doesn't have a zeroext or signext return parameter, |
12965 | // truncation all the way down to i1 is valid. |
12966 | return true; |
12967 | } |
12968 | |
12969 | int ARMTargetLowering::getScalingFactorCost(const DataLayout &DL, |
12970 | const AddrMode &AM, Type *Ty, |
12971 | unsigned AS) const { |
12972 | if (isLegalAddressingMode(DL, AM, Ty, AS)) { |
12973 | if (Subtarget->hasFPAO()) |
12974 | return AM.Scale < 0 ? 1 : 0; // positive offsets execute faster |
12975 | return 0; |
12976 | } |
12977 | return -1; |
12978 | } |
12979 | |
12980 | static bool isLegalT1AddressImmediate(int64_t V, EVT VT) { |
12981 | if (V < 0) |
12982 | return false; |
12983 | |
12984 | unsigned Scale = 1; |
12985 | switch (VT.getSimpleVT().SimpleTy) { |
12986 | default: return false; |
12987 | case MVT::i1: |
12988 | case MVT::i8: |
12989 | // Scale == 1; |
12990 | break; |
12991 | case MVT::i16: |
12992 | // Scale == 2; |
12993 | Scale = 2; |
12994 | break; |
12995 | case MVT::i32: |
12996 | // Scale == 4; |
12997 | Scale = 4; |
12998 | break; |
12999 | } |
13000 | |
13001 | if ((V & (Scale - 1)) != 0) |
13002 | return false; |
13003 | V /= Scale; |
13004 | return V == (V & ((1LL << 5) - 1)); |
13005 | } |
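      | // Worked example: Thumb1 LDR/STR encode a 5-bit immediate scaled by
      | // the access size, so for MVT::i32 the legal offsets are 0, 4, ...,
      | // 124. V == 124 passes (124 / 4 == 31 fits in 5 bits), V == 126
      | // fails the alignment check, and V == 128 fails the range check.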
13006 | |
13007 | static bool isLegalT2AddressImmediate(int64_t V, EVT VT, |
13008 | const ARMSubtarget *Subtarget) { |
13009 | bool isNeg = false; |
13010 | if (V < 0) { |
13011 | isNeg = true; |
13012 | V = - V; |
13013 | } |
13014 | |
13015 | switch (VT.getSimpleVT().SimpleTy) { |
13016 | default: return false; |
13017 | case MVT::i1: |
13018 | case MVT::i8: |
13019 | case MVT::i16: |
13020 | case MVT::i32: |
13021 | // + imm12 or - imm8 |
13022 | if (isNeg) |
13023 | return V == (V & ((1LL << 8) - 1)); |
13024 | return V == (V & ((1LL << 12) - 1)); |
13025 | case MVT::f32: |
13026 | case MVT::f64: |
13027 | // Same as ARM mode. FIXME: NEON? |
13028 | if (!Subtarget->hasVFP2()) |
13029 | return false; |
13030 | if ((V & 3) != 0) |
13031 | return false; |
13032 | V >>= 2; |
13033 | return V == (V & ((1LL << 8) - 1)); |
13034 | } |
13035 | } |
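      | // Worked example: for an i32 access in Thumb2, V == 4095 is legal
      | // (fits imm12), V == -255 is legal (fits the negative imm8 form),
      | // and V == -256 is rejected, matching the "+ imm12 or - imm8" note
      | // above.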
13036 | |
13037 | /// isLegalAddressImmediate - Return true if the integer value can be used |
13038 | /// as the offset of the target addressing mode for load / store of the |
13039 | /// given type. |
13040 | static bool isLegalAddressImmediate(int64_t V, EVT VT, |
13041 | const ARMSubtarget *Subtarget) { |
13042 | if (V == 0) |
13043 | return true; |
13044 | |
13045 | if (!VT.isSimple()) |
13046 | return false; |
13047 | |
13048 | if (Subtarget->isThumb1Only()) |
13049 | return isLegalT1AddressImmediate(V, VT); |
13050 | else if (Subtarget->isThumb2()) |
13051 | return isLegalT2AddressImmediate(V, VT, Subtarget); |
13052 | |
13053 | // ARM mode. |
13054 | if (V < 0) |
13055 | V = - V; |
13056 | switch (VT.getSimpleVT().SimpleTy) { |
13057 | default: return false; |
13058 | case MVT::i1: |
13059 | case MVT::i8: |
13060 | case MVT::i32: |
13061 | // +- imm12 |
13062 | return V == (V & ((1LL << 12) - 1)); |
13063 | case MVT::i16: |
13064 | // +- imm8 |
13065 | return V == (V & ((1LL << 8) - 1)); |
13066 | case MVT::f32: |
13067 | case MVT::f64: |
13068 | if (!Subtarget->hasVFP2()) // FIXME: NEON? |
13069 | return false; |
13070 | if ((V & 3) != 0) |
13071 | return false; |
13072 | V >>= 2; |
13073 | return V == (V & ((1LL << 8) - 1)); |
13074 | } |
13075 | } |
13076 | |
13077 | bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM, |
13078 | EVT VT) const { |
13079 | int Scale = AM.Scale; |
13080 | if (Scale < 0) |
13081 | return false; |
13082 | |
13083 | switch (VT.getSimpleVT().SimpleTy) { |
13084 | default: return false; |
13085 | case MVT::i1: |
13086 | case MVT::i8: |
13087 | case MVT::i16: |
13088 | case MVT::i32: |
13089 | if (Scale == 1) |
13090 | return true; |
13091 | // r + r << imm |
13092 | Scale = Scale & ~1; |
13093 | return Scale == 2 || Scale == 4 || Scale == 8; |
13094 | case MVT::i64: |
13095 | // FIXME: What are we trying to model here? ldrd doesn't have an r + r |
13096 | // version in Thumb mode. |
13097 | // r + r |
13098 | if (Scale == 1) |
13099 | return true; |
13100 | // r * 2 (this can be lowered to r + r). |
13101 | if (!AM.HasBaseReg && Scale == 2) |
13102 | return true; |
13103 | return false; |
13104 | case MVT::isVoid: |
13105 | // Note, we allow "void" uses (basically, uses that aren't loads or |
13106 | // stores), because ARM allows folding a scale into many arithmetic
13107 | // operations. This should be made more precise and revisited later. |
13108 | |
13109 | // Allow r << imm, but the imm has to be a multiple of two. |
13110 | if (Scale & 1) return false; |
13111 | return isPowerOf2_32(Scale); |
13112 | } |
13113 | } |
13114 | |
13115 | bool ARMTargetLowering::isLegalT1ScaledAddressingMode(const AddrMode &AM, |
13116 | EVT VT) const { |
13117 | const int Scale = AM.Scale; |
13118 | |
13119 | // Negative scales are not supported in Thumb1. |
13120 | if (Scale < 0) |
13121 | return false; |
13122 | |
13123 | // Thumb1 addressing modes do not support register scaling, except in
13124 | // the following cases:
13125 | // 1. Scale == 1 means no scaling. |
13126 | // 2. Scale == 2 this can be lowered to r + r if there is no base register. |
13127 | return (Scale == 1) || (!AM.HasBaseReg && Scale == 2); |
13128 | } |
13129 | |
13130 | /// isLegalAddressingMode - Return true if the addressing mode represented |
13131 | /// by AM is legal for this target, for a load/store of the specified type. |
13132 | bool ARMTargetLowering::isLegalAddressingMode(const DataLayout &DL, |
13133 | const AddrMode &AM, Type *Ty, |
13134 | unsigned AS, Instruction *I) const { |
13135 | EVT VT = getValueType(DL, Ty, true); |
13136 | if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget)) |
13137 | return false; |
13138 | |
13139 | // Can never fold addr of global into load/store. |
13140 | if (AM.BaseGV) |
13141 | return false; |
13142 | |
13143 | switch (AM.Scale) { |
13144 | case 0: // no scale reg, must be "r+i" or "r", or "i". |
13145 | break; |
13146 | default: |
13147 | // ARM doesn't support any R+R*scale+imm addr modes. |
13148 | if (AM.BaseOffs) |
13149 | return false; |
13150 | |
13151 | if (!VT.isSimple()) |
13152 | return false; |
13153 | |
13154 | if (Subtarget->isThumb1Only()) |
13155 | return isLegalT1ScaledAddressingMode(AM, VT); |
13156 | |
13157 | if (Subtarget->isThumb2()) |
13158 | return isLegalT2ScaledAddressingMode(AM, VT); |
13159 | |
13160 | int Scale = AM.Scale; |
13161 | switch (VT.getSimpleVT().SimpleTy) { |
13162 | default: return false; |
13163 | case MVT::i1: |
13164 | case MVT::i8: |
13165 | case MVT::i32: |
13166 | if (Scale < 0) Scale = -Scale; |
13167 | if (Scale == 1) |
13168 | return true; |
13169 | // r + r << imm |
13170 | return isPowerOf2_32(Scale & ~1); |
13171 | case MVT::i16: |
13172 | case MVT::i64: |
13173 | // r +/- r |
13174 | if (Scale == 1 || (AM.HasBaseReg && Scale == -1)) |
13175 | return true; |
13176 | // r * 2 (this can be lowered to r + r). |
13177 | if (!AM.HasBaseReg && Scale == 2) |
13178 | return true; |
13179 | return false; |
13180 | |
13181 | case MVT::isVoid: |
13182 | // Note, we allow "void" uses (basically, uses that aren't loads or |
13183 | // stores), because ARM allows folding a scale into many arithmetic
13184 | // operations. This should be made more precise and revisited later. |
13185 | |
13186 | // Allow r << imm, but the imm has to be a multiple of two. |
13187 | if (Scale & 1) return false; |
13188 | return isPowerOf2_32(Scale); |
13189 | } |
13190 | } |
13191 | return true; |
13192 | } |
13193 | |
13194 | /// isLegalICmpImmediate - Return true if the specified immediate is a
13195 | /// legal icmp immediate, that is, the target has icmp instructions which
13196 | /// can compare a register against the immediate without having to
13197 | /// materialize the immediate into a register.
13198 | bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const { |
13199 | // Thumb2 and ARM modes can use cmn for negative immediates. |
13200 | if (!Subtarget->isThumb()) |
13201 | return ARM_AM::getSOImmVal(std::abs(Imm)) != -1; |
13202 | if (Subtarget->isThumb2()) |
13203 | return ARM_AM::getT2SOImmVal(std::abs(Imm)) != -1; |
13204 | // Thumb1 doesn't have cmn, and only 8-bit immediates. |
13205 | return Imm >= 0 && Imm <= 255; |
13206 | } |
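      | // Illustration: on ARM and Thumb2, Imm == -1 is legal because the
      | // comparison can be emitted as CMN (compare negative), e.g.
      | //   cmp r0, #-1  ->  cmn r0, #1
      | // Thumb1 has no CMN, so only 0..255 is accepted there.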
13207 | |
13208 | /// isLegalAddImmediate - Return true if the specified immediate is a legal
13209 | /// add *or sub* immediate, that is, the target has add or sub instructions
13210 | /// which can add the immediate to a register without having to materialize
13211 | /// it into a register.
13212 | bool ARMTargetLowering::isLegalAddImmediate(int64_t Imm) const { |
13213 | // Same encoding for add/sub, just flip the sign. |
13214 | int64_t AbsImm = std::abs(Imm); |
13215 | if (!Subtarget->isThumb()) |
13216 | return ARM_AM::getSOImmVal(AbsImm) != -1; |
13217 | if (Subtarget->isThumb2()) |
13218 | return ARM_AM::getT2SOImmVal(AbsImm) != -1; |
13219 | // Thumb1 only has 8-bit unsigned immediate. |
13220 | return AbsImm >= 0 && AbsImm <= 255; |
13221 | } |
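      | // Illustration: since ADD and SUB share the same immediate encoding,
      | // an add of #-8 is legal whenever a sub of #8 is; conceptually the
      | // selector flips the opcode, which is why abs(Imm) is tested here.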
13222 | |
13223 | static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT, |
13224 | bool isSEXTLoad, SDValue &Base, |
13225 | SDValue &Offset, bool &isInc, |
13226 | SelectionDAG &DAG) { |
13227 | if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) |
13228 | return false; |
13229 | |
13230 | if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) { |
13231 | // AddressingMode 3 |
13232 | Base = Ptr->getOperand(0); |
13233 | if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { |
13234 | int RHSC = (int)RHS->getZExtValue(); |
13235 | if (RHSC < 0 && RHSC > -256) { |
13236 | assert(Ptr->getOpcode() == ISD::ADD);
13237 | isInc = false; |
13238 | Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0)); |
13239 | return true; |
13240 | } |
13241 | } |
13242 | isInc = (Ptr->getOpcode() == ISD::ADD); |
13243 | Offset = Ptr->getOperand(1); |
13244 | return true; |
13245 | } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) { |
13246 | // AddressingMode 2 |
13247 | if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { |
13248 | int RHSC = (int)RHS->getZExtValue(); |
13249 | if (RHSC < 0 && RHSC > -0x1000) { |
13250 | assert(Ptr->getOpcode() == ISD::ADD);
13251 | isInc = false; |
13252 | Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0)); |
13253 | Base = Ptr->getOperand(0); |
13254 | return true; |
13255 | } |
13256 | } |
13257 | |
13258 | if (Ptr->getOpcode() == ISD::ADD) { |
13259 | isInc = true; |
13260 | ARM_AM::ShiftOpc ShOpcVal =
13261 | ARM_AM::getShiftOpcForNode(Ptr->getOperand(0).getOpcode()); |
13262 | if (ShOpcVal != ARM_AM::no_shift) { |
13263 | Base = Ptr->getOperand(1); |
13264 | Offset = Ptr->getOperand(0); |
13265 | } else { |
13266 | Base = Ptr->getOperand(0); |
13267 | Offset = Ptr->getOperand(1); |
13268 | } |
13269 | return true; |
13270 | } |
13271 | |
13272 | isInc = (Ptr->getOpcode() == ISD::ADD); |
13273 | Base = Ptr->getOperand(0); |
13274 | Offset = Ptr->getOperand(1); |
13275 | return true; |
13276 | } |
13277 | |
13278 | // FIXME: Use VLDM / VSTM to emulate indexed FP load / store. |
13279 | return false; |
13280 | } |
13281 | |
13282 | static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT, |
13283 | bool isSEXTLoad, SDValue &Base, |
13284 | SDValue &Offset, bool &isInc, |
13285 | SelectionDAG &DAG) { |
13286 | if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) |
13287 | return false; |
13288 | |
13289 | Base = Ptr->getOperand(0); |
13290 | if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { |
13291 | int RHSC = (int)RHS->getZExtValue(); |
13292 | if (RHSC < 0 && RHSC > -0x100) { // 8 bits. |
13293 | assert(Ptr->getOpcode() == ISD::ADD);
13294 | isInc = false; |
13295 | Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0)); |
13296 | return true; |
13297 | } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero. |
13298 | isInc = Ptr->getOpcode() == ISD::ADD; |
13299 | Offset = DAG.getConstant(RHSC, SDLoc(Ptr), RHS->getValueType(0)); |
13300 | return true; |
13301 | } |
13302 | } |
13303 | |
13304 | return false; |
13305 | } |
13306 | |
13307 | /// getPreIndexedAddressParts - returns true, with the base pointer, offset
13308 | /// pointer, and addressing mode returned by reference, if the node's address
13309 | /// can be legally represented as a pre-indexed load / store address.
13310 | bool |
13311 | ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, |
13312 | SDValue &Offset, |
13313 | ISD::MemIndexedMode &AM, |
13314 | SelectionDAG &DAG) const { |
13315 | if (Subtarget->isThumb1Only()) |
13316 | return false; |
13317 | |
13318 | EVT VT; |
13319 | SDValue Ptr; |
13320 | bool isSEXTLoad = false; |
13321 | if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { |
13322 | Ptr = LD->getBasePtr(); |
13323 | VT = LD->getMemoryVT(); |
13324 | isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; |
13325 | } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { |
13326 | Ptr = ST->getBasePtr(); |
13327 | VT = ST->getMemoryVT(); |
13328 | } else |
13329 | return false; |
13330 | |
13331 | bool isInc; |
13332 | bool isLegal = false; |
13333 | if (Subtarget->isThumb2()) |
13334 | isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, |
13335 | Offset, isInc, DAG); |
13336 | else |
13337 | isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, |
13338 | Offset, isInc, DAG); |
13339 | if (!isLegal) |
13340 | return false; |
13341 | |
13342 | AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC; |
13343 | return true; |
13344 | } |
13345 | |
13346 | /// getPostIndexedAddressParts - returns true, with the base pointer, offset
13347 | /// pointer, and addressing mode returned by reference, if this node can be
13348 | /// combined with a load / store to form a post-indexed load / store.
13349 | bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op, |
13350 | SDValue &Base, |
13351 | SDValue &Offset, |
13352 | ISD::MemIndexedMode &AM, |
13353 | SelectionDAG &DAG) const { |
13354 | EVT VT; |
13355 | SDValue Ptr; |
13356 | bool isSEXTLoad = false, isNonExt; |
13357 | if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { |
13358 | VT = LD->getMemoryVT(); |
13359 | Ptr = LD->getBasePtr(); |
13360 | isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; |
13361 | isNonExt = LD->getExtensionType() == ISD::NON_EXTLOAD; |
13362 | } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { |
13363 | VT = ST->getMemoryVT(); |
13364 | Ptr = ST->getBasePtr(); |
13365 | isNonExt = !ST->isTruncatingStore(); |
13366 | } else |
13367 | return false; |
13368 | |
13369 | if (Subtarget->isThumb1Only()) { |
13370 | // Thumb-1 can do a limited post-inc load or store as an updating LDM. It |
13371 | // must be non-extending/truncating, i32, with an offset of 4. |
13372 | assert(Op->getValueType(0) == MVT::i32 && "Non-i32 post-inc op?!");
13373 | if (Op->getOpcode() != ISD::ADD || !isNonExt) |
13374 | return false; |
13375 | auto *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1)); |
13376 | if (!RHS || RHS->getZExtValue() != 4) |
13377 | return false; |
13378 | |
13379 | Offset = Op->getOperand(1); |
13380 | Base = Op->getOperand(0); |
13381 | AM = ISD::POST_INC; |
13382 | return true; |
13383 | } |
13384 | |
13385 | bool isInc; |
13386 | bool isLegal = false; |
13387 | if (Subtarget->isThumb2()) |
13388 | isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, |
13389 | isInc, DAG); |
13390 | else |
13391 | isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, |
13392 | isInc, DAG); |
13393 | if (!isLegal) |
13394 | return false; |
13395 | |
13396 | if (Ptr != Base) { |
13397 | // Swap base ptr and offset to catch more post-index load / store when |
13398 | // it's legal. In Thumb2 mode, offset must be an immediate. |
13399 | if (Ptr == Offset && Op->getOpcode() == ISD::ADD && |
13400 | !Subtarget->isThumb2()) |
13401 | std::swap(Base, Offset); |
13402 | |
13403 | // Post-indexed load / store update the base pointer. |
13404 | if (Ptr != Base) |
13405 | return false; |
13406 | } |
13407 | |
13408 | AM = isInc ? ISD::POST_INC : ISD::POST_DEC; |
13409 | return true; |
13410 | } |
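      | // Illustration: a load followed by "add ptr, #4" producing the next
      | // pointer can be folded into a single post-indexed access, e.g.
      | //   ldr r0, [r1], #4   ; load, then r1 += 4 (ISD::POST_INC)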
13411 | |
13412 | void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op, |
13413 | KnownBits &Known, |
13414 | const APInt &DemandedElts, |
13415 | const SelectionDAG &DAG, |
13416 | unsigned Depth) const { |
13417 | unsigned BitWidth = Known.getBitWidth(); |
13418 | Known.resetAll(); |
13419 | switch (Op.getOpcode()) { |
13420 | default: break; |
13421 | case ARMISD::ADDC: |
13422 | case ARMISD::ADDE: |
13423 | case ARMISD::SUBC: |
13424 | case ARMISD::SUBE: |
13425 | // Special cases when we convert a carry to a boolean. |
13426 | if (Op.getResNo() == 0) { |
13427 | SDValue LHS = Op.getOperand(0); |
13428 | SDValue RHS = Op.getOperand(1); |
13429 | // (ADDE 0, 0, C) will give us a single bit. |
13430 | if (Op->getOpcode() == ARMISD::ADDE && isNullConstant(LHS) && |
13431 | isNullConstant(RHS)) { |
13432 | Known.Zero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1); |
13433 | return; |
13434 | } |
13435 | } |
13436 | break; |
13437 | case ARMISD::CMOV: { |
13438 | // Bits are known zero/one if known on the LHS and RHS. |
13439 | DAG.computeKnownBits(Op.getOperand(0), Known, Depth+1); |
13440 | if (Known.isUnknown()) |
13441 | return; |
13442 | |
13443 | KnownBits KnownRHS; |
13444 | DAG.computeKnownBits(Op.getOperand(1), KnownRHS, Depth+1); |
13445 | Known.Zero &= KnownRHS.Zero; |
13446 | Known.One &= KnownRHS.One; |
13447 | return; |
13448 | } |
13449 | case ISD::INTRINSIC_W_CHAIN: { |
13450 | ConstantSDNode *CN = cast<ConstantSDNode>(Op->getOperand(1)); |
13451 | Intrinsic::ID IntID = static_cast<Intrinsic::ID>(CN->getZExtValue()); |
13452 | switch (IntID) { |
13453 | default: return; |
13454 | case Intrinsic::arm_ldaex: |
13455 | case Intrinsic::arm_ldrex: { |
13456 | EVT VT = cast<MemIntrinsicSDNode>(Op)->getMemoryVT(); |
13457 | unsigned MemBits = VT.getScalarSizeInBits(); |
13458 | Known.Zero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits); |
13459 | return; |
13460 | } |
13461 | } |
13462 | } |
13463 | case ARMISD::BFI: { |
13464 | // Conservatively, we can recurse down the first operand |
13465 | // and just mask out all affected bits. |
13466 | DAG.computeKnownBits(Op.getOperand(0), Known, Depth + 1); |
13467 | |
13468 | // The operand to BFI is already a mask suitable for removing the bits it |
13469 | // sets. |
13470 | ConstantSDNode *CI = cast<ConstantSDNode>(Op.getOperand(2)); |
13471 | const APInt &Mask = CI->getAPIntValue(); |
13472 | Known.Zero &= Mask; |
13473 | Known.One &= Mask; |
13474 | return; |
13475 | } |
13476 | } |
13477 | } |
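      | // Illustration of the ADDE special case above: ADDE(0, 0, carry)
      | // materializes the carry flag as a 0/1 value, so every bit except
      | // bit 0 is known zero and later combines can treat the result as an
      | // i1 held in an i32 register.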
13478 | |
13479 | //===----------------------------------------------------------------------===// |
13480 | // ARM Inline Assembly Support |
13481 | //===----------------------------------------------------------------------===// |
13482 | |
13483 | bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const { |
13484 | // Looking for "rev" which is V6+. |
13485 | if (!Subtarget->hasV6Ops()) |
13486 | return false; |
13487 | |
13488 | InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue()); |
13489 | std::string AsmStr = IA->getAsmString(); |
13490 | SmallVector<StringRef, 4> AsmPieces; |
13491 | SplitString(AsmStr, AsmPieces, ";\n"); |
13492 | |
13493 | switch (AsmPieces.size()) { |
13494 | default: return false; |
13495 | case 1: |
13496 | AsmStr = AsmPieces[0]; |
13497 | AsmPieces.clear(); |
13498 | SplitString(AsmStr, AsmPieces, " \t,"); |
13499 | |
13500 | // rev $0, $1 |
13501 | if (AsmPieces.size() == 3 && |
13502 | AsmPieces[0] == "rev" && AsmPieces[1] == "$0" && AsmPieces[2] == "$1" && |
13503 | IA->getConstraintString().compare(0, 4, "=l,l") == 0) { |
13504 | IntegerType *Ty = dyn_cast<IntegerType>(CI->getType()); |
13505 | if (Ty && Ty->getBitWidth() == 32) |
13506 | return IntrinsicLowering::LowerToByteSwap(CI); |
13507 | } |
13508 | break; |
13509 | } |
13510 | |
13511 | return false; |
13512 | } |
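      | // Illustration: inline asm of the form
      | //   int r; __asm__("rev %0, %1" : "=l"(r) : "l"(x));
      | // produces the constraint string "=l,l" matched above, and the call
      | // is replaced with llvm.bswap.i32 so the optimizer can see through
      | // the byte swap.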
13513 | |
13514 | const char *ARMTargetLowering::LowerXConstraint(EVT ConstraintVT) const { |
13515 | // At this point, we have to lower this constraint to something else, so we |
13516 | // lower it to an "r" or "w". However, by doing this we will force the result |
13517 | // to be in register, while the X constraint is much more permissive. |
13518 | // |
13519 | // Although we are correct (we are free to emit anything, without |
13520 | // constraints), we might break use cases that would expect us to be more |
13521 | // efficient and emit something else. |
13522 | if (!Subtarget->hasVFP2()) |
13523 | return "r"; |
13524 | if (ConstraintVT.isFloatingPoint()) |
13525 | return "w"; |
13526 | if (ConstraintVT.isVector() && Subtarget->hasNEON() && |
13527 | (ConstraintVT.getSizeInBits() == 64 || |
13528 | ConstraintVT.getSizeInBits() == 128)) |
13529 | return "w"; |
13530 | |
13531 | return "r"; |
13532 | } |
13533 | |
13534 | /// getConstraintType - Given a constraint letter, return the type of |
13535 | /// constraint it is for this target. |
13536 | ARMTargetLowering::ConstraintType |
13537 | ARMTargetLowering::getConstraintType(StringRef Constraint) const { |
13538 | if (Constraint.size() == 1) { |
13539 | switch (Constraint[0]) { |
13540 | default: break; |
13541 | case 'l': return C_RegisterClass; |
13542 | case 'w': return C_RegisterClass; |
13543 | case 'h': return C_RegisterClass; |
13544 | case 'x': return C_RegisterClass; |
13545 | case 't': return C_RegisterClass; |
13546 | case 'j': return C_Other; // Constant for movw. |
13547 | // An address with a single base register. Due to the way we |
13548 | // currently handle addresses it is the same as an 'r' memory constraint. |
13549 | case 'Q': return C_Memory; |
13550 | } |
13551 | } else if (Constraint.size() == 2) { |
13552 | switch (Constraint[0]) { |
13553 | default: break; |
13554 | // All 'U+' constraints are addresses. |
13555 | case 'U': return C_Memory; |
13556 | } |
13557 | } |
13558 | return TargetLowering::getConstraintType(Constraint); |
13559 | } |
13560 | |
13561 | /// Examine constraint type and operand type and determine a weight value. |
13562 | /// This object must already have been set up with the operand type |
13563 | /// and the current alternative constraint selected. |
13564 | TargetLowering::ConstraintWeight |
13565 | ARMTargetLowering::getSingleConstraintMatchWeight( |
13566 | AsmOperandInfo &info, const char *constraint) const { |
13567 | ConstraintWeight weight = CW_Invalid; |
13568 | Value *CallOperandVal = info.CallOperandVal; |
13569 | // If we don't have a value, we can't do a match, |
13570 | // but allow it at the lowest weight. |
13571 | if (!CallOperandVal) |
13572 | return CW_Default; |
13573 | Type *type = CallOperandVal->getType(); |
13574 | // Look at the constraint type. |
13575 | switch (*constraint) { |
13576 | default: |
13577 | weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); |
13578 | break; |
13579 | case 'l': |
13580 | if (type->isIntegerTy()) { |
13581 | if (Subtarget->isThumb()) |
13582 | weight = CW_SpecificReg; |
13583 | else |
13584 | weight = CW_Register; |
13585 | } |
13586 | break; |
13587 | case 'w': |
13588 | if (type->isFloatingPointTy()) |
13589 | weight = CW_Register; |
13590 | break; |
13591 | } |
13592 | return weight; |
13593 | } |
13594 | |
13595 | using RCPair = std::pair<unsigned, const TargetRegisterClass *>; |
13596 | |
13597 | RCPair ARMTargetLowering::getRegForInlineAsmConstraint( |
13598 | const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const { |
13599 | if (Constraint.size() == 1) { |
13600 | // GCC ARM Constraint Letters |
13601 | switch (Constraint[0]) { |
13602 | case 'l': // Low regs or general regs. |
13603 | if (Subtarget->isThumb()) |
13604 | return RCPair(0U, &ARM::tGPRRegClass); |
13605 | return RCPair(0U, &ARM::GPRRegClass); |
13606 | case 'h': // High regs or no regs. |
13607 | if (Subtarget->isThumb()) |
13608 | return RCPair(0U, &ARM::hGPRRegClass); |
13609 | break; |
13610 | case 'r': |
13611 | if (Subtarget->isThumb1Only()) |
13612 | return RCPair(0U, &ARM::tGPRRegClass); |
13613 | return RCPair(0U, &ARM::GPRRegClass); |
13614 | case 'w': |
13615 | if (VT == MVT::Other) |
13616 | break; |
13617 | if (VT == MVT::f32) |
13618 | return RCPair(0U, &ARM::SPRRegClass); |
13619 | if (VT.getSizeInBits() == 64) |
13620 | return RCPair(0U, &ARM::DPRRegClass); |
13621 | if (VT.getSizeInBits() == 128) |
13622 | return RCPair(0U, &ARM::QPRRegClass); |
13623 | break; |
13624 | case 'x': |
13625 | if (VT == MVT::Other) |
13626 | break; |
13627 | if (VT == MVT::f32) |
13628 | return RCPair(0U, &ARM::SPR_8RegClass); |
13629 | if (VT.getSizeInBits() == 64) |
13630 | return RCPair(0U, &ARM::DPR_8RegClass); |
13631 | if (VT.getSizeInBits() == 128) |
13632 | return RCPair(0U, &ARM::QPR_8RegClass); |
13633 | break; |
13634 | case 't': |
13635 | if (VT == MVT::Other) |
13636 | break; |
13637 | if (VT == MVT::f32 || VT == MVT::i32) |
13638 | return RCPair(0U, &ARM::SPRRegClass); |
13639 | if (VT.getSizeInBits() == 64) |
13640 | return RCPair(0U, &ARM::DPR_VFP2RegClass); |
13641 | if (VT.getSizeInBits() == 128) |
13642 | return RCPair(0U, &ARM::QPR_VFP2RegClass); |
13643 | break; |
13644 | } |
13645 | } |
13646 | if (StringRef("{cc}").equals_lower(Constraint)) |
13647 | return std::make_pair(unsigned(ARM::CPSR), &ARM::CCRRegClass); |
13648 | |
13649 | return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); |
13650 | } |
13651 | |
13652 | /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops |
13653 | /// vector. If it is invalid, don't add anything to Ops. |
13654 | void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op, |
13655 | std::string &Constraint, |
13656 | std::vector<SDValue>&Ops, |
13657 | SelectionDAG &DAG) const { |
13658 | SDValue Result; |
13659 | |
13660 | // Currently only support length 1 constraints. |
13661 | if (Constraint.length() != 1) return; |
13662 | |
13663 | char ConstraintLetter = Constraint[0]; |
13664 | switch (ConstraintLetter) { |
13665 | default: break; |
13666 | case 'j': |
13667 | case 'I': case 'J': case 'K': case 'L': |
13668 | case 'M': case 'N': case 'O': |
13669 | ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); |
13670 | if (!C) |
13671 | return; |
13672 | |
13673 | int64_t CVal64 = C->getSExtValue(); |
13674 | int CVal = (int) CVal64; |
13675 | // None of these constraints allow values larger than 32 bits. Check |
13676 | // that the value fits in an int. |
13677 | if (CVal != CVal64) |
13678 | return; |
13679 | |
13680 | switch (ConstraintLetter) { |
13681 | case 'j': |
13682 | // Constant suitable for movw, must be between 0 and |
13683 | // 65535. |
13684 | if (Subtarget->hasV6T2Ops()) |
13685 | if (CVal >= 0 && CVal <= 65535) |
13686 | break; |
13687 | return; |
13688 | case 'I': |
13689 | if (Subtarget->isThumb1Only()) { |
13690 | // This must be a constant between 0 and 255, for ADD |
13691 | // immediates. |
13692 | if (CVal >= 0 && CVal <= 255) |
13693 | break; |
13694 | } else if (Subtarget->isThumb2()) { |
13695 | // A constant that can be used as an immediate value in a |
13696 | // data-processing instruction. |
13697 | if (ARM_AM::getT2SOImmVal(CVal) != -1) |
13698 | break; |
13699 | } else { |
13700 | // A constant that can be used as an immediate value in a |
13701 | // data-processing instruction. |
13702 | if (ARM_AM::getSOImmVal(CVal) != -1) |
13703 | break; |
13704 | } |
13705 | return; |
13706 | |
13707 | case 'J': |
13708 | if (Subtarget->isThumb1Only()) { |
13709 | // This must be a constant between -255 and -1, for negated ADD |
13710 | // immediates. This can be used in GCC with an "n" modifier that |
13711 | // prints the negated value, for use with SUB instructions. It is |
13712 | // not useful otherwise but is implemented for compatibility. |
13713 | if (CVal >= -255 && CVal <= -1) |
13714 | break; |
13715 | } else { |
13716 | // This must be a constant between -4095 and 4095. It is not clear |
13717 | // what this constraint is intended for. Implemented for |
13718 | // compatibility with GCC. |
13719 | if (CVal >= -4095 && CVal <= 4095) |
13720 | break; |
13721 | } |
13722 | return; |
13723 | |
13724 | case 'K': |
13725 | if (Subtarget->isThumb1Only()) { |
13726 | // A 32-bit value where only one byte has a nonzero value. Exclude |
13727 | // zero to match GCC. This constraint is used by GCC internally for |
13728 | // constants that can be loaded with a move/shift combination. |
13729 | // It is not useful otherwise but is implemented for compatibility. |
13730 | if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal)) |
13731 | break; |
13732 | } else if (Subtarget->isThumb2()) { |
13733 | // A constant whose bitwise inverse can be used as an immediate |
13734 | // value in a data-processing instruction. This can be used in GCC |
13735 | // with a "B" modifier that prints the inverted value, for use with |
13736 | // BIC and MVN instructions. It is not useful otherwise but is |
13737 | // implemented for compatibility. |
13738 | if (ARM_AM::getT2SOImmVal(~CVal) != -1) |
13739 | break; |
13740 | } else { |
13741 | // A constant whose bitwise inverse can be used as an immediate |
13742 | // value in a data-processing instruction. This can be used in GCC |
13743 | // with a "B" modifier that prints the inverted value, for use with |
13744 | // BIC and MVN instructions. It is not useful otherwise but is |
13745 | // implemented for compatibility. |
13746 | if (ARM_AM::getSOImmVal(~CVal) != -1) |
13747 | break; |
13748 | } |
13749 | return; |
13750 | |
13751 | case 'L': |
13752 | if (Subtarget->isThumb1Only()) { |
13753 | // This must be a constant between -7 and 7, |
13754 | // for 3-operand ADD/SUB immediate instructions. |
13755 | if (CVal >= -7 && CVal < 7) |
13756 | break; |
13757 | } else if (Subtarget->isThumb2()) { |
13758 | // A constant whose negation can be used as an immediate value in a |
13759 | // data-processing instruction. This can be used in GCC with an "n" |
13760 | // modifier that prints the negated value, for use with SUB |
13761 | // instructions. It is not useful otherwise but is implemented for |
13762 | // compatibility. |
13763 | if (ARM_AM::getT2SOImmVal(-CVal) != -1) |
13764 | break; |
13765 | } else { |
13766 | // A constant whose negation can be used as an immediate value in a |
13767 | // data-processing instruction. This can be used in GCC with an "n" |
13768 | // modifier that prints the negated value, for use with SUB |
13769 | // instructions. It is not useful otherwise but is implemented for |
13770 | // compatibility. |
13771 | if (ARM_AM::getSOImmVal(-CVal) != -1) |
13772 | break; |
13773 | } |
13774 | return; |
13775 | |
13776 | case 'M': |
13777 | if (Subtarget->isThumb1Only()) { |
13778 | // This must be a multiple of 4 between 0 and 1020, for |
13779 | // ADD sp + immediate. |
13780 | if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0)) |
13781 | break; |
13782 | } else { |
13783 | // A power of two or a constant between 0 and 32. This is used in |
13784 | // GCC for the shift amount on shifted register operands, but it is |
13785 | // useful in general for any shift amounts. |
13786 | if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0)) |
13787 | break; |
13788 | } |
13789 | return; |
13790 | |
13791 | case 'N': |
13792 | if (Subtarget->isThumb()) { // FIXME thumb2 |
13793 | // This must be a constant between 0 and 31, for shift amounts. |
13794 | if (CVal >= 0 && CVal <= 31) |
13795 | break; |
13796 | } |
13797 | return; |
13798 | |
13799 | case 'O': |
13800 | if (Subtarget->isThumb()) { // FIXME thumb2 |
13801 | // This must be a multiple of 4 between -508 and 508, for |
13802 | // ADD/SUB sp = sp + immediate. |
13803 | if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0)) |
13804 | break; |
13805 | } |
13806 | return; |
13807 | } |
13808 | Result = DAG.getTargetConstant(CVal, SDLoc(Op), Op.getValueType()); |
13809 | break; |
13810 | } |
13811 | |
13812 | if (Result.getNode()) { |
13813 | Ops.push_back(Result); |
13814 | return; |
13815 | } |
13816 | return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); |
13817 | } |
13818 | |
13819 | static RTLIB::Libcall getDivRemLibcall( |
13820 | const SDNode *N, MVT::SimpleValueType SVT) { |
13821 | assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM ||
13822 |         N->getOpcode() == ISD::SREM || N->getOpcode() == ISD::UREM) &&
13823 |        "Unhandled Opcode in getDivRemLibcall");
13824 | bool isSigned = N->getOpcode() == ISD::SDIVREM || |
13825 | N->getOpcode() == ISD::SREM; |
13826 | RTLIB::Libcall LC; |
13827 | switch (SVT) { |
13828 | default: llvm_unreachable("Unexpected request for libcall!");
13829 | case MVT::i8: LC = isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break; |
13830 | case MVT::i16: LC = isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break; |
13831 | case MVT::i32: LC = isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break; |
13832 | case MVT::i64: LC = isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break; |
13833 | } |
13834 | return LC; |
13835 | } |
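      | // Example mapping (on AEABI targets): RTLIB::SDIVREM_I32 resolves to
      | // the __aeabi_idivmod helper and RTLIB::UDIVREM_I32 to
      | // __aeabi_uidivmod, each returning {quotient, remainder} in r0/r1.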
13836 | |
13837 | static TargetLowering::ArgListTy getDivRemArgList( |
13838 | const SDNode *N, LLVMContext *Context, const ARMSubtarget *Subtarget) { |
13839 | assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM ||
13840 |         N->getOpcode() == ISD::SREM || N->getOpcode() == ISD::UREM) &&
13841 |        "Unhandled Opcode in getDivRemArgList");
13842 | bool isSigned = N->getOpcode() == ISD::SDIVREM || |
13843 | N->getOpcode() == ISD::SREM; |
13844 | TargetLowering::ArgListTy Args; |
13845 | TargetLowering::ArgListEntry Entry; |
13846 | for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { |
13847 | EVT ArgVT = N->getOperand(i).getValueType(); |
13848 | Type *ArgTy = ArgVT.getTypeForEVT(*Context); |
13849 | Entry.Node = N->getOperand(i); |
13850 | Entry.Ty = ArgTy; |
13851 | Entry.IsSExt = isSigned; |
13852 | Entry.IsZExt = !isSigned; |
13853 | Args.push_back(Entry); |
13854 | } |
13855 | if (Subtarget->isTargetWindows() && Args.size() >= 2) |
13856 | std::swap(Args[0], Args[1]); |
13857 | return Args; |
13858 | } |
13859 | |
13860 | SDValue ARMTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const { |
13861 | assert((Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() ||
13862 |         Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI() ||
13863 |         Subtarget->isTargetWindows()) &&
13864 |        "Register-based DivRem lowering only");
13865 | unsigned Opcode = Op->getOpcode(); |
13866 | assert((Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) &&
13867 |        "Invalid opcode for Div/Rem lowering");
13868 | bool isSigned = (Opcode == ISD::SDIVREM); |
13869 | EVT VT = Op->getValueType(0); |
13870 | Type *Ty = VT.getTypeForEVT(*DAG.getContext()); |
13871 | SDLoc dl(Op); |
13872 | |
13873 | // If the target has hardware divide, use divide + multiply + subtract: |
13874 | // div = a / b |
13875 | // rem = a - b * div |
13876 | // return {div, rem} |
13877 | // This should be lowered into UDIV/SDIV + MLS later on. |
13878 | bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode() |
13879 | : Subtarget->hasDivideInARMMode(); |
13880 | if (hasDivide && Op->getValueType(0).isSimple() && |
13881 | Op->getSimpleValueType(0) == MVT::i32) { |
13882 | unsigned DivOpcode = isSigned ? ISD::SDIV : ISD::UDIV; |
13883 | const SDValue Dividend = Op->getOperand(0); |
13884 | const SDValue Divisor = Op->getOperand(1); |
13885 | SDValue Div = DAG.getNode(DivOpcode, dl, VT, Dividend, Divisor); |
13886 | SDValue Mul = DAG.getNode(ISD::MUL, dl, VT, Div, Divisor); |
13887 | SDValue Rem = DAG.getNode(ISD::SUB, dl, VT, Dividend, Mul); |
13888 | |
13889 | SDValue Values[2] = {Div, Rem}; |
13890 | return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VT, VT), Values); |
13891 | } |
13892 | |
13893 | RTLIB::Libcall LC = getDivRemLibcall(Op.getNode(), |
13894 | VT.getSimpleVT().SimpleTy); |
13895 | SDValue InChain = DAG.getEntryNode(); |
13896 | |
13897 | TargetLowering::ArgListTy Args = getDivRemArgList(Op.getNode(), |
13898 | DAG.getContext(), |
13899 | Subtarget); |
13900 | |
13901 | SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC), |
13902 | getPointerTy(DAG.getDataLayout())); |
13903 | |
13904 | Type *RetTy = StructType::get(Ty, Ty); |
13905 | |
13906 | if (Subtarget->isTargetWindows()) |
13907 | InChain = WinDBZCheckDenominator(DAG, Op.getNode(), InChain); |
13908 | |
13909 | TargetLowering::CallLoweringInfo CLI(DAG); |
13910 | CLI.setDebugLoc(dl).setChain(InChain) |
13911 | .setCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args)) |
13912 | .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned); |
13913 | |
13914 | std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI); |
13915 | return CallInfo.first; |
13916 | } |
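      | // Illustration of the hardware-divide path above: for an i32 sdivrem
      | // the DAG becomes
      | //   div = SDIV a, b
      | //   mul = MUL div, b
      | //   rem = SUB a, mul
      | // which is later matched to SDIV/UDIV plus MLS.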
13917 | |
13918 | // Lowers REM using divmod helpers;
13919 | // see RTABI sections 4.2/4.3.
13920 | SDValue ARMTargetLowering::LowerREM(SDNode *N, SelectionDAG &DAG) const { |
13921 | // Build return types (div and rem) |
13922 | std::vector<Type*> RetTyParams; |
13923 | Type *RetTyElement; |
13924 | |
13925 | switch (N->getValueType(0).getSimpleVT().SimpleTy) { |
13926 | default: llvm_unreachable("Unexpected request for libcall!");
13927 | case MVT::i8: RetTyElement = Type::getInt8Ty(*DAG.getContext()); break; |
13928 | case MVT::i16: RetTyElement = Type::getInt16Ty(*DAG.getContext()); break; |
13929 | case MVT::i32: RetTyElement = Type::getInt32Ty(*DAG.getContext()); break; |
13930 | case MVT::i64: RetTyElement = Type::getInt64Ty(*DAG.getContext()); break; |
13931 | } |
13932 | |
13933 | RetTyParams.push_back(RetTyElement); |
13934 | RetTyParams.push_back(RetTyElement); |
13935 | ArrayRef<Type*> ret = ArrayRef<Type*>(RetTyParams); |
13936 | Type *RetTy = StructType::get(*DAG.getContext(), ret); |
13937 | |
13938 | RTLIB::Libcall LC = getDivRemLibcall(N, N->getValueType(0).getSimpleVT(). |
13939 | SimpleTy); |
13940 | SDValue InChain = DAG.getEntryNode(); |
13941 | TargetLowering::ArgListTy Args = getDivRemArgList(N, DAG.getContext(), |
13942 | Subtarget); |
13943 | bool isSigned = N->getOpcode() == ISD::SREM; |
13944 | SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC), |
13945 | getPointerTy(DAG.getDataLayout())); |
13946 | |
13947 | if (Subtarget->isTargetWindows()) |
13948 | InChain = WinDBZCheckDenominator(DAG, N, InChain); |
13949 | |
13950 | // Lower call |
13951 | CallLoweringInfo CLI(DAG); |
13952 | CLI.setChain(InChain) |
13953 | .setCallee(CallingConv::ARM_AAPCS, RetTy, Callee, std::move(Args)) |
13954 | .setSExtResult(isSigned).setZExtResult(!isSigned).setDebugLoc(SDLoc(N)); |
13955 | std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); |
13956 | |
13957 | // Return second (rem) result operand (first contains div) |
13958 | SDNode *ResNode = CallResult.first.getNode(); |
13959 | assert(ResNode->getNumOperands() == 2 && "divmod should return two operands");
13960 | return ResNode->getOperand(1); |
13961 | } |
13962 | |
13963 | SDValue |
13964 | ARMTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const { |
13965 | assert(Subtarget->isTargetWindows() && "unsupported target platform");
13966 | SDLoc DL(Op); |
13967 | |
13968 | // Get the inputs. |
13969 | SDValue Chain = Op.getOperand(0); |
13970 | SDValue Size = Op.getOperand(1); |
13971 | |
13972 | if (DAG.getMachineFunction().getFunction().hasFnAttribute( |
13973 | "no-stack-arg-probe")) { |
13974 | unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); |
13975 | SDValue SP = DAG.getCopyFromReg(Chain, DL, ARM::SP, MVT::i32); |
13976 | Chain = SP.getValue(1); |
13977 | SP = DAG.getNode(ISD::SUB, DL, MVT::i32, SP, Size); |
13978 | if (Align) |
13979 | SP = DAG.getNode(ISD::AND, DL, MVT::i32, SP.getValue(0), |
13980 | DAG.getConstant(-(uint64_t)Align, DL, MVT::i32)); |
13981 | Chain = DAG.getCopyToReg(Chain, DL, ARM::SP, SP); |
13982 | SDValue Ops[2] = { SP, Chain }; |
13983 | return DAG.getMergeValues(Ops, DL); |
13984 | } |
13985 | |
13986 | SDValue Words = DAG.getNode(ISD::SRL, DL, MVT::i32, Size, |
13987 | DAG.getConstant(2, DL, MVT::i32)); |
13988 | |
13989 | SDValue Flag; |
13990 | Chain = DAG.getCopyToReg(Chain, DL, ARM::R4, Words, Flag); |
13991 | Flag = Chain.getValue(1); |
13992 | |
13993 | SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); |
13994 | Chain = DAG.getNode(ARMISD::WIN__CHKSTK, DL, NodeTys, Chain, Flag); |
13995 | |
13996 | SDValue NewSP = DAG.getCopyFromReg(Chain, DL, ARM::SP, MVT::i32); |
13997 | Chain = NewSP.getValue(1); |
13998 | |
13999 | SDValue Ops[2] = { NewSP, Chain }; |
14000 | return DAG.getMergeValues(Ops, DL); |
14001 | } |
14002 | |
14003 | SDValue ARMTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const { |
14004 | assert(Op.getValueType() == MVT::f64 && Subtarget->isFPOnlySP() &&
14005 |        "Unexpected type for custom-lowering FP_EXTEND");
14006 | |
14007 | RTLIB::Libcall LC; |
14008 | LC = RTLIB::getFPEXT(Op.getOperand(0).getValueType(), Op.getValueType()); |
14009 | |
14010 | SDValue SrcVal = Op.getOperand(0); |
14011 | return makeLibCall(DAG, LC, Op.getValueType(), SrcVal, /*isSigned*/ false, |
14012 | SDLoc(Op)).first; |
14013 | } |
14014 | |
14015 | SDValue ARMTargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const { |
14016 | assert(Op.getOperand(0).getValueType() == MVT::f64 &&
14017 |        Subtarget->isFPOnlySP() &&
14018 |        "Unexpected type for custom-lowering FP_ROUND");
14019 | |
14020 | RTLIB::Libcall LC; |
14021 | LC = RTLIB::getFPROUND(Op.getOperand(0).getValueType(), Op.getValueType()); |
14022 | |
14023 | SDValue SrcVal = Op.getOperand(0); |
14024 | return makeLibCall(DAG, LC, Op.getValueType(), SrcVal, /*isSigned*/ false, |
14025 | SDLoc(Op)).first; |
14026 | } |
14027 | |
14028 | bool |
14029 | ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { |
14030 | // The ARM target isn't yet aware of offsets. |
14031 | return false; |
14032 | } |
14033 | |
14034 | bool ARM::isBitFieldInvertedMask(unsigned v) { |
14035 | if (v == 0xffffffff) |
14036 | return false; |
14037 | |
14038 | // There can be 1's on either or both "outsides"; all the "inside" |
14039 | // bits must be 0's. |
14040 | return isShiftedMask_32(~v); |
14041 | } |
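| |
| // Illustrative values (derived from the check above, not from a test): |
| // v == 0xE0000003 has 1's on both outsides and one run of 0's inside, and |
| // ~v == 0x1FFFFFFC is a single shifted run of 1's, so it is accepted; |
| // v == 0xE00F0003 is rejected because ~v splits into two separate runs. |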
14042 | |
14043 | /// isFPImmLegal - Returns true if the target can instruction select the |
14044 | /// specified FP immediate natively. If false, the legalizer will |
14045 | /// materialize the FP immediate as a load from a constant pool. |
14046 | bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const { |
14047 | if (!Subtarget->hasVFP3()) |
14048 | return false; |
14049 | if (VT == MVT::f16 && Subtarget->hasFullFP16()) |
14050 | return ARM_AM::getFP16Imm(Imm) != -1; |
14051 | if (VT == MVT::f32) |
14052 | return ARM_AM::getFP32Imm(Imm) != -1; |
14053 | if (VT == MVT::f64 && !Subtarget->isFPOnlySP()) |
14054 | return ARM_AM::getFP64Imm(Imm) != -1; |
14055 | return false; |
14056 | } |
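| |
| // For reference (based on the ARM ARM's VFPv3 "modified immediate" |
| // encoding, not on anything else in this file): the encodable constants |
| // have the form +/-(1 + f/16) * 2^e with f in [0,15] and e in [-3,4], |
| // i.e. magnitudes from 0.125 to 31.0. So 0.5, 1.0 and 2.0 are selectable |
| // directly, while 0.1 falls back to a constant-pool load. |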
14057 | |
14058 | /// getTgtMemIntrinsic - Represent NEON load and store intrinsics as |
14059 | /// MemIntrinsicNodes. The associated MachineMemOperands record the alignment |
14060 | /// specified in the intrinsic calls. |
14061 | bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, |
14062 | const CallInst &I, |
14063 | MachineFunction &MF, |
14064 | unsigned Intrinsic) const { |
14065 | switch (Intrinsic) { |
14066 | case Intrinsic::arm_neon_vld1: |
14067 | case Intrinsic::arm_neon_vld2: |
14068 | case Intrinsic::arm_neon_vld3: |
14069 | case Intrinsic::arm_neon_vld4: |
14070 | case Intrinsic::arm_neon_vld2lane: |
14071 | case Intrinsic::arm_neon_vld3lane: |
14072 | case Intrinsic::arm_neon_vld4lane: { |
14073 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
14074 | // Conservatively set memVT to the entire set of vectors loaded. |
14075 | auto &DL = I.getCalledFunction()->getParent()->getDataLayout(); |
14076 | uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64; |
14077 | Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); |
14078 | Info.ptrVal = I.getArgOperand(0); |
14079 | Info.offset = 0; |
14080 | Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1); |
14081 | Info.align = cast<ConstantInt>(AlignArg)->getZExtValue(); |
14082 | // Volatile loads with NEON intrinsics are not supported. |
14083 | Info.flags = MachineMemOperand::MOLoad; |
14084 | return true; |
14085 | } |
14086 | case Intrinsic::arm_neon_vst1: |
14087 | case Intrinsic::arm_neon_vst2: |
14088 | case Intrinsic::arm_neon_vst3: |
14089 | case Intrinsic::arm_neon_vst4: |
14090 | case Intrinsic::arm_neon_vst2lane: |
14091 | case Intrinsic::arm_neon_vst3lane: |
14092 | case Intrinsic::arm_neon_vst4lane: { |
14093 | Info.opc = ISD::INTRINSIC_VOID; |
14094 | // Conservatively set memVT to the entire set of vectors stored. |
14095 | auto &DL = I.getCalledFunction()->getParent()->getDataLayout(); |
14096 | unsigned NumElts = 0; |
14097 | for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) { |
14098 | Type *ArgTy = I.getArgOperand(ArgI)->getType(); |
14099 | if (!ArgTy->isVectorTy()) |
14100 | break; |
14101 | NumElts += DL.getTypeSizeInBits(ArgTy) / 64; |
14102 | } |
14103 | Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); |
14104 | Info.ptrVal = I.getArgOperand(0); |
14105 | Info.offset = 0; |
14106 | Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1); |
14107 | Info.align = cast<ConstantInt>(AlignArg)->getZExtValue(); |
14108 | // Volatile stores with NEON intrinsics are not supported. |
14109 | Info.flags = MachineMemOperand::MOStore; |
14110 | return true; |
14111 | } |
14112 | case Intrinsic::arm_ldaex: |
14113 | case Intrinsic::arm_ldrex: { |
14114 | auto &DL = I.getCalledFunction()->getParent()->getDataLayout(); |
14115 | PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType()); |
14116 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
14117 | Info.memVT = MVT::getVT(PtrTy->getElementType()); |
14118 | Info.ptrVal = I.getArgOperand(0); |
14119 | Info.offset = 0; |
14120 | Info.align = DL.getABITypeAlignment(PtrTy->getElementType()); |
14121 | Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile; |
14122 | return true; |
14123 | } |
14124 | case Intrinsic::arm_stlex: |
14125 | case Intrinsic::arm_strex: { |
14126 | auto &DL = I.getCalledFunction()->getParent()->getDataLayout(); |
14127 | PointerType *PtrTy = cast<PointerType>(I.getArgOperand(1)->getType()); |
14128 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
14129 | Info.memVT = MVT::getVT(PtrTy->getElementType()); |
14130 | Info.ptrVal = I.getArgOperand(1); |
14131 | Info.offset = 0; |
14132 | Info.align = DL.getABITypeAlignment(PtrTy->getElementType()); |
14133 | Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile; |
14134 | return true; |
14135 | } |
14136 | case Intrinsic::arm_stlexd: |
14137 | case Intrinsic::arm_strexd: |
14138 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
14139 | Info.memVT = MVT::i64; |
14140 | Info.ptrVal = I.getArgOperand(2); |
14141 | Info.offset = 0; |
14142 | Info.align = 8; |
14143 | Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile; |
14144 | return true; |
14145 | |
14146 | case Intrinsic::arm_ldaexd: |
14147 | case Intrinsic::arm_ldrexd: |
14148 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
14149 | Info.memVT = MVT::i64; |
14150 | Info.ptrVal = I.getArgOperand(0); |
14151 | Info.offset = 0; |
14152 | Info.align = 8; |
14153 | Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile; |
14154 | return true; |
14155 | |
14156 | default: |
14157 | break; |
14158 | } |
14159 | |
14160 | return false; |
14161 | } |
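| |
| // Worked example for the vldN case above (illustrative only): for |
| //   %r = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm.neon.vld3(...) |
| // the returned aggregate is 384 bits wide, so NumElts = 384 / 64 = 6 and |
| // memVT becomes v6i64 -- a conservative cover of all memory the intrinsic |
| // may touch. |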
14162 | |
14163 | /// \brief Returns true if it is beneficial to convert a load of a constant |
14164 | /// to just the constant itself. |
14165 | bool ARMTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, |
14166 | Type *Ty) const { |
14167 | assert(Ty->isIntegerTy()); |
14168 | |
14169 | unsigned Bits = Ty->getPrimitiveSizeInBits(); |
14170 | if (Bits == 0 || Bits > 32) |
14171 | return false; |
14172 | return true; |
14173 | } |
14174 | |
14175 | bool ARMTargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, |
14176 | unsigned Index) const { |
14177 | if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT)) |
14178 | return false; |
14179 | |
14180 | return (Index == 0 || Index == ResVT.getVectorNumElements()); |
14181 | } |
14182 | |
14183 | Instruction* ARMTargetLowering::makeDMB(IRBuilder<> &Builder, |
14184 | ARM_MB::MemBOpt Domain) const { |
14185 | Module *M = Builder.GetInsertBlock()->getParent()->getParent(); |
14186 | |
14187 | // First, if the target has no DMB, see what fallback we can use. |
14188 | if (!Subtarget->hasDataBarrier()) { |
14189 | // Some ARMv6 CPUs can support data barriers with an mcr instruction. |
14190 | // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get |
14191 | // here. |
14192 | if (Subtarget->hasV6Ops() && !Subtarget->isThumb()) { |
14193 | Function *MCR = Intrinsic::getDeclaration(M, Intrinsic::arm_mcr); |
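| // These operands encode "mcr p15, 0, <Rt>, c7, c10, 5", the ARMv6 CP15 |
| // data memory barrier operation (the pre-ARMv7 spelling of "dmb"). |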
14194 | Value* args[6] = {Builder.getInt32(15), Builder.getInt32(0), |
14195 | Builder.getInt32(0), Builder.getInt32(7), |
14196 | Builder.getInt32(10), Builder.getInt32(5)}; |
14197 | return Builder.CreateCall(MCR, args); |
14198 | } else { |
14199 | // Instead of using barriers, atomic accesses on these subtargets use |
14200 | // libcalls. |
14201 | llvm_unreachable("makeDMB on a target so old that it has no barriers")::llvm::llvm_unreachable_internal("makeDMB on a target so old that it has no barriers" , "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/ARM/ARMISelLowering.cpp" , 14201); |
14202 | } |
14203 | } else { |
14204 | Function *DMB = Intrinsic::getDeclaration(M, Intrinsic::arm_dmb); |
14205 | // Only a full system barrier exists in the M-class architectures. |
14206 | Domain = Subtarget->isMClass() ? ARM_MB::SY : Domain; |
14207 | Constant *CDomain = Builder.getInt32(Domain); |
14208 | return Builder.CreateCall(DMB, CDomain); |
14209 | } |
14210 | } |
14211 | |
14212 | // Based on http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html |
14213 | Instruction *ARMTargetLowering::emitLeadingFence(IRBuilder<> &Builder, |
14214 | Instruction *Inst, |
14215 | AtomicOrdering Ord) const { |
14216 | switch (Ord) { |
14217 | case AtomicOrdering::NotAtomic: |
14218 | case AtomicOrdering::Unordered: |
14219 | llvm_unreachable("Invalid fence: unordered/non-atomic")::llvm::llvm_unreachable_internal("Invalid fence: unordered/non-atomic" , "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/ARM/ARMISelLowering.cpp" , 14219); |
14220 | case AtomicOrdering::Monotonic: |
14221 | case AtomicOrdering::Acquire: |
14222 | return nullptr; // Nothing to do |
14223 | case AtomicOrdering::SequentiallyConsistent: |
14224 | if (!Inst->hasAtomicStore()) |
14225 | return nullptr; // Nothing to do |
14226 | LLVM_FALLTHROUGH; |
14227 | case AtomicOrdering::Release: |
14228 | case AtomicOrdering::AcquireRelease: |
14229 | // FIXME: add a comment with a link to documentation justifying this. |
14230 | if (Subtarget->preferISHSTBarriers()) |
14231 | return makeDMB(Builder, ARM_MB::ISHST); |
14232 | else |
14233 | return makeDMB(Builder, ARM_MB::ISH); |
14234 | } |
14235 | llvm_unreachable("Unknown fence ordering in emitLeadingFence"); |
14236 | } |
14237 | |
14238 | Instruction *ARMTargetLowering::emitTrailingFence(IRBuilder<> &Builder, |
14239 | Instruction *Inst, |
14240 | AtomicOrdering Ord) const { |
14241 | switch (Ord) { |
14242 | case AtomicOrdering::NotAtomic: |
14243 | case AtomicOrdering::Unordered: |
14244 | llvm_unreachable("Invalid fence: unordered/not-atomic"); |
14245 | case AtomicOrdering::Monotonic: |
14246 | case AtomicOrdering::Release: |
14247 | return nullptr; // Nothing to do |
14248 | case AtomicOrdering::Acquire: |
14249 | case AtomicOrdering::AcquireRelease: |
14250 | case AtomicOrdering::SequentiallyConsistent: |
14251 | return makeDMB(Builder, ARM_MB::ISH); |
14252 | } |
14253 | llvm_unreachable("Unknown fence ordering in emitTrailingFence"); |
14254 | } |
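| |
| // Net effect of the two hooks above (derived from the switches; the |
| // leading barrier may be "dmb ishst" when the subtarget prefers it): |
| //   load  acquire :           ldr ; dmb ish |
| //   store release : dmb ish ; str |
| //   load  seq_cst :           ldr ; dmb ish |
| //   store seq_cst : dmb ish ; str ; dmb ish |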
14255 | |
14256 | // Loads and stores of less than 64 bits are already atomic; ones above |
14257 | // that are doomed anyway, so defer to the default libcall and blame the |
14258 | // OS when things go wrong. Cortex-M doesn't have ldrexd/strexd, though, |
14259 | // so don't emit anything for those. |
14260 | bool ARMTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const { |
14261 | unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits(); |
14262 | return (Size == 64) && !Subtarget->isMClass(); |
14263 | } |
14264 | |
14265 | // Loads and stores of less than 64 bits are already atomic; ones above |
14266 | // that are doomed anyway, so defer to the default libcall and blame the |
14267 | // OS when things go wrong. Cortex-M doesn't have ldrexd/strexd, though, |
14268 | // so don't emit anything for those. |
14269 | // FIXME: ldrd and strd are atomic if the CPU has LPAE (e.g. A15 has that |
14270 | // guarantee, see DDI0406C ARM architecture reference manual, |
14271 | // sections A8.8.72-74 LDRD) |
14272 | TargetLowering::AtomicExpansionKind |
14273 | ARMTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const { |
14274 | unsigned Size = LI->getType()->getPrimitiveSizeInBits(); |
14275 | return ((Size == 64) && !Subtarget->isMClass()) ? AtomicExpansionKind::LLOnly |
14276 | : AtomicExpansionKind::None; |
14277 | } |
14278 | |
14279 | // For the real atomic operations, we have ldrex/strex up to 32 bits, |
14280 | // and up to 64 bits on the non-M profiles |
14281 | TargetLowering::AtomicExpansionKind |
14282 | ARMTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const { |
14283 | unsigned Size = AI->getType()->getPrimitiveSizeInBits(); |
14284 | bool hasAtomicRMW = !Subtarget->isThumb() || Subtarget->hasV8MBaselineOps(); |
14285 | return (Size <= (Subtarget->isMClass() ? 32U : 64U) && hasAtomicRMW) |
14286 | ? AtomicExpansionKind::LLSC |
14287 | : AtomicExpansionKind::None; |
14288 | } |
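| |
| // When LLSC is returned, AtomicExpandPass rewrites the RMW into a loop |
| // built from the emitLoadLinked/emitStoreConditional hooks below; for an |
| // i32 "atomicrmw add" the generated code is roughly: |
| //   retry: |
| //     %old  = ldrex [%ptr] |
| //     %new  = add %old, %val |
| //     %fail = strex %new, [%ptr]   ; 0 on success, 1 on failure |
| //     cbnz %fail, retry |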
14289 | |
14290 | bool ARMTargetLowering::shouldExpandAtomicCmpXchgInIR( |
14291 | AtomicCmpXchgInst *AI) const { |
14292 | // At -O0, fast-regalloc cannot cope with the live vregs necessary to |
14293 | // implement cmpxchg without spilling. If the address being exchanged is also |
14294 | // on the stack and close enough to the spill slot, this can lead to a |
14295 | // situation where the monitor always gets cleared and the atomic operation |
14296 | // can never succeed. So at -O0 we need a late-expanded pseudo-inst instead. |
14297 | bool hasAtomicCmpXchg = |
14298 | !Subtarget->isThumb() || Subtarget->hasV8MBaselineOps(); |
14299 | return getTargetMachine().getOptLevel() != 0 && hasAtomicCmpXchg; |
14300 | } |
14301 | |
14302 | bool ARMTargetLowering::shouldInsertFencesForAtomic( |
14303 | const Instruction *I) const { |
14304 | return InsertFencesForAtomic; |
14305 | } |
14306 | |
14307 | // This has so far only been implemented for MachO. |
14308 | bool ARMTargetLowering::useLoadStackGuardNode() const { |
14309 | return Subtarget->isTargetMachO(); |
14310 | } |
14311 | |
14312 | bool ARMTargetLowering::canCombineStoreAndExtract(Type *VectorTy, Value *Idx, |
14313 | unsigned &Cost) const { |
14314 | // If we do not have NEON, vector types are not natively supported. |
14315 | if (!Subtarget->hasNEON()) |
14316 | return false; |
14317 | |
14318 | // Floating point values and vector values map to the same register file. |
14319 | // Therefore, although we could do a store extract of a vector type, it |
14320 | // is better to leave such values as floats, since we have more freedom |
14321 | // in the addressing mode for those. |
14322 | if (VectorTy->isFPOrFPVectorTy()) |
14323 | return false; |
14324 | |
14325 | // If the index is unknown at compile time, this is very expensive to lower |
14326 | // and it is not possible to combine the store with the extract. |
14327 | if (!isa<ConstantInt>(Idx)) |
14328 | return false; |
14329 | |
14330 | assert(VectorTy->isVectorTy() && "VectorTy is not a vector type"); |
14331 | unsigned BitWidth = cast<VectorType>(VectorTy)->getBitWidth(); |
14332 | // We can do a store + vector extract on any vector that fits perfectly in a D |
14333 | // or Q register. |
14334 | if (BitWidth == 64 || BitWidth == 128) { |
14335 | Cost = 0; |
14336 | return true; |
14337 | } |
14338 | return false; |
14339 | } |
14340 | |
14341 | bool ARMTargetLowering::isCheapToSpeculateCttz() const { |
14342 | return Subtarget->hasV6T2Ops(); |
14343 | } |
14344 | |
14345 | bool ARMTargetLowering::isCheapToSpeculateCtlz() const { |
14346 | return Subtarget->hasV6T2Ops(); |
14347 | } |
14348 | |
14349 | Value *ARMTargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr, |
14350 | AtomicOrdering Ord) const { |
14351 | Module *M = Builder.GetInsertBlock()->getParent()->getParent(); |
14352 | Type *ValTy = cast<PointerType>(Addr->getType())->getElementType(); |
14353 | bool IsAcquire = isAcquireOrStronger(Ord); |
14354 | |
14355 | // Since i64 isn't legal and intrinsics don't get type-lowered, the ldrexd |
14356 | // intrinsic must return {i32, i32} and we have to recombine them into a |
14357 | // single i64 here. |
14358 | if (ValTy->getPrimitiveSizeInBits() == 64) { |
14359 | Intrinsic::ID Int = |
14360 | IsAcquire ? Intrinsic::arm_ldaexd : Intrinsic::arm_ldrexd; |
14361 | Function *Ldrex = Intrinsic::getDeclaration(M, Int); |
14362 | |
14363 | Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext())); |
14364 | Value *LoHi = Builder.CreateCall(Ldrex, Addr, "lohi"); |
14365 | |
14366 | Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo"); |
14367 | Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi"); |
14368 | if (!Subtarget->isLittle()) |
14369 | std::swap(Lo, Hi); |
14370 | Lo = Builder.CreateZExt(Lo, ValTy, "lo64"); |
14371 | Hi = Builder.CreateZExt(Hi, ValTy, "hi64"); |
14372 | return Builder.CreateOr( |
14373 | Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 32)), "val64"); |
14374 | } |
14375 | |
14376 | Type *Tys[] = { Addr->getType() }; |
14377 | Intrinsic::ID Int = IsAcquire ? Intrinsic::arm_ldaex : Intrinsic::arm_ldrex; |
14378 | Function *Ldrex = Intrinsic::getDeclaration(M, Int, Tys); |
14379 | |
14380 | return Builder.CreateTruncOrBitCast( |
14381 | Builder.CreateCall(Ldrex, Addr), |
14382 | ValTy); |
14383 | } |
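| |
| // Sketch of the i64 path above for an acquire load (illustrative IR, not |
| // taken from a test): |
| //   %lohi = call { i32, i32 } @llvm.arm.ldaexd(i8* %addr) |
| //   %lo   = zext (extractvalue %lohi, 0) to i64  ; swapped on big-endian |
| //   %hi   = zext (extractvalue %lohi, 1) to i64 |
| //   %val  = or i64 %lo, (shl i64 %hi, 32) |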
14384 | |
14385 | void ARMTargetLowering::emitAtomicCmpXchgNoStoreLLBalance( |
14386 | IRBuilder<> &Builder) const { |
14387 | if (!Subtarget->hasV7Ops()) |
14388 | return; |
14389 | Module *M = Builder.GetInsertBlock()->getParent()->getParent(); |
14390 | Builder.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::arm_clrex)); |
14391 | } |
14392 | |
14393 | Value *ARMTargetLowering::emitStoreConditional(IRBuilder<> &Builder, Value *Val, |
14394 | Value *Addr, |
14395 | AtomicOrdering Ord) const { |
14396 | Module *M = Builder.GetInsertBlock()->getParent()->getParent(); |
14397 | bool IsRelease = isReleaseOrStronger(Ord); |
14398 | |
14399 | // Since the intrinsics must have legal type, the i64 intrinsics take two |
14400 | // parameters: "i32, i32". We must marshal Val into the appropriate form |
14401 | // before the call. |
14402 | if (Val->getType()->getPrimitiveSizeInBits() == 64) { |
14403 | Intrinsic::ID Int = |
14404 | IsRelease ? Intrinsic::arm_stlexd : Intrinsic::arm_strexd; |
14405 | Function *Strex = Intrinsic::getDeclaration(M, Int); |
14406 | Type *Int32Ty = Type::getInt32Ty(M->getContext()); |
14407 | |
14408 | Value *Lo = Builder.CreateTrunc(Val, Int32Ty, "lo"); |
14409 | Value *Hi = Builder.CreateTrunc(Builder.CreateLShr(Val, 32), Int32Ty, "hi"); |
14410 | if (!Subtarget->isLittle()) |
14411 | std::swap(Lo, Hi); |
14412 | Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext())); |
14413 | return Builder.CreateCall(Strex, {Lo, Hi, Addr}); |
14414 | } |
14415 | |
14416 | Intrinsic::ID Int = IsRelease ? Intrinsic::arm_stlex : Intrinsic::arm_strex; |
14417 | Type *Tys[] = { Addr->getType() }; |
14418 | Function *Strex = Intrinsic::getDeclaration(M, Int, Tys); |
14419 | |
14420 | return Builder.CreateCall( |
14421 | Strex, {Builder.CreateZExtOrBitCast( |
14422 | Val, Strex->getFunctionType()->getParamType(0)), |
14423 | Addr}); |
14424 | } |
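| |
| // The i64 path above mirrors emitLoadLinked: Val is split into i32 halves |
| // (swapped on big-endian) and passed as "strexd lo, hi, [addr]"; like |
| // strex, the call returns 0 on success and 1 if the reservation was lost. |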
14425 | |
14426 | /// A helper function for determining the number of interleaved accesses we |
14427 | /// will generate when lowering accesses of the given type. |
14428 | unsigned |
14429 | ARMTargetLowering::getNumInterleavedAccesses(VectorType *VecTy, |
14430 | const DataLayout &DL) const { |
14431 | return (DL.getTypeSizeInBits(VecTy) + 127) / 128; |
14432 | } |
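| |
| // E.g. a 128-bit <4 x i32> needs one access, a 256-bit <8 x i32> needs |
| // (256 + 127) / 128 = 2, and a 64-bit <8 x i8> still rounds up to one. |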
14433 | |
14434 | bool ARMTargetLowering::isLegalInterleavedAccessType( |
14435 | VectorType *VecTy, const DataLayout &DL) const { |
14436 | |
14437 | unsigned VecSize = DL.getTypeSizeInBits(VecTy); |
14438 | unsigned ElSize = DL.getTypeSizeInBits(VecTy->getElementType()); |
14439 | |
14440 | // Ensure the vector doesn't have f16 elements. Even though we could do an |
14441 | // i16 vldN, we can't hold the f16 vectors and will end up converting via |
14442 | // f32. |
14443 | if (VecTy->getElementType()->isHalfTy()) |
14444 | return false; |
14445 | |
14446 | // Ensure the number of vector elements is greater than 1. |
14447 | if (VecTy->getNumElements() < 2) |
14448 | return false; |
14449 | |
14450 | // Ensure the element type is legal. |
14451 | if (ElSize != 8 && ElSize != 16 && ElSize != 32) |
14452 | return false; |
14453 | |
14454 | // Ensure the total vector size is 64 or a multiple of 128. Types larger than |
14455 | // 128 will be split into multiple interleaved accesses. |
14456 | return VecSize == 64 || VecSize % 128 == 0; |
14457 | } |
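| |
| // By these rules <8 x i8> (64 bits) and <4 x i32> (128 bits) are legal, |
| // <8 x i32> (256 bits) is legal but split into two accesses, while |
| // <4 x half>, <2 x i64> and the 32-bit <2 x i16> are all rejected. |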
14458 | |
14459 | /// \brief Lower an interleaved load into a vldN intrinsic. |
14460 | /// |
14461 | /// E.g. Lower an interleaved load (Factor = 2): |
14462 | /// %wide.vec = load <8 x i32>, <8 x i32>* %ptr, align 4 |
14463 | /// %v0 = shuffle %wide.vec, undef, <0, 2, 4, 6> ; Extract even elements |
14464 | /// %v1 = shuffle %wide.vec, undef, <1, 3, 5, 7> ; Extract odd elements |
14465 | /// |
14466 | /// Into: |
14467 | /// %vld2 = { <4 x i32>, <4 x i32> } call llvm.arm.neon.vld2(%ptr, 4) |
14468 | /// %vec0 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 0 |
14469 | /// %vec1 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 1 |
14470 | bool ARMTargetLowering::lowerInterleavedLoad( |
14471 | LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles, |
14472 | ArrayRef<unsigned> Indices, unsigned Factor) const { |
14473 | assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() && |
14474 | "Invalid interleave factor"); |
14475 | assert(!Shuffles.empty() && "Empty shufflevector input"); |
14476 | assert(Shuffles.size() == Indices.size() && |
14477 | "Unmatched number of shufflevectors and indices"); |
14478 | |
14479 | VectorType *VecTy = Shuffles[0]->getType(); |
14480 | Type *EltTy = VecTy->getVectorElementType(); |
14481 | |
14482 | const DataLayout &DL = LI->getModule()->getDataLayout(); |
14483 | |
14484 | // Skip if we do not have NEON, and skip illegal vector types. We can |
14485 | // "legalize" wide vector types into multiple interleaved accesses as |
14486 | // long as the vector size is a multiple of 128 bits. |
14487 | if (!Subtarget->hasNEON() || !isLegalInterleavedAccessType(VecTy, DL)) |
14488 | return false; |
14489 | |
14490 | unsigned NumLoads = getNumInterleavedAccesses(VecTy, DL); |
14491 | |
14492 | // A pointer vector cannot be the return type of the ldN intrinsics. We |
14493 | // need to load integer vectors first and then convert to pointer vectors. |
14494 | if (EltTy->isPointerTy()) |
14495 | VecTy = |
14496 | VectorType::get(DL.getIntPtrType(EltTy), VecTy->getVectorNumElements()); |
14497 | |
14498 | IRBuilder<> Builder(LI); |
14499 | |
14500 | // The base address of the load. |
14501 | Value *BaseAddr = LI->getPointerOperand(); |
14502 | |
14503 | if (NumLoads > 1) { |
14504 | // If we're going to generate more than one load, reset the sub-vector type |
14505 | // to something legal. |
14506 | VecTy = VectorType::get(VecTy->getVectorElementType(), |
14507 | VecTy->getVectorNumElements() / NumLoads); |
14508 | |
14509 | // We will compute the pointer operand of each load from the original base |
14510 | // address using GEPs. Cast the base address to a pointer to the scalar |
14511 | // element type. |
14512 | BaseAddr = Builder.CreateBitCast( |
14513 | BaseAddr, VecTy->getVectorElementType()->getPointerTo( |
14514 | LI->getPointerAddressSpace())); |
14515 | } |
14516 | |
14517 | assert(isTypeLegal(EVT::getEVT(VecTy)) && "Illegal vldN vector type!"); |
14518 | |
14519 | Type *Int8Ptr = Builder.getInt8PtrTy(LI->getPointerAddressSpace()); |
14520 | Type *Tys[] = {VecTy, Int8Ptr}; |
14521 | static const Intrinsic::ID LoadInts[3] = {Intrinsic::arm_neon_vld2, |
14522 | Intrinsic::arm_neon_vld3, |
14523 | Intrinsic::arm_neon_vld4}; |
14524 | Function *VldnFunc = |
14525 | Intrinsic::getDeclaration(LI->getModule(), LoadInts[Factor - 2], Tys); |
14526 | |
14527 | // Holds sub-vectors extracted from the load intrinsic return values. The |
14528 | // sub-vectors are associated with the shufflevector instructions they will |
14529 | // replace. |
14530 | DenseMap<ShuffleVectorInst *, SmallVector<Value *, 4>> SubVecs; |
14531 | |
14532 | for (unsigned LoadCount = 0; LoadCount < NumLoads; ++LoadCount) { |
14533 | // If we're generating more than one load, compute the base address of |
14534 | // subsequent loads as an offset from the previous. |
14535 | if (LoadCount > 0) |
14536 | BaseAddr = Builder.CreateConstGEP1_32( |
14537 | BaseAddr, VecTy->getVectorNumElements() * Factor); |
14538 | |
14539 | SmallVector<Value *, 2> Ops; |
14540 | Ops.push_back(Builder.CreateBitCast(BaseAddr, Int8Ptr)); |
14541 | Ops.push_back(Builder.getInt32(LI->getAlignment())); |
14542 | |
14543 | CallInst *VldN = Builder.CreateCall(VldnFunc, Ops, "vldN"); |
14544 | |
14545 | // Replace uses of each shufflevector with the corresponding vector loaded |
14546 | // by ldN. |
14547 | for (unsigned i = 0; i < Shuffles.size(); i++) { |
14548 | ShuffleVectorInst *SV = Shuffles[i]; |
14549 | unsigned Index = Indices[i]; |
14550 | |
14551 | Value *SubVec = Builder.CreateExtractValue(VldN, Index); |
14552 | |
14553 | // Convert the integer vector to pointer vector if the element is pointer. |
14554 | if (EltTy->isPointerTy()) |
14555 | SubVec = Builder.CreateIntToPtr( |
14556 | SubVec, VectorType::get(SV->getType()->getVectorElementType(), |
14557 | VecTy->getVectorNumElements())); |
14558 | |
14559 | SubVecs[SV].push_back(SubVec); |
14560 | } |
14561 | } |
14562 | |
14563 | // Replace uses of the shufflevector instructions with the sub-vectors |
14564 | // returned by the load intrinsic. If a shufflevector instruction is |
14565 | // associated with more than one sub-vector, those sub-vectors will be |
14566 | // concatenated into a single wide vector. |
14567 | for (ShuffleVectorInst *SVI : Shuffles) { |
14568 | auto &SubVec = SubVecs[SVI]; |
14569 | auto *WideVec = |
14570 | SubVec.size() > 1 ? concatenateVectors(Builder, SubVec) : SubVec[0]; |
14571 | SVI->replaceAllUsesWith(WideVec); |
14572 | } |
14573 | |
14574 | return true; |
14575 | } |
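| |
| // Example of the NumLoads > 1 path (a sketch, not from a test): a factor-2 |
| // load of <16 x i32> has <8 x i32> shuffle results (256 bits), so VecTy is |
| // reset to <4 x i32> and two @llvm.arm.neon.vld2.v4i32 calls are emitted, |
| // the second at a GEP offset of 4 * 2 = 8 elements; each shufflevector is |
| // then replaced by the concatenation of its two <4 x i32> sub-vectors. |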
14576 | |
14577 | /// \brief Lower an interleaved store into a vstN intrinsic. |
14578 | /// |
14579 | /// E.g. Lower an interleaved store (Factor = 3): |
14580 | /// %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1, |
14581 | /// <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> |
14582 | /// store <12 x i32> %i.vec, <12 x i32>* %ptr, align 4 |
14583 | /// |
14584 | /// Into: |
14585 | /// %sub.v0 = shuffle <8 x i32> %v0, <8 x i32> v1, <0, 1, 2, 3> |
14586 | /// %sub.v1 = shuffle <8 x i32> %v0, <8 x i32> v1, <4, 5, 6, 7> |
14587 | /// %sub.v2 = shuffle <8 x i32> %v0, <8 x i32> v1, <8, 9, 10, 11> |
14588 | /// call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4) |
14589 | /// |
14590 | /// Note that the new shufflevectors will be removed and we'll only generate one |
14591 | /// vst3 instruction in CodeGen. |
14592 | /// |
14593 | /// Example for a more general valid mask (Factor 3). Lower: |
14594 | /// %i.vec = shuffle <32 x i32> %v0, <32 x i32> %v1, |
14595 | /// <4, 32, 16, 5, 33, 17, 6, 34, 18, 7, 35, 19> |
14596 | /// store <12 x i32> %i.vec, <12 x i32>* %ptr |
14597 | /// |
14598 | /// Into: |
14599 | /// %sub.v0 = shuffle <32 x i32> %v0, <32 x i32> v1, <4, 5, 6, 7> |
14600 | /// %sub.v1 = shuffle <32 x i32> %v0, <32 x i32> v1, <32, 33, 34, 35> |
14601 | /// %sub.v2 = shuffle <32 x i32> %v0, <32 x i32> v1, <16, 17, 18, 19> |
14602 | /// call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4) |
14603 | bool ARMTargetLowering::lowerInterleavedStore(StoreInst *SI, |
14604 | ShuffleVectorInst *SVI, |
14605 | unsigned Factor) const { |
14606 | assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() && |
14607 | "Invalid interleave factor"); |
14608 | |
14609 | VectorType *VecTy = SVI->getType(); |
14610 | assert(VecTy->getVectorNumElements() % Factor == 0 && |
14611 | "Invalid interleaved store"); |
14612 | |
14613 | unsigned LaneLen = VecTy->getVectorNumElements() / Factor; |
14614 | Type *EltTy = VecTy->getVectorElementType(); |
14615 | VectorType *SubVecTy = VectorType::get(EltTy, LaneLen); |
14616 | |
14617 | const DataLayout &DL = SI->getModule()->getDataLayout(); |
14618 | |
14619 | // Skip if we do not have NEON, and skip illegal vector types. We can |
14620 | // "legalize" wide vector types into multiple interleaved accesses as |
14621 | // long as the vector size is a multiple of 128 bits. |
14622 | if (!Subtarget->hasNEON() || !isLegalInterleavedAccessType(SubVecTy, DL)) |
14623 | return false; |
14624 | |
14625 | unsigned NumStores = getNumInterleavedAccesses(SubVecTy, DL); |
14626 | |
14627 | Value *Op0 = SVI->getOperand(0); |
14628 | Value *Op1 = SVI->getOperand(1); |
14629 | IRBuilder<> Builder(SI); |
14630 | |
14631 | // StN intrinsics don't support pointer vectors as arguments. Convert pointer |
14632 | // vectors to integer vectors. |
14633 | if (EltTy->isPointerTy()) { |
14634 | Type *IntTy = DL.getIntPtrType(EltTy); |
14635 | |
14636 | // Convert to the corresponding integer vector. |
14637 | Type *IntVecTy = |
14638 | VectorType::get(IntTy, Op0->getType()->getVectorNumElements()); |
14639 | Op0 = Builder.CreatePtrToInt(Op0, IntVecTy); |
14640 | Op1 = Builder.CreatePtrToInt(Op1, IntVecTy); |
14641 | |
14642 | SubVecTy = VectorType::get(IntTy, LaneLen); |
14643 | } |
14644 | |
14645 | // The base address of the store. |
14646 | Value *BaseAddr = SI->getPointerOperand(); |
14647 | |
14648 | if (NumStores > 1) { |
14649 | // If we're going to generate more than one store, reset the lane length |
14650 | // and sub-vector type to something legal. |
14651 | LaneLen /= NumStores; |
14652 | SubVecTy = VectorType::get(SubVecTy->getVectorElementType(), LaneLen); |
14653 | |
14654 | // We will compute the pointer operand of each store from the original base |
14655 | // address using GEPs. Cast the base address to a pointer to the scalar |
14656 | // element type. |
14657 | BaseAddr = Builder.CreateBitCast( |
14658 | BaseAddr, SubVecTy->getVectorElementType()->getPointerTo( |
14659 | SI->getPointerAddressSpace())); |
14660 | } |
14661 | |
14662 | assert(isTypeLegal(EVT::getEVT(SubVecTy)) && "Illegal vstN vector type!"); |
14663 | |
14664 | auto Mask = SVI->getShuffleMask(); |
14665 | |
14666 | Type *Int8Ptr = Builder.getInt8PtrTy(SI->getPointerAddressSpace()); |
14667 | Type *Tys[] = {Int8Ptr, SubVecTy}; |
14668 | static const Intrinsic::ID StoreInts[3] = {Intrinsic::arm_neon_vst2, |
14669 | Intrinsic::arm_neon_vst3, |
14670 | Intrinsic::arm_neon_vst4}; |
14671 | |
14672 | for (unsigned StoreCount = 0; StoreCount < NumStores; ++StoreCount) { |
14673 | // If we're generating more than one store, compute the base address of |
14674 | // subsequent stores as an offset from the previous one. |
14675 | if (StoreCount > 0) |
14676 | BaseAddr = Builder.CreateConstGEP1_32(BaseAddr, LaneLen * Factor); |
14677 | |
14678 | SmallVector<Value *, 6> Ops; |
14679 | Ops.push_back(Builder.CreateBitCast(BaseAddr, Int8Ptr)); |
14680 | |
14681 | Function *VstNFunc = |
14682 | Intrinsic::getDeclaration(SI->getModule(), StoreInts[Factor - 2], Tys); |
14683 | |
14684 | // Split the shufflevector operands into sub vectors for the new vstN call. |
14685 | for (unsigned i = 0; i < Factor; i++) { |
14686 | unsigned IdxI = StoreCount * LaneLen * Factor + i; |
14687 | if (Mask[IdxI] >= 0) { |
14688 | Ops.push_back(Builder.CreateShuffleVector( |
14689 | Op0, Op1, createSequentialMask(Builder, Mask[IdxI], LaneLen, 0))); |
14690 | } else { |
14691 | unsigned StartMask = 0; |
14692 | for (unsigned j = 1; j < LaneLen; j++) { |
14693 | unsigned IdxJ = StoreCount * LaneLen * Factor + j; |
14694 | if (Mask[IdxJ * Factor + IdxI] >= 0) { |
14695 | StartMask = Mask[IdxJ * Factor + IdxI] - IdxJ; |
14696 | break; |
14697 | } |
14698 | } |
14699 | // Note: if all elements in a chunk are undef, StartMask stays 0. |
14700 | // Filling undef gaps with arbitrary elements is fine, since those |
14701 | // elements were being written anyway (as undefs); in the all-undef |
14702 | // case we default to using elements from lane 0. |
14703 | // Note: StartMask cannot be negative; that is checked in |
14704 | // isReInterleaveMask. |
14705 | Ops.push_back(Builder.CreateShuffleVector( |
14706 | Op0, Op1, createSequentialMask(Builder, StartMask, LaneLen, 0))); |
14707 | } |
14708 | } |
14709 | |
14710 | Ops.push_back(Builder.getInt32(SI->getAlignment())); |
14711 | Builder.CreateCall(VstNFunc, Ops); |
14712 | } |
14713 | return true; |
14714 | } |
14715 | |
14716 | enum HABaseType { |
14717 | HA_UNKNOWN = 0, |
14718 | HA_FLOAT, |
14719 | HA_DOUBLE, |
14720 | HA_VECT64, |
14721 | HA_VECT128 |
14722 | }; |
14723 | |
14724 | static bool isHomogeneousAggregate(Type *Ty, HABaseType &Base, |
14725 | uint64_t &Members) { |
14726 | if (auto *ST = dyn_cast<StructType>(Ty)) { |
14727 | for (unsigned i = 0; i < ST->getNumElements(); ++i) { |
14728 | uint64_t SubMembers = 0; |
14729 | if (!isHomogeneousAggregate(ST->getElementType(i), Base, SubMembers)) |
14730 | return false; |
14731 | Members += SubMembers; |
14732 | } |
14733 | } else if (auto *AT = dyn_cast<ArrayType>(Ty)) { |
14734 | uint64_t SubMembers = 0; |
14735 | if (!isHomogeneousAggregate(AT->getElementType(), Base, SubMembers)) |
14736 | return false; |
14737 | Members += SubMembers * AT->getNumElements(); |
14738 | } else if (Ty->isFloatTy()) { |
14739 | if (Base != HA_UNKNOWN && Base != HA_FLOAT) |
14740 | return false; |
14741 | Members = 1; |
14742 | Base = HA_FLOAT; |
14743 | } else if (Ty->isDoubleTy()) { |
14744 | if (Base != HA_UNKNOWN && Base != HA_DOUBLE) |
14745 | return false; |
14746 | Members = 1; |
14747 | Base = HA_DOUBLE; |
14748 | } else if (auto *VT = dyn_cast<VectorType>(Ty)) { |
14749 | Members = 1; |
14750 | switch (Base) { |
14751 | case HA_FLOAT: |
14752 | case HA_DOUBLE: |
14753 | return false; |
14754 | case HA_VECT64: |
14755 | return VT->getBitWidth() == 64; |
14756 | case HA_VECT128: |
14757 | return VT->getBitWidth() == 128; |
14758 | case HA_UNKNOWN: |
14759 | switch (VT->getBitWidth()) { |
14760 | case 64: |
14761 | Base = HA_VECT64; |
14762 | return true; |
14763 | case 128: |
14764 | Base = HA_VECT128; |
14765 | return true; |
14766 | default: |
14767 | return false; |
14768 | } |
14769 | } |
14770 | } |
14771 | |
14772 | return (Members > 0 && Members <= 4); |
14773 | } |
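| |
| // E.g. struct { float x; float y[2]; } gives Base = HA_FLOAT, Members = 3 |
| // and is accepted; mixing float and double members, or exceeding four |
| // members in total, fails the check. |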
14774 | |
14775 | /// \brief Return true if a type is an AAPCS-VFP homogeneous aggregate or one of |
14776 | /// [N x i32] or [N x i64]. This allows front-ends to skip emitting padding when |
14777 | /// passing according to AAPCS rules. |
14778 | bool ARMTargetLowering::functionArgumentNeedsConsecutiveRegisters( |
14779 | Type *Ty, CallingConv::ID CallConv, bool isVarArg) const { |
14780 | if (getEffectiveCallingConv(CallConv, isVarArg) != |
14781 | CallingConv::ARM_AAPCS_VFP) |
14782 | return false; |
14783 | |
14784 | HABaseType Base = HA_UNKNOWN; |
14785 | uint64_t Members = 0; |
14786 | bool IsHA = isHomogeneousAggregate(Ty, Base, Members); |
14787 | DEBUG(dbgs() << "isHA: " << IsHA << " "; Ty->dump()); |
14788 | |
14789 | bool IsIntArray = Ty->isArrayTy() && Ty->getArrayElementType()->isIntegerTy(); |
14790 | return IsHA || IsIntArray; |
14791 | } |
14792 | |
14793 | unsigned ARMTargetLowering::getExceptionPointerRegister( |
14794 | const Constant *PersonalityFn) const { |
14795 | // Platforms which do not use SjLj EH may return values in these registers |
14796 | // via the personality function. |
14797 | return Subtarget->useSjLjEH() ? ARM::NoRegister : ARM::R0; |
14798 | } |
14799 | |
14800 | unsigned ARMTargetLowering::getExceptionSelectorRegister( |
14801 | const Constant *PersonalityFn) const { |
14802 | // Platforms which do not use SjLj EH may return values in these registers |
14803 | // via the personality function. |
14804 | return Subtarget->useSjLjEH() ? ARM::NoRegister : ARM::R1; |
14805 | } |
14806 | |
14807 | void ARMTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const { |
14808 | // Update IsSplitCSR in ARMFunctionInfo. |
14809 | ARMFunctionInfo *AFI = Entry->getParent()->getInfo<ARMFunctionInfo>(); |
14810 | AFI->setIsSplitCSR(true); |
14811 | } |
14812 | |
14813 | void ARMTargetLowering::insertCopiesSplitCSR( |
14814 | MachineBasicBlock *Entry, |
14815 | const SmallVectorImpl<MachineBasicBlock *> &Exits) const { |
14816 | const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo(); |
14817 | const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent()); |
14818 | if (!IStart) |
14819 | return; |
14820 | |
14821 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
14822 | MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo(); |
14823 | MachineBasicBlock::iterator MBBI = Entry->begin(); |
14824 | for (const MCPhysReg *I = IStart; *I; ++I) { |
14825 | const TargetRegisterClass *RC = nullptr; |
14826 | if (ARM::GPRRegClass.contains(*I)) |
14827 | RC = &ARM::GPRRegClass; |
14828 | else if (ARM::DPRRegClass.contains(*I)) |
14829 | RC = &ARM::DPRRegClass; |
14830 | else |
14831 | llvm_unreachable("Unexpected register class in CSRsViaCopy!"); |
14832 | |
14833 | unsigned NewVR = MRI->createVirtualRegister(RC); |
14834 | // Create copy from CSR to a virtual register. |
14835 | // FIXME: this currently does not emit CFI pseudo-instructions, it works |
14836 | // fine for CXX_FAST_TLS since the C++-style TLS access functions should be |
14837 | // nounwind. If we want to generalize this later, we may need to emit |
14838 | // CFI pseudo-instructions. |
14839 | assert(Entry->getParent()->getFunction().hasFnAttribute( |
14840 | Attribute::NoUnwind) && |
14841 | "Function should be nounwind in insertCopiesSplitCSR!"); |
14842 | Entry->addLiveIn(*I); |
14843 | BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR) |
14844 | .addReg(*I); |
14845 | |
14846 | // Insert the copy-back instructions right before the terminator. |
14847 | for (auto *Exit : Exits) |
14848 | BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(), |
14849 | TII->get(TargetOpcode::COPY), *I) |
14850 | .addReg(NewVR); |
14851 | } |
14852 | } |
14853 | |
14854 | void ARMTargetLowering::finalizeLowering(MachineFunction &MF) const { |
14855 | MF.getFrameInfo().computeMaxCallFrameSize(MF); |
14856 | TargetLoweringBase::finalizeLowering(MF); |
14857 | } |