File: build/source/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
Warning: line 17152, column 9: Assigned value is garbage or undefined
1 | //===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file implements the PPCISelLowering class. |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | |
13 | #include "PPCISelLowering.h" |
14 | #include "MCTargetDesc/PPCPredicates.h" |
15 | #include "PPC.h" |
16 | #include "PPCCCState.h" |
17 | #include "PPCCallingConv.h" |
18 | #include "PPCFrameLowering.h" |
19 | #include "PPCInstrInfo.h" |
20 | #include "PPCMachineFunctionInfo.h" |
21 | #include "PPCPerfectShuffle.h" |
22 | #include "PPCRegisterInfo.h" |
23 | #include "PPCSubtarget.h" |
24 | #include "PPCTargetMachine.h" |
25 | #include "llvm/ADT/APFloat.h" |
26 | #include "llvm/ADT/APInt.h" |
27 | #include "llvm/ADT/ArrayRef.h" |
28 | #include "llvm/ADT/DenseMap.h" |
29 | #include "llvm/ADT/STLExtras.h" |
30 | #include "llvm/ADT/SmallPtrSet.h" |
31 | #include "llvm/ADT/SmallSet.h" |
32 | #include "llvm/ADT/SmallVector.h" |
33 | #include "llvm/ADT/Statistic.h" |
34 | #include "llvm/ADT/StringRef.h" |
35 | #include "llvm/ADT/StringSwitch.h" |
36 | #include "llvm/CodeGen/CallingConvLower.h" |
37 | #include "llvm/CodeGen/ISDOpcodes.h" |
38 | #include "llvm/CodeGen/MachineBasicBlock.h" |
39 | #include "llvm/CodeGen/MachineFrameInfo.h" |
40 | #include "llvm/CodeGen/MachineFunction.h" |
41 | #include "llvm/CodeGen/MachineInstr.h" |
42 | #include "llvm/CodeGen/MachineInstrBuilder.h" |
43 | #include "llvm/CodeGen/MachineJumpTableInfo.h" |
44 | #include "llvm/CodeGen/MachineLoopInfo.h" |
45 | #include "llvm/CodeGen/MachineMemOperand.h" |
46 | #include "llvm/CodeGen/MachineModuleInfo.h" |
47 | #include "llvm/CodeGen/MachineOperand.h" |
48 | #include "llvm/CodeGen/MachineRegisterInfo.h" |
49 | #include "llvm/CodeGen/MachineValueType.h" |
50 | #include "llvm/CodeGen/RuntimeLibcalls.h" |
51 | #include "llvm/CodeGen/SelectionDAG.h" |
52 | #include "llvm/CodeGen/SelectionDAGNodes.h" |
53 | #include "llvm/CodeGen/TargetInstrInfo.h" |
54 | #include "llvm/CodeGen/TargetLowering.h" |
55 | #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" |
56 | #include "llvm/CodeGen/TargetRegisterInfo.h" |
57 | #include "llvm/CodeGen/ValueTypes.h" |
58 | #include "llvm/IR/CallingConv.h" |
59 | #include "llvm/IR/Constant.h" |
60 | #include "llvm/IR/Constants.h" |
61 | #include "llvm/IR/DataLayout.h" |
62 | #include "llvm/IR/DebugLoc.h" |
63 | #include "llvm/IR/DerivedTypes.h" |
64 | #include "llvm/IR/Function.h" |
65 | #include "llvm/IR/GlobalValue.h" |
66 | #include "llvm/IR/IRBuilder.h" |
67 | #include "llvm/IR/Instructions.h" |
68 | #include "llvm/IR/Intrinsics.h" |
69 | #include "llvm/IR/IntrinsicsPowerPC.h" |
70 | #include "llvm/IR/Module.h" |
71 | #include "llvm/IR/Type.h" |
72 | #include "llvm/IR/Use.h" |
73 | #include "llvm/IR/Value.h" |
74 | #include "llvm/MC/MCContext.h" |
75 | #include "llvm/MC/MCExpr.h" |
76 | #include "llvm/MC/MCRegisterInfo.h" |
77 | #include "llvm/MC/MCSectionXCOFF.h" |
78 | #include "llvm/MC/MCSymbolXCOFF.h" |
79 | #include "llvm/Support/AtomicOrdering.h" |
80 | #include "llvm/Support/BranchProbability.h" |
81 | #include "llvm/Support/Casting.h" |
82 | #include "llvm/Support/CodeGen.h" |
83 | #include "llvm/Support/CommandLine.h" |
84 | #include "llvm/Support/Compiler.h" |
85 | #include "llvm/Support/Debug.h" |
86 | #include "llvm/Support/ErrorHandling.h" |
87 | #include "llvm/Support/Format.h" |
88 | #include "llvm/Support/KnownBits.h" |
89 | #include "llvm/Support/MathExtras.h" |
90 | #include "llvm/Support/raw_ostream.h" |
91 | #include "llvm/Target/TargetMachine.h" |
92 | #include "llvm/Target/TargetOptions.h" |
93 | #include <algorithm> |
94 | #include <cassert> |
95 | #include <cstdint> |
96 | #include <iterator> |
97 | #include <list> |
98 | #include <optional> |
99 | #include <utility> |
100 | #include <vector> |
101 | |
102 | using namespace llvm; |
103 | |
104 | #define DEBUG_TYPE "ppc-lowering" |
105 | |
106 | static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc", |
107 | cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden); |
108 | |
109 | static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref", |
110 | cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden); |
111 | |
112 | static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned", |
113 | cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden); |
114 | |
115 | static cl::opt<bool> DisableSCO("disable-ppc-sco", |
116 | cl::desc("disable sibling call optimization on ppc"), cl::Hidden); |
117 | |
118 | static cl::opt<bool> DisableInnermostLoopAlign32("disable-ppc-innermost-loop-align32", |
119 | cl::desc("don't always align innermost loop to 32 bytes on ppc"), cl::Hidden); |
120 | |
121 | static cl::opt<bool> UseAbsoluteJumpTables("ppc-use-absolute-jumptables", |
122 | cl::desc("use absolute jump tables on ppc"), cl::Hidden); |
123 | |
124 | static cl::opt<bool> EnableQuadwordAtomics( |
125 | "ppc-quadword-atomics", |
126 | cl::desc("enable quadword lock-free atomic operations"), cl::init(false), |
127 | cl::Hidden); |
128 | |
129 | static cl::opt<bool> |
130 | DisablePerfectShuffle("ppc-disable-perfect-shuffle", |
131 | cl::desc("disable vector permute decomposition"), |
132 | cl::init(true), cl::Hidden); |
133 | |
134 | cl::opt<bool> DisableAutoPairedVecSt( |
135 | "disable-auto-paired-vec-st", |
136 | cl::desc("disable automatically generated 32byte paired vector stores"), |
137 | cl::init(true), cl::Hidden); |
138 | |
139 | STATISTIC(NumTailCalls, "Number of tail calls"); |
140 | STATISTIC(NumSiblingCalls, "Number of sibling calls"); |
141 | STATISTIC(ShufflesHandledWithVPERM, |
142 |           "Number of shuffles lowered to a VPERM or XXPERM"); |
143 | STATISTIC(NumDynamicAllocaProbed, "Number of dynamic stack allocation probed"); |
144 | |
145 | static bool isNByteElemShuffleMask(ShuffleVectorSDNode *, unsigned, int); |
146 | |
147 | static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl); |
148 | |
149 | static const char AIXSSPCanaryWordName[] = "__ssp_canary_word"; |
150 | |
151 | // FIXME: Remove this once the bug has been fixed! |
152 | extern cl::opt<bool> ANDIGlueBug; |
153 | |
154 | PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM, |
155 | const PPCSubtarget &STI) |
156 | : TargetLowering(TM), Subtarget(STI) { |
157 | // Initialize the map that relates the PPC addressing modes to the computed |
158 | // flags of a load/store instruction. The map is used to determine the optimal |
159 | // addressing mode when selecting loads and stores. |
160 | initializeAddrModeMap(); |
161 | // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all |
162 | // arguments are at least 4/8 bytes aligned. |
163 | bool isPPC64 = Subtarget.isPPC64(); |
164 | setMinStackArgumentAlignment(isPPC64 ? Align(8) : Align(4)); |
165 | |
166 | // Set up the register classes. |
167 | addRegisterClass(MVT::i32, &PPC::GPRCRegClass); |
168 | if (!useSoftFloat()) { |
169 | if (hasSPE()) { |
170 | addRegisterClass(MVT::f32, &PPC::GPRCRegClass); |
171 | // EFPU2 APU only supports f32 |
172 | if (!Subtarget.hasEFPU2()) |
173 | addRegisterClass(MVT::f64, &PPC::SPERCRegClass); |
174 | } else { |
175 | addRegisterClass(MVT::f32, &PPC::F4RCRegClass); |
176 | addRegisterClass(MVT::f64, &PPC::F8RCRegClass); |
177 | } |
178 | } |
179 | |
180 | // Match BITREVERSE to customized fast code sequence in the td file. |
181 | setOperationAction(ISD::BITREVERSE, MVT::i32, Legal); |
182 | setOperationAction(ISD::BITREVERSE, MVT::i64, Legal); |
183 | |
184 | // Sub-word ATOMIC_CMP_SWAP needs to ensure that the input is zero-extended. |
185 | setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom); |
186 | |
187 | // Custom lower inline assembly to check for special registers. |
188 | setOperationAction(ISD::INLINEASM, MVT::Other, Custom); |
189 | setOperationAction(ISD::INLINEASM_BR, MVT::Other, Custom); |
190 | |
191 | // PowerPC has an i16 but no i8 (or i1) SEXTLOAD. |
192 | for (MVT VT : MVT::integer_valuetypes()) { |
193 | setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote); |
194 | setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand); |
195 | } |
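// (Illustrative ISA note, stated as an assumption: lha is a sign-extending
// halfword load, but the byte load lbz zero-extends, so an i8 sextload must
// be expanded into a zero-extending load plus an explicit sign-extend such
// as extsb.)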
196 | |
197 | if (Subtarget.isISA3_0()) { |
198 | setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Legal); |
199 | setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Legal); |
200 | setTruncStoreAction(MVT::f64, MVT::f16, Legal); |
201 | setTruncStoreAction(MVT::f32, MVT::f16, Legal); |
202 | } else { |
203 | // No extending loads from f16 or HW conversions back and forth. |
204 | setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand); |
205 | setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand); |
206 | setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand); |
207 | setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand); |
208 | setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand); |
209 | setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand); |
210 | setTruncStoreAction(MVT::f64, MVT::f16, Expand); |
211 | setTruncStoreAction(MVT::f32, MVT::f16, Expand); |
212 | } |
213 | |
214 | setTruncStoreAction(MVT::f64, MVT::f32, Expand); |
215 | |
216 | // PowerPC has pre-increment loads and stores. |
217 | setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal); |
218 | setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal); |
219 | setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal); |
220 | setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal); |
221 | setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal); |
222 | setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal); |
223 | setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal); |
224 | setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal); |
225 | setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal); |
226 | setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal); |
227 | if (!Subtarget.hasSPE()) { |
228 | setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal); |
229 | setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal); |
230 | setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal); |
231 | setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal); |
232 | } |
233 | |
234 | // PowerPC uses ADDC/ADDE/SUBC/SUBE to propagate carry. |
235 | const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 }; |
236 | for (MVT VT : ScalarIntVTs) { |
237 | setOperationAction(ISD::ADDC, VT, Legal); |
238 | setOperationAction(ISD::ADDE, VT, Legal); |
239 | setOperationAction(ISD::SUBC, VT, Legal); |
240 | setOperationAction(ISD::SUBE, VT, Legal); |
241 | } |
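// Illustrative sketch (assumed, not taken from this file): on PPC32 an i64
// add is type-legalized into this carry chain:
//   Lo = ADDC(A.lo, B.lo);  // addc: produces the carry
//   Hi = ADDE(A.hi, B.hi);  // adde: consumes the carry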
242 | |
243 | if (Subtarget.useCRBits()) { |
244 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); |
245 | |
246 | if (isPPC64 || Subtarget.hasFPCVT()) { |
247 | setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i1, Promote); |
248 | AddPromotedToType(ISD::STRICT_SINT_TO_FP, MVT::i1, |
249 | isPPC64 ? MVT::i64 : MVT::i32); |
250 | setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i1, Promote); |
251 | AddPromotedToType(ISD::STRICT_UINT_TO_FP, MVT::i1, |
252 | isPPC64 ? MVT::i64 : MVT::i32); |
253 | |
254 | setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote); |
255 | AddPromotedToType (ISD::SINT_TO_FP, MVT::i1, |
256 | isPPC64 ? MVT::i64 : MVT::i32); |
257 | setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote); |
258 | AddPromotedToType(ISD::UINT_TO_FP, MVT::i1, |
259 | isPPC64 ? MVT::i64 : MVT::i32); |
260 | |
261 | setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i1, Promote); |
262 | AddPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::i1, |
263 | isPPC64 ? MVT::i64 : MVT::i32); |
264 | setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i1, Promote); |
265 | AddPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::i1, |
266 | isPPC64 ? MVT::i64 : MVT::i32); |
267 | |
268 | setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote); |
269 | AddPromotedToType(ISD::FP_TO_SINT, MVT::i1, |
270 | isPPC64 ? MVT::i64 : MVT::i32); |
271 | setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote); |
272 | AddPromotedToType(ISD::FP_TO_UINT, MVT::i1, |
273 | isPPC64 ? MVT::i64 : MVT::i32); |
274 | } else { |
275 | setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i1, Custom); |
276 | setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i1, Custom); |
277 | setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom); |
278 | setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom); |
279 | } |
280 | |
281 | // PowerPC does not support direct load/store of condition registers. |
282 | setOperationAction(ISD::LOAD, MVT::i1, Custom); |
283 | setOperationAction(ISD::STORE, MVT::i1, Custom); |
284 | |
285 | // FIXME: Remove this once the ANDI glue bug is fixed: |
286 | if (ANDIGlueBug) |
287 | setOperationAction(ISD::TRUNCATE, MVT::i1, Custom); |
288 | |
289 | for (MVT VT : MVT::integer_valuetypes()) { |
290 | setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote); |
291 | setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote); |
292 | setTruncStoreAction(VT, MVT::i1, Expand); |
293 | } |
294 | |
295 | addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass); |
296 | } |
297 | |
298 | // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on |
299 | // PPC (the libcall is not available). |
300 | setOperationAction(ISD::FP_TO_SINT, MVT::ppcf128, Custom); |
301 | setOperationAction(ISD::FP_TO_UINT, MVT::ppcf128, Custom); |
302 | setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::ppcf128, Custom); |
303 | setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::ppcf128, Custom); |
304 | |
305 | // We do not currently implement these libm ops for PowerPC. |
306 | setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand); |
307 | setOperationAction(ISD::FCEIL, MVT::ppcf128, Expand); |
308 | setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand); |
309 | setOperationAction(ISD::FRINT, MVT::ppcf128, Expand); |
310 | setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand); |
311 | setOperationAction(ISD::FREM, MVT::ppcf128, Expand); |
312 | |
313 | // PowerPC has no SREM/UREM instructions unless we are on P9 |
314 | // On P9 we may use a hardware instruction to compute the remainder. |
315 | // When the result of both the remainder and the division is required it is |
316 | // more efficient to compute the remainder from the result of the division |
317 | // rather than use the remainder instruction. The instructions are legalized |
318 | // directly because the DivRemPairsPass performs the transformation at the IR |
319 | // level. |
320 | if (Subtarget.isISA3_0()) { |
321 | setOperationAction(ISD::SREM, MVT::i32, Legal); |
322 | setOperationAction(ISD::UREM, MVT::i32, Legal); |
323 | setOperationAction(ISD::SREM, MVT::i64, Legal); |
324 | setOperationAction(ISD::UREM, MVT::i64, Legal); |
325 | } else { |
326 | setOperationAction(ISD::SREM, MVT::i32, Expand); |
327 | setOperationAction(ISD::UREM, MVT::i32, Expand); |
328 | setOperationAction(ISD::SREM, MVT::i64, Expand); |
329 | setOperationAction(ISD::UREM, MVT::i64, Expand); |
330 | } |
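// For reference, a minimal sketch of the IR-level rewrite DivRemPairs
// performs when both results are needed (assumed shape, not the pass itself):
//   q = a / b;
//   r = a - q * b;  // recompute the remainder from the quotient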
331 | |
332 | // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM. |
333 | setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand); |
334 | setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand); |
335 | setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand); |
336 | setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand); |
337 | setOperationAction(ISD::UDIVREM, MVT::i32, Expand); |
338 | setOperationAction(ISD::SDIVREM, MVT::i32, Expand); |
339 | setOperationAction(ISD::UDIVREM, MVT::i64, Expand); |
340 | setOperationAction(ISD::SDIVREM, MVT::i64, Expand); |
341 | |
342 | // Handle constrained floating-point operations on scalars. |
343 | // TODO: Handle SPE-specific operations. |
344 | setOperationAction(ISD::STRICT_FADD, MVT::f32, Legal); |
345 | setOperationAction(ISD::STRICT_FSUB, MVT::f32, Legal); |
346 | setOperationAction(ISD::STRICT_FMUL, MVT::f32, Legal); |
347 | setOperationAction(ISD::STRICT_FDIV, MVT::f32, Legal); |
348 | setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal); |
349 | |
350 | setOperationAction(ISD::STRICT_FADD, MVT::f64, Legal); |
351 | setOperationAction(ISD::STRICT_FSUB, MVT::f64, Legal); |
352 | setOperationAction(ISD::STRICT_FMUL, MVT::f64, Legal); |
353 | setOperationAction(ISD::STRICT_FDIV, MVT::f64, Legal); |
354 | |
355 | if (!Subtarget.hasSPE()) { |
356 | setOperationAction(ISD::STRICT_FMA, MVT::f32, Legal); |
357 | setOperationAction(ISD::STRICT_FMA, MVT::f64, Legal); |
358 | } |
359 | |
360 | if (Subtarget.hasVSX()) { |
361 | setOperationAction(ISD::STRICT_FRINT, MVT::f32, Legal); |
362 | setOperationAction(ISD::STRICT_FRINT, MVT::f64, Legal); |
363 | } |
364 | |
365 | if (Subtarget.hasFSQRT()) { |
366 | setOperationAction(ISD::STRICT_FSQRT, MVT::f32, Legal); |
367 | setOperationAction(ISD::STRICT_FSQRT, MVT::f64, Legal); |
368 | } |
369 | |
370 | if (Subtarget.hasFPRND()) { |
371 | setOperationAction(ISD::STRICT_FFLOOR, MVT::f32, Legal); |
372 | setOperationAction(ISD::STRICT_FCEIL, MVT::f32, Legal); |
373 | setOperationAction(ISD::STRICT_FTRUNC, MVT::f32, Legal); |
374 | setOperationAction(ISD::STRICT_FROUND, MVT::f32, Legal); |
375 | |
376 | setOperationAction(ISD::STRICT_FFLOOR, MVT::f64, Legal); |
377 | setOperationAction(ISD::STRICT_FCEIL, MVT::f64, Legal); |
378 | setOperationAction(ISD::STRICT_FTRUNC, MVT::f64, Legal); |
379 | setOperationAction(ISD::STRICT_FROUND, MVT::f64, Legal); |
380 | } |
381 | |
382 | // We don't support sin/cos/sqrt/fmod/pow |
383 | setOperationAction(ISD::FSIN , MVT::f64, Expand); |
384 | setOperationAction(ISD::FCOS , MVT::f64, Expand); |
385 | setOperationAction(ISD::FSINCOS, MVT::f64, Expand); |
386 | setOperationAction(ISD::FREM , MVT::f64, Expand); |
387 | setOperationAction(ISD::FPOW , MVT::f64, Expand); |
388 | setOperationAction(ISD::FSIN , MVT::f32, Expand); |
389 | setOperationAction(ISD::FCOS , MVT::f32, Expand); |
390 | setOperationAction(ISD::FSINCOS, MVT::f32, Expand); |
391 | setOperationAction(ISD::FREM , MVT::f32, Expand); |
392 | setOperationAction(ISD::FPOW , MVT::f32, Expand); |
393 | |
394 | // MASS transformation for LLVM intrinsics with the replicating fast-math |
395 | // flag, to be consistent with the PPCGenScalarMASSEntries pass. |
396 | if (TM.getOptLevel() == CodeGenOpt::Aggressive) { |
397 | setOperationAction(ISD::FSIN , MVT::f64, Custom); |
398 | setOperationAction(ISD::FCOS , MVT::f64, Custom); |
399 | setOperationAction(ISD::FPOW , MVT::f64, Custom); |
400 | setOperationAction(ISD::FLOG, MVT::f64, Custom); |
401 | setOperationAction(ISD::FLOG10, MVT::f64, Custom); |
402 | setOperationAction(ISD::FEXP, MVT::f64, Custom); |
403 | setOperationAction(ISD::FSIN , MVT::f32, Custom); |
404 | setOperationAction(ISD::FCOS , MVT::f32, Custom); |
405 | setOperationAction(ISD::FPOW , MVT::f32, Custom); |
406 | setOperationAction(ISD::FLOG, MVT::f32, Custom); |
407 | setOperationAction(ISD::FLOG10, MVT::f32, Custom); |
408 | setOperationAction(ISD::FEXP, MVT::f32, Custom); |
409 | } |
410 | |
411 | if (Subtarget.hasSPE()) { |
412 | setOperationAction(ISD::FMA , MVT::f64, Expand); |
413 | setOperationAction(ISD::FMA , MVT::f32, Expand); |
414 | } else { |
415 | setOperationAction(ISD::FMA , MVT::f64, Legal); |
416 | setOperationAction(ISD::FMA , MVT::f32, Legal); |
417 | } |
418 | |
419 | if (Subtarget.hasSPE()) |
420 | setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand); |
421 | |
422 | setOperationAction(ISD::GET_ROUNDING, MVT::i32, Custom); |
423 | |
424 | // If we're enabling GP optimizations, use hardware square root |
425 | if (!Subtarget.hasFSQRT() && |
426 | !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() && |
427 | Subtarget.hasFRE())) |
428 | setOperationAction(ISD::FSQRT, MVT::f64, Expand); |
429 | |
430 | if (!Subtarget.hasFSQRT() && |
431 | !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() && |
432 | Subtarget.hasFRES())) |
433 | setOperationAction(ISD::FSQRT, MVT::f32, Expand); |
434 | |
435 | if (Subtarget.hasFCPSGN()) { |
436 | setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal); |
437 | setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal); |
438 | } else { |
439 | setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); |
440 | setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand); |
441 | } |
442 | |
443 | if (Subtarget.hasFPRND()) { |
444 | setOperationAction(ISD::FFLOOR, MVT::f64, Legal); |
445 | setOperationAction(ISD::FCEIL, MVT::f64, Legal); |
446 | setOperationAction(ISD::FTRUNC, MVT::f64, Legal); |
447 | setOperationAction(ISD::FROUND, MVT::f64, Legal); |
448 | |
449 | setOperationAction(ISD::FFLOOR, MVT::f32, Legal); |
450 | setOperationAction(ISD::FCEIL, MVT::f32, Legal); |
451 | setOperationAction(ISD::FTRUNC, MVT::f32, Legal); |
452 | setOperationAction(ISD::FROUND, MVT::f32, Legal); |
453 | } |
454 | |
455 | // Prior to P10, PowerPC does not have BSWAP, but we can use the vector BSWAP |
456 | // instruction xxbrd to speed up scalar BSWAP64. |
457 | if (Subtarget.isISA3_1()) { |
458 | setOperationAction(ISD::BSWAP, MVT::i32, Legal); |
459 | setOperationAction(ISD::BSWAP, MVT::i64, Legal); |
460 | } else { |
461 | setOperationAction(ISD::BSWAP, MVT::i32, Expand); |
462 | setOperationAction( |
463 | ISD::BSWAP, MVT::i64, |
464 | (Subtarget.hasP9Vector() && Subtarget.isPPC64()) ? Custom : Expand); |
465 | } |
466 | |
467 | // CTPOP and CTTZ were introduced in P8 and P9, respectively. |
468 | if (Subtarget.isISA3_0()) { |
469 | setOperationAction(ISD::CTTZ , MVT::i32 , Legal); |
470 | setOperationAction(ISD::CTTZ , MVT::i64 , Legal); |
471 | } else { |
472 | setOperationAction(ISD::CTTZ , MVT::i32 , Expand); |
473 | setOperationAction(ISD::CTTZ , MVT::i64 , Expand); |
474 | } |
475 | |
476 | if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) { |
477 | setOperationAction(ISD::CTPOP, MVT::i32 , Legal); |
478 | setOperationAction(ISD::CTPOP, MVT::i64 , Legal); |
479 | } else { |
480 | setOperationAction(ISD::CTPOP, MVT::i32 , Expand); |
481 | setOperationAction(ISD::CTPOP, MVT::i64 , Expand); |
482 | } |
483 | |
484 | // PowerPC does not have ROTR |
485 | setOperationAction(ISD::ROTR, MVT::i32 , Expand); |
486 | setOperationAction(ISD::ROTR, MVT::i64 , Expand); |
487 | |
488 | if (!Subtarget.useCRBits()) { |
489 | // PowerPC does not have Select |
490 | setOperationAction(ISD::SELECT, MVT::i32, Expand); |
491 | setOperationAction(ISD::SELECT, MVT::i64, Expand); |
492 | setOperationAction(ISD::SELECT, MVT::f32, Expand); |
493 | setOperationAction(ISD::SELECT, MVT::f64, Expand); |
494 | } |
495 | |
496 | // PowerPC wants to turn select_cc of FP into fsel when possible. |
497 | setOperationAction(ISD::SELECT_CC, MVT::f32, Custom); |
498 | setOperationAction(ISD::SELECT_CC, MVT::f64, Custom); |
499 | |
500 | // PowerPC wants to optimize integer setcc a bit |
501 | if (!Subtarget.useCRBits()) |
502 | setOperationAction(ISD::SETCC, MVT::i32, Custom); |
503 | |
504 | if (Subtarget.hasFPU()) { |
505 | setOperationAction(ISD::STRICT_FSETCC, MVT::f32, Legal); |
506 | setOperationAction(ISD::STRICT_FSETCC, MVT::f64, Legal); |
507 | setOperationAction(ISD::STRICT_FSETCC, MVT::f128, Legal); |
508 | |
509 | setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Legal); |
510 | setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Legal); |
511 | setOperationAction(ISD::STRICT_FSETCCS, MVT::f128, Legal); |
512 | } |
513 | |
514 | // PowerPC does not have BRCOND which requires SetCC |
515 | if (!Subtarget.useCRBits()) |
516 | setOperationAction(ISD::BRCOND, MVT::Other, Expand); |
517 | |
518 | setOperationAction(ISD::BR_JT, MVT::Other, Expand); |
519 | |
520 | if (Subtarget.hasSPE()) { |
521 | // SPE has built-in conversions |
522 | setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Legal); |
523 | setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Legal); |
524 | setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Legal); |
525 | setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal); |
526 | setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal); |
527 | setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal); |
528 | |
529 | // SPE supports signaling compare of f32/f64. |
530 | setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Legal); |
531 | setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Legal); |
532 | } else { |
533 | // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores. |
534 | setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom); |
535 | setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom); |
536 | |
537 | // PowerPC does not have [U|S]INT_TO_FP |
538 | setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Expand); |
539 | setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Expand); |
540 | setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand); |
541 | setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand); |
542 | } |
543 | |
544 | if (Subtarget.hasDirectMove() && isPPC64) { |
545 | setOperationAction(ISD::BITCAST, MVT::f32, Legal); |
546 | setOperationAction(ISD::BITCAST, MVT::i32, Legal); |
547 | setOperationAction(ISD::BITCAST, MVT::i64, Legal); |
548 | setOperationAction(ISD::BITCAST, MVT::f64, Legal); |
549 | if (TM.Options.UnsafeFPMath) { |
550 | setOperationAction(ISD::LRINT, MVT::f64, Legal); |
551 | setOperationAction(ISD::LRINT, MVT::f32, Legal); |
552 | setOperationAction(ISD::LLRINT, MVT::f64, Legal); |
553 | setOperationAction(ISD::LLRINT, MVT::f32, Legal); |
554 | setOperationAction(ISD::LROUND, MVT::f64, Legal); |
555 | setOperationAction(ISD::LROUND, MVT::f32, Legal); |
556 | setOperationAction(ISD::LLROUND, MVT::f64, Legal); |
557 | setOperationAction(ISD::LLROUND, MVT::f32, Legal); |
558 | } |
559 | } else { |
560 | setOperationAction(ISD::BITCAST, MVT::f32, Expand); |
561 | setOperationAction(ISD::BITCAST, MVT::i32, Expand); |
562 | setOperationAction(ISD::BITCAST, MVT::i64, Expand); |
563 | setOperationAction(ISD::BITCAST, MVT::f64, Expand); |
564 | } |
565 | |
566 | // We cannot sextinreg(i1). Expand to shifts. |
567 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); |
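// The shift-based expansion is the standard one; e.g. for an i1 held in an
// i32 register (sketch):
//   sext_inreg(x, i1)  ==>  (x << 31) >>s 31   // arithmetic shift right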
568 | |
569 | // NOTE: the EH_SJLJ_SETJMP/_LONGJMP supported here are NOT intended to |
570 | // support SjLj exception handling; they are a light-weight setjmp/longjmp |
571 | // replacement used for continuations, user-level threading, etc. As a |
572 | // result, no other SjLj exception interfaces are implemented, so please |
573 | // don't build your own exception handling on top of them. |
574 | // LLVM/Clang supports zero-cost DWARF exception handling. |
575 | setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom); |
576 | setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom); |
577 | |
578 | // We want to legalize GlobalAddress and ConstantPool nodes into the |
579 | // appropriate instructions to materialize the address. |
580 | setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); |
581 | setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom); |
582 | setOperationAction(ISD::BlockAddress, MVT::i32, Custom); |
583 | setOperationAction(ISD::ConstantPool, MVT::i32, Custom); |
584 | setOperationAction(ISD::JumpTable, MVT::i32, Custom); |
585 | setOperationAction(ISD::GlobalAddress, MVT::i64, Custom); |
586 | setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom); |
587 | setOperationAction(ISD::BlockAddress, MVT::i64, Custom); |
588 | setOperationAction(ISD::ConstantPool, MVT::i64, Custom); |
589 | setOperationAction(ISD::JumpTable, MVT::i64, Custom); |
590 | |
591 | // TRAP is legal. |
592 | setOperationAction(ISD::TRAP, MVT::Other, Legal); |
593 | |
594 | // TRAMPOLINE is custom lowered. |
595 | setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom); |
596 | setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom); |
597 | |
598 | // VASTART needs to be custom lowered to use the VarArgsFrameIndex |
599 | setOperationAction(ISD::VASTART , MVT::Other, Custom); |
600 | |
601 | if (Subtarget.is64BitELFABI()) { |
602 | // VAARG always uses double-word chunks, so promote anything smaller. |
603 | setOperationAction(ISD::VAARG, MVT::i1, Promote); |
604 | AddPromotedToType(ISD::VAARG, MVT::i1, MVT::i64); |
605 | setOperationAction(ISD::VAARG, MVT::i8, Promote); |
606 | AddPromotedToType(ISD::VAARG, MVT::i8, MVT::i64); |
607 | setOperationAction(ISD::VAARG, MVT::i16, Promote); |
608 | AddPromotedToType(ISD::VAARG, MVT::i16, MVT::i64); |
609 | setOperationAction(ISD::VAARG, MVT::i32, Promote); |
610 | AddPromotedToType(ISD::VAARG, MVT::i32, MVT::i64); |
611 | setOperationAction(ISD::VAARG, MVT::Other, Expand); |
612 | } else if (Subtarget.is32BitELFABI()) { |
613 | // VAARG is custom lowered with the 32-bit SVR4 ABI. |
614 | setOperationAction(ISD::VAARG, MVT::Other, Custom); |
615 | setOperationAction(ISD::VAARG, MVT::i64, Custom); |
616 | } else |
617 | setOperationAction(ISD::VAARG, MVT::Other, Expand); |
618 | |
619 | // VACOPY is custom lowered with the 32-bit SVR4 ABI. |
620 | if (Subtarget.is32BitELFABI()) |
621 | setOperationAction(ISD::VACOPY , MVT::Other, Custom); |
622 | else |
623 | setOperationAction(ISD::VACOPY , MVT::Other, Expand); |
624 | |
625 | // Use the default implementation. |
626 | setOperationAction(ISD::VAEND , MVT::Other, Expand); |
627 | setOperationAction(ISD::STACKSAVE , MVT::Other, Expand); |
628 | setOperationAction(ISD::STACKRESTORE , MVT::Other, Custom); |
629 | setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom); |
630 | setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64 , Custom); |
631 | setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom); |
632 | setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom); |
633 | setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom); |
634 | setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom); |
635 | |
636 | // We want to custom lower some of our intrinsics. |
637 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); |
638 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f64, Custom); |
639 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::ppcf128, Custom); |
640 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom); |
641 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2f64, Custom); |
642 | |
643 | // To handle counter-based loop conditions. |
644 | setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom); |
645 | |
646 | setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom); |
647 | setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom); |
648 | setOperationAction(ISD::INTRINSIC_VOID, MVT::i32, Custom); |
649 | setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom); |
650 | |
651 | // Comparisons that require checking two conditions. |
652 | if (Subtarget.hasSPE()) { |
653 | setCondCodeAction(ISD::SETO, MVT::f32, Expand); |
654 | setCondCodeAction(ISD::SETO, MVT::f64, Expand); |
655 | setCondCodeAction(ISD::SETUO, MVT::f32, Expand); |
656 | setCondCodeAction(ISD::SETUO, MVT::f64, Expand); |
657 | } |
658 | setCondCodeAction(ISD::SETULT, MVT::f32, Expand); |
659 | setCondCodeAction(ISD::SETULT, MVT::f64, Expand); |
660 | setCondCodeAction(ISD::SETUGT, MVT::f32, Expand); |
661 | setCondCodeAction(ISD::SETUGT, MVT::f64, Expand); |
662 | setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand); |
663 | setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand); |
664 | setCondCodeAction(ISD::SETOGE, MVT::f32, Expand); |
665 | setCondCodeAction(ISD::SETOGE, MVT::f64, Expand); |
666 | setCondCodeAction(ISD::SETOLE, MVT::f32, Expand); |
667 | setCondCodeAction(ISD::SETOLE, MVT::f64, Expand); |
668 | setCondCodeAction(ISD::SETONE, MVT::f32, Expand); |
669 | setCondCodeAction(ISD::SETONE, MVT::f64, Expand); |
670 | |
671 | setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Legal); |
672 | setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal); |
673 | |
674 | if (Subtarget.has64BitSupport()) { |
675 | // They also have instructions for converting between i64 and fp. |
676 | setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom); |
677 | setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Expand); |
678 | setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom); |
679 | setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Expand); |
680 | setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom); |
681 | setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand); |
682 | setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom); |
683 | setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand); |
684 | // This is just the low 32 bits of a (signed) fp->i64 conversion. |
685 | // We cannot do this with Promote because i64 is not a legal type. |
686 | setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom); |
687 | setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom); |
688 | |
689 | if (Subtarget.hasLFIWAX() || Subtarget.isPPC64()) { |
690 | setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom); |
691 | setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom); |
692 | } |
693 | } else { |
694 | // PowerPC does not have FP_TO_UINT on 32-bit implementations. |
695 | if (Subtarget.hasSPE()) { |
696 | setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Legal); |
697 | setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal); |
698 | } else { |
699 | setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Expand); |
700 | setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand); |
701 | } |
702 | } |
703 | |
704 | // With the instructions enabled under FPCVT, we can do everything. |
705 | if (Subtarget.hasFPCVT()) { |
706 | if (Subtarget.has64BitSupport()) { |
707 | setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom); |
708 | setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom); |
709 | setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom); |
710 | setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Custom); |
711 | setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom); |
712 | setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom); |
713 | setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom); |
714 | setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom); |
715 | } |
716 | |
717 | setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom); |
718 | setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom); |
719 | setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom); |
720 | setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Custom); |
721 | setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom); |
722 | setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom); |
723 | setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom); |
724 | setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom); |
725 | } |
726 | |
727 | if (Subtarget.use64BitRegs()) { |
728 | // 64-bit PowerPC implementations can support i64 types directly |
729 | addRegisterClass(MVT::i64, &PPC::G8RCRegClass); |
730 | // BUILD_PAIR can't be handled natively, and should be expanded to shl/or |
731 | setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand); |
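// i.e. BUILD_PAIR(Lo, Hi) ~ (zext(Hi) << 32) | zext(Lo)  (sketch)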
732 | // 64-bit PowerPC wants to expand i128 shifts itself. |
733 | setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom); |
734 | setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom); |
735 | setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom); |
736 | } else { |
737 | // 32-bit PowerPC wants to expand i64 shifts itself. |
738 | setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom); |
739 | setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom); |
740 | setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom); |
741 | } |
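// Rough sketch of the parts-based lowering for 0 < amt < 32 (the real custom
// code must also handle amt >= 32):
//   Hi' = (Hi << amt) | (Lo >> (32 - amt));
//   Lo' = Lo << amt;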
742 | |
743 | // PowerPC has better expansions for funnel shifts than the generic |
744 | // TargetLowering::expandFunnelShift. |
745 | if (Subtarget.has64BitSupport()) { |
746 | setOperationAction(ISD::FSHL, MVT::i64, Custom); |
747 | setOperationAction(ISD::FSHR, MVT::i64, Custom); |
748 | } |
749 | setOperationAction(ISD::FSHL, MVT::i32, Custom); |
750 | setOperationAction(ISD::FSHR, MVT::i32, Custom); |
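// For reference, LLVM's funnel-shift semantics for 0 < n < BW:
//   fshl(a, b, n) = (a << n) | (b >> (BW - n))
// The Custom hooks above let PPC emit its own, cheaper sequence instead of
// the generic expansion (per the comment above).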
751 | |
752 | if (Subtarget.hasVSX()) { |
753 | setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal); |
754 | setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal); |
755 | setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal); |
756 | setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal); |
757 | } |
758 | |
759 | if (Subtarget.hasAltivec()) { |
760 | for (MVT VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) { |
761 | setOperationAction(ISD::SADDSAT, VT, Legal); |
762 | setOperationAction(ISD::SSUBSAT, VT, Legal); |
763 | setOperationAction(ISD::UADDSAT, VT, Legal); |
764 | setOperationAction(ISD::USUBSAT, VT, Legal); |
765 | } |
766 | // First set operation action for all vector types to expand. Then we |
767 | // will selectively turn on ones that can be effectively codegen'd. |
768 | for (MVT VT : MVT::fixedlen_vector_valuetypes()) { |
769 | // add/sub are legal for all supported vector VT's. |
770 | setOperationAction(ISD::ADD, VT, Legal); |
771 | setOperationAction(ISD::SUB, VT, Legal); |
772 | |
773 | // For v2i64, these are only valid with P8Vector. This is corrected after |
774 | // the loop. |
775 | if (VT.getSizeInBits() <= 128 && VT.getScalarSizeInBits() <= 64) { |
776 | setOperationAction(ISD::SMAX, VT, Legal); |
777 | setOperationAction(ISD::SMIN, VT, Legal); |
778 | setOperationAction(ISD::UMAX, VT, Legal); |
779 | setOperationAction(ISD::UMIN, VT, Legal); |
780 | } |
781 | else { |
782 | setOperationAction(ISD::SMAX, VT, Expand); |
783 | setOperationAction(ISD::SMIN, VT, Expand); |
784 | setOperationAction(ISD::UMAX, VT, Expand); |
785 | setOperationAction(ISD::UMIN, VT, Expand); |
786 | } |
787 | |
788 | if (Subtarget.hasVSX()) { |
789 | setOperationAction(ISD::FMAXNUM, VT, Legal); |
790 | setOperationAction(ISD::FMINNUM, VT, Legal); |
791 | } |
792 | |
793 | // Vector instructions introduced in P8 |
794 | if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) { |
795 | setOperationAction(ISD::CTPOP, VT, Legal); |
796 | setOperationAction(ISD::CTLZ, VT, Legal); |
797 | } |
798 | else { |
799 | setOperationAction(ISD::CTPOP, VT, Expand); |
800 | setOperationAction(ISD::CTLZ, VT, Expand); |
801 | } |
802 | |
803 | // Vector instructions introduced in P9 |
804 | if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128)) |
805 | setOperationAction(ISD::CTTZ, VT, Legal); |
806 | else |
807 | setOperationAction(ISD::CTTZ, VT, Expand); |
808 | |
809 | // We promote all shuffles to v16i8. |
810 | setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote); |
811 | AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8); |
812 | |
813 | // We promote all non-typed operations to v4i32. |
814 | setOperationAction(ISD::AND , VT, Promote); |
815 | AddPromotedToType (ISD::AND , VT, MVT::v4i32); |
816 | setOperationAction(ISD::OR , VT, Promote); |
817 | AddPromotedToType (ISD::OR , VT, MVT::v4i32); |
818 | setOperationAction(ISD::XOR , VT, Promote); |
819 | AddPromotedToType (ISD::XOR , VT, MVT::v4i32); |
820 | setOperationAction(ISD::LOAD , VT, Promote); |
821 | AddPromotedToType (ISD::LOAD , VT, MVT::v4i32); |
822 | setOperationAction(ISD::SELECT, VT, Promote); |
823 | AddPromotedToType (ISD::SELECT, VT, MVT::v4i32); |
824 | setOperationAction(ISD::VSELECT, VT, Legal); |
825 | setOperationAction(ISD::SELECT_CC, VT, Promote); |
826 | AddPromotedToType (ISD::SELECT_CC, VT, MVT::v4i32); |
827 | setOperationAction(ISD::STORE, VT, Promote); |
828 | AddPromotedToType (ISD::STORE, VT, MVT::v4i32); |
829 | |
830 | // No other operations are legal. |
831 | setOperationAction(ISD::MUL , VT, Expand); |
832 | setOperationAction(ISD::SDIV, VT, Expand); |
833 | setOperationAction(ISD::SREM, VT, Expand); |
834 | setOperationAction(ISD::UDIV, VT, Expand); |
835 | setOperationAction(ISD::UREM, VT, Expand); |
836 | setOperationAction(ISD::FDIV, VT, Expand); |
837 | setOperationAction(ISD::FREM, VT, Expand); |
838 | setOperationAction(ISD::FNEG, VT, Expand); |
839 | setOperationAction(ISD::FSQRT, VT, Expand); |
840 | setOperationAction(ISD::FLOG, VT, Expand); |
841 | setOperationAction(ISD::FLOG10, VT, Expand); |
842 | setOperationAction(ISD::FLOG2, VT, Expand); |
843 | setOperationAction(ISD::FEXP, VT, Expand); |
844 | setOperationAction(ISD::FEXP2, VT, Expand); |
845 | setOperationAction(ISD::FSIN, VT, Expand); |
846 | setOperationAction(ISD::FCOS, VT, Expand); |
847 | setOperationAction(ISD::FABS, VT, Expand); |
848 | setOperationAction(ISD::FFLOOR, VT, Expand); |
849 | setOperationAction(ISD::FCEIL, VT, Expand); |
850 | setOperationAction(ISD::FTRUNC, VT, Expand); |
851 | setOperationAction(ISD::FRINT, VT, Expand); |
852 | setOperationAction(ISD::FNEARBYINT, VT, Expand); |
853 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand); |
854 | setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand); |
855 | setOperationAction(ISD::BUILD_VECTOR, VT, Expand); |
856 | setOperationAction(ISD::MULHU, VT, Expand); |
857 | setOperationAction(ISD::MULHS, VT, Expand); |
858 | setOperationAction(ISD::UMUL_LOHI, VT, Expand); |
859 | setOperationAction(ISD::SMUL_LOHI, VT, Expand); |
860 | setOperationAction(ISD::UDIVREM, VT, Expand); |
861 | setOperationAction(ISD::SDIVREM, VT, Expand); |
862 | setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand); |
863 | setOperationAction(ISD::FPOW, VT, Expand); |
864 | setOperationAction(ISD::BSWAP, VT, Expand); |
865 | setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand); |
866 | setOperationAction(ISD::ROTL, VT, Expand); |
867 | setOperationAction(ISD::ROTR, VT, Expand); |
868 | |
869 | for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) { |
870 | setTruncStoreAction(VT, InnerVT, Expand); |
871 | setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand); |
872 | setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand); |
873 | setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand); |
874 | } |
875 | } |
876 | setOperationAction(ISD::SELECT_CC, MVT::v4i32, Expand); |
877 | if (!Subtarget.hasP8Vector()) { |
878 | setOperationAction(ISD::SMAX, MVT::v2i64, Expand); |
879 | setOperationAction(ISD::SMIN, MVT::v2i64, Expand); |
880 | setOperationAction(ISD::UMAX, MVT::v2i64, Expand); |
881 | setOperationAction(ISD::UMIN, MVT::v2i64, Expand); |
882 | } |
883 | |
884 | // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle |
885 | // with merges, splats, etc. |
886 | setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom); |
887 | |
888 | // Vector truncates to sub-word integers that fit in an Altivec/VSX register |
889 | // are cheap, so handle them before they get expanded to scalar. |
890 | setOperationAction(ISD::TRUNCATE, MVT::v8i8, Custom); |
891 | setOperationAction(ISD::TRUNCATE, MVT::v4i8, Custom); |
892 | setOperationAction(ISD::TRUNCATE, MVT::v2i8, Custom); |
893 | setOperationAction(ISD::TRUNCATE, MVT::v4i16, Custom); |
894 | setOperationAction(ISD::TRUNCATE, MVT::v2i16, Custom); |
895 | |
896 | setOperationAction(ISD::AND , MVT::v4i32, Legal); |
897 | setOperationAction(ISD::OR , MVT::v4i32, Legal); |
898 | setOperationAction(ISD::XOR , MVT::v4i32, Legal); |
899 | setOperationAction(ISD::LOAD , MVT::v4i32, Legal); |
900 | setOperationAction(ISD::SELECT, MVT::v4i32, |
901 | Subtarget.useCRBits() ? Legal : Expand); |
902 | setOperationAction(ISD::STORE , MVT::v4i32, Legal); |
903 | setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4i32, Legal); |
904 | setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4i32, Legal); |
905 | setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i32, Legal); |
906 | setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i32, Legal); |
907 | setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal); |
908 | setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal); |
909 | setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal); |
910 | setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal); |
911 | setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal); |
912 | setOperationAction(ISD::FCEIL, MVT::v4f32, Legal); |
913 | setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal); |
914 | setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal); |
915 | |
916 | // Custom lowering ROTL v1i128 to VECTOR_SHUFFLE v16i8. |
917 | setOperationAction(ISD::ROTL, MVT::v1i128, Custom); |
918 | // With hasAltivec set, we can lower ISD::ROTL to vrl(b|h|w). |
919 | if (Subtarget.hasAltivec()) |
920 | for (auto VT : {MVT::v4i32, MVT::v8i16, MVT::v16i8}) |
921 | setOperationAction(ISD::ROTL, VT, Legal); |
922 | // With hasP8Altivec set, we can lower ISD::ROTL to vrld. |
923 | if (Subtarget.hasP8Altivec()) |
924 | setOperationAction(ISD::ROTL, MVT::v2i64, Legal); |
925 | |
926 | addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass); |
927 | addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass); |
928 | addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass); |
929 | addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass); |
930 | |
931 | setOperationAction(ISD::MUL, MVT::v4f32, Legal); |
932 | setOperationAction(ISD::FMA, MVT::v4f32, Legal); |
933 | |
934 | if (Subtarget.hasVSX()) { |
935 | setOperationAction(ISD::FDIV, MVT::v4f32, Legal); |
936 | setOperationAction(ISD::FSQRT, MVT::v4f32, Legal); |
937 | setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom); |
938 | } |
939 | |
940 | if (Subtarget.hasP8Altivec()) |
941 | setOperationAction(ISD::MUL, MVT::v4i32, Legal); |
942 | else |
943 | setOperationAction(ISD::MUL, MVT::v4i32, Custom); |
944 | |
945 | if (Subtarget.isISA3_1()) { |
946 | setOperationAction(ISD::MUL, MVT::v2i64, Legal); |
947 | setOperationAction(ISD::MULHS, MVT::v2i64, Legal); |
948 | setOperationAction(ISD::MULHU, MVT::v2i64, Legal); |
949 | setOperationAction(ISD::MULHS, MVT::v4i32, Legal); |
950 | setOperationAction(ISD::MULHU, MVT::v4i32, Legal); |
951 | setOperationAction(ISD::UDIV, MVT::v2i64, Legal); |
952 | setOperationAction(ISD::SDIV, MVT::v2i64, Legal); |
953 | setOperationAction(ISD::UDIV, MVT::v4i32, Legal); |
954 | setOperationAction(ISD::SDIV, MVT::v4i32, Legal); |
955 | setOperationAction(ISD::UREM, MVT::v2i64, Legal); |
956 | setOperationAction(ISD::SREM, MVT::v2i64, Legal); |
957 | setOperationAction(ISD::UREM, MVT::v4i32, Legal); |
958 | setOperationAction(ISD::SREM, MVT::v4i32, Legal); |
959 | setOperationAction(ISD::UREM, MVT::v1i128, Legal); |
960 | setOperationAction(ISD::SREM, MVT::v1i128, Legal); |
961 | setOperationAction(ISD::UDIV, MVT::v1i128, Legal); |
962 | setOperationAction(ISD::SDIV, MVT::v1i128, Legal); |
963 | setOperationAction(ISD::ROTL, MVT::v1i128, Legal); |
964 | } |
965 | |
966 | setOperationAction(ISD::MUL, MVT::v8i16, Legal); |
967 | setOperationAction(ISD::MUL, MVT::v16i8, Custom); |
968 | |
969 | setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom); |
970 | setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom); |
971 | |
972 | setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom); |
973 | setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom); |
974 | setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom); |
975 | setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom); |
976 | |
977 | // Altivec does not contain unordered floating-point compare instructions |
978 | setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand); |
979 | setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand); |
980 | setCondCodeAction(ISD::SETO, MVT::v4f32, Expand); |
981 | setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand); |
982 | |
983 | if (Subtarget.hasVSX()) { |
984 | setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal); |
985 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal); |
986 | if (Subtarget.hasP8Vector()) { |
987 | setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal); |
988 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal); |
989 | } |
990 | if (Subtarget.hasDirectMove() && isPPC64) { |
991 | setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal); |
992 | setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal); |
993 | setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal); |
994 | setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal); |
995 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal); |
996 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal); |
997 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal); |
998 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal); |
999 | } |
1000 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal); |
1001 | |
1002 | // The nearbyint variants are not allowed to raise the inexact exception |
1003 | // so we can only code-gen them with unsafe math. |
1004 | if (TM.Options.UnsafeFPMath) { |
1005 | setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal); |
1006 | setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal); |
1007 | } |
1008 | |
1009 | setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal); |
1010 | setOperationAction(ISD::FCEIL, MVT::v2f64, Legal); |
1011 | setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal); |
1012 | setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal); |
1013 | setOperationAction(ISD::FRINT, MVT::v2f64, Legal); |
1014 | setOperationAction(ISD::FROUND, MVT::v2f64, Legal); |
1015 | setOperationAction(ISD::FROUND, MVT::f64, Legal); |
1016 | setOperationAction(ISD::FRINT, MVT::f64, Legal); |
1017 | |
1018 | setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal); |
1019 | setOperationAction(ISD::FRINT, MVT::v4f32, Legal); |
1020 | setOperationAction(ISD::FROUND, MVT::v4f32, Legal); |
1021 | setOperationAction(ISD::FROUND, MVT::f32, Legal); |
1022 | setOperationAction(ISD::FRINT, MVT::f32, Legal); |
1023 | |
1024 | setOperationAction(ISD::MUL, MVT::v2f64, Legal); |
1025 | setOperationAction(ISD::FMA, MVT::v2f64, Legal); |
1026 | |
1027 | setOperationAction(ISD::FDIV, MVT::v2f64, Legal); |
1028 | setOperationAction(ISD::FSQRT, MVT::v2f64, Legal); |
1029 | |
1030 | // Share the Altivec comparison restrictions. |
1031 | setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand); |
1032 | setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand); |
1033 | setCondCodeAction(ISD::SETO, MVT::v2f64, Expand); |
1034 | setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand); |
1035 | |
1036 | setOperationAction(ISD::LOAD, MVT::v2f64, Legal); |
1037 | setOperationAction(ISD::STORE, MVT::v2f64, Legal); |
1038 | |
1039 | setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom); |
1040 | |
1041 | if (Subtarget.hasP8Vector()) |
1042 | addRegisterClass(MVT::f32, &PPC::VSSRCRegClass); |
1043 | |
1044 | addRegisterClass(MVT::f64, &PPC::VSFRCRegClass); |
1045 | |
1046 | addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass); |
1047 | addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass); |
1048 | addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass); |
1049 | |
1050 | if (Subtarget.hasP8Altivec()) { |
1051 | setOperationAction(ISD::SHL, MVT::v2i64, Legal); |
1052 | setOperationAction(ISD::SRA, MVT::v2i64, Legal); |
1053 | setOperationAction(ISD::SRL, MVT::v2i64, Legal); |
1054 | |
1055 | // 128-bit shifts can be accomplished via 3 instructions for SHL and |
1056 | // SRL, but not for SRA, because of the instructions available: |
1057 | // VS{RL} and VS{RL}O. However, due to direct-move costs, it's not |
1058 | // worth doing. |
1059 | setOperationAction(ISD::SHL, MVT::v1i128, Expand); |
1060 | setOperationAction(ISD::SRL, MVT::v1i128, Expand); |
1061 | setOperationAction(ISD::SRA, MVT::v1i128, Expand); |
1062 | |
1063 | setOperationAction(ISD::SETCC, MVT::v2i64, Legal); |
1064 | } |
1065 | else { |
1066 | setOperationAction(ISD::SHL, MVT::v2i64, Expand); |
1067 | setOperationAction(ISD::SRA, MVT::v2i64, Expand); |
1068 | setOperationAction(ISD::SRL, MVT::v2i64, Expand); |
1069 | |
1070 | setOperationAction(ISD::SETCC, MVT::v2i64, Custom); |
1071 | |
1072 | // VSX v2i64 only supports non-arithmetic operations. |
1073 | setOperationAction(ISD::ADD, MVT::v2i64, Expand); |
1074 | setOperationAction(ISD::SUB, MVT::v2i64, Expand); |
1075 | } |
1076 | |
1077 | if (Subtarget.isISA3_1()) |
1078 | setOperationAction(ISD::SETCC, MVT::v1i128, Legal); |
1079 | else |
1080 | setOperationAction(ISD::SETCC, MVT::v1i128, Expand); |
1081 | |
1082 | setOperationAction(ISD::LOAD, MVT::v2i64, Promote); |
1083 | AddPromotedToType (ISD::LOAD, MVT::v2i64, MVT::v2f64); |
1084 | setOperationAction(ISD::STORE, MVT::v2i64, Promote); |
1085 | AddPromotedToType (ISD::STORE, MVT::v2i64, MVT::v2f64); |
1086 | |
1087 | setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom); |
1088 | |
1089 | setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i64, Legal); |
1090 | setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i64, Legal); |
1091 | setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2i64, Legal); |
1092 | setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2i64, Legal); |
1093 | setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal); |
1094 | setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal); |
1095 | setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal); |
1096 | setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal); |
1097 | |
1098 | // Custom handling for partial vectors of integers converted to |
1099 | // floating point. We already have optimal handling for v2i32 through |
1100 | // the DAG combine, so those aren't necessary. |
1101 | setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i8, Custom); |
1102 | setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i8, Custom); |
1103 | setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i16, Custom); |
1104 | setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i16, Custom); |
1105 | setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i8, Custom); |
1106 | setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i8, Custom); |
1107 | setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i16, Custom); |
1108 | setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i16, Custom); |
1109 | setOperationAction(ISD::UINT_TO_FP, MVT::v2i8, Custom); |
1110 | setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom); |
1111 | setOperationAction(ISD::UINT_TO_FP, MVT::v2i16, Custom); |
1112 | setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom); |
1113 | setOperationAction(ISD::SINT_TO_FP, MVT::v2i8, Custom); |
1114 | setOperationAction(ISD::SINT_TO_FP, MVT::v4i8, Custom); |
1115 | setOperationAction(ISD::SINT_TO_FP, MVT::v2i16, Custom); |
1116 | setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom); |
1117 | |
1118 | setOperationAction(ISD::FNEG, MVT::v4f32, Legal); |
1119 | setOperationAction(ISD::FNEG, MVT::v2f64, Legal); |
1120 | setOperationAction(ISD::FABS, MVT::v4f32, Legal); |
1121 | setOperationAction(ISD::FABS, MVT::v2f64, Legal); |
1122 | setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal); |
1123 | setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Legal); |
1124 | |
1125 | setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom); |
1126 | setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom); |
1127 | |
1128 | // Handle constrained floating-point operations on vectors. |
1129 | // The predicate is `hasVSX` because Altivec instructions do not raise |
1130 | // floating-point exceptions, but VSX vector instructions do. |
1131 | setOperationAction(ISD::STRICT_FADD, MVT::v4f32, Legal); |
1132 | setOperationAction(ISD::STRICT_FSUB, MVT::v4f32, Legal); |
1133 | setOperationAction(ISD::STRICT_FMUL, MVT::v4f32, Legal); |
1134 | setOperationAction(ISD::STRICT_FDIV, MVT::v4f32, Legal); |
1135 | setOperationAction(ISD::STRICT_FMA, MVT::v4f32, Legal); |
1136 | setOperationAction(ISD::STRICT_FSQRT, MVT::v4f32, Legal); |
1137 | setOperationAction(ISD::STRICT_FMAXNUM, MVT::v4f32, Legal); |
1138 | setOperationAction(ISD::STRICT_FMINNUM, MVT::v4f32, Legal); |
1139 | setOperationAction(ISD::STRICT_FRINT, MVT::v4f32, Legal); |
1140 | setOperationAction(ISD::STRICT_FFLOOR, MVT::v4f32, Legal); |
1141 | setOperationAction(ISD::STRICT_FCEIL, MVT::v4f32, Legal); |
1142 | setOperationAction(ISD::STRICT_FTRUNC, MVT::v4f32, Legal); |
1143 | setOperationAction(ISD::STRICT_FROUND, MVT::v4f32, Legal); |
1144 | |
1145 | setOperationAction(ISD::STRICT_FADD, MVT::v2f64, Legal); |
1146 | setOperationAction(ISD::STRICT_FSUB, MVT::v2f64, Legal); |
1147 | setOperationAction(ISD::STRICT_FMUL, MVT::v2f64, Legal); |
1148 | setOperationAction(ISD::STRICT_FDIV, MVT::v2f64, Legal); |
1149 | setOperationAction(ISD::STRICT_FMA, MVT::v2f64, Legal); |
1150 | setOperationAction(ISD::STRICT_FSQRT, MVT::v2f64, Legal); |
1151 | setOperationAction(ISD::STRICT_FMAXNUM, MVT::v2f64, Legal); |
1152 | setOperationAction(ISD::STRICT_FMINNUM, MVT::v2f64, Legal); |
1153 | setOperationAction(ISD::STRICT_FRINT, MVT::v2f64, Legal); |
1154 | setOperationAction(ISD::STRICT_FFLOOR, MVT::v2f64, Legal); |
1155 | setOperationAction(ISD::STRICT_FCEIL, MVT::v2f64, Legal); |
1156 | setOperationAction(ISD::STRICT_FTRUNC, MVT::v2f64, Legal); |
1157 | setOperationAction(ISD::STRICT_FROUND, MVT::v2f64, Legal); |
1158 | |
1159 | addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass); |
1160 | addRegisterClass(MVT::f128, &PPC::VRRCRegClass); |
1161 | |
1162 | for (MVT FPT : MVT::fp_valuetypes()) |
1163 | setLoadExtAction(ISD::EXTLOAD, MVT::f128, FPT, Expand); |
1164 | |
1165 | // Expand the SELECT to SELECT_CC |
1166 | setOperationAction(ISD::SELECT, MVT::f128, Expand); |
1167 | |
1168 | setTruncStoreAction(MVT::f128, MVT::f64, Expand); |
1169 | setTruncStoreAction(MVT::f128, MVT::f32, Expand); |
1170 | |
1171 | // No implementation for these ops on PowerPC. |
1172 | setOperationAction(ISD::FSIN, MVT::f128, Expand); |
1173 | setOperationAction(ISD::FCOS, MVT::f128, Expand); |
1174 | setOperationAction(ISD::FPOW, MVT::f128, Expand); |
1175 | setOperationAction(ISD::FPOWI, MVT::f128, Expand); |
1176 | setOperationAction(ISD::FREM, MVT::f128, Expand); |
1177 | } |
1178 | |
1179 | if (Subtarget.hasP8Altivec()) { |
1180 | addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass); |
1181 | addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass); |
1182 | } |
1183 | |
1184 | if (Subtarget.hasP9Vector()) { |
1185 | setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom); |
1186 | setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom); |
1187 | |
1188 | // Test data class instructions store results in CR bits. |
1189 | if (Subtarget.useCRBits()) { |
1190 | setOperationAction(ISD::IS_FPCLASS, MVT::f32, Custom); |
1191 | setOperationAction(ISD::IS_FPCLASS, MVT::f64, Custom); |
1192 | setOperationAction(ISD::IS_FPCLASS, MVT::f128, Custom); |
1193 | } |
1194 | |
1195 | // 128 bit shifts can be accomplished via 3 instructions for SHL and |
1196 | // SRL, but not for SRA because of the instructions available: |
1197 | // VS{RL} and VS{RL}O. |
1198 | setOperationAction(ISD::SHL, MVT::v1i128, Legal); |
1199 | setOperationAction(ISD::SRL, MVT::v1i128, Legal); |
1200 | setOperationAction(ISD::SRA, MVT::v1i128, Expand); |
1201 | |
1202 | setOperationAction(ISD::FADD, MVT::f128, Legal); |
1203 | setOperationAction(ISD::FSUB, MVT::f128, Legal); |
1204 | setOperationAction(ISD::FDIV, MVT::f128, Legal); |
1205 | setOperationAction(ISD::FMUL, MVT::f128, Legal); |
1206 | setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal); |
1207 | |
1208 | setOperationAction(ISD::FMA, MVT::f128, Legal); |
1209 | setCondCodeAction(ISD::SETULT, MVT::f128, Expand); |
1210 | setCondCodeAction(ISD::SETUGT, MVT::f128, Expand); |
1211 | setCondCodeAction(ISD::SETUEQ, MVT::f128, Expand); |
1212 | setCondCodeAction(ISD::SETOGE, MVT::f128, Expand); |
1213 | setCondCodeAction(ISD::SETOLE, MVT::f128, Expand); |
1214 | setCondCodeAction(ISD::SETONE, MVT::f128, Expand); |
1215 | |
1216 | setOperationAction(ISD::FTRUNC, MVT::f128, Legal); |
1217 | setOperationAction(ISD::FRINT, MVT::f128, Legal); |
1218 | setOperationAction(ISD::FFLOOR, MVT::f128, Legal); |
1219 | setOperationAction(ISD::FCEIL, MVT::f128, Legal); |
1220 | setOperationAction(ISD::FNEARBYINT, MVT::f128, Legal); |
1221 | setOperationAction(ISD::FROUND, MVT::f128, Legal); |
1222 | |
1223 | setOperationAction(ISD::FP_ROUND, MVT::f64, Legal); |
1224 | setOperationAction(ISD::FP_ROUND, MVT::f32, Legal); |
1225 | setOperationAction(ISD::BITCAST, MVT::i128, Custom); |
1226 | |
1227 | // Handle constrained floating-point operations of fp128 |
1228 | setOperationAction(ISD::STRICT_FADD, MVT::f128, Legal); |
1229 | setOperationAction(ISD::STRICT_FSUB, MVT::f128, Legal); |
1230 | setOperationAction(ISD::STRICT_FMUL, MVT::f128, Legal); |
1231 | setOperationAction(ISD::STRICT_FDIV, MVT::f128, Legal); |
1232 | setOperationAction(ISD::STRICT_FMA, MVT::f128, Legal); |
1233 | setOperationAction(ISD::STRICT_FSQRT, MVT::f128, Legal); |
1234 | setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Legal); |
1235 | setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Legal); |
1236 | setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal); |
1237 | setOperationAction(ISD::STRICT_FRINT, MVT::f128, Legal); |
1238 | setOperationAction(ISD::STRICT_FNEARBYINT, MVT::f128, Legal); |
1239 | setOperationAction(ISD::STRICT_FFLOOR, MVT::f128, Legal); |
1240 | setOperationAction(ISD::STRICT_FCEIL, MVT::f128, Legal); |
1241 | setOperationAction(ISD::STRICT_FTRUNC, MVT::f128, Legal); |
1242 | setOperationAction(ISD::STRICT_FROUND, MVT::f128, Legal); |
1243 | setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom); |
1244 | setOperationAction(ISD::BSWAP, MVT::v8i16, Legal); |
1245 | setOperationAction(ISD::BSWAP, MVT::v4i32, Legal); |
1246 | setOperationAction(ISD::BSWAP, MVT::v2i64, Legal); |
1247 | setOperationAction(ISD::BSWAP, MVT::v1i128, Legal); |
1248 | } else if (Subtarget.hasVSX()) { |
1249 | setOperationAction(ISD::LOAD, MVT::f128, Promote); |
1250 | setOperationAction(ISD::STORE, MVT::f128, Promote); |
1251 | |
1252 | AddPromotedToType(ISD::LOAD, MVT::f128, MVT::v4i32); |
1253 | AddPromotedToType(ISD::STORE, MVT::f128, MVT::v4i32); |
1254 | |
1255 | // Set FADD/FSUB to LibCall to keep the legalizer from expanding the |
1256 | // fp_to_uint and int_to_fp nodes. |
1257 | setOperationAction(ISD::FADD, MVT::f128, LibCall); |
1258 | setOperationAction(ISD::FSUB, MVT::f128, LibCall); |
1259 | |
1260 | setOperationAction(ISD::FMUL, MVT::f128, Expand); |
1261 | setOperationAction(ISD::FDIV, MVT::f128, Expand); |
1262 | setOperationAction(ISD::FNEG, MVT::f128, Expand); |
1263 | setOperationAction(ISD::FABS, MVT::f128, Expand); |
1264 | setOperationAction(ISD::FSQRT, MVT::f128, Expand); |
1265 | setOperationAction(ISD::FMA, MVT::f128, Expand); |
1266 | setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand); |
1267 | |
1268 | // Expand the fp_extend if the target type is fp128. |
1269 | setOperationAction(ISD::FP_EXTEND, MVT::f128, Expand); |
1270 | setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Expand); |
1271 | |
1272 | // Expand the fp_round if the source type is fp128. |
1273 | for (MVT VT : {MVT::f32, MVT::f64}) { |
1274 | setOperationAction(ISD::FP_ROUND, VT, Custom); |
1275 | setOperationAction(ISD::STRICT_FP_ROUND, VT, Custom); |
1276 | } |
1277 | |
1278 | setOperationAction(ISD::SETCC, MVT::f128, Custom); |
1279 | setOperationAction(ISD::STRICT_FSETCC, MVT::f128, Custom); |
1280 | setOperationAction(ISD::STRICT_FSETCCS, MVT::f128, Custom); |
1281 | setOperationAction(ISD::BR_CC, MVT::f128, Expand); |
1282 | |
1283 | // Lower the following f128 select_cc pattern: |
1284 | // select_cc x, y, tv, fv, cc -> select_cc (setcc x, y, cc), 0, tv, fv, NE |
1285 | setOperationAction(ISD::SELECT_CC, MVT::f128, Custom); |
1286 | |
1287 | // We need to handle f128 SELECT_CC with integer result type. |
1288 | setOperationAction(ISD::SELECT_CC, MVT::i32, Custom); |
1289 | setOperationAction(ISD::SELECT_CC, MVT::i64, isPPC64 ? Custom : Expand); |
1290 | } |
1291 | |
1292 | if (Subtarget.hasP9Altivec()) { |
1293 | if (Subtarget.isISA3_1()) { |
1294 | setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Legal); |
1295 | setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Legal); |
1296 | setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Legal); |
1297 | setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Legal); |
1298 | } else { |
1299 | setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom); |
1300 | setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom); |
1301 | } |
1302 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Legal); |
1303 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Legal); |
1304 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Legal); |
1305 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Legal); |
1306 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Legal); |
1307 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal); |
1308 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal); |
1309 | |
1310 | setOperationAction(ISD::ABDU, MVT::v16i8, Legal); |
1311 | setOperationAction(ISD::ABDU, MVT::v8i16, Legal); |
1312 | setOperationAction(ISD::ABDU, MVT::v4i32, Legal); |
1313 | setOperationAction(ISD::ABDS, MVT::v4i32, Legal); |
1314 | } |
1315 | |
1316 | if (Subtarget.hasP10Vector()) { |
1317 | setOperationAction(ISD::SELECT_CC, MVT::f128, Custom); |
1318 | } |
1319 | } |
1320 | |
1321 | if (Subtarget.pairedVectorMemops()) { |
1322 | addRegisterClass(MVT::v256i1, &PPC::VSRpRCRegClass); |
1323 | setOperationAction(ISD::LOAD, MVT::v256i1, Custom); |
1324 | setOperationAction(ISD::STORE, MVT::v256i1, Custom); |
1325 | } |
1326 | if (Subtarget.hasMMA()) { |
1327 | if (Subtarget.isISAFuture()) |
1328 | addRegisterClass(MVT::v512i1, &PPC::WACCRCRegClass); |
1329 | else |
1330 | addRegisterClass(MVT::v512i1, &PPC::UACCRCRegClass); |
1331 | setOperationAction(ISD::LOAD, MVT::v512i1, Custom); |
1332 | setOperationAction(ISD::STORE, MVT::v512i1, Custom); |
1333 | setOperationAction(ISD::BUILD_VECTOR, MVT::v512i1, Custom); |
1334 | } |
1335 | |
1336 | if (Subtarget.has64BitSupport()) |
1337 | setOperationAction(ISD::PREFETCH, MVT::Other, Legal); |
1338 | |
1339 | if (Subtarget.isISA3_1()) |
1340 | setOperationAction(ISD::SRA, MVT::v1i128, Legal); |
1341 | |
1342 | setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom); |
1343 | |
1344 | if (!isPPC64) { |
1345 | setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand); |
1346 | setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand); |
1347 | } |
1348 | |
1349 | if (shouldInlineQuadwordAtomics()) { |
1350 | setOperationAction(ISD::ATOMIC_LOAD, MVT::i128, Custom); |
1351 | setOperationAction(ISD::ATOMIC_STORE, MVT::i128, Custom); |
1352 | setOperationAction(ISD::INTRINSIC_VOID, MVT::i128, Custom); |
1353 | } |
1354 | |
1355 | setBooleanContents(ZeroOrOneBooleanContent); |
1356 | |
1357 | if (Subtarget.hasAltivec()) { |
1358 | // Altivec instructions set fields to all zeros or all ones. |
1359 | setBooleanVectorContents(ZeroOrNegativeOneBooleanContent); |
1360 | } |
1361 | |
1362 | setLibcallName(RTLIB::MULO_I128, nullptr); |
1363 | if (!isPPC64) { |
1364 | // These libcalls are not available in 32-bit mode. |
1365 | setLibcallName(RTLIB::SHL_I128, nullptr); |
1366 | setLibcallName(RTLIB::SRL_I128, nullptr); |
1367 | setLibcallName(RTLIB::SRA_I128, nullptr); |
1368 | setLibcallName(RTLIB::MUL_I128, nullptr); |
1369 | setLibcallName(RTLIB::MULO_I64, nullptr); |
1370 | } |
1371 | |
1372 | if (!isPPC64) |
1373 | setMaxAtomicSizeInBitsSupported(32); |
1374 | else if (shouldInlineQuadwordAtomics()) |
1375 | setMaxAtomicSizeInBitsSupported(128); |
1376 | else |
1377 | setMaxAtomicSizeInBitsSupported(64); |
1378 | |
1379 | setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1); |
1380 | |
1381 | // We have target-specific dag combine patterns for the following nodes: |
1382 | setTargetDAGCombine({ISD::ADD, ISD::SHL, ISD::SRA, ISD::SRL, ISD::MUL, |
1383 | ISD::FMA, ISD::SINT_TO_FP, ISD::BUILD_VECTOR}); |
1384 | if (Subtarget.hasFPCVT()) |
1385 | setTargetDAGCombine(ISD::UINT_TO_FP); |
1386 | setTargetDAGCombine({ISD::LOAD, ISD::STORE, ISD::BR_CC}); |
1387 | if (Subtarget.useCRBits()) |
1388 | setTargetDAGCombine(ISD::BRCOND); |
1389 | setTargetDAGCombine({ISD::BSWAP, ISD::INTRINSIC_WO_CHAIN, |
1390 | ISD::INTRINSIC_W_CHAIN, ISD::INTRINSIC_VOID}); |
1391 | |
1392 | setTargetDAGCombine({ISD::SIGN_EXTEND, ISD::ZERO_EXTEND, ISD::ANY_EXTEND}); |
1393 | |
1394 | setTargetDAGCombine({ISD::TRUNCATE, ISD::VECTOR_SHUFFLE}); |
1395 | |
1396 | if (Subtarget.useCRBits()) { |
1397 | setTargetDAGCombine({ISD::TRUNCATE, ISD::SETCC, ISD::SELECT_CC}); |
1398 | } |
1399 | |
1400 | setLibcallName(RTLIB::LOG_F128, "logf128"); |
1401 | setLibcallName(RTLIB::LOG2_F128, "log2f128"); |
1402 | setLibcallName(RTLIB::LOG10_F128, "log10f128"); |
1403 | setLibcallName(RTLIB::EXP_F128, "expf128"); |
1404 | setLibcallName(RTLIB::EXP2_F128, "exp2f128"); |
1405 | setLibcallName(RTLIB::SIN_F128, "sinf128"); |
1406 | setLibcallName(RTLIB::COS_F128, "cosf128"); |
1407 | setLibcallName(RTLIB::POW_F128, "powf128"); |
1408 | setLibcallName(RTLIB::FMIN_F128, "fminf128"); |
1409 | setLibcallName(RTLIB::FMAX_F128, "fmaxf128"); |
1410 | setLibcallName(RTLIB::REM_F128, "fmodf128"); |
1411 | setLibcallName(RTLIB::SQRT_F128, "sqrtf128"); |
1412 | setLibcallName(RTLIB::CEIL_F128, "ceilf128"); |
1413 | setLibcallName(RTLIB::FLOOR_F128, "floorf128"); |
1414 | setLibcallName(RTLIB::TRUNC_F128, "truncf128"); |
1415 | setLibcallName(RTLIB::ROUND_F128, "roundf128"); |
1416 | setLibcallName(RTLIB::LROUND_F128, "lroundf128"); |
1417 | setLibcallName(RTLIB::LLROUND_F128, "llroundf128"); |
1418 | setLibcallName(RTLIB::RINT_F128, "rintf128"); |
1419 | setLibcallName(RTLIB::LRINT_F128, "lrintf128"); |
1420 | setLibcallName(RTLIB::LLRINT_F128, "llrintf128"); |
1421 | setLibcallName(RTLIB::NEARBYINT_F128, "nearbyintf128"); |
1422 | setLibcallName(RTLIB::FMA_F128, "fmaf128"); |
1423 | |
1424 | if (Subtarget.isAIXABI()) { |
1425 | setLibcallName(RTLIB::MEMCPY, isPPC64 ? "___memmove64" : "___memmove"); |
1426 | setLibcallName(RTLIB::MEMMOVE, isPPC64 ? "___memmove64" : "___memmove"); |
1427 | setLibcallName(RTLIB::MEMSET, isPPC64 ? "___memset64" : "___memset"); |
1428 | setLibcallName(RTLIB::BZERO, isPPC64 ? "___bzero64" : "___bzero"); |
1429 | } |
1430 | |
1431 | // With 32 condition bits, we don't need to sink (and duplicate) compares |
1432 | // aggressively in CodeGenPrep. |
1433 | if (Subtarget.useCRBits()) { |
1434 | setHasMultipleConditionRegisters(); |
1435 | setJumpIsExpensive(); |
1436 | } |
1437 | |
1438 | setMinFunctionAlignment(Align(4)); |
1439 | |
1440 | switch (Subtarget.getCPUDirective()) { |
1441 | default: break; |
1442 | case PPC::DIR_970: |
1443 | case PPC::DIR_A2: |
1444 | case PPC::DIR_E500: |
1445 | case PPC::DIR_E500mc: |
1446 | case PPC::DIR_E5500: |
1447 | case PPC::DIR_PWR4: |
1448 | case PPC::DIR_PWR5: |
1449 | case PPC::DIR_PWR5X: |
1450 | case PPC::DIR_PWR6: |
1451 | case PPC::DIR_PWR6X: |
1452 | case PPC::DIR_PWR7: |
1453 | case PPC::DIR_PWR8: |
1454 | case PPC::DIR_PWR9: |
1455 | case PPC::DIR_PWR10: |
1456 | case PPC::DIR_PWR_FUTURE: |
1457 | setPrefLoopAlignment(Align(16)); |
1458 | setPrefFunctionAlignment(Align(16)); |
1459 | break; |
1460 | } |
1461 | |
1462 | if (Subtarget.enableMachineScheduler()) |
1463 | setSchedulingPreference(Sched::Source); |
1464 | else |
1465 | setSchedulingPreference(Sched::Hybrid); |
1466 | |
1467 | computeRegisterProperties(STI.getRegisterInfo()); |
1468 | |
1469 | // The Freescale cores do better with aggressive inlining of memcpy and |
1470 | // friends. GCC uses the same threshold of 128 bytes (= 32 word stores). |
1471 | if (Subtarget.getCPUDirective() == PPC::DIR_E500mc || |
1472 | Subtarget.getCPUDirective() == PPC::DIR_E5500) { |
1473 | MaxStoresPerMemset = 32; |
1474 | MaxStoresPerMemsetOptSize = 16; |
1475 | MaxStoresPerMemcpy = 32; |
1476 | MaxStoresPerMemcpyOptSize = 8; |
1477 | MaxStoresPerMemmove = 32; |
1478 | MaxStoresPerMemmoveOptSize = 8; |
1479 | } else if (Subtarget.getCPUDirective() == PPC::DIR_A2) { |
1480 | // The A2 also benefits from (very) aggressive inlining of memcpy and |
1481 | // friends. The overhead of a function call, even when warm, can be |
1482 | // over one hundred cycles. |
1483 | MaxStoresPerMemset = 128; |
1484 | MaxStoresPerMemcpy = 128; |
1485 | MaxStoresPerMemmove = 128; |
1486 | MaxLoadsPerMemcmp = 128; |
1487 | } else { |
1488 | MaxLoadsPerMemcmp = 8; |
1489 | MaxLoadsPerMemcmpOptSize = 4; |
1490 | } |
1491 | |
1492 | IsStrictFPEnabled = true; |
1493 | |
1494 | // Let the subtarget (CPU) decide if a predictable select is more expensive |
1495 | // than the corresponding branch. This information is used in CGP to decide |
1496 | // when to convert selects into branches. |
1497 | PredictableSelectIsExpensive = Subtarget.isPredictableSelectIsExpensive(); |
1498 | } |
1499 | |
1500 | // *********************************** NOTE ************************************ |
1501 | // For selecting load and store instructions, the addressing modes are defined |
1502 | // as ComplexPatterns in PPCInstrInfo.td, which are then utilized in the TD |
1503 | // patterns to match the load and store instructions. |
1504 | // |
1505 | // The TD definitions for the addressing modes correspond to their respective |
1506 | // Select<AddrMode>Form() function in PPCISelDAGToDAG.cpp. These functions rely |
1507 | // on SelectOptimalAddrMode(), which calls computeMOFlags() to compute the |
1508 | // address mode flags of a particular node. Afterwards, the computed address |
1509 | // flags are passed into getAddrModeForFlags() in order to retrieve the optimal |
1510 | // addressing mode. SelectOptimalAddrMode() then sets the Base and Displacement |
1511 | // accordingly, based on the preferred addressing mode. |
1512 | // |
1513 | // Within PPCISelLowering.h, there are two enums: MemOpFlags and AddrMode. |
1514 | // MemOpFlags contains all the possible flags that can be used to compute the |
1515 | // optimal addressing mode for load and store instructions. |
1516 | // AddrMode contains all the possible load and store addressing modes available |
1517 | // on Power (such as DForm, DSForm, DQForm, XForm, etc.) |
1518 | // |
1519 | // When adding new load and store instructions, it is possible that new address |
1520 | // flags may need to be added into MemOpFlags, and a new addressing mode will |
1521 | // need to be added to AddrMode. An entry of the new addressing mode (consisting |
1522 | // of the minimal and main distinguishing address flags for the new load/store |
1523 | // instructions) will need to be added into initializeAddrModeMap() below. |
1524 | // Finally, when adding new addressing modes, getAddrModeForFlags() will |
1525 | // need to be updated to account for selecting the optimal addressing mode. |
1526 | // ***************************************************************************** |
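| |
| // As an illustration (a sketch of the ISA encodings, not a statement about |
| // any single flag below), these forms constrain the address as follows: |
| //   D-Form:          lwz r3, 4(r4)    - base + 16-bit signed immediate |
| //   DS-Form:         ld r3, 8(r4)     - base + 16-bit immediate, multiple of 4 |
| //   DQ-Form:         lxv vs0, 16(r4)  - base + 16-bit immediate, multiple of 16 |
| //   X-Form:          lwzx r3, r4, r5  - base + index register, no immediate |
| //   Prefixed D-Form: pld r3, D(r4)    - base + 34-bit immediate (ISA 3.1) |
| |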
1527 | /// Initialize the map that relates the different addressing modes of the load |
1528 | /// and store instructions to a set of flags. This ensures the load/store |
1529 | /// instruction is correctly matched during instruction selection. |
1530 | void PPCTargetLowering::initializeAddrModeMap() { |
1531 | AddrModesMap[PPC::AM_DForm] = { |
1532 | // LWZ, STW |
1533 | PPC::MOF_ZExt | PPC::MOF_RPlusSImm16 | PPC::MOF_WordInt, |
1534 | PPC::MOF_ZExt | PPC::MOF_RPlusLo | PPC::MOF_WordInt, |
1535 | PPC::MOF_ZExt | PPC::MOF_NotAddNorCst | PPC::MOF_WordInt, |
1536 | PPC::MOF_ZExt | PPC::MOF_AddrIsSImm32 | PPC::MOF_WordInt, |
1537 | // LBZ, LHZ, STB, STH |
1538 | PPC::MOF_ZExt | PPC::MOF_RPlusSImm16 | PPC::MOF_SubWordInt, |
1539 | PPC::MOF_ZExt | PPC::MOF_RPlusLo | PPC::MOF_SubWordInt, |
1540 | PPC::MOF_ZExt | PPC::MOF_NotAddNorCst | PPC::MOF_SubWordInt, |
1541 | PPC::MOF_ZExt | PPC::MOF_AddrIsSImm32 | PPC::MOF_SubWordInt, |
1542 | // LHA |
1543 | PPC::MOF_SExt | PPC::MOF_RPlusSImm16 | PPC::MOF_SubWordInt, |
1544 | PPC::MOF_SExt | PPC::MOF_RPlusLo | PPC::MOF_SubWordInt, |
1545 | PPC::MOF_SExt | PPC::MOF_NotAddNorCst | PPC::MOF_SubWordInt, |
1546 | PPC::MOF_SExt | PPC::MOF_AddrIsSImm32 | PPC::MOF_SubWordInt, |
1547 | // LFS, LFD, STFS, STFD |
1548 | PPC::MOF_RPlusSImm16 | PPC::MOF_ScalarFloat | PPC::MOF_SubtargetBeforeP9, |
1549 | PPC::MOF_RPlusLo | PPC::MOF_ScalarFloat | PPC::MOF_SubtargetBeforeP9, |
1550 | PPC::MOF_NotAddNorCst | PPC::MOF_ScalarFloat | PPC::MOF_SubtargetBeforeP9, |
1551 | PPC::MOF_AddrIsSImm32 | PPC::MOF_ScalarFloat | PPC::MOF_SubtargetBeforeP9, |
1552 | }; |
1553 | AddrModesMap[PPC::AM_DSForm] = { |
1554 | // LWA |
1555 | PPC::MOF_SExt | PPC::MOF_RPlusSImm16Mult4 | PPC::MOF_WordInt, |
1556 | PPC::MOF_SExt | PPC::MOF_NotAddNorCst | PPC::MOF_WordInt, |
1557 | PPC::MOF_SExt | PPC::MOF_AddrIsSImm32 | PPC::MOF_WordInt, |
1558 | // LD, STD |
1559 | PPC::MOF_RPlusSImm16Mult4 | PPC::MOF_DoubleWordInt, |
1560 | PPC::MOF_NotAddNorCst | PPC::MOF_DoubleWordInt, |
1561 | PPC::MOF_AddrIsSImm32 | PPC::MOF_DoubleWordInt, |
1562 | // DFLOADf32, DFLOADf64, DSTOREf32, DSTOREf64 |
1563 | PPC::MOF_RPlusSImm16Mult4 | PPC::MOF_ScalarFloat | PPC::MOF_SubtargetP9, |
1564 | PPC::MOF_NotAddNorCst | PPC::MOF_ScalarFloat | PPC::MOF_SubtargetP9, |
1565 | PPC::MOF_AddrIsSImm32 | PPC::MOF_ScalarFloat | PPC::MOF_SubtargetP9, |
1566 | }; |
1567 | AddrModesMap[PPC::AM_DQForm] = { |
1568 | // LXV, STXV |
1569 | PPC::MOF_RPlusSImm16Mult16 | PPC::MOF_Vector | PPC::MOF_SubtargetP9, |
1570 | PPC::MOF_NotAddNorCst | PPC::MOF_Vector | PPC::MOF_SubtargetP9, |
1571 | PPC::MOF_AddrIsSImm32 | PPC::MOF_Vector | PPC::MOF_SubtargetP9, |
1572 | }; |
1573 | AddrModesMap[PPC::AM_PrefixDForm] = {PPC::MOF_RPlusSImm34 | |
1574 | PPC::MOF_SubtargetP10}; |
1575 | // TODO: Add mapping for quadword load/store. |
1576 | } |
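| |
| // A simplified walk-through (hypothetical IR, not taken from this file): for |
| // a 64-bit integer load of the form (load (add %base, 8)), computeMOFlags() |
| // would include PPC::MOF_RPlusSImm16Mult4 | PPC::MOF_DoubleWordInt, matching |
| // the AM_DSForm entries above, so a DS-Form LD can be selected. |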
1577 | |
1578 | /// getMaxByValAlign - Helper for getByValTypeAlignment to determine |
1579 | /// the desired ByVal argument alignment. |
1580 | static void getMaxByValAlign(Type *Ty, Align &MaxAlign, Align MaxMaxAlign) { |
1581 | if (MaxAlign == MaxMaxAlign) |
1582 | return; |
1583 | if (VectorType *VTy = dyn_cast<VectorType>(Ty)) { |
1584 | if (MaxMaxAlign >= 32 && |
1585 | VTy->getPrimitiveSizeInBits().getFixedValue() >= 256) |
1586 | MaxAlign = Align(32); |
1587 | else if (VTy->getPrimitiveSizeInBits().getFixedValue() >= 128 && |
1588 | MaxAlign < 16) |
1589 | MaxAlign = Align(16); |
1590 | } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) { |
1591 | Align EltAlign; |
1592 | getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign); |
1593 | if (EltAlign > MaxAlign) |
1594 | MaxAlign = EltAlign; |
1595 | } else if (StructType *STy = dyn_cast<StructType>(Ty)) { |
1596 | for (auto *EltTy : STy->elements()) { |
1597 | Align EltAlign; |
1598 | getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign); |
1599 | if (EltAlign > MaxAlign) |
1600 | MaxAlign = EltAlign; |
1601 | if (MaxAlign == MaxMaxAlign) |
1602 | break; |
1603 | } |
1604 | } |
1605 | } |
1606 | |
1607 | /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate |
1608 | /// function arguments in the caller parameter area. |
1609 | uint64_t PPCTargetLowering::getByValTypeAlignment(Type *Ty, |
1610 | const DataLayout &DL) const { |
1611 | // 16-byte and wider vectors are passed on a 16-byte boundary; the rest |
1612 | // on an 8-byte boundary on PPC64 and a 4-byte boundary on PPC32. |
1613 | Align Alignment = Subtarget.isPPC64() ? Align(8) : Align(4); |
1614 | if (Subtarget.hasAltivec()) |
1615 | getMaxByValAlign(Ty, Alignment, Align(16)); |
1616 | return Alignment.value(); |
1617 | } |
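| |
| // For example, with Altivec enabled a byval struct containing a <4 x i32> |
| // member is aligned to 16 bytes, while one holding only i64 members keeps |
| // the default of 8 bytes on PPC64 and 4 bytes on PPC32. |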
1618 | |
1619 | bool PPCTargetLowering::useSoftFloat() const { |
1620 | return Subtarget.useSoftFloat(); |
1621 | } |
1622 | |
1623 | bool PPCTargetLowering::hasSPE() const { |
1624 | return Subtarget.hasSPE(); |
1625 | } |
1626 | |
1627 | bool PPCTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const { |
1628 | return VT.isScalarInteger(); |
1629 | } |
1630 | |
1631 | const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const { |
1632 | switch ((PPCISD::NodeType)Opcode) { |
1633 | case PPCISD::FIRST_NUMBER: break; |
1634 | case PPCISD::FSEL: return "PPCISD::FSEL"; |
1635 | case PPCISD::XSMAXC: return "PPCISD::XSMAXC"; |
1636 | case PPCISD::XSMINC: return "PPCISD::XSMINC"; |
1637 | case PPCISD::FCFID: return "PPCISD::FCFID"; |
1638 | case PPCISD::FCFIDU: return "PPCISD::FCFIDU"; |
1639 | case PPCISD::FCFIDS: return "PPCISD::FCFIDS"; |
1640 | case PPCISD::FCFIDUS: return "PPCISD::FCFIDUS"; |
1641 | case PPCISD::FCTIDZ: return "PPCISD::FCTIDZ"; |
1642 | case PPCISD::FCTIWZ: return "PPCISD::FCTIWZ"; |
1643 | case PPCISD::FCTIDUZ: return "PPCISD::FCTIDUZ"; |
1644 | case PPCISD::FCTIWUZ: return "PPCISD::FCTIWUZ"; |
1645 | case PPCISD::FP_TO_UINT_IN_VSR: |
1646 | return "PPCISD::FP_TO_UINT_IN_VSR,"; |
1647 | case PPCISD::FP_TO_SINT_IN_VSR: |
1648 | return "PPCISD::FP_TO_SINT_IN_VSR"; |
1649 | case PPCISD::FRE: return "PPCISD::FRE"; |
1650 | case PPCISD::FRSQRTE: return "PPCISD::FRSQRTE"; |
1651 | case PPCISD::FTSQRT: |
1652 | return "PPCISD::FTSQRT"; |
1653 | case PPCISD::FSQRT: |
1654 | return "PPCISD::FSQRT"; |
1655 | case PPCISD::STFIWX: return "PPCISD::STFIWX"; |
1656 | case PPCISD::VPERM: return "PPCISD::VPERM"; |
1657 | case PPCISD::XXSPLT: return "PPCISD::XXSPLT"; |
1658 | case PPCISD::XXSPLTI_SP_TO_DP: |
1659 | return "PPCISD::XXSPLTI_SP_TO_DP"; |
1660 | case PPCISD::XXSPLTI32DX: |
1661 | return "PPCISD::XXSPLTI32DX"; |
1662 | case PPCISD::VECINSERT: return "PPCISD::VECINSERT"; |
1663 | case PPCISD::XXPERMDI: return "PPCISD::XXPERMDI"; |
1664 | case PPCISD::XXPERM: |
1665 | return "PPCISD::XXPERM"; |
1666 | case PPCISD::VECSHL: return "PPCISD::VECSHL"; |
1667 | case PPCISD::CMPB: return "PPCISD::CMPB"; |
1668 | case PPCISD::Hi: return "PPCISD::Hi"; |
1669 | case PPCISD::Lo: return "PPCISD::Lo"; |
1670 | case PPCISD::TOC_ENTRY: return "PPCISD::TOC_ENTRY"; |
1671 | case PPCISD::ATOMIC_CMP_SWAP_8: return "PPCISD::ATOMIC_CMP_SWAP_8"; |
1672 | case PPCISD::ATOMIC_CMP_SWAP_16: return "PPCISD::ATOMIC_CMP_SWAP_16"; |
1673 | case PPCISD::DYNALLOC: return "PPCISD::DYNALLOC"; |
1674 | case PPCISD::DYNAREAOFFSET: return "PPCISD::DYNAREAOFFSET"; |
1675 | case PPCISD::PROBED_ALLOCA: return "PPCISD::PROBED_ALLOCA"; |
1676 | case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg"; |
1677 | case PPCISD::SRL: return "PPCISD::SRL"; |
1678 | case PPCISD::SRA: return "PPCISD::SRA"; |
1679 | case PPCISD::SHL: return "PPCISD::SHL"; |
1680 | case PPCISD::SRA_ADDZE: return "PPCISD::SRA_ADDZE"; |
1681 | case PPCISD::CALL: return "PPCISD::CALL"; |
1682 | case PPCISD::CALL_NOP: return "PPCISD::CALL_NOP"; |
1683 | case PPCISD::CALL_NOTOC: return "PPCISD::CALL_NOTOC"; |
1684 | case PPCISD::CALL_RM: |
1685 | return "PPCISD::CALL_RM"; |
1686 | case PPCISD::CALL_NOP_RM: |
1687 | return "PPCISD::CALL_NOP_RM"; |
1688 | case PPCISD::CALL_NOTOC_RM: |
1689 | return "PPCISD::CALL_NOTOC_RM"; |
1690 | case PPCISD::MTCTR: return "PPCISD::MTCTR"; |
1691 | case PPCISD::BCTRL: return "PPCISD::BCTRL"; |
1692 | case PPCISD::BCTRL_LOAD_TOC: return "PPCISD::BCTRL_LOAD_TOC"; |
1693 | case PPCISD::BCTRL_RM: |
1694 | return "PPCISD::BCTRL_RM"; |
1695 | case PPCISD::BCTRL_LOAD_TOC_RM: |
1696 | return "PPCISD::BCTRL_LOAD_TOC_RM"; |
1697 | case PPCISD::RET_GLUE: return "PPCISD::RET_GLUE"; |
1698 | case PPCISD::READ_TIME_BASE: return "PPCISD::READ_TIME_BASE"; |
1699 | case PPCISD::EH_SJLJ_SETJMP: return "PPCISD::EH_SJLJ_SETJMP"; |
1700 | case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP"; |
1701 | case PPCISD::MFOCRF: return "PPCISD::MFOCRF"; |
1702 | case PPCISD::MFVSR: return "PPCISD::MFVSR"; |
1703 | case PPCISD::MTVSRA: return "PPCISD::MTVSRA"; |
1704 | case PPCISD::MTVSRZ: return "PPCISD::MTVSRZ"; |
1705 | case PPCISD::SINT_VEC_TO_FP: return "PPCISD::SINT_VEC_TO_FP"; |
1706 | case PPCISD::UINT_VEC_TO_FP: return "PPCISD::UINT_VEC_TO_FP"; |
1707 | case PPCISD::SCALAR_TO_VECTOR_PERMUTED: |
1708 | return "PPCISD::SCALAR_TO_VECTOR_PERMUTED"; |
1709 | case PPCISD::ANDI_rec_1_EQ_BIT: |
1710 | return "PPCISD::ANDI_rec_1_EQ_BIT"; |
1711 | case PPCISD::ANDI_rec_1_GT_BIT: |
1712 | return "PPCISD::ANDI_rec_1_GT_BIT"; |
1713 | case PPCISD::VCMP: return "PPCISD::VCMP"; |
1714 | case PPCISD::VCMP_rec: return "PPCISD::VCMP_rec"; |
1715 | case PPCISD::LBRX: return "PPCISD::LBRX"; |
1716 | case PPCISD::STBRX: return "PPCISD::STBRX"; |
1717 | case PPCISD::LFIWAX: return "PPCISD::LFIWAX"; |
1718 | case PPCISD::LFIWZX: return "PPCISD::LFIWZX"; |
1719 | case PPCISD::LXSIZX: return "PPCISD::LXSIZX"; |
1720 | case PPCISD::STXSIX: return "PPCISD::STXSIX"; |
1721 | case PPCISD::VEXTS: return "PPCISD::VEXTS"; |
1722 | case PPCISD::LXVD2X: return "PPCISD::LXVD2X"; |
1723 | case PPCISD::STXVD2X: return "PPCISD::STXVD2X"; |
1724 | case PPCISD::LOAD_VEC_BE: return "PPCISD::LOAD_VEC_BE"; |
1725 | case PPCISD::STORE_VEC_BE: return "PPCISD::STORE_VEC_BE"; |
1726 | case PPCISD::ST_VSR_SCAL_INT: |
1727 | return "PPCISD::ST_VSR_SCAL_INT"; |
1728 | case PPCISD::COND_BRANCH: return "PPCISD::COND_BRANCH"; |
1729 | case PPCISD::BDNZ: return "PPCISD::BDNZ"; |
1730 | case PPCISD::BDZ: return "PPCISD::BDZ"; |
1731 | case PPCISD::MFFS: return "PPCISD::MFFS"; |
1732 | case PPCISD::FADDRTZ: return "PPCISD::FADDRTZ"; |
1733 | case PPCISD::TC_RETURN: return "PPCISD::TC_RETURN"; |
1734 | case PPCISD::CR6SET: return "PPCISD::CR6SET"; |
1735 | case PPCISD::CR6UNSET: return "PPCISD::CR6UNSET"; |
1736 | case PPCISD::PPC32_GOT: return "PPCISD::PPC32_GOT"; |
1737 | case PPCISD::PPC32_PICGOT: return "PPCISD::PPC32_PICGOT"; |
1738 | case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA"; |
1739 | case PPCISD::LD_GOT_TPREL_L: return "PPCISD::LD_GOT_TPREL_L"; |
1740 | case PPCISD::ADD_TLS: return "PPCISD::ADD_TLS"; |
1741 | case PPCISD::ADDIS_TLSGD_HA: return "PPCISD::ADDIS_TLSGD_HA"; |
1742 | case PPCISD::ADDI_TLSGD_L: return "PPCISD::ADDI_TLSGD_L"; |
1743 | case PPCISD::GET_TLS_ADDR: return "PPCISD::GET_TLS_ADDR"; |
1744 | case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR"; |
1745 | case PPCISD::TLSGD_AIX: return "PPCISD::TLSGD_AIX"; |
1746 | case PPCISD::ADDIS_TLSLD_HA: return "PPCISD::ADDIS_TLSLD_HA"; |
1747 | case PPCISD::ADDI_TLSLD_L: return "PPCISD::ADDI_TLSLD_L"; |
1748 | case PPCISD::GET_TLSLD_ADDR: return "PPCISD::GET_TLSLD_ADDR"; |
1749 | case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR"; |
1750 | case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA"; |
1751 | case PPCISD::ADDI_DTPREL_L: return "PPCISD::ADDI_DTPREL_L"; |
1752 | case PPCISD::PADDI_DTPREL: |
1753 | return "PPCISD::PADDI_DTPREL"; |
1754 | case PPCISD::VADD_SPLAT: return "PPCISD::VADD_SPLAT"; |
1755 | case PPCISD::SC: return "PPCISD::SC"; |
1756 | case PPCISD::CLRBHRB: return "PPCISD::CLRBHRB"; |
1757 | case PPCISD::MFBHRBE: return "PPCISD::MFBHRBE"; |
1758 | case PPCISD::RFEBB: return "PPCISD::RFEBB"; |
1759 | case PPCISD::XXSWAPD: return "PPCISD::XXSWAPD"; |
1760 | case PPCISD::SWAP_NO_CHAIN: return "PPCISD::SWAP_NO_CHAIN"; |
1761 | case PPCISD::BUILD_FP128: return "PPCISD::BUILD_FP128"; |
1762 | case PPCISD::BUILD_SPE64: return "PPCISD::BUILD_SPE64"; |
1763 | case PPCISD::EXTRACT_SPE: return "PPCISD::EXTRACT_SPE"; |
1764 | case PPCISD::EXTSWSLI: return "PPCISD::EXTSWSLI"; |
1765 | case PPCISD::LD_VSX_LH: return "PPCISD::LD_VSX_LH"; |
1766 | case PPCISD::FP_EXTEND_HALF: return "PPCISD::FP_EXTEND_HALF"; |
1767 | case PPCISD::MAT_PCREL_ADDR: return "PPCISD::MAT_PCREL_ADDR"; |
1768 | case PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR: |
1769 | return "PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR"; |
1770 | case PPCISD::TLS_LOCAL_EXEC_MAT_ADDR: |
1771 | return "PPCISD::TLS_LOCAL_EXEC_MAT_ADDR"; |
1772 | case PPCISD::ACC_BUILD: return "PPCISD::ACC_BUILD"; |
1773 | case PPCISD::PAIR_BUILD: return "PPCISD::PAIR_BUILD"; |
1774 | case PPCISD::EXTRACT_VSX_REG: return "PPCISD::EXTRACT_VSX_REG"; |
1775 | case PPCISD::XXMFACC: return "PPCISD::XXMFACC"; |
1776 | case PPCISD::LD_SPLAT: return "PPCISD::LD_SPLAT"; |
1777 | case PPCISD::ZEXT_LD_SPLAT: return "PPCISD::ZEXT_LD_SPLAT"; |
1778 | case PPCISD::SEXT_LD_SPLAT: return "PPCISD::SEXT_LD_SPLAT"; |
1779 | case PPCISD::FNMSUB: return "PPCISD::FNMSUB"; |
1780 | case PPCISD::STRICT_FADDRTZ: |
1781 | return "PPCISD::STRICT_FADDRTZ"; |
1782 | case PPCISD::STRICT_FCTIDZ: |
1783 | return "PPCISD::STRICT_FCTIDZ"; |
1784 | case PPCISD::STRICT_FCTIWZ: |
1785 | return "PPCISD::STRICT_FCTIWZ"; |
1786 | case PPCISD::STRICT_FCTIDUZ: |
1787 | return "PPCISD::STRICT_FCTIDUZ"; |
1788 | case PPCISD::STRICT_FCTIWUZ: |
1789 | return "PPCISD::STRICT_FCTIWUZ"; |
1790 | case PPCISD::STRICT_FCFID: |
1791 | return "PPCISD::STRICT_FCFID"; |
1792 | case PPCISD::STRICT_FCFIDU: |
1793 | return "PPCISD::STRICT_FCFIDU"; |
1794 | case PPCISD::STRICT_FCFIDS: |
1795 | return "PPCISD::STRICT_FCFIDS"; |
1796 | case PPCISD::STRICT_FCFIDUS: |
1797 | return "PPCISD::STRICT_FCFIDUS"; |
1798 | case PPCISD::LXVRZX: return "PPCISD::LXVRZX"; |
1799 | case PPCISD::STORE_COND: |
1800 | return "PPCISD::STORE_COND"; |
1801 | } |
1802 | return nullptr; |
1803 | } |
1804 | |
1805 | EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C, |
1806 | EVT VT) const { |
1807 | if (!VT.isVector()) |
1808 | return Subtarget.useCRBits() ? MVT::i1 : MVT::i32; |
1809 | |
1810 | return VT.changeVectorElementTypeToInteger(); |
1811 | } |
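| |
| // E.g. a setcc on v4f32 yields v4i32 here, while a scalar f64 compare yields |
| // i1 when CR bits are used and i32 otherwise. |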
1812 | |
1813 | bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const { |
1814 | assert(VT.isFloatingPoint() && "Non-floating-point FMA?"); |
1815 | return true; |
1816 | } |
1817 | |
1818 | //===----------------------------------------------------------------------===// |
1819 | // Node matching predicates, for use by the tblgen matching code. |
1820 | //===----------------------------------------------------------------------===// |
1821 | |
1822 | /// isFloatingPointZero - Return true if this is 0.0 or -0.0. |
1823 | static bool isFloatingPointZero(SDValue Op) { |
1824 | if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) |
1825 | return CFP->getValueAPF().isZero(); |
1826 | else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) { |
1827 | // Maybe this has already been legalized into the constant pool? |
1828 | if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1))) |
1829 | if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal())) |
1830 | return CFP->getValueAPF().isZero(); |
1831 | } |
1832 | return false; |
1833 | } |
1834 | |
1835 | /// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return |
1836 | /// true if Op is undef or if it matches the specified value. |
1837 | static bool isConstantOrUndef(int Op, int Val) { |
1838 | return Op < 0 || Op == Val; |
1839 | } |
1840 | |
1841 | /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a |
1842 | /// VPKUHUM instruction. |
1843 | /// The ShuffleKind distinguishes between big-endian operations with |
1844 | /// two different inputs (0), either-endian operations with two identical |
1845 | /// inputs (1), and little-endian operations with two different inputs (2). |
1846 | /// For the latter, the input operands are swapped (see PPCInstrAltivec.td). |
1847 | bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, |
1848 | SelectionDAG &DAG) { |
1849 | bool IsLE = DAG.getDataLayout().isLittleEndian(); |
1850 | if (ShuffleKind == 0) { |
1851 | if (IsLE) |
1852 | return false; |
1853 | for (unsigned i = 0; i != 16; ++i) |
1854 | if (!isConstantOrUndef(N->getMaskElt(i), i*2+1)) |
1855 | return false; |
1856 | } else if (ShuffleKind == 2) { |
1857 | if (!IsLE) |
1858 | return false; |
1859 | for (unsigned i = 0; i != 16; ++i) |
1860 | if (!isConstantOrUndef(N->getMaskElt(i), i*2)) |
1861 | return false; |
1862 | } else if (ShuffleKind == 1) { |
1863 | unsigned j = IsLE ? 0 : 1; |
1864 | for (unsigned i = 0; i != 8; ++i) |
1865 | if (!isConstantOrUndef(N->getMaskElt(i), i*2+j) || |
1866 | !isConstantOrUndef(N->getMaskElt(i+8), i*2+j)) |
1867 | return false; |
1868 | } |
1869 | return true; |
1870 | } |
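| |
| // Illustrative mask (big-endian, two inputs, ShuffleKind 0): vpkuhum keeps |
| // the low-order byte of each halfword, so the checks above require |
| // <1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31> (undefs permitted). |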
1871 | |
1872 | /// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a |
1873 | /// VPKUWUM instruction. |
1874 | /// The ShuffleKind distinguishes between big-endian operations with |
1875 | /// two different inputs (0), either-endian operations with two identical |
1876 | /// inputs (1), and little-endian operations with two different inputs (2). |
1877 | /// For the latter, the input operands are swapped (see PPCInstrAltivec.td). |
1878 | bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, |
1879 | SelectionDAG &DAG) { |
1880 | bool IsLE = DAG.getDataLayout().isLittleEndian(); |
1881 | if (ShuffleKind == 0) { |
1882 | if (IsLE) |
1883 | return false; |
1884 | for (unsigned i = 0; i != 16; i += 2) |
1885 | if (!isConstantOrUndef(N->getMaskElt(i ), i*2+2) || |
1886 | !isConstantOrUndef(N->getMaskElt(i+1), i*2+3)) |
1887 | return false; |
1888 | } else if (ShuffleKind == 2) { |
1889 | if (!IsLE) |
1890 | return false; |
1891 | for (unsigned i = 0; i != 16; i += 2) |
1892 | if (!isConstantOrUndef(N->getMaskElt(i ), i*2) || |
1893 | !isConstantOrUndef(N->getMaskElt(i+1), i*2+1)) |
1894 | return false; |
1895 | } else if (ShuffleKind == 1) { |
1896 | unsigned j = IsLE ? 0 : 2; |
1897 | for (unsigned i = 0; i != 8; i += 2) |
1898 | if (!isConstantOrUndef(N->getMaskElt(i ), i*2+j) || |
1899 | !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) || |
1900 | !isConstantOrUndef(N->getMaskElt(i+8), i*2+j) || |
1901 | !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1)) |
1902 | return false; |
1903 | } |
1904 | return true; |
1905 | } |
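| |
| // Illustrative mask (big-endian, two inputs, ShuffleKind 0): vpkuwum keeps |
| // the low-order halfword of each word, so the checks above require |
| // <2,3,6,7,10,11,14,15,18,19,22,23,26,27,30,31> (undefs permitted). |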
1906 | |
1907 | /// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a |
1908 | /// VPKUDUM instruction, AND the VPKUDUM instruction exists for the |
1909 | /// current subtarget. |
1910 | /// |
1911 | /// The ShuffleKind distinguishes between big-endian operations with |
1912 | /// two different inputs (0), either-endian operations with two identical |
1913 | /// inputs (1), and little-endian operations with two different inputs (2). |
1914 | /// For the latter, the input operands are swapped (see PPCInstrAltivec.td). |
1915 | bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, |
1916 | SelectionDAG &DAG) { |
1917 | const PPCSubtarget &Subtarget = DAG.getSubtarget<PPCSubtarget>(); |
1918 | if (!Subtarget.hasP8Vector()) |
1919 | return false; |
1920 | |
1921 | bool IsLE = DAG.getDataLayout().isLittleEndian(); |
1922 | if (ShuffleKind == 0) { |
1923 | if (IsLE) |
1924 | return false; |
1925 | for (unsigned i = 0; i != 16; i += 4) |
1926 | if (!isConstantOrUndef(N->getMaskElt(i ), i*2+4) || |
1927 | !isConstantOrUndef(N->getMaskElt(i+1), i*2+5) || |
1928 | !isConstantOrUndef(N->getMaskElt(i+2), i*2+6) || |
1929 | !isConstantOrUndef(N->getMaskElt(i+3), i*2+7)) |
1930 | return false; |
1931 | } else if (ShuffleKind == 2) { |
1932 | if (!IsLE) |
1933 | return false; |
1934 | for (unsigned i = 0; i != 16; i += 4) |
1935 | if (!isConstantOrUndef(N->getMaskElt(i ), i*2) || |
1936 | !isConstantOrUndef(N->getMaskElt(i+1), i*2+1) || |
1937 | !isConstantOrUndef(N->getMaskElt(i+2), i*2+2) || |
1938 | !isConstantOrUndef(N->getMaskElt(i+3), i*2+3)) |
1939 | return false; |
1940 | } else if (ShuffleKind == 1) { |
1941 | unsigned j = IsLE ? 0 : 4; |
1942 | for (unsigned i = 0; i != 8; i += 4) |
1943 | if (!isConstantOrUndef(N->getMaskElt(i ), i*2+j) || |
1944 | !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) || |
1945 | !isConstantOrUndef(N->getMaskElt(i+2), i*2+j+2) || |
1946 | !isConstantOrUndef(N->getMaskElt(i+3), i*2+j+3) || |
1947 | !isConstantOrUndef(N->getMaskElt(i+8), i*2+j) || |
1948 | !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1) || |
1949 | !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) || |
1950 | !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3)) |
1951 | return false; |
1952 | } |
1953 | return true; |
1954 | } |
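| |
| // Illustrative mask (big-endian, two inputs, ShuffleKind 0): vpkudum keeps |
| // the low-order word of each doubleword, so the checks above require |
| // <4,5,6,7,12,13,14,15,20,21,22,23,28,29,30,31> (undefs permitted). |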
1955 | |
1956 | /// isVMerge - Common function, used to match vmrg* shuffles. |
1957 | /// |
1958 | static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize, |
1959 | unsigned LHSStart, unsigned RHSStart) { |
1960 | if (N->getValueType(0) != MVT::v16i8) |
1961 | return false; |
1962 | assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) && |
1963 | "Unsupported merge size!"); |
1964 | |
1965 | for (unsigned i = 0; i != 8/UnitSize; ++i) // Step over units |
1966 | for (unsigned j = 0; j != UnitSize; ++j) { // Step over bytes within unit |
1967 | if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j), |
1968 | LHSStart+j+i*UnitSize) || |
1969 | !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j), |
1970 | RHSStart+j+i*UnitSize)) |
1971 | return false; |
1972 | } |
1973 | return true; |
1974 | } |
1975 | |
1976 | /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for |
1977 | /// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes). |
1978 | /// The ShuffleKind distinguishes between big-endian merges with two |
1979 | /// different inputs (0), either-endian merges with two identical inputs (1), |
1980 | /// and little-endian merges with two different inputs (2). For the latter, |
1981 | /// the input operands are swapped (see PPCInstrAltivec.td). |
1982 | bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, |
1983 | unsigned ShuffleKind, SelectionDAG &DAG) { |
1984 | if (DAG.getDataLayout().isLittleEndian()) { |
1985 | if (ShuffleKind == 1) // unary |
1986 | return isVMerge(N, UnitSize, 0, 0); |
1987 | else if (ShuffleKind == 2) // swapped |
1988 | return isVMerge(N, UnitSize, 0, 16); |
1989 | else |
1990 | return false; |
1991 | } else { |
1992 | if (ShuffleKind == 1) // unary |
1993 | return isVMerge(N, UnitSize, 8, 8); |
1994 | else if (ShuffleKind == 0) // normal |
1995 | return isVMerge(N, UnitSize, 8, 24); |
1996 | else |
1997 | return false; |
1998 | } |
1999 | } |
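| |
| // Illustrative mask (big-endian vmrglb, two inputs, ShuffleKind 0): this |
| // calls isVMerge(N, 1, 8, 24), which requires |
| // <8,24,9,25,10,26,11,27,12,28,13,29,14,30,15,31> (undefs permitted). |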
2000 | |
2001 | /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for |
2002 | /// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes). |
2003 | /// The ShuffleKind distinguishes between big-endian merges with two |
2004 | /// different inputs (0), either-endian merges with two identical inputs (1), |
2005 | /// and little-endian merges with two different inputs (2). For the latter, |
2006 | /// the input operands are swapped (see PPCInstrAltivec.td). |
2007 | bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, |
2008 | unsigned ShuffleKind, SelectionDAG &DAG) { |
2009 | if (DAG.getDataLayout().isLittleEndian()) { |
2010 | if (ShuffleKind == 1) // unary |
2011 | return isVMerge(N, UnitSize, 8, 8); |
2012 | else if (ShuffleKind == 2) // swapped |
2013 | return isVMerge(N, UnitSize, 8, 24); |
2014 | else |
2015 | return false; |
2016 | } else { |
2017 | if (ShuffleKind == 1) // unary |
2018 | return isVMerge(N, UnitSize, 0, 0); |
2019 | else if (ShuffleKind == 0) // normal |
2020 | return isVMerge(N, UnitSize, 0, 16); |
2021 | else |
2022 | return false; |
2023 | } |
2024 | } |
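| |
| // Illustrative mask (big-endian vmrghb, two inputs, ShuffleKind 0): this |
| // calls isVMerge(N, 1, 0, 16), which requires |
| // <0,16,1,17,2,18,3,19,4,20,5,21,6,22,7,23> (undefs permitted). |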
2025 | |
2026 | /** |
2027 | * Common function used to match vmrgew and vmrgow shuffles |
2028 | * |
2029 | * The indexOffset determines whether to look for even or odd words in |
2030 | * the shuffle mask. This is based on the endianness of the target |
2031 | * machine. |
2032 | * - Little Endian: |
2033 | * - Use offset of 0 to check for odd elements |
2034 | * - Use offset of 4 to check for even elements |
2035 | * - Big Endian: |
2036 | * - Use offset of 0 to check for even elements |
2037 | * - Use offset of 4 to check for odd elements |
2038 | * A detailed description of the vector element ordering for little endian and |
2039 | * big endian can be found at |
2040 | * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html |
2041 | * ("Targeting your applications - what little endian and big endian IBM |
2042 | * XL C/C++ compiler differences mean to you"). |
2043 | * |
2044 | * The mask to the shuffle vector instruction specifies the indices of the |
2045 | * elements from the two input vectors to place in the result. The elements are |
2046 | * numbered in array-access order, starting with the first vector. These vectors |
2047 | * are always of type v16i8, so each vector contains 16 byte-sized |
2048 | * elements. More info on the shuffle vector can be found in the |
2049 | * http://llvm.org/docs/LangRef.html#shufflevector-instruction |
2050 | * Language Reference. |
2051 | * |
2052 | * The RHSStartValue indicates whether the same input vectors are used (unary) |
2053 | * or two different input vectors are used, based on the following: |
2054 | * - If the instruction uses the same vector for both inputs, the range of the |
2055 | * indices will be 0 to 15. In this case, the RHSStart value passed should |
2056 | * be 0. |
2057 | * - If the instruction has two different vectors then the range of the |
2058 | * indices will be 0 to 31. In this case, the RHSStart value passed should |
2059 | * be 16 (indices 0-15 specify elements in the first vector while indices 16 |
2060 | * to 31 specify elements in the second vector). |
2061 | * |
2062 | * \param[in] N The shuffle vector SD Node to analyze |
2063 | * \param[in] IndexOffset Specifies whether to look for even or odd elements |
2064 | * \param[in] RHSStartValue Specifies the starting index for the righthand input |
2065 | * vector to the shuffle_vector instruction |
2066 | * \return true iff this shuffle vector represents an even or odd word merge |
2067 | */ |
2068 | static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset, |
2069 | unsigned RHSStartValue) { |
2070 | if (N->getValueType(0) != MVT::v16i8) |
2071 | return false; |
2072 | |
2073 | for (unsigned i = 0; i < 2; ++i) |
2074 | for (unsigned j = 0; j < 4; ++j) |
2075 | if (!isConstantOrUndef(N->getMaskElt(i*4+j), |
2076 | i*RHSStartValue+j+IndexOffset) || |
2077 | !isConstantOrUndef(N->getMaskElt(i*4+j+8), |
2078 | i*RHSStartValue+j+IndexOffset+8)) |
2079 | return false; |
2080 | return true; |
2081 | } |
2082 | |
2083 | /** |
2084 | * Determine if the specified shuffle mask is suitable for the vmrgew or |
2085 | * vmrgow instructions. |
2086 | * |
2087 | * \param[in] N The shuffle vector SD Node to analyze |
2088 | * \param[in] CheckEven Check for an even merge (true) or an odd merge (false) |
2089 | * \param[in] ShuffleKind Identify the type of merge: |
2090 | * - 0 = big-endian merge with two different inputs; |
2091 | * - 1 = either-endian merge with two identical inputs; |
2092 | * - 2 = little-endian merge with two different inputs (inputs are swapped for |
2093 | * little-endian merges). |
2094 | * \param[in] DAG The current SelectionDAG |
2095 | * \return true iff this shuffle mask is suitable for vmrgew or vmrgow |
2096 | */ |
2097 | bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven, |
2098 | unsigned ShuffleKind, SelectionDAG &DAG) { |
2099 | if (DAG.getDataLayout().isLittleEndian()) { |
2100 | unsigned indexOffset = CheckEven ? 4 : 0; |
2101 | if (ShuffleKind == 1) // Unary |
2102 | return isVMerge(N, indexOffset, 0); |
2103 | else if (ShuffleKind == 2) // swapped |
2104 | return isVMerge(N, indexOffset, 16); |
2105 | else |
2106 | return false; |
2107 | } |
2108 | else { |
2109 | unsigned indexOffset = CheckEven ? 0 : 4; |
2110 | if (ShuffleKind == 1) // Unary |
2111 | return isVMerge(N, indexOffset, 0); |
2112 | else if (ShuffleKind == 0) // Normal |
2113 | return isVMerge(N, indexOffset, 16); |
2114 | else |
2115 | return false; |
2116 | } |
2118 | } |
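| |
| // Illustrative mask (big-endian vmrgew, two inputs, ShuffleKind 0): with |
| // indexOffset 0 and RHSStartValue 16, the checks require |
| // <0,1,2,3,16,17,18,19,8,9,10,11,24,25,26,27> (undefs permitted). |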
2119 | |
2120 | /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift |
2121 | /// amount, otherwise return -1. |
2122 | /// The ShuffleKind distinguishes between big-endian operations with two |
2123 | /// different inputs (0), either-endian operations with two identical inputs |
2124 | /// (1), and little-endian operations with two different inputs (2). For the |
2125 | /// latter, the input operands are swapped (see PPCInstrAltivec.td). |
2126 | int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind, |
2127 | SelectionDAG &DAG) { |
2128 | if (N->getValueType(0) != MVT::v16i8) |
2129 | return -1; |
2130 | |
2131 | ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); |
2132 | |
2133 | // Find the first non-undef value in the shuffle mask. |
2134 | unsigned i; |
2135 | for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i) |
2136 | /*search*/; |
2137 | |
2138 | if (i == 16) return -1; // all undef. |
2139 | |
2140 | // Otherwise, check to see if the rest of the elements are consecutively |
2141 | // numbered from this value. |
2142 | unsigned ShiftAmt = SVOp->getMaskElt(i); |
2143 | if (ShiftAmt < i) return -1; |
2144 | |
2145 | ShiftAmt -= i; |
2146 | bool isLE = DAG.getDataLayout().isLittleEndian(); |
2147 | |
2148 | if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) { |
2149 | // Check the rest of the elements to see if they are consecutive. |
2150 | for (++i; i != 16; ++i) |
2151 | if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i)) |
2152 | return -1; |
2153 | } else if (ShuffleKind == 1) { |
2154 | // Check the rest of the elements to see if they are consecutive. |
2155 | for (++i; i != 16; ++i) |
2156 | if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15)) |
2157 | return -1; |
2158 | } else |
2159 | return -1; |
2160 | |
2161 | if (isLE) |
2162 | ShiftAmt = 16 - ShiftAmt; |
2163 | |
2164 | return ShiftAmt; |
2165 | } |
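| |
| // Illustrative mask (big-endian, ShuffleKind 0): <3,4,5,...,18> is a vsldoi |
| // by 3 bytes, so 3 is returned; with ShuffleKind 2 on little-endian the same |
| // consecutive mask yields 16 - 3 = 13. |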
2166 | |
2167 | /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand |
2168 | /// specifies a splat of a single element that is suitable for input to |
2169 | /// one of the splat operations (VSPLTB/VSPLTH/VSPLTW/XXSPLTW/LXVDSX/etc.). |
2170 | bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) { |
2171 | EVT VT = N->getValueType(0); |
2172 | if (VT == MVT::v2i64 || VT == MVT::v2f64) |
2173 | return EltSize == 8 && N->getMaskElt(0) == N->getMaskElt(1); |
2174 | |
2175 | assert(VT == MVT::v16i8 && isPowerOf2_32(EltSize) && EltSize <= 8 && |
2176 | "Can only handle 1,2,4,8 byte element sizes"); |
2177 | |
2178 | // The consecutive indices need to specify an element, not part of two |
2179 | // different elements. So abandon ship early if this isn't the case. |
2180 | if (N->getMaskElt(0) % EltSize != 0) |
2181 | return false; |
2182 | |
2183 | // This is a splat operation if each element of the permute is the same, and |
2184 | // if the value doesn't reference the second vector. |
2185 | unsigned ElementBase = N->getMaskElt(0); |
2186 | |
2187 | // FIXME: Handle UNDEF elements too! |
2188 | if (ElementBase >= 16) |
2189 | return false; |
2190 | |
2191 | // Check that the indices are consecutive, in the case of a multi-byte element |
2192 | // splatted with a v16i8 mask. |
2193 | for (unsigned i = 1; i != EltSize; ++i) |
2194 | if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase)) |
2195 | return false; |
2196 | |
2197 | for (unsigned i = EltSize, e = 16; i != e; i += EltSize) { |
2198 | if (N->getMaskElt(i) < 0) continue; |
2199 | for (unsigned j = 0; j != EltSize; ++j) |
2200 | if (N->getMaskElt(i+j) != N->getMaskElt(j)) |
2201 | return false; |
2202 | } |
2203 | return true; |
2204 | } |
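| |
| // Illustrative splat (EltSize 4): the mask <4,5,6,7,4,5,6,7,4,5,6,7,4,5,6,7> |
| // splats word element 1 of the first input and is accepted here. |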
2205 | |
2206 | /// Check that the mask is shuffling N byte elements. Within each N byte |
2207 | /// element of the mask, the indices could be either in increasing or |
2208 | /// decreasing order as long as they are consecutive. |
2209 | /// \param[in] N the shuffle vector SD Node to analyze |
2210 | /// \param[in] Width the element width in bytes, could be 2/4/8/16 (HalfWord/ |
2211 | /// Word/DoubleWord/QuadWord). |
2212 | /// \param[in] StepLen the step between adjacent indices within an N byte |
2213 | /// element: 1 if the mask is increasing, -1 if it is decreasing. |
2214 | /// \return true iff the mask is shuffling N byte elements. |
2215 | static bool isNByteElemShuffleMask(ShuffleVectorSDNode *N, unsigned Width, |
2216 | int StepLen) { |
2217 | assert((Width == 2 || Width == 4 || Width == 8 || Width == 16) && |
2218 | "Unexpected element width."); |
2219 | assert((StepLen == 1 || StepLen == -1) && "Unexpected step length."); |
2220 | |
2221 | unsigned NumOfElem = 16 / Width; |
2222 | unsigned MaskVal[16]; // Width is never greater than 16 |
2223 | for (unsigned i = 0; i < NumOfElem; ++i) { |
2224 | MaskVal[0] = N->getMaskElt(i * Width); |
2225 | if ((StepLen == 1) && (MaskVal[0] % Width)) { |
2226 | return false; |
2227 | } else if ((StepLen == -1) && ((MaskVal[0] + 1) % Width)) { |
2228 | return false; |
2229 | } |
2230 | |
2231 | for (unsigned int j = 1; j < Width; ++j) { |
2232 | MaskVal[j] = N->getMaskElt(i * Width + j); |
2233 | if (MaskVal[j] != MaskVal[j-1] + StepLen) { |
2234 | return false; |
2235 | } |
2236 | } |
2237 | } |
2238 | |
2239 | return true; |
2240 | } |
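| |
| // Illustrative check (Width 4, StepLen -1): the mask |
| // <3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12> shuffles word elements whose byte |
| // indices decrease consecutively, so it is accepted. |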
2241 | |
2242 | bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, |
2243 | unsigned &InsertAtByte, bool &Swap, bool IsLE) { |
2244 | if (!isNByteElemShuffleMask(N, 4, 1)) |
2245 | return false; |
2246 | |
2247 | // Now we look at mask elements 0,4,8,12 |
2248 | unsigned M0 = N->getMaskElt(0) / 4; |
2249 | unsigned M1 = N->getMaskElt(4) / 4; |
2250 | unsigned M2 = N->getMaskElt(8) / 4; |
2251 | unsigned M3 = N->getMaskElt(12) / 4; |
2252 | unsigned LittleEndianShifts[] = { 2, 1, 0, 3 }; |
2253 | unsigned BigEndianShifts[] = { 3, 0, 1, 2 }; |
2254 | |
2255 | // Below, let H and L be arbitrary elements of the shuffle mask |
2256 | // where H is in the range [4,7] and L is in the range [0,3]. |
2257 | // H, 1, 2, 3 or L, 5, 6, 7 |
2258 | if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) || |
2259 | (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) { |
2260 | ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3]; |
2261 | InsertAtByte = IsLE ? 12 : 0; |
2262 | Swap = M0 < 4; |
2263 | return true; |
2264 | } |
2265 | // 0, H, 2, 3 or 4, L, 6, 7 |
2266 | if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) || |
2267 | (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) { |
2268 | ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3]; |
2269 | InsertAtByte = IsLE ? 8 : 4; |
2270 | Swap = M1 < 4; |
2271 | return true; |
2272 | } |
2273 | // 0, 1, H, 3 or 4, 5, L, 7 |
2274 | if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) || |
2275 | (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) { |
2276 | ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3]; |
2277 | InsertAtByte = IsLE ? 4 : 8; |
2278 | Swap = M2 < 4; |
2279 | return true; |
2280 | } |
2281 | // 0, 1, 2, H or 4, 5, 6, L |
2282 | if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) || |
2283 | (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) { |
2284 | ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3]; |
2285 | InsertAtByte = IsLE ? 0 : 12; |
2286 | Swap = M3 < 4; |
2287 | return true; |
2288 | } |
2289 | |
2290 | // If both vector operands for the shuffle are the same vector, the mask will |
2291 | // contain only elements from the first one and the second one will be undef. |
2292 | if (N->getOperand(1).isUndef()) { |
2293 | ShiftElts = 0; |
2294 | Swap = true; |
2295 | unsigned XXINSERTWSrcElem = IsLE ? 2 : 1; |
2296 | if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) { |
2297 | InsertAtByte = IsLE ? 12 : 0; |
2298 | return true; |
2299 | } |
2300 | if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) { |
2301 | InsertAtByte = IsLE ? 8 : 4; |
2302 | return true; |
2303 | } |
2304 | if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) { |
2305 | InsertAtByte = IsLE ? 4 : 8; |
2306 | return true; |
2307 | } |
2308 | if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) { |
2309 | InsertAtByte = IsLE ? 0 : 12; |
2310 | return true; |
2311 | } |
2312 | } |
2313 | |
2314 | return false; |
2315 | } |
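// A worked example of the logic above (illustrative only), little-endian:
// the byte mask {0,1,2,3, 4,5,6,7, 16,17,18,19, 12,13,14,15} yields word
// elements M0=0, M1=1, M2=4, M3=3, matching the "0, 1, H, 3" pattern, so
// ShiftElts = LittleEndianShifts[4 & 0x3] = 2, InsertAtByte = 4, and
// Swap = false (M2 >= 4, so the inserted word comes from the second input).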
2316 | |
2317 | bool PPC::isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, |
2318 | bool &Swap, bool IsLE) { |
2319 | assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
2320 | // Ensure each byte index of the word is consecutive. |
2321 | if (!isNByteElemShuffleMask(N, 4, 1)) |
2322 | return false; |
2323 | |
2324 | // Now we look at mask elements 0,4,8,12, which are the beginning of words. |
2325 | unsigned M0 = N->getMaskElt(0) / 4; |
2326 | unsigned M1 = N->getMaskElt(4) / 4; |
2327 | unsigned M2 = N->getMaskElt(8) / 4; |
2328 | unsigned M3 = N->getMaskElt(12) / 4; |
2329 | |
2330 | // If both vector operands for the shuffle are the same vector, the mask will |
2331 | // contain only elements from the first one and the second one will be undef. |
2332 | if (N->getOperand(1).isUndef()) { |
2333 | assert(M0 < 4 && "Indexing into an undef vector?");
2334 | if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4) |
2335 | return false; |
2336 | |
2337 | ShiftElts = IsLE ? (4 - M0) % 4 : M0; |
2338 | Swap = false; |
2339 | return true; |
2340 | } |
2341 | |
2342 | // Ensure each word index of the ShuffleVector Mask is consecutive. |
2343 | if (M1 != (M0 + 1) % 8 || M2 != (M1 + 1) % 8 || M3 != (M2 + 1) % 8) |
2344 | return false; |
2345 | |
2346 | if (IsLE) { |
2347 | if (M0 == 0 || M0 == 7 || M0 == 6 || M0 == 5) { |
2348 | // Input vectors don't need to be swapped if the leading element |
2349 | // of the result is one of the 3 left elements of the second vector |
2350 | // (or if there is no shift to be done at all). |
2351 | Swap = false; |
2352 | ShiftElts = (8 - M0) % 8; |
2353 | } else if (M0 == 4 || M0 == 3 || M0 == 2 || M0 == 1) { |
2354 | // Input vectors need to be swapped if the leading element |
2355 | // of the result is one of the 3 left elements of the first vector |
2356 | // (or if we're shifting by 4 - thereby simply swapping the vectors). |
2357 | Swap = true; |
2358 | ShiftElts = (4 - M0) % 4; |
2359 | } |
2360 | |
2361 | return true; |
2362 | } else { // BE |
2363 | if (M0 == 0 || M0 == 1 || M0 == 2 || M0 == 3) { |
2364 | // Input vectors don't need to be swapped if the leading element |
2365 | // of the result is one of the 4 elements of the first vector. |
2366 | Swap = false; |
2367 | ShiftElts = M0; |
2368 | } else if (M0 == 4 || M0 == 5 || M0 == 6 || M0 == 7) { |
2369 | // Input vectors need to be swapped if the leading element |
2370 | // of the result is one of the 4 elements of the right vector. |
2371 | Swap = true; |
2372 | ShiftElts = M0 - 4; |
2373 | } |
2374 | |
2375 | return true; |
2376 | } |
2377 | } |
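// For illustration (a sketch): on little-endian, the byte mask
// {24,25,26,27, 28,29,30,31, 0,1,2,3, 4,5,6,7} yields word elements
// M0=6, M1=7, M2=0, M3=1, which are consecutive modulo 8, so the mask is
// a valid XXSLDWI with Swap = false and ShiftElts = (8 - 6) % 8 = 2.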
2378 | |
2379 | bool static isXXBRShuffleMaskHelper(ShuffleVectorSDNode *N, int Width) { |
2380 | assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
2381 | |
2382 | if (!isNByteElemShuffleMask(N, Width, -1)) |
2383 | return false; |
2384 | |
2385 | for (int i = 0; i < 16; i += Width) |
2386 | if (N->getMaskElt(i) != i + Width - 1) |
2387 | return false; |
2388 | |
2389 | return true; |
2390 | } |
2391 | |
2392 | bool PPC::isXXBRHShuffleMask(ShuffleVectorSDNode *N) { |
2393 | return isXXBRShuffleMaskHelper(N, 2); |
2394 | } |
2395 | |
2396 | bool PPC::isXXBRWShuffleMask(ShuffleVectorSDNode *N) { |
2397 | return isXXBRShuffleMaskHelper(N, 4); |
2398 | } |
2399 | |
2400 | bool PPC::isXXBRDShuffleMask(ShuffleVectorSDNode *N) { |
2401 | return isXXBRShuffleMaskHelper(N, 8); |
2402 | } |
2403 | |
2404 | bool PPC::isXXBRQShuffleMask(ShuffleVectorSDNode *N) { |
2405 | return isXXBRShuffleMaskHelper(N, 16); |
2406 | } |
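// Example masks these predicates accept (illustrative): each Width-byte
// element must have its bytes reversed in place, e.g.
//   isXXBRHShuffleMask: {1,0, 3,2, 5,4, 7,6, 9,8, 11,10, 13,12, 15,14}
//   isXXBRWShuffleMask: {3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12}
//   isXXBRDShuffleMask: {7,6,5,4,3,2,1,0, 15,14,13,12,11,10,9,8}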
2407 | |
2408 | /// Can node \p N be lowered to an XXPERMDI instruction? If so, set \p Swap |
2409 | /// if the inputs to the instruction should be swapped and set \p DM to the |
2410 | /// value for the immediate. |
2411 | /// Specifically, set \p Swap to true only if \p N can be lowered to XXPERMDI |
2412 | /// AND element 0 of the result comes from the first input (LE) or second input |
2413 | /// (BE). Set \p DM to the calculated result (0-3) only if \p N can be lowered. |
2414 | /// \return true iff the given mask of shuffle node \p N is a XXPERMDI shuffle |
2415 | /// mask. |
2416 | bool PPC::isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &DM, |
2417 | bool &Swap, bool IsLE) { |
2418 | assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
2419 | |
2420 | // Ensure each byte index of the double word is consecutive. |
2421 | if (!isNByteElemShuffleMask(N, 8, 1)) |
2422 | return false; |
2423 | |
2424 | unsigned M0 = N->getMaskElt(0) / 8; |
2425 | unsigned M1 = N->getMaskElt(8) / 8; |
2426 | assert(((M0 | M1) < 4) && "A mask element out of bounds?");
2427 | |
2428 | // If both vector operands for the shuffle are the same vector, the mask will |
2429 | // contain only elements from the first one and the second one will be undef. |
2430 | if (N->getOperand(1).isUndef()) { |
2431 | if ((M0 | M1) < 2) { |
2432 | DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1); |
2433 | Swap = false; |
2434 | return true; |
2435 | } else |
2436 | return false; |
2437 | } |
2438 | |
2439 | if (IsLE) { |
2440 | if (M0 > 1 && M1 < 2) { |
2441 | Swap = false; |
2442 | } else if (M0 < 2 && M1 > 1) { |
2443 | M0 = (M0 + 2) % 4; |
2444 | M1 = (M1 + 2) % 4; |
2445 | Swap = true; |
2446 | } else |
2447 | return false; |
2448 | |
2449 | // Note: if control flow reaches here, Swap has already been set above.
2450 | DM = (((~M1) & 1) << 1) + ((~M0) & 1); |
2451 | return true; |
2452 | } else { // BE |
2453 | if (M0 < 2 && M1 > 1) { |
2454 | Swap = false; |
2455 | } else if (M0 > 1 && M1 < 2) { |
2456 | M0 = (M0 + 2) % 4; |
2457 | M1 = (M1 + 2) % 4; |
2458 | Swap = true; |
2459 | } else |
2460 | return false; |
2461 | |
2462 | // Note: if control flow reaches here, Swap has already been set above.
2463 | DM = (M0 << 1) + (M1 & 1); |
2464 | return true; |
2465 | } |
2466 | } |
2467 | |
2468 | |
2469 | /// getSplatIdxForPPCMnemonics - Return the splat index as a value that is |
2470 | /// appropriate for PPC mnemonics (which have a big endian bias - namely |
2471 | /// elements are counted from the left of the vector register). |
2472 | unsigned PPC::getSplatIdxForPPCMnemonics(SDNode *N, unsigned EltSize, |
2473 | SelectionDAG &DAG) { |
2474 | ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); |
2475 | assert(isSplatShuffleMask(SVOp, EltSize));
2476 | EVT VT = SVOp->getValueType(0); |
2477 | |
2478 | if (VT == MVT::v2i64 || VT == MVT::v2f64) |
2479 | return DAG.getDataLayout().isLittleEndian() ? 1 - SVOp->getMaskElt(0) |
2480 | : SVOp->getMaskElt(0); |
2481 | |
2482 | if (DAG.getDataLayout().isLittleEndian()) |
2483 | return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize); |
2484 | else |
2485 | return SVOp->getMaskElt(0) / EltSize; |
2486 | } |
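// Worked example (illustrative): with EltSize = 4 and a mask splatting byte
// index 4 (word element 1 of the input), big-endian returns 1 directly,
// while little-endian returns (16/4) - 1 - 1 = 2, i.e. the same physical
// word counted from the left of the register, as the vspltw mnemonic expects.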
2487 | |
2488 | /// get_VSPLTI_elt - If this is a build_vector of constants which can be formed |
2489 | /// by using a vspltis[bhw] instruction of the specified element size, return |
2490 | /// the constant being splatted. The ByteSize field indicates the number of |
2491 | /// bytes of each element [124] -> [bhw]. |
2492 | SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) { |
2493 | SDValue OpVal; |
2494 | |
2495 | // If ByteSize of the splat is bigger than the element size of the |
2496 | // build_vector, then we have a case where we are checking for a splat where |
2497 | // multiple elements of the buildvector are folded together into a single |
2498 | // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
2499 | unsigned EltSize = 16/N->getNumOperands(); |
2500 | if (EltSize < ByteSize) { |
2501 | unsigned Multiple = ByteSize/EltSize; // Number of BV entries per spltval. |
2502 | SDValue UniquedVals[4]; |
2503 | assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");
2504 | |
2505 | // See if all of the elements in the buildvector agree within each chunk.
2506 | for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { |
2507 | if (N->getOperand(i).isUndef()) continue; |
2508 | // If any element isn't a constant, bail out entirely.
2509 | if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue(); |
2510 | |
2511 | if (!UniquedVals[i&(Multiple-1)].getNode()) |
2512 | UniquedVals[i&(Multiple-1)] = N->getOperand(i); |
2513 | else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i)) |
2514 | return SDValue(); // no match. |
2515 | } |
2516 | |
2517 | // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains |
2518 | // either constant or undef values that are identical for each chunk. See |
2519 | // if these chunks can form into a larger vspltis*. |
2520 | |
2521 | // Check to see if all of the leading entries are either 0 or -1. If |
2522 | // neither, then this won't fit into the immediate field. |
2523 | bool LeadingZero = true; |
2524 | bool LeadingOnes = true; |
2525 | for (unsigned i = 0; i != Multiple-1; ++i) { |
2526 | if (!UniquedVals[i].getNode()) continue; // Must have been undefs. |
2527 | |
2528 | LeadingZero &= isNullConstant(UniquedVals[i]); |
2529 | LeadingOnes &= isAllOnesConstant(UniquedVals[i]); |
2530 | } |
2531 | // Finally, check the least significant entry. |
2532 | if (LeadingZero) { |
2533 | if (!UniquedVals[Multiple-1].getNode()) |
2534 | return DAG.getTargetConstant(0, SDLoc(N), MVT::i32); // 0,0,0,undef |
2535 | int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue(); |
2536 | if (Val < 16) // 0,0,0,4 -> vspltisw(4) |
2537 | return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32); |
2538 | } |
2539 | if (LeadingOnes) { |
2540 | if (!UniquedVals[Multiple-1].getNode()) |
2541 | return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef |
2542 | int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
2543 | if (Val >= -16) // -1,-1,-1,-2 -> vspltisw(-2) |
2544 | return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32); |
2545 | } |
2546 | |
2547 | return SDValue(); |
2548 | } |
2549 | |
2550 | // Check to see if this buildvec has a single non-undef value in its elements. |
2551 | for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { |
2552 | if (N->getOperand(i).isUndef()) continue; |
2553 | if (!OpVal.getNode()) |
2554 | OpVal = N->getOperand(i); |
2555 | else if (OpVal != N->getOperand(i)) |
2556 | return SDValue(); |
2557 | } |
2558 | |
2559 | if (!OpVal.getNode()) return SDValue(); // All UNDEF: use implicit def. |
2560 | |
2561 | unsigned ValSizeInBytes = EltSize; |
2562 | uint64_t Value = 0; |
2563 | if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) { |
2564 | Value = CN->getZExtValue(); |
2565 | } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) { |
2566 | assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
2567 | Value = llvm::bit_cast<uint32_t>(CN->getValueAPF().convertToFloat()); |
2568 | } |
2569 | |
2570 | // If the splat value is larger than the element value, then we can never do |
2571 | // this splat. The only case where we could fit the replicated bits into our
2572 | // immediate field is zero, and we prefer to use vxor for that.
2573 | if (ValSizeInBytes < ByteSize) return SDValue(); |
2574 | |
2575 | // If the element value is larger than the splat value, check if it consists |
2576 | // of a repeated bit pattern of size ByteSize. |
2577 | if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8)) |
2578 | return SDValue(); |
2579 | |
2580 | // Properly sign extend the value. |
2581 | int MaskVal = SignExtend32(Value, ByteSize * 8); |
2582 | |
2583 | // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros. |
2584 | if (MaskVal == 0) return SDValue(); |
2585 | |
2586 | // Finally, if this value fits in a 5 bit sext field, return it |
2587 | if (SignExtend32<5>(MaskVal) == MaskVal) |
2588 | return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32); |
2589 | return SDValue(); |
2590 | } |
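// Two illustrative cases (a sketch, not exhaustive):
//   - A v16i8 build_vector of sixteen 0xFE bytes queried with ByteSize = 1:
//     SignExtend32(0xFE, 8) = -2 fits the 5-bit field, so this returns -2
//     and the splat can be formed with vspltisb -2.
//   - A v16i8 build_vector repeating {0, 1} queried with ByteSize = 2 takes
//     the chunked path above (EltSize = 1 < ByteSize): the leading byte of
//     each chunk is 0 and the trailing byte is 1 < 16, so this returns 1,
//     corresponding to vspltish 1.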
2591 | |
2592 | //===----------------------------------------------------------------------===// |
2593 | // Addressing Mode Selection |
2594 | //===----------------------------------------------------------------------===// |
2595 | |
2596 | /// isIntS16Immediate - This method tests to see if the node is either a 32-bit |
2597 | /// or 64-bit immediate, and if the value can be accurately represented as a |
2598 | /// sign extension from a 16-bit value. If so, this returns true and the |
2599 | /// immediate. |
2600 | bool llvm::isIntS16Immediate(SDNode *N, int16_t &Imm) { |
2601 | if (!isa<ConstantSDNode>(N)) |
2602 | return false; |
2603 | |
2604 | Imm = (int16_t)cast<ConstantSDNode>(N)->getZExtValue(); |
2605 | if (N->getValueType(0) == MVT::i32) |
2606 | return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue(); |
2607 | else |
2608 | return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue(); |
2609 | } |
2610 | bool llvm::isIntS16Immediate(SDValue Op, int16_t &Imm) { |
2611 | return isIntS16Immediate(Op.getNode(), Imm); |
2612 | } |
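// Example (illustrative): a constant of 32767 survives the int16_t
// truncation (Imm == 32767), so this returns true, while 32768 truncates
// to -32768 != 32768 and returns false.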
2613 | |
2614 | /// Used when computing address flags for selecting loads and stores. |
2615 | /// If we have an OR, check if the LHS and RHS are provably disjoint. |
2616 | /// An OR of two provably disjoint values is equivalent to an ADD. |
2617 | /// Most PPC load/store instructions compute the effective address as a sum, |
2618 | /// so doing this conversion is useful. |
2619 | static bool provablyDisjointOr(SelectionDAG &DAG, const SDValue &N) { |
2620 | if (N.getOpcode() != ISD::OR) |
2621 | return false; |
2622 | KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0)); |
2623 | if (!LHSKnown.Zero.getBoolValue()) |
2624 | return false; |
2625 | KnownBits RHSKnown = DAG.computeKnownBits(N.getOperand(1)); |
2626 | return (~(LHSKnown.Zero | RHSKnown.Zero) == 0); |
2627 | } |
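// Sketch of a qualifying case (illustrative): for N = (or (shl X, 8), 42),
// the shl's low 8 bits are known zero and the constant's bits above bit 5
// are known zero, so every bit is known zero on at least one side and the
// OR can safely be treated as an ADD when forming addresses.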
2628 | |
2629 | /// SelectAddressEVXRegReg - Given the specified address, check to see if it can |
2630 | /// be represented as an indexed [r+r] operation. |
2631 | bool PPCTargetLowering::SelectAddressEVXRegReg(SDValue N, SDValue &Base, |
2632 | SDValue &Index, |
2633 | SelectionDAG &DAG) const { |
2634 | for (SDNode *U : N->uses()) { |
2635 | if (MemSDNode *Memop = dyn_cast<MemSDNode>(U)) { |
2636 | if (Memop->getMemoryVT() == MVT::f64) { |
2637 | Base = N.getOperand(0); |
2638 | Index = N.getOperand(1); |
2639 | return true; |
2640 | } |
2641 | } |
2642 | } |
2643 | return false; |
2644 | } |
2645 | |
2646 | /// isIntS34Immediate - This method tests whether the value of the given node
2647 | /// can be accurately represented as a sign extension from a 34-bit value. If so,
2648 | /// this returns true and the immediate. |
2649 | bool llvm::isIntS34Immediate(SDNode *N, int64_t &Imm) { |
2650 | if (!isa<ConstantSDNode>(N)) |
2651 | return false; |
2652 | |
2653 | Imm = (int64_t)cast<ConstantSDNode>(N)->getZExtValue(); |
2654 | return isInt<34>(Imm); |
2655 | } |
2656 | bool llvm::isIntS34Immediate(SDValue Op, int64_t &Imm) { |
2657 | return isIntS34Immediate(Op.getNode(), Imm); |
2658 | } |
2659 | |
2660 | /// SelectAddressRegReg - Given the specified address, check to see if it
2661 | /// can be represented as an indexed [r+r] operation. Returns false if it |
2662 | /// can be more efficiently represented as [r+imm]. If \p EncodingAlignment is |
2663 | /// non-zero and N can be represented by a base register plus a signed 16-bit |
2664 | /// displacement, make a more precise judgement by checking (displacement % \p |
2665 | /// EncodingAlignment). |
2666 | bool PPCTargetLowering::SelectAddressRegReg( |
2667 | SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG, |
2668 | MaybeAlign EncodingAlignment) const { |
2669 | // If we have a PC Relative target flag don't select as [reg+reg]. It will be |
2670 | // a [pc+imm]. |
2671 | if (SelectAddressPCRel(N, Base)) |
2672 | return false; |
2673 | |
2674 | int16_t Imm = 0; |
2675 | if (N.getOpcode() == ISD::ADD) { |
2676 | // Is this an SPE f64 load/store, which can't handle a 16-bit offset?
2677 | // SPE loads/stores can only handle 8-bit offsets.
2678 | if (hasSPE() && SelectAddressEVXRegReg(N, Base, Index, DAG)) |
2679 | return true; |
2680 | if (isIntS16Immediate(N.getOperand(1), Imm) && |
2681 | (!EncodingAlignment || isAligned(*EncodingAlignment, Imm))) |
2682 | return false; // r+i |
2683 | if (N.getOperand(1).getOpcode() == PPCISD::Lo) |
2684 | return false; // r+i |
2685 | |
2686 | Base = N.getOperand(0); |
2687 | Index = N.getOperand(1); |
2688 | return true; |
2689 | } else if (N.getOpcode() == ISD::OR) { |
2690 | if (isIntS16Immediate(N.getOperand(1), Imm) && |
2691 | (!EncodingAlignment || isAligned(*EncodingAlignment, Imm))) |
2692 | return false; // Fold as r+i if we can.
2693 | |
2694 | // If this is an or of disjoint bitfields, we can codegen this as an add |
2695 | // (for better address arithmetic) if the LHS and RHS of the OR are provably |
2696 | // disjoint. |
2697 | KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0)); |
2698 | |
2699 | if (LHSKnown.Zero.getBoolValue()) { |
2700 | KnownBits RHSKnown = DAG.computeKnownBits(N.getOperand(1)); |
2701 | // If all of the bits are known zero on the LHS or RHS, the add won't |
2702 | // carry. |
2703 | if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) { |
2704 | Base = N.getOperand(0); |
2705 | Index = N.getOperand(1); |
2706 | return true; |
2707 | } |
2708 | } |
2709 | } |
2710 | |
2711 | return false; |
2712 | } |
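// Behavior sketch (illustrative): (add X, 16) with EncodingAlignment = 4
// returns false, because 16 is an aligned 16-bit immediate that is better
// matched as [r+imm]; (add X, Y) with a non-constant Y sets Base = X and
// Index = Y and returns true.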
2713 | |
2714 | // If we happen to be doing an i64 load or store into a stack slot that has |
2715 | // less than a 4-byte alignment, then the frame-index elimination may need to |
2716 | // use an indexed load or store instruction (because the offset may not be a |
2717 | // multiple of 4). The extra register needed to hold the offset comes from the |
2718 | // register scavenger, and it is possible that the scavenger will need to use |
2719 | // an emergency spill slot. As a result, we need to make sure that a spill slot |
2720 | // is allocated when doing an i64 load/store into a less-than-4-byte-aligned |
2721 | // stack slot. |
2722 | static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) { |
2723 | // FIXME: This does not handle the LWA case. |
2724 | if (VT != MVT::i64) |
2725 | return; |
2726 | |
2727 | // NOTE: We'll exclude negative FIs here, which come from argument |
2728 | // lowering, because there are no known test cases triggering this problem |
2729 | // using packed structures (or similar). We can remove this exclusion if |
2730 | // we find such a test case. The reason why this is so test-case driven is |
2731 | // because this entire 'fixup' is only to prevent crashes (from the |
2732 | // register scavenger) on not-really-valid inputs. For example, if we have: |
2733 | // %a = alloca i1 |
2734 | // %b = bitcast i1* %a to i64* |
2735 | // store i64 0, i64* %b
2736 | // then the store should really be marked as 'align 1', but is not. If it |
2737 | // were marked as 'align 1' then the indexed form would have been |
2738 | // instruction-selected initially, and the problem this 'fixup' is preventing |
2739 | // won't happen regardless. |
2740 | if (FrameIdx < 0) |
2741 | return; |
2742 | |
2743 | MachineFunction &MF = DAG.getMachineFunction(); |
2744 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
2745 | |
2746 | if (MFI.getObjectAlign(FrameIdx) >= Align(4)) |
2747 | return; |
2748 | |
2749 | PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); |
2750 | FuncInfo->setHasNonRISpills(); |
2751 | } |
2752 | |
2753 | /// Returns true if the address N can be represented by a base register plus |
2754 | /// a signed 16-bit displacement [r+imm], and if it is not better |
2755 | /// represented as reg+reg. If \p EncodingAlignment is non-zero, only accept |
2756 | /// displacements that are multiples of that value. |
2757 | bool PPCTargetLowering::SelectAddressRegImm( |
2758 | SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG, |
2759 | MaybeAlign EncodingAlignment) const { |
2760 | // FIXME dl should come from parent load or store, not from address |
2761 | SDLoc dl(N); |
2762 | |
2763 | // If we have a PC Relative target flag don't select as [reg+imm]. It will be |
2764 | // a [pc+imm]. |
2765 | if (SelectAddressPCRel(N, Base)) |
2766 | return false; |
2767 | |
2768 | // If this can be more profitably realized as r+r, fail. |
2769 | if (SelectAddressRegReg(N, Disp, Base, DAG, EncodingAlignment)) |
2770 | return false; |
2771 | |
2772 | if (N.getOpcode() == ISD::ADD) { |
2773 | int16_t imm = 0; |
2774 | if (isIntS16Immediate(N.getOperand(1), imm) && |
2775 | (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) { |
2776 | Disp = DAG.getTargetConstant(imm, dl, N.getValueType()); |
2777 | if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { |
2778 | Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); |
2779 | fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); |
2780 | } else { |
2781 | Base = N.getOperand(0); |
2782 | } |
2783 | return true; // [r+i] |
2784 | } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) { |
2785 | // Match LOAD (ADD (X, Lo(G))). |
2786 | assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
2787 | && "Cannot handle constant offsets yet!");
2788 | Disp = N.getOperand(1).getOperand(0); // The global address. |
2789 | assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
2790 | Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
2791 | Disp.getOpcode() == ISD::TargetConstantPool ||
2792 | Disp.getOpcode() == ISD::TargetJumpTable);
2793 | Base = N.getOperand(0); |
2794 | return true; // [&g+r] |
2795 | } |
2796 | } else if (N.getOpcode() == ISD::OR) { |
2797 | int16_t imm = 0; |
2798 | if (isIntS16Immediate(N.getOperand(1), imm) && |
2799 | (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) { |
2800 | // If this is an or of disjoint bitfields, we can codegen this as an add |
2801 | // (for better address arithmetic) if the LHS and RHS of the OR are |
2802 | // provably disjoint. |
2803 | KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0)); |
2804 | |
2805 | if ((LHSKnown.Zero.getZExtValue()|~(uint64_t)imm) == ~0ULL) { |
2806 | // If all of the bits are known zero on the LHS or RHS, the add won't |
2807 | // carry. |
2808 | if (FrameIndexSDNode *FI = |
2809 | dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { |
2810 | Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); |
2811 | fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); |
2812 | } else { |
2813 | Base = N.getOperand(0); |
2814 | } |
2815 | Disp = DAG.getTargetConstant(imm, dl, N.getValueType()); |
2816 | return true; |
2817 | } |
2818 | } |
2819 | } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) { |
2820 | // Loading from a constant address. |
2821 | |
2822 | // If this address fits entirely in a 16-bit sext immediate field, codegen |
2823 | // this as "d, 0" |
2824 | int16_t Imm; |
2825 | if (isIntS16Immediate(CN, Imm) && |
2826 | (!EncodingAlignment || isAligned(*EncodingAlignment, Imm))) { |
2827 | Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0)); |
2828 | Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO, |
2829 | CN->getValueType(0)); |
2830 | return true; |
2831 | } |
2832 | |
2833 | // Handle 32-bit sext immediates with LIS + addr mode. |
2834 | if ((CN->getValueType(0) == MVT::i32 || |
2835 | (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) && |
2836 | (!EncodingAlignment || |
2837 | isAligned(*EncodingAlignment, CN->getZExtValue()))) { |
2838 | int Addr = (int)CN->getZExtValue(); |
2839 | |
2840 | // Otherwise, break this down into an LIS + disp. |
2841 | Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32); |
2842 | |
2843 | Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl, |
2844 | MVT::i32); |
2845 | unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8; |
2846 | Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0); |
2847 | return true; |
2848 | } |
2849 | } |
2850 | |
2851 | Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout())); |
2852 | if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) { |
2853 | Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); |
2854 | fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); |
2855 | } else |
2856 | Base = N; |
2857 | return true; // [r+0] |
2858 | } |
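// Worked example of the LIS + disp split above (illustrative): for
// Addr = 0x12348000, (short)Addr = -32768, so the high part is
// (0x12348000 + 32768) >> 16 = 0x1235. "lis r, 0x1235" materializes
// 0x12350000, and adding the displacement -32768 recovers 0x12348000.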
2859 | |
2860 | /// Similar to the 16-bit case but for instructions that take a 34-bit |
2861 | /// displacement field (prefixed loads/stores). |
2862 | bool PPCTargetLowering::SelectAddressRegImm34(SDValue N, SDValue &Disp, |
2863 | SDValue &Base, |
2864 | SelectionDAG &DAG) const { |
2865 | // Only on 64-bit targets. |
2866 | if (N.getValueType() != MVT::i64) |
2867 | return false; |
2868 | |
2869 | SDLoc dl(N); |
2870 | int64_t Imm = 0; |
2871 | |
2872 | if (N.getOpcode() == ISD::ADD) { |
2873 | if (!isIntS34Immediate(N.getOperand(1), Imm)) |
2874 | return false; |
2875 | Disp = DAG.getTargetConstant(Imm, dl, N.getValueType()); |
2876 | if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) |
2877 | Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); |
2878 | else |
2879 | Base = N.getOperand(0); |
2880 | return true; |
2881 | } |
2882 | |
2883 | if (N.getOpcode() == ISD::OR) { |
2884 | if (!isIntS34Immediate(N.getOperand(1), Imm)) |
2885 | return false; |
2886 | // If this is an or of disjoint bitfields, we can codegen this as an add |
2887 | // (for better address arithmetic) if the LHS and RHS of the OR are |
2888 | // provably disjoint. |
2889 | KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0)); |
2890 | if ((LHSKnown.Zero.getZExtValue() | ~(uint64_t)Imm) != ~0ULL) |
2891 | return false; |
2892 | if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) |
2893 | Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); |
2894 | else |
2895 | Base = N.getOperand(0); |
2896 | Disp = DAG.getTargetConstant(Imm, dl, N.getValueType()); |
2897 | return true; |
2898 | } |
2899 | |
2900 | if (isIntS34Immediate(N, Imm)) { // If the address is a 34-bit const. |
2901 | Disp = DAG.getTargetConstant(Imm, dl, N.getValueType()); |
2902 | Base = DAG.getRegister(PPC::ZERO8, N.getValueType()); |
2903 | return true; |
2904 | } |
2905 | |
2906 | return false; |
2907 | } |
2908 | |
2909 | /// SelectAddressRegRegOnly - Given the specified address, force it to be
2910 | /// represented as an indexed [r+r] operation. |
2911 | bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base, |
2912 | SDValue &Index, |
2913 | SelectionDAG &DAG) const { |
2914 | // Check to see if we can easily represent this as an [r+r] address. This |
2915 | // will fail if it thinks that the address is more profitably represented as |
2916 | // reg+imm, e.g. where imm = 0. |
2917 | if (SelectAddressRegReg(N, Base, Index, DAG)) |
2918 | return true; |
2919 | |
2920 | // If the address is the result of an add, we will utilize the fact that the |
2921 | // address calculation includes an implicit add. However, we can reduce |
2922 | // register pressure if we do not materialize a constant just for use as the |
2923 | // index register. We only get rid of the add if it is not an add of a |
2924 | // value and a 16-bit signed constant where both operands have a single use.
2925 | int16_t imm = 0; |
2926 | if (N.getOpcode() == ISD::ADD && |
2927 | (!isIntS16Immediate(N.getOperand(1), imm) || |
2928 | !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) { |
2929 | Base = N.getOperand(0); |
2930 | Index = N.getOperand(1); |
2931 | return true; |
2932 | } |
2933 | |
2934 | // Otherwise, do it the hard way, using R0 as the base register. |
2935 | Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO, |
2936 | N.getValueType()); |
2937 | Index = N; |
2938 | return true; |
2939 | } |
2940 | |
2941 | template <typename Ty> static bool isValidPCRelNode(SDValue N) { |
2942 | Ty *PCRelCand = dyn_cast<Ty>(N); |
2943 | return PCRelCand && (PCRelCand->getTargetFlags() & PPCII::MO_PCREL_FLAG); |
2944 | } |
2945 | |
2946 | /// Returns true if this address is a PC Relative address. |
2947 | /// PC Relative addresses are marked with the flag PPCII::MO_PCREL_FLAG |
2948 | /// or if the node opcode is PPCISD::MAT_PCREL_ADDR. |
2949 | bool PPCTargetLowering::SelectAddressPCRel(SDValue N, SDValue &Base) const { |
2950 | // This is a materialize PC Relative node. Always select this as PC Relative. |
2951 | Base = N; |
2952 | if (N.getOpcode() == PPCISD::MAT_PCREL_ADDR) |
2953 | return true; |
2954 | if (isValidPCRelNode<ConstantPoolSDNode>(N) || |
2955 | isValidPCRelNode<GlobalAddressSDNode>(N) || |
2956 | isValidPCRelNode<JumpTableSDNode>(N) || |
2957 | isValidPCRelNode<BlockAddressSDNode>(N)) |
2958 | return true; |
2959 | return false; |
2960 | } |
2961 | |
2962 | /// Returns true if we should use a direct load into vector instruction |
2963 | /// (such as lxsd or lfd), instead of a load into gpr + direct move sequence. |
2964 | static bool usePartialVectorLoads(SDNode *N, const PPCSubtarget& ST) { |
2965 | |
2966 | // If there are any other uses other than scalar to vector, then we should |
2967 | // keep it as a scalar load -> direct move pattern to prevent multiple |
2968 | // loads. |
2969 | LoadSDNode *LD = dyn_cast<LoadSDNode>(N); |
2970 | if (!LD) |
2971 | return false; |
2972 | |
2973 | EVT MemVT = LD->getMemoryVT(); |
2974 | if (!MemVT.isSimple()) |
2975 | return false; |
2976 | switch(MemVT.getSimpleVT().SimpleTy) { |
2977 | case MVT::i64: |
2978 | break; |
2979 | case MVT::i32: |
2980 | if (!ST.hasP8Vector()) |
2981 | return false; |
2982 | break; |
2983 | case MVT::i16: |
2984 | case MVT::i8: |
2985 | if (!ST.hasP9Vector()) |
2986 | return false; |
2987 | break; |
2988 | default: |
2989 | return false; |
2990 | } |
2991 | |
2992 | SDValue LoadedVal(N, 0); |
2993 | if (!LoadedVal.hasOneUse()) |
2994 | return false; |
2995 | |
2996 | for (SDNode::use_iterator UI = LD->use_begin(), UE = LD->use_end(); |
2997 | UI != UE; ++UI) |
2998 | if (UI.getUse().get().getResNo() == 0 && |
2999 | UI->getOpcode() != ISD::SCALAR_TO_VECTOR && |
3000 | UI->getOpcode() != PPCISD::SCALAR_TO_VECTOR_PERMUTED) |
3001 | return false; |
3002 | |
3003 | return true; |
3004 | } |
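// Example of the intent (a sketch): on a Power8 or later subtarget, an i32
// load whose only user is a SCALAR_TO_VECTOR can be selected as a direct
// load into a vector register (e.g. lxsiwax) instead of a GPR load
// followed by a direct move such as mtvsrwz.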
3005 | |
3006 | /// getPreIndexedAddressParts - Returns true, and sets the base pointer,
3007 | /// offset pointer, and addressing mode by reference, if the node's address
3008 | /// can be legally represented as a pre-indexed load / store address.
3009 | bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, |
3010 | SDValue &Offset, |
3011 | ISD::MemIndexedMode &AM, |
3012 | SelectionDAG &DAG) const { |
3013 | if (DisablePPCPreinc) return false; |
3014 | |
3015 | bool isLoad = true; |
3016 | SDValue Ptr; |
3017 | EVT VT; |
3018 | Align Alignment; |
3019 | if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { |
3020 | Ptr = LD->getBasePtr(); |
3021 | VT = LD->getMemoryVT(); |
3022 | Alignment = LD->getAlign(); |
3023 | } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { |
3024 | Ptr = ST->getBasePtr(); |
3025 | VT = ST->getMemoryVT(); |
3026 | Alignment = ST->getAlign(); |
3027 | isLoad = false; |
3028 | } else |
3029 | return false; |
3030 | |
3031 | // Do not generate pre-inc forms for loads that feed scalar_to_vector
3032 | // instructions, because we can fold these into a more efficient
3033 | // instruction instead (such as LXSD).
3034 | if (isLoad && usePartialVectorLoads(N, Subtarget)) { |
3035 | return false; |
3036 | } |
3037 | |
3038 | // PowerPC doesn't have preinc load/store instructions for vectors |
3039 | if (VT.isVector()) |
3040 | return false; |
3041 | |
3042 | if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) { |
3043 | // Common code will reject creating a pre-inc form if the base pointer |
3044 | // is a frame index, or if N is a store and the base pointer is either |
3045 | // the same as or a predecessor of the value being stored. Check for |
3046 | // those situations here, and try with swapped Base/Offset instead. |
3047 | bool Swap = false; |
3048 | |
3049 | if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base)) |
3050 | Swap = true; |
3051 | else if (!isLoad) { |
3052 | SDValue Val = cast<StoreSDNode>(N)->getValue(); |
3053 | if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode())) |
3054 | Swap = true; |
3055 | } |
3056 | |
3057 | if (Swap) |
3058 | std::swap(Base, Offset); |
3059 | |
3060 | AM = ISD::PRE_INC; |
3061 | return true; |
3062 | } |
3063 | |
3064 | // LDU/STU can only handle immediates that are a multiple of 4. |
3065 | if (VT != MVT::i64) { |
3066 | if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, std::nullopt)) |
3067 | return false; |
3068 | } else { |
3069 | // LDU/STU need an address with at least 4-byte alignment. |
3070 | if (Alignment < Align(4)) |
3071 | return false; |
3072 | |
3073 | if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, Align(4))) |
3074 | return false; |
3075 | } |
3076 | |
3077 | if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { |
3078 | // PPC64 doesn't have lwau, but it does have lwaux. Reject preinc load of |
3079 | // sext i32 to i64 when addr mode is r+i. |
3080 | if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 && |
3081 | LD->getExtensionType() == ISD::SEXTLOAD && |
3082 | isa<ConstantSDNode>(Offset)) |
3083 | return false; |
3084 | } |
3085 | |
3086 | AM = ISD::PRE_INC; |
3087 | return true; |
3088 | } |
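// Illustrative outcome (a sketch): a chained i32 load from (add r4, 4) can
// be reported as ISD::PRE_INC with Base = r4 and Offset = 4, which
// instruction selection can then turn into an updating load such as
// "lwzu r3, 4(r4)", writing the incremented address back into r4.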
3089 | |
3090 | //===----------------------------------------------------------------------===// |
3091 | // LowerOperation implementation |
3092 | //===----------------------------------------------------------------------===// |
3093 | |
3094 | /// Set HiOpFlags and LoOpFlags to the target MO flags for label references,
3095 | /// adding the PIC flag when generating position-independent code.
3096 | static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget, |
3097 | unsigned &HiOpFlags, unsigned &LoOpFlags, |
3098 | const GlobalValue *GV = nullptr) { |
3099 | HiOpFlags = PPCII::MO_HA; |
3100 | LoOpFlags = PPCII::MO_LO; |
3101 | |
3102 | // Don't use the pic base if not in PIC relocation model. |
3103 | if (IsPIC) { |
3104 | HiOpFlags |= PPCII::MO_PIC_FLAG; |
3105 | LoOpFlags |= PPCII::MO_PIC_FLAG; |
3106 | } |
3107 | } |
3108 | |
3109 | static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC, |
3110 | SelectionDAG &DAG) { |
3111 | SDLoc DL(HiPart); |
3112 | EVT PtrVT = HiPart.getValueType(); |
3113 | SDValue Zero = DAG.getConstant(0, DL, PtrVT); |
3114 | |
3115 | SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero); |
3116 | SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero); |
3117 | |
3118 | // With PIC, the first instruction is actually "GR+hi(&G)". |
3119 | if (isPIC) |
3120 | Hi = DAG.getNode(ISD::ADD, DL, PtrVT, |
3121 | DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi); |
3122 | |
3123 | // Generate non-pic code that has direct accesses to the constant pool. |
3124 | // The address of the global is just (hi(&g)+lo(&g)). |
3125 | return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo); |
3126 | } |
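// For example (illustrative), with the MO_HA/MO_LO flags the Hi/Lo pair
// built above typically lowers to the classic two-instruction idiom:
//   lis  r, sym@ha
//   addi r, r, sym@l
// while the PIC path folds the global base register into the high add.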
3127 | |
3128 | static void setUsesTOCBasePtr(MachineFunction &MF) { |
3129 | PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); |
3130 | FuncInfo->setUsesTOCBasePtr(); |
3131 | } |
3132 | |
3133 | static void setUsesTOCBasePtr(SelectionDAG &DAG) { |
3134 | setUsesTOCBasePtr(DAG.getMachineFunction()); |
3135 | } |
3136 | |
3137 | SDValue PPCTargetLowering::getTOCEntry(SelectionDAG &DAG, const SDLoc &dl, |
3138 | SDValue GA) const { |
3139 | const bool Is64Bit = Subtarget.isPPC64(); |
3140 | EVT VT = Is64Bit ? MVT::i64 : MVT::i32; |
3141 | SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT) |
3142 | : Subtarget.isAIXABI() |
3143 | ? DAG.getRegister(PPC::R2, VT) |
3144 | : DAG.getNode(PPCISD::GlobalBaseReg, dl, VT); |
3145 | SDValue Ops[] = { GA, Reg }; |
3146 | return DAG.getMemIntrinsicNode( |
3147 | PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT, |
3148 | MachinePointerInfo::getGOT(DAG.getMachineFunction()), std::nullopt, |
3149 | MachineMemOperand::MOLoad); |
3150 | } |
3151 | |
3152 | SDValue PPCTargetLowering::LowerConstantPool(SDValue Op, |
3153 | SelectionDAG &DAG) const { |
3154 | EVT PtrVT = Op.getValueType(); |
3155 | ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); |
3156 | const Constant *C = CP->getConstVal(); |
3157 | |
3158 | // 64-bit SVR4 ABI and AIX ABI code are always position-independent. |
3159 | // The actual address of the GlobalValue is stored in the TOC. |
3160 | if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) { |
3161 | if (Subtarget.isUsingPCRelativeCalls()) { |
3162 | SDLoc DL(CP); |
3163 | EVT Ty = getPointerTy(DAG.getDataLayout()); |
3164 | SDValue ConstPool = DAG.getTargetConstantPool( |
3165 | C, Ty, CP->getAlign(), CP->getOffset(), PPCII::MO_PCREL_FLAG); |
3166 | return DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, ConstPool); |
3167 | } |
3168 | setUsesTOCBasePtr(DAG); |
3169 | SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0); |
3170 | return getTOCEntry(DAG, SDLoc(CP), GA); |
3171 | } |
3172 | |
3173 | unsigned MOHiFlag, MOLoFlag; |
3174 | bool IsPIC = isPositionIndependent(); |
3175 | getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); |
3176 | |
3177 | if (IsPIC && Subtarget.isSVR4ABI()) { |
3178 | SDValue GA = |
3179 | DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), PPCII::MO_PIC_FLAG); |
3180 | return getTOCEntry(DAG, SDLoc(CP), GA); |
3181 | } |
3182 | |
3183 | SDValue CPIHi = |
3184 | DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0, MOHiFlag); |
3185 | SDValue CPILo = |
3186 | DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0, MOLoFlag); |
3187 | return LowerLabelRef(CPIHi, CPILo, IsPIC, DAG); |
3188 | } |
3189 | |
3190 | // For 64-bit PowerPC, prefer the more compact relative encodings. |
3191 | // This trades 32 bits per jump table entry for one or two instructions |
3192 | // at the jump site.
3193 | unsigned PPCTargetLowering::getJumpTableEncoding() const { |
3194 | if (isJumpTableRelative()) |
3195 | return MachineJumpTableInfo::EK_LabelDifference32; |
3196 | |
3197 | return TargetLowering::getJumpTableEncoding(); |
3198 | } |
3199 | |
3200 | bool PPCTargetLowering::isJumpTableRelative() const { |
3201 | if (UseAbsoluteJumpTables) |
3202 | return false; |
3203 | if (Subtarget.isPPC64() || Subtarget.isAIXABI()) |
3204 | return true; |
3205 | return TargetLowering::isJumpTableRelative(); |
3206 | } |
3207 | |
3208 | SDValue PPCTargetLowering::getPICJumpTableRelocBase(SDValue Table, |
3209 | SelectionDAG &DAG) const { |
3210 | if (!Subtarget.isPPC64() || Subtarget.isAIXABI()) |
3211 | return TargetLowering::getPICJumpTableRelocBase(Table, DAG); |
3212 | |
3213 | switch (getTargetMachine().getCodeModel()) { |
3214 | case CodeModel::Small: |
3215 | case CodeModel::Medium: |
3216 | return TargetLowering::getPICJumpTableRelocBase(Table, DAG); |
3217 | default: |
3218 | return DAG.getNode(PPCISD::GlobalBaseReg, SDLoc(), |
3219 | getPointerTy(DAG.getDataLayout())); |
3220 | } |
3221 | } |
3222 | |
3223 | const MCExpr * |
3224 | PPCTargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF, |
3225 | unsigned JTI, |
3226 | MCContext &Ctx) const { |
3227 | if (!Subtarget.isPPC64() || Subtarget.isAIXABI()) |
3228 | return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx); |
3229 | |
3230 | switch (getTargetMachine().getCodeModel()) { |
3231 | case CodeModel::Small: |
3232 | case CodeModel::Medium: |
3233 | return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx); |
3234 | default: |
3235 | return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx); |
3236 | } |
3237 | } |
3238 | |
3239 | SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { |
3240 | EVT PtrVT = Op.getValueType(); |
3241 | JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); |
3242 | |
3243 | // isUsingPCRelativeCalls() returns true when PCRelative is enabled |
3244 | if (Subtarget.isUsingPCRelativeCalls()) { |
3245 | SDLoc DL(JT); |
3246 | EVT Ty = getPointerTy(DAG.getDataLayout()); |
3247 | SDValue GA = |
3248 | DAG.getTargetJumpTable(JT->getIndex(), Ty, PPCII::MO_PCREL_FLAG); |
3249 | SDValue MatAddr = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA); |
3250 | return MatAddr; |
3251 | } |
3252 | |
3253 | // 64-bit SVR4 ABI and AIX ABI code are always position-independent. |
3254 | // The actual address of the GlobalValue is stored in the TOC. |
3255 | if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) { |
3256 | setUsesTOCBasePtr(DAG); |
3257 | SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); |
3258 | return getTOCEntry(DAG, SDLoc(JT), GA); |
3259 | } |
3260 | |
3261 | unsigned MOHiFlag, MOLoFlag; |
3262 | bool IsPIC = isPositionIndependent(); |
3263 | getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); |
3264 | |
3265 | if (IsPIC && Subtarget.isSVR4ABI()) { |
3266 | SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, |
3267 | PPCII::MO_PIC_FLAG); |
3268 | return getTOCEntry(DAG, SDLoc(GA), GA); |
3269 | } |
3270 | |
3271 | SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag); |
3272 | SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag); |
3273 | return LowerLabelRef(JTIHi, JTILo, IsPIC, DAG); |
3274 | } |
3275 | |
3276 | SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op, |
3277 | SelectionDAG &DAG) const { |
3278 | EVT PtrVT = Op.getValueType(); |
3279 | BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op); |
3280 | const BlockAddress *BA = BASDN->getBlockAddress(); |
3281 | |
3282 | // isUsingPCRelativeCalls() returns true when PCRelative is enabled |
3283 | if (Subtarget.isUsingPCRelativeCalls()) { |
3284 | SDLoc DL(BASDN); |
3285 | EVT Ty = getPointerTy(DAG.getDataLayout()); |
3286 | SDValue GA = DAG.getTargetBlockAddress(BA, Ty, BASDN->getOffset(), |
3287 | PPCII::MO_PCREL_FLAG); |
3288 | SDValue MatAddr = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA); |
3289 | return MatAddr; |
3290 | } |
3291 | |
3292 | // 64-bit SVR4 ABI and AIX ABI code are always position-independent. |
3293 | // The actual BlockAddress is stored in the TOC. |
3294 | if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) { |
3295 | setUsesTOCBasePtr(DAG); |
3296 | SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset()); |
3297 | return getTOCEntry(DAG, SDLoc(BASDN), GA); |
3298 | } |
3299 | |
3300 | // 32-bit position-independent ELF stores the BlockAddress in the .got. |
3301 | if (Subtarget.is32BitELFABI() && isPositionIndependent()) |
3302 | return getTOCEntry( |
3303 | DAG, SDLoc(BASDN), |
3304 | DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset())); |
3305 | |
3306 | unsigned MOHiFlag, MOLoFlag; |
3307 | bool IsPIC = isPositionIndependent(); |
3308 | getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); |
3309 | SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag); |
3310 | SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag); |
3311 | return LowerLabelRef(TgtBAHi, TgtBALo, IsPIC, DAG); |
3312 | } |
3313 | |
3314 | SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op, |
3315 | SelectionDAG &DAG) const { |
3316 | if (Subtarget.isAIXABI()) |
3317 | return LowerGlobalTLSAddressAIX(Op, DAG); |
3318 | |
3319 | return LowerGlobalTLSAddressLinux(Op, DAG); |
3320 | } |
3321 | |
3322 | SDValue PPCTargetLowering::LowerGlobalTLSAddressAIX(SDValue Op, |
3323 | SelectionDAG &DAG) const { |
3324 | GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); |
3325 | |
3326 | if (DAG.getTarget().useEmulatedTLS()) |
3327 | report_fatal_error("Emulated TLS is not yet supported on AIX"); |
3328 | |
3329 | SDLoc dl(GA); |
3330 | const GlobalValue *GV = GA->getGlobal(); |
3331 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
3332 | |
3333 | // The general-dynamic model is the only access model supported for now, so |
3334 | // all the GlobalTLSAddress nodes are lowered with this model. |
3335 | // We need to generate two TOC entries, one for the variable offset, one for |
3336 | // the region handle. The global address for the TOC entry of the region |
3337 | // handle is created with the MO_TLSGDM_FLAG flag and the global address |
3338 | // for the TOC entry of the variable offset is created with MO_TLSGD_FLAG. |
3339 | SDValue VariableOffsetTGA = |
3340 | DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, PPCII::MO_TLSGD_FLAG); |
3341 | SDValue RegionHandleTGA = |
3342 | DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, PPCII::MO_TLSGDM_FLAG); |
3343 | SDValue VariableOffset = getTOCEntry(DAG, dl, VariableOffsetTGA); |
3344 | SDValue RegionHandle = getTOCEntry(DAG, dl, RegionHandleTGA); |
3345 | return DAG.getNode(PPCISD::TLSGD_AIX, dl, PtrVT, VariableOffset, |
3346 | RegionHandle); |
3347 | } |
3348 | |
3349 | SDValue PPCTargetLowering::LowerGlobalTLSAddressLinux(SDValue Op, |
3350 | SelectionDAG &DAG) const { |
3351 | // FIXME: TLS addresses currently use medium model code sequences, |
3352 | // which is the most useful form. Eventually support for small and |
3353 | // large models could be added if users need it, at the cost of |
3354 | // additional complexity. |
3355 | GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); |
3356 | if (DAG.getTarget().useEmulatedTLS()) |
3357 | return LowerToTLSEmulatedModel(GA, DAG); |
3358 | |
3359 | SDLoc dl(GA); |
3360 | const GlobalValue *GV = GA->getGlobal(); |
3361 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
3362 | bool is64bit = Subtarget.isPPC64(); |
3363 | const Module *M = DAG.getMachineFunction().getFunction().getParent(); |
3364 | PICLevel::Level picLevel = M->getPICLevel(); |
3365 | |
3366 | const TargetMachine &TM = getTargetMachine(); |
3367 | TLSModel::Model Model = TM.getTLSModel(GV); |
3368 | |
3369 | if (Model == TLSModel::LocalExec) { |
3370 | if (Subtarget.isUsingPCRelativeCalls()) { |
3371 | SDValue TLSReg = DAG.getRegister(PPC::X13, MVT::i64); |
3372 | SDValue TGA = DAG.getTargetGlobalAddress( |
3373 | GV, dl, PtrVT, 0, (PPCII::MO_PCREL_FLAG | PPCII::MO_TPREL_FLAG)); |
3374 | SDValue MatAddr = |
3375 | DAG.getNode(PPCISD::TLS_LOCAL_EXEC_MAT_ADDR, dl, PtrVT, TGA); |
3376 | return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TLSReg, MatAddr); |
3377 | } |
3378 | |
3379 | SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, |
3380 | PPCII::MO_TPREL_HA); |
3381 | SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, |
3382 | PPCII::MO_TPREL_LO); |
3383 | SDValue TLSReg = is64bit ? DAG.getRegister(PPC::X13, MVT::i64) |
3384 | : DAG.getRegister(PPC::R2, MVT::i32); |
3385 | |
3386 | SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg); |
3387 | return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi); |
3388 | } |
3389 | |
3390 | if (Model == TLSModel::InitialExec) { |
3391 | bool IsPCRel = Subtarget.isUsingPCRelativeCalls(); |
3392 | SDValue TGA = DAG.getTargetGlobalAddress( |
3393 | GV, dl, PtrVT, 0, IsPCRel ? PPCII::MO_GOT_TPREL_PCREL_FLAG : 0); |
3394 | SDValue TGATLS = DAG.getTargetGlobalAddress( |
3395 | GV, dl, PtrVT, 0, |
3396 | IsPCRel ? (PPCII::MO_TLS | PPCII::MO_PCREL_FLAG) : PPCII::MO_TLS); |
3397 | SDValue TPOffset; |
3398 | if (IsPCRel) { |
3399 | SDValue MatPCRel = DAG.getNode(PPCISD::MAT_PCREL_ADDR, dl, PtrVT, TGA); |
3400 | TPOffset = DAG.getLoad(MVT::i64, dl, DAG.getEntryNode(), MatPCRel, |
3401 | MachinePointerInfo()); |
3402 | } else { |
3403 | SDValue GOTPtr; |
3404 | if (is64bit) { |
3405 | setUsesTOCBasePtr(DAG); |
3406 | SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); |
3407 | GOTPtr = |
3408 | DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl, PtrVT, GOTReg, TGA); |
3409 | } else { |
3410 | if (!TM.isPositionIndependent()) |
3411 | GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT); |
3412 | else if (picLevel == PICLevel::SmallPIC) |
3413 | GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); |
3414 | else |
3415 | GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); |
3416 | } |
3417 | TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl, PtrVT, TGA, GOTPtr); |
3418 | } |
3419 | return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS); |
3420 | } |
3421 | |
3422 | if (Model == TLSModel::GeneralDynamic) { |
3423 | if (Subtarget.isUsingPCRelativeCalls()) { |
3424 | SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, |
3425 | PPCII::MO_GOT_TLSGD_PCREL_FLAG); |
3426 | return DAG.getNode(PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR, dl, PtrVT, TGA); |
3427 | } |
3428 | |
3429 | SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); |
3430 | SDValue GOTPtr; |
3431 | if (is64bit) { |
3432 | setUsesTOCBasePtr(DAG); |
3433 | SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); |
3434 | GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT, |
3435 | GOTReg, TGA); |
3436 | } else { |
3437 | if (picLevel == PICLevel::SmallPIC) |
3438 | GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); |
3439 | else |
3440 | GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); |
3441 | } |
3442 | return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT, |
3443 | GOTPtr, TGA, TGA); |
3444 | } |
3445 | |
3446 | if (Model == TLSModel::LocalDynamic) { |
3447 | if (Subtarget.isUsingPCRelativeCalls()) { |
3448 | SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, |
3449 | PPCII::MO_GOT_TLSLD_PCREL_FLAG); |
3450 | SDValue MatPCRel = |
3451 | DAG.getNode(PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR, dl, PtrVT, TGA); |
3452 | return DAG.getNode(PPCISD::PADDI_DTPREL, dl, PtrVT, MatPCRel, TGA); |
3453 | } |
3454 | |
3455 | SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); |
3456 | SDValue GOTPtr; |
3457 | if (is64bit) { |
3458 | setUsesTOCBasePtr(DAG); |
3459 | SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); |
3460 | GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT, |
3461 | GOTReg, TGA); |
3462 | } else { |
3463 | if (picLevel == PICLevel::SmallPIC) |
3464 | GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); |
3465 | else |
3466 | GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); |
3467 | } |
3468 | SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl, |
3469 | PtrVT, GOTPtr, TGA, TGA); |
3470 | SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl, |
3471 | PtrVT, TLSAddr, TGA); |
3472 | return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA); |
3473 | } |
3474 | |
3475 | llvm_unreachable("Unknown TLS model!");
3476 | } |
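// As an illustration of the medium-model sequences built above (a sketch,
// not normative), 64-bit initial-exec lowers roughly to:
//   addis r, r2, x@got@tprel@ha
//   ld    r, x@got@tprel@l(r)
//   add   r, r, x@tls
// while local-exec lowers to:
//   addis r, r13, x@tprel@ha
//   addi  r, r, x@tprel@l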
3477 | |
3478 | SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op, |
3479 | SelectionDAG &DAG) const { |
3480 | EVT PtrVT = Op.getValueType(); |
3481 | GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op); |
3482 | SDLoc DL(GSDN); |
3483 | const GlobalValue *GV = GSDN->getGlobal(); |
3484 | |
3485 | // 64-bit SVR4 ABI & AIX ABI code is always position-independent. |
3486 | // The actual address of the GlobalValue is stored in the TOC. |
3487 | if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) { |
3488 | if (Subtarget.isUsingPCRelativeCalls()) { |
3489 | EVT Ty = getPointerTy(DAG.getDataLayout()); |
3490 | if (isAccessedAsGotIndirect(Op)) { |
3491 | SDValue GA = DAG.getTargetGlobalAddress(GV, DL, Ty, GSDN->getOffset(), |
3492 | PPCII::MO_PCREL_FLAG | |
3493 | PPCII::MO_GOT_FLAG); |
3494 | SDValue MatPCRel = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA); |
3495 | SDValue Load = DAG.getLoad(MVT::i64, DL, DAG.getEntryNode(), MatPCRel, |
3496 | MachinePointerInfo()); |
3497 | return Load; |
3498 | } else { |
3499 | SDValue GA = DAG.getTargetGlobalAddress(GV, DL, Ty, GSDN->getOffset(), |
3500 | PPCII::MO_PCREL_FLAG); |
3501 | return DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA); |
3502 | } |
3503 | } |
3504 | setUsesTOCBasePtr(DAG); |
3505 | SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset()); |
3506 | return getTOCEntry(DAG, DL, GA); |
3507 | } |
3508 | |
3509 | unsigned MOHiFlag, MOLoFlag; |
3510 | bool IsPIC = isPositionIndependent(); |
3511 | getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag, GV); |
3512 | |
3513 | if (IsPIC && Subtarget.isSVR4ABI()) { |
3514 | SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, |
3515 | GSDN->getOffset(), |
3516 | PPCII::MO_PIC_FLAG); |
3517 | return getTOCEntry(DAG, DL, GA); |
3518 | } |
3519 | |
3520 | SDValue GAHi = |
3521 | DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag); |
3522 | SDValue GALo = |
3523 | DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag); |
3524 | |
3525 | return LowerLabelRef(GAHi, GALo, IsPIC, DAG); |
3526 | } |
3527 | |
3528 | SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { |
3529 | bool IsStrict = Op->isStrictFPOpcode(); |
3530 | ISD::CondCode CC = |
3531 | cast<CondCodeSDNode>(Op.getOperand(IsStrict ? 3 : 2))->get(); |
3532 | SDValue LHS = Op.getOperand(IsStrict ? 1 : 0); |
3533 | SDValue RHS = Op.getOperand(IsStrict ? 2 : 1); |
3534 | SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue(); |
3535 | EVT LHSVT = LHS.getValueType(); |
3536 | SDLoc dl(Op); |
3537 | |
3538 | // Soften the setcc with libcall if it is fp128. |
3539 | if (LHSVT == MVT::f128) { |
3540 | assert(!Subtarget.hasP9Vector() &&
3541 |        "SETCC for f128 is already legal under Power9!");
3542 | softenSetCCOperands(DAG, LHSVT, LHS, RHS, CC, dl, LHS, RHS, Chain, |
3543 | Op->getOpcode() == ISD::STRICT_FSETCCS); |
3544 | if (RHS.getNode()) |
3545 | LHS = DAG.getNode(ISD::SETCC, dl, Op.getValueType(), LHS, RHS, |
3546 | DAG.getCondCode(CC)); |
3547 | if (IsStrict) |
3548 | return DAG.getMergeValues({LHS, Chain}, dl); |
3549 | return LHS; |
3550 | } |
3551 | |
3552 | assert(!IsStrict && "Don't know how to handle STRICT_FSETCC!");
3553 | |
3554 | if (Op.getValueType() == MVT::v2i64) { |
3555 | // When the operands themselves are v2i64 values, we need to do something |
3556 | // special because VSX has no underlying comparison operations for these. |
3557 | if (LHS.getValueType() == MVT::v2i64) { |
3558 | // Equality can be handled by casting to the legal type for Altivec |
3559 | // comparisons, everything else needs to be expanded. |
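     | // The v4i32 result is recombined below: swapping adjacent words with
     | // {1, 0, 3, 2} and ANDing (ORing for SETNE) leaves a 64-bit lane all-ones
     | // only when both of its 32-bit halves compared equal.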
3560 | if (CC != ISD::SETEQ && CC != ISD::SETNE) |
3561 | return SDValue(); |
3562 | SDValue SetCC32 = DAG.getSetCC( |
3563 | dl, MVT::v4i32, DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, LHS), |
3564 | DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, RHS), CC); |
3565 | int ShuffV[] = {1, 0, 3, 2}; |
3566 | SDValue Shuff = |
3567 | DAG.getVectorShuffle(MVT::v4i32, dl, SetCC32, SetCC32, ShuffV); |
3568 | return DAG.getBitcast(MVT::v2i64, |
3569 | DAG.getNode(CC == ISD::SETEQ ? ISD::AND : ISD::OR, |
3570 | dl, MVT::v4i32, Shuff, SetCC32)); |
3571 | } |
3572 | |
3573 | // We handle most of these in the usual way. |
3574 | return Op; |
3575 | } |
3576 | |
3577 | // If we're comparing for equality to zero, expose the fact that this is |
3578 | // implemented as a ctlz/srl pair on ppc, so that the dag combiner can |
3579 | // fold the new nodes. |
3580 | if (SDValue V = lowerCmpEqZeroToCtlzSrl(Op, DAG)) |
3581 | return V; |
3582 | |
3583 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) { |
3584 | // Leave comparisons against 0 and -1 alone for now, since they're usually |
3585 | // optimized. FIXME: revisit this when we can custom lower all setcc |
3586 | // optimizations. |
3587 | if (C->isAllOnes() || C->isZero()) |
3588 | return SDValue(); |
3589 | } |
3590 | |
3591 | // If we have an integer seteq/setne, turn it into a compare against zero |
3592 | // by xor'ing the rhs with the lhs, which is faster than setting a |
3593 | // condition register, reading it back out, and masking the correct bit. The |
3594 | // normal approach here uses sub to do this instead of xor. Using xor exposes |
3595 | // the result to other bit-twiddling opportunities. |
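     | // For example, (i32 seteq a, b) becomes (seteq (xor a, b), 0), which can
     | // then lower to the ctlz/srl pair mentioned above: cntlzw yields 32
     | // exactly when its operand is zero, and 32 >> 5 == 1.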
3596 | if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) { |
3597 | EVT VT = Op.getValueType(); |
3598 | SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, LHS, RHS); |
3599 | return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC); |
3600 | } |
3601 | return SDValue(); |
3602 | } |
3603 | |
3604 | SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const { |
3605 | SDNode *Node = Op.getNode(); |
3606 | EVT VT = Node->getValueType(0); |
3607 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
3608 | SDValue InChain = Node->getOperand(0); |
3609 | SDValue VAListPtr = Node->getOperand(1); |
3610 | const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); |
3611 | SDLoc dl(Node); |
3612 | |
3613 | assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");
3614 | |
3615 | // gpr_index |
3616 | SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain, |
3617 | VAListPtr, MachinePointerInfo(SV), MVT::i8); |
3618 | InChain = GprIndex.getValue(1); |
3619 | |
3620 | if (VT == MVT::i64) { |
3621 | // Check if GprIndex is odd
3622 | SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex, |
3623 | DAG.getConstant(1, dl, MVT::i32)); |
3624 | SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd, |
3625 | DAG.getConstant(0, dl, MVT::i32), ISD::SETNE); |
3626 | SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex, |
3627 | DAG.getConstant(1, dl, MVT::i32)); |
3628 | // Align GprIndex to be even if it isn't |
3629 | GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne, |
3630 | GprIndex); |
3631 | } |
3632 | |
3633 | // fpr index is 1 byte after gpr |
3634 | SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, |
3635 | DAG.getConstant(1, dl, MVT::i32)); |
3636 | |
3637 | // fpr |
3638 | SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain, |
3639 | FprPtr, MachinePointerInfo(SV), MVT::i8); |
3640 | InChain = FprIndex.getValue(1); |
3641 | |
3642 | SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, |
3643 | DAG.getConstant(8, dl, MVT::i32)); |
3644 | |
3645 | SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, |
3646 | DAG.getConstant(4, dl, MVT::i32)); |
3647 | |
3648 | // areas |
3649 | SDValue OverflowArea = |
3650 | DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, MachinePointerInfo()); |
3651 | InChain = OverflowArea.getValue(1); |
3652 | |
3653 | SDValue RegSaveArea = |
3654 | DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, MachinePointerInfo()); |
3655 | InChain = RegSaveArea.getValue(1); |
3656 | |
3657 | // select overflow_area if index >= 8
3658 | SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex, |
3659 | DAG.getConstant(8, dl, MVT::i32), ISD::SETLT); |
3660 | |
3661 | // adjustment constant gpr_index * 4/8 |
3662 | SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32, |
3663 | VT.isInteger() ? GprIndex : FprIndex, |
3664 | DAG.getConstant(VT.isInteger() ? 4 : 8, dl, |
3665 | MVT::i32)); |
3666 | |
3667 | // OurReg = RegSaveArea + RegConstant |
3668 | SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea, |
3669 | RegConstant); |
3670 | |
3671 | // Floating types are 32 bytes into RegSaveArea |
3672 | if (VT.isFloatingPoint()) |
3673 | OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg, |
3674 | DAG.getConstant(32, dl, MVT::i32)); |
3675 | |
3676 | // increase {f,g}pr_index by 1 (or 2 if VT is i64) |
3677 | SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32, |
3678 | VT.isInteger() ? GprIndex : FprIndex, |
3679 | DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl, |
3680 | MVT::i32)); |
3681 | |
3682 | InChain = DAG.getTruncStore(InChain, dl, IndexPlus1, |
3683 | VT.isInteger() ? VAListPtr : FprPtr, |
3684 | MachinePointerInfo(SV), MVT::i8); |
3685 | |
3686 | // determine if we should load from reg_save_area or overflow_area |
3687 | SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea); |
3688 | |
3689 | // increase overflow_area by 4/8 if gpr/fpr index >= 8
3690 | SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea, |
3691 | DAG.getConstant(VT.isInteger() ? 4 : 8, |
3692 | dl, MVT::i32)); |
3693 | |
3694 | OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea, |
3695 | OverflowAreaPlusN); |
3696 | |
3697 | InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr, |
3698 | MachinePointerInfo(), MVT::i32); |
3699 | |
3700 | return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo()); |
3701 | } |
3702 | |
3703 | SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const { |
3704 | assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");
3705 | |
3706 | // We have to copy the entire va_list struct: |
3707 | // 2*sizeof(char) + 2 bytes of alignment padding + 2*sizeof(char*) = 12 bytes
3708 | return DAG.getMemcpy(Op.getOperand(0), Op, Op.getOperand(1), Op.getOperand(2), |
3709 | DAG.getConstant(12, SDLoc(Op), MVT::i32), Align(8), |
3710 | false, true, false, MachinePointerInfo(), |
3711 | MachinePointerInfo()); |
3712 | } |
3713 | |
3714 | SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op, |
3715 | SelectionDAG &DAG) const { |
3716 | if (Subtarget.isAIXABI()) |
3717 | report_fatal_error("ADJUST_TRAMPOLINE operation is not supported on AIX."); |
3718 | |
3719 | return Op.getOperand(0); |
3720 | } |
3721 | |
3722 | SDValue PPCTargetLowering::LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const { |
3723 | MachineFunction &MF = DAG.getMachineFunction(); |
3724 | PPCFunctionInfo &MFI = *MF.getInfo<PPCFunctionInfo>(); |
3725 | |
3726 | assert((Op.getOpcode() == ISD::INLINEASM ||
3727 |         Op.getOpcode() == ISD::INLINEASM_BR) &&
3728 |        "Expecting Inline ASM node.");
3729 | |
3730 | // If an LR store is already known to be required then there is no point in
3731 | // checking this ASM as well. |
3732 | if (MFI.isLRStoreRequired()) |
3733 | return Op; |
3734 | |
3735 | // Inline ASM nodes have an optional last operand that is an incoming Flag of |
3736 | // type MVT::Glue. We want to ignore this last operand if that is the case. |
3737 | unsigned NumOps = Op.getNumOperands(); |
3738 | if (Op.getOperand(NumOps - 1).getValueType() == MVT::Glue) |
3739 | --NumOps; |
3740 | |
3741 | // Check all operands that may contain the LR. |
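     | // For example, inline assembly such as
     | //   asm volatile("bl foo" : : : "lr");
     | // reaches this loop with LR in a Kind_Clobber operand and so forces the
     | // LR store.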
3742 | for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) { |
3743 | unsigned Flags = cast<ConstantSDNode>(Op.getOperand(i))->getZExtValue(); |
3744 | unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags); |
3745 | ++i; // Skip the ID value. |
3746 | |
3747 | switch (InlineAsm::getKind(Flags)) { |
3748 | default: |
3749 | llvm_unreachable("Bad flags!")::llvm::llvm_unreachable_internal("Bad flags!", "llvm/lib/Target/PowerPC/PPCISelLowering.cpp" , 3749); |
3750 | case InlineAsm::Kind_RegUse: |
3751 | case InlineAsm::Kind_Imm: |
3752 | case InlineAsm::Kind_Mem: |
3753 | i += NumVals; |
3754 | break; |
3755 | case InlineAsm::Kind_Clobber: |
3756 | case InlineAsm::Kind_RegDef: |
3757 | case InlineAsm::Kind_RegDefEarlyClobber: { |
3758 | for (; NumVals; --NumVals, ++i) { |
3759 | Register Reg = cast<RegisterSDNode>(Op.getOperand(i))->getReg(); |
3760 | if (Reg != PPC::LR && Reg != PPC::LR8) |
3761 | continue; |
3762 | MFI.setLRStoreRequired(); |
3763 | return Op; |
3764 | } |
3765 | break; |
3766 | } |
3767 | } |
3768 | } |
3769 | |
3770 | return Op; |
3771 | } |
3772 | |
3773 | SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op, |
3774 | SelectionDAG &DAG) const { |
3775 | if (Subtarget.isAIXABI()) |
3776 | report_fatal_error("INIT_TRAMPOLINE operation is not supported on AIX."); |
3777 | |
3778 | SDValue Chain = Op.getOperand(0); |
3779 | SDValue Trmp = Op.getOperand(1); // trampoline |
3780 | SDValue FPtr = Op.getOperand(2); // nested function |
3781 | SDValue Nest = Op.getOperand(3); // 'nest' parameter value |
3782 | SDLoc dl(Op); |
3783 | |
3784 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
3785 | bool isPPC64 = (PtrVT == MVT::i64); |
3786 | Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext()); |
3787 | |
3788 | TargetLowering::ArgListTy Args; |
3789 | TargetLowering::ArgListEntry Entry; |
3790 | |
3791 | Entry.Ty = IntPtrTy; |
3792 | Entry.Node = Trmp; Args.push_back(Entry); |
3793 | |
3794 | // TrampSize == (isPPC64 ? 48 : 40); |
3795 | Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl, |
3796 | isPPC64 ? MVT::i64 : MVT::i32); |
3797 | Args.push_back(Entry); |
3798 | |
3799 | Entry.Node = FPtr; Args.push_back(Entry); |
3800 | Entry.Node = Nest; Args.push_back(Entry); |
3801 | |
3802 | // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg) |
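     | // On a 32-bit target this is effectively
     | //   __trampoline_setup(Trmp, 40, FPtr, Nest);
     | // and the runtime initializes the 40-byte trampoline buffer.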
3803 | TargetLowering::CallLoweringInfo CLI(DAG); |
3804 | CLI.setDebugLoc(dl).setChain(Chain).setLibCallee( |
3805 | CallingConv::C, Type::getVoidTy(*DAG.getContext()), |
3806 | DAG.getExternalSymbol("__trampoline_setup", PtrVT), std::move(Args)); |
3807 | |
3808 | std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); |
3809 | return CallResult.second; |
3810 | } |
3811 | |
3812 | SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const { |
3813 | MachineFunction &MF = DAG.getMachineFunction(); |
3814 | PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); |
3815 | EVT PtrVT = getPointerTy(MF.getDataLayout()); |
3816 | |
3817 | SDLoc dl(Op); |
3818 | |
3819 | if (Subtarget.isPPC64() || Subtarget.isAIXABI()) { |
3820 | // vastart just stores the address of the VarArgsFrameIndex slot into the |
3821 | // memory location argument. |
3822 | SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); |
3823 | const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); |
3824 | return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), |
3825 | MachinePointerInfo(SV)); |
3826 | } |
3827 | |
3828 | // For the 32-bit SVR4 ABI we follow the layout of the va_list struct. |
3829 | // We assume the given va_list is already allocated.
3830 | // |
3831 | // typedef struct { |
3832 | // char gpr; /* index into the array of 8 GPRs |
3833 | // * stored in the register save area |
3834 | // * gpr=0 corresponds to r3, |
3835 | // * gpr=1 to r4, etc. |
3836 | // */ |
3837 | // char fpr; /* index into the array of 8 FPRs |
3838 | // * stored in the register save area |
3839 | // * fpr=0 corresponds to f1, |
3840 | // * fpr=1 to f2, etc. |
3841 | // */ |
3842 | // char *overflow_arg_area; |
3843 | // /* location on stack that holds |
3844 | // * the next overflow argument |
3845 | // */ |
3846 | // char *reg_save_area; |
3847 | // /* where r3:r10 and f1:f8 (if saved) |
3848 | // * are stored |
3849 | // */ |
3850 | // } va_list[1]; |
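     | // With 4-byte pointers, FrameOffset below is 4 and StackOffset is 3, so
     | // the stores land at byte offsets 0 (gpr), 1 (fpr), 4 (overflow_arg_area)
     | // and 8 (reg_save_area), matching the offsets LowerVAARG reads back.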
3851 | |
3852 | SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32); |
3853 | SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32); |
3854 | SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(), |
3855 | PtrVT); |
3856 | SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), |
3857 | PtrVT); |
3858 | |
3859 | uint64_t FrameOffset = PtrVT.getSizeInBits()/8; |
3860 | SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT); |
3861 | |
3862 | uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1; |
3863 | SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT); |
3864 | |
3865 | uint64_t FPROffset = 1; |
3866 | SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT); |
3867 | |
3868 | const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); |
3869 | |
3870 | // Store first byte : number of int regs |
3871 | SDValue firstStore = |
3872 | DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, Op.getOperand(1), |
3873 | MachinePointerInfo(SV), MVT::i8); |
3874 | uint64_t nextOffset = FPROffset; |
3875 | SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1), |
3876 | ConstFPROffset); |
3877 | |
3878 | // Store second byte : number of float regs |
3879 | SDValue secondStore = |
3880 | DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr, |
3881 | MachinePointerInfo(SV, nextOffset), MVT::i8); |
3882 | nextOffset += StackOffset; |
3883 | nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset); |
3884 | |
3885 | // Store second word : arguments given on stack |
3886 | SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr, |
3887 | MachinePointerInfo(SV, nextOffset)); |
3888 | nextOffset += FrameOffset; |
3889 | nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset); |
3890 | |
3891 | // Store third word : arguments given in registers |
3892 | return DAG.getStore(thirdStore, dl, FR, nextPtr, |
3893 | MachinePointerInfo(SV, nextOffset)); |
3894 | } |
3895 | |
3896 | /// FPR - The set of FP registers that should be allocated for arguments
3897 | /// on 64-bit SVR4 and AIX.
3898 | static const MCPhysReg FPR[] = {PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, |
3899 | PPC::F6, PPC::F7, PPC::F8, PPC::F9, PPC::F10, |
3900 | PPC::F11, PPC::F12, PPC::F13}; |
3901 | |
3902 | /// CalculateStackSlotSize - Calculates the size reserved for this argument on |
3903 | /// the stack. |
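     | /// For example, with 8-byte pointers a 13-byte byval argument reserves
     | /// ((13 + 7) / 8) * 8 == 16 bytes, while an array member of the same size
     | /// stays packed at 13 bytes.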
3904 | static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags, |
3905 | unsigned PtrByteSize) { |
3906 | unsigned ArgSize = ArgVT.getStoreSize(); |
3907 | if (Flags.isByVal()) |
3908 | ArgSize = Flags.getByValSize(); |
3909 | |
3910 | // Round up to multiples of the pointer size, except for array members, |
3911 | // which are always packed. |
3912 | if (!Flags.isInConsecutiveRegs()) |
3913 | ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; |
3914 | |
3915 | return ArgSize; |
3916 | } |
3917 | |
3918 | /// CalculateStackSlotAlignment - Calculates the alignment of this argument |
3919 | /// on the stack. |
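     | /// For example, a v4i32 argument is padded to a 16-byte boundary, and a
     | /// byval argument keeps a larger requested alignment as long as it is a
     | /// multiple of the pointer size.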
3920 | static Align CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT, |
3921 | ISD::ArgFlagsTy Flags, |
3922 | unsigned PtrByteSize) { |
3923 | Align Alignment(PtrByteSize); |
3924 | |
3925 | // Altivec parameters are padded to a 16 byte boundary. |
3926 | if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || |
3927 | ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || |
3928 | ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 || |
3929 | ArgVT == MVT::v1i128 || ArgVT == MVT::f128) |
3930 | Alignment = Align(16); |
3931 | |
3932 | // ByVal parameters are aligned as requested. |
3933 | if (Flags.isByVal()) { |
3934 | auto BVAlign = Flags.getNonZeroByValAlign(); |
3935 | if (BVAlign > PtrByteSize) { |
3936 | if (BVAlign.value() % PtrByteSize != 0) |
3937 | llvm_unreachable(
3938 |     "ByVal alignment is not a multiple of the pointer size");
3939 | |
3940 | Alignment = BVAlign; |
3941 | } |
3942 | } |
3943 | |
3944 | // Array members are always packed to their original alignment. |
3945 | if (Flags.isInConsecutiveRegs()) { |
3946 | // If the array member was split into multiple registers, the first |
3947 | // needs to be aligned to the size of the full type. (Except for |
3948 | // ppcf128, which is only aligned as its f64 components.) |
3949 | if (Flags.isSplit() && OrigVT != MVT::ppcf128) |
3950 | Alignment = Align(OrigVT.getStoreSize()); |
3951 | else |
3952 | Alignment = Align(ArgVT.getStoreSize()); |
3953 | } |
3954 | |
3955 | return Alignment; |
3956 | } |
3957 | |
3958 | /// CalculateStackSlotUsed - Return whether this argument will use its |
3959 | /// stack slot (instead of being passed in registers). ArgOffset, |
3960 | /// AvailableFPRs, and AvailableVRs must hold the current argument |
3961 | /// position, and will be updated to account for this argument. |
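     | /// For example, a ninth f64 argument overflows the eight GPR doublewords,
     | /// but while FPRs remain available it is still passed in a register and
     | /// this returns false; ArgOffset is advanced regardless so later arguments
     | /// land in the correct slots.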
3962 | static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT, ISD::ArgFlagsTy Flags, |
3963 | unsigned PtrByteSize, unsigned LinkageSize, |
3964 | unsigned ParamAreaSize, unsigned &ArgOffset, |
3965 | unsigned &AvailableFPRs, |
3966 | unsigned &AvailableVRs) { |
3967 | bool UseMemory = false; |
3968 | |
3969 | // Respect alignment of argument on the stack. |
3970 | Align Alignment = |
3971 | CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize); |
3972 | ArgOffset = alignTo(ArgOffset, Alignment); |
3973 | // If there's no space left in the argument save area, we must |
3974 | // use memory (this check also catches zero-sized arguments). |
3975 | if (ArgOffset >= LinkageSize + ParamAreaSize) |
3976 | UseMemory = true; |
3977 | |
3978 | // Allocate argument on the stack. |
3979 | ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize); |
3980 | if (Flags.isInConsecutiveRegsLast()) |
3981 | ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; |
3982 | // If we overran the argument save area, we must use memory |
3983 | // (this check catches arguments passed partially in memory) |
3984 | if (ArgOffset > LinkageSize + ParamAreaSize) |
3985 | UseMemory = true; |
3986 | |
3987 | // However, if the argument is actually passed in an FPR or a VR, |
3988 | // we don't use memory after all. |
3989 | if (!Flags.isByVal()) { |
3990 | if (ArgVT == MVT::f32 || ArgVT == MVT::f64) |
3991 | if (AvailableFPRs > 0) { |
3992 | --AvailableFPRs; |
3993 | return false; |
3994 | } |
3995 | if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || |
3996 | ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || |
3997 | ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 || |
3998 | ArgVT == MVT::v1i128 || ArgVT == MVT::f128) |
3999 | if (AvailableVRs > 0) { |
4000 | --AvailableVRs; |
4001 | return false; |
4002 | } |
4003 | } |
4004 | |
4005 | return UseMemory; |
4006 | } |
4007 | |
4008 | /// EnsureStackAlignment - Round stack frame size up from NumBytes to |
4009 | /// ensure minimum alignment required for target. |
4010 | static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering, |
4011 | unsigned NumBytes) { |
4012 | return alignTo(NumBytes, Lowering->getStackAlign()); |
4013 | } |
4014 | |
4015 | SDValue PPCTargetLowering::LowerFormalArguments( |
4016 | SDValue Chain, CallingConv::ID CallConv, bool isVarArg, |
4017 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, |
4018 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { |
4019 | if (Subtarget.isAIXABI()) |
4020 | return LowerFormalArguments_AIX(Chain, CallConv, isVarArg, Ins, dl, DAG, |
4021 | InVals); |
4022 | if (Subtarget.is64BitELFABI()) |
4023 | return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG, |
4024 | InVals); |
4025 | assert(Subtarget.is32BitELFABI());
4026 | return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG, |
4027 | InVals); |
4028 | } |
4029 | |
4030 | SDValue PPCTargetLowering::LowerFormalArguments_32SVR4( |
4031 | SDValue Chain, CallingConv::ID CallConv, bool isVarArg, |
4032 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, |
4033 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { |
4034 | |
4035 | // 32-bit SVR4 ABI Stack Frame Layout: |
4036 | // +-----------------------------------+ |
4037 | // +--> | Back chain | |
4038 | // | +-----------------------------------+ |
4039 | // | | Floating-point register save area | |
4040 | // | +-----------------------------------+ |
4041 | // | | General register save area | |
4042 | // | +-----------------------------------+ |
4043 | // | | CR save word | |
4044 | // | +-----------------------------------+ |
4045 | // | | VRSAVE save word | |
4046 | // | +-----------------------------------+ |
4047 | // | | Alignment padding | |
4048 | // | +-----------------------------------+ |
4049 | // | | Vector register save area | |
4050 | // | +-----------------------------------+ |
4051 | // | | Local variable space | |
4052 | // | +-----------------------------------+ |
4053 | // | | Parameter list area | |
4054 | // | +-----------------------------------+ |
4055 | // | | LR save word | |
4056 | // | +-----------------------------------+ |
4057 | // SP--> +--- | Back chain | |
4058 | // +-----------------------------------+ |
4059 | // |
4060 | // Specifications: |
4061 | // System V Application Binary Interface PowerPC Processor Supplement |
4062 | // AltiVec Technology Programming Interface Manual |
4063 | |
4064 | MachineFunction &MF = DAG.getMachineFunction(); |
4065 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
4066 | PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); |
4067 | |
4068 | EVT PtrVT = getPointerTy(MF.getDataLayout()); |
4069 | // Potential tail calls could cause overwriting of argument stack slots. |
4070 | bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && |
4071 | (CallConv == CallingConv::Fast)); |
4072 | const Align PtrAlign(4); |
4073 | |
4074 | // Assign locations to all of the incoming arguments. |
4075 | SmallVector<CCValAssign, 16> ArgLocs; |
4076 | PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, |
4077 | *DAG.getContext()); |
4078 | |
4079 | // Reserve space for the linkage area on the stack. |
4080 | unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); |
4081 | CCInfo.AllocateStack(LinkageSize, PtrAlign); |
4082 | if (useSoftFloat()) |
4083 | CCInfo.PreAnalyzeFormalArguments(Ins); |
4084 | |
4085 | CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4); |
4086 | CCInfo.clearWasPPCF128(); |
4087 | |
4088 | for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { |
4089 | CCValAssign &VA = ArgLocs[i]; |
4090 | |
4091 | // Arguments stored in registers. |
4092 | if (VA.isRegLoc()) { |
4093 | const TargetRegisterClass *RC; |
4094 | EVT ValVT = VA.getValVT(); |
4095 | |
4096 | switch (ValVT.getSimpleVT().SimpleTy) { |
4097 | default: |
4098 | llvm_unreachable("ValVT not supported by formal arguments Lowering")::llvm::llvm_unreachable_internal("ValVT not supported by formal arguments Lowering" , "llvm/lib/Target/PowerPC/PPCISelLowering.cpp", 4098); |
4099 | case MVT::i1: |
4100 | case MVT::i32: |
4101 | RC = &PPC::GPRCRegClass; |
4102 | break; |
4103 | case MVT::f32: |
4104 | if (Subtarget.hasP8Vector()) |
4105 | RC = &PPC::VSSRCRegClass; |
4106 | else if (Subtarget.hasSPE()) |
4107 | RC = &PPC::GPRCRegClass; |
4108 | else |
4109 | RC = &PPC::F4RCRegClass; |
4110 | break; |
4111 | case MVT::f64: |
4112 | if (Subtarget.hasVSX()) |
4113 | RC = &PPC::VSFRCRegClass; |
4114 | else if (Subtarget.hasSPE()) |
4115 | // SPE passes doubles in GPR pairs. |
4116 | RC = &PPC::GPRCRegClass; |
4117 | else |
4118 | RC = &PPC::F8RCRegClass; |
4119 | break; |
4120 | case MVT::v16i8: |
4121 | case MVT::v8i16: |
4122 | case MVT::v4i32: |
4123 | RC = &PPC::VRRCRegClass; |
4124 | break; |
4125 | case MVT::v4f32: |
4126 | RC = &PPC::VRRCRegClass; |
4127 | break; |
4128 | case MVT::v2f64: |
4129 | case MVT::v2i64: |
4130 | RC = &PPC::VRRCRegClass; |
4131 | break; |
4132 | } |
4133 | |
4134 | SDValue ArgValue; |
4135 | // Transform the arguments stored in physical registers into |
4136 | // virtual ones. |
4137 | if (VA.getLocVT() == MVT::f64 && Subtarget.hasSPE()) { |
4138 | assert(i + 1 < e && "No second half of double precision argument")(static_cast <bool> (i + 1 < e && "No second half of double precision argument" ) ? void (0) : __assert_fail ("i + 1 < e && \"No second half of double precision argument\"" , "llvm/lib/Target/PowerPC/PPCISelLowering.cpp", 4138, __extension__ __PRETTY_FUNCTION__)); |
4139 | Register RegLo = MF.addLiveIn(VA.getLocReg(), RC); |
4140 | Register RegHi = MF.addLiveIn(ArgLocs[++i].getLocReg(), RC); |
4141 | SDValue ArgValueLo = DAG.getCopyFromReg(Chain, dl, RegLo, MVT::i32); |
4142 | SDValue ArgValueHi = DAG.getCopyFromReg(Chain, dl, RegHi, MVT::i32); |
4143 | if (!Subtarget.isLittleEndian()) |
4144 | std::swap(ArgValueLo, ArgValueHi);
4145 | ArgValue = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, ArgValueLo, |
4146 | ArgValueHi); |
4147 | } else { |
4148 | Register Reg = MF.addLiveIn(VA.getLocReg(), RC); |
4149 | ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, |
4150 | ValVT == MVT::i1 ? MVT::i32 : ValVT); |
4151 | if (ValVT == MVT::i1) |
4152 | ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue); |
4153 | } |
4154 | |
4155 | InVals.push_back(ArgValue); |
4156 | } else { |
4157 | // Argument stored in memory. |
4158 | assert(VA.isMemLoc());
4159 | |
4160 | // Get the extended size of the argument type on the stack
4161 | unsigned ArgSize = VA.getLocVT().getStoreSize(); |
4162 | // Get the actual size of the argument type |
4163 | unsigned ObjSize = VA.getValVT().getStoreSize(); |
4164 | unsigned ArgOffset = VA.getLocMemOffset(); |
4165 | // Stack objects in PPC32 are right justified. |
4166 | ArgOffset += ArgSize - ObjSize; |
4167 | int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, isImmutable); |
4168 | |
4169 | // Create load nodes to retrieve arguments from the stack. |
4170 | SDValue FIN = DAG.getFrameIndex(FI, PtrVT); |
4171 | InVals.push_back( |
4172 | DAG.getLoad(VA.getValVT(), dl, Chain, FIN, MachinePointerInfo())); |
4173 | } |
4174 | } |
4175 | |
4176 | // Assign locations to all of the incoming aggregate by value arguments. |
4177 | // Aggregates passed by value are stored in the local variable space of the |
4178 | // caller's stack frame, right above the parameter list area. |
4179 | SmallVector<CCValAssign, 16> ByValArgLocs; |
4180 | CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(), |
4181 | ByValArgLocs, *DAG.getContext()); |
4182 | |
4183 | // Reserve stack space for the allocations in CCInfo. |
4184 | CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrAlign); |
4185 | |
4186 | CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal); |
4187 | |
4188 | // Area that is at least reserved in the caller of this function. |
4189 | unsigned MinReservedArea = CCByValInfo.getNextStackOffset(); |
4190 | MinReservedArea = std::max(MinReservedArea, LinkageSize); |
4191 | |
4192 | // Set the size that is at least reserved in caller of this function. Tail |
4193 | // call optimized function's reserved stack space needs to be aligned so that |
4194 | // taking the difference between two stack areas will result in an aligned |
4195 | // stack. |
4196 | MinReservedArea = |
4197 | EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea); |
4198 | FuncInfo->setMinReservedArea(MinReservedArea); |
4199 | |
4200 | SmallVector<SDValue, 8> MemOps; |
4201 | |
4202 | // If the function takes variable number of arguments, make a frame index for |
4203 | // the start of the first vararg value... for expansion of llvm.va_start. |
4204 | if (isVarArg) { |
4205 | static const MCPhysReg GPArgRegs[] = { |
4206 | PPC::R3, PPC::R4, PPC::R5, PPC::R6, |
4207 | PPC::R7, PPC::R8, PPC::R9, PPC::R10, |
4208 | }; |
4209 | const unsigned NumGPArgRegs = std::size(GPArgRegs); |
4210 | |
4211 | static const MCPhysReg FPArgRegs[] = { |
4212 | PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, |
4213 | PPC::F8 |
4214 | }; |
4215 | unsigned NumFPArgRegs = std::size(FPArgRegs); |
4216 | |
4217 | if (useSoftFloat() || hasSPE()) |
4218 | NumFPArgRegs = 0; |
4219 | |
4220 | FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs)); |
4221 | FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs)); |
4222 | |
4223 | // Make room for NumGPArgRegs and NumFPArgRegs. |
4224 | int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 + |
4225 | NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8; |
4226 | |
4227 | FuncInfo->setVarArgsStackOffset( |
4228 | MFI.CreateFixedObject(PtrVT.getSizeInBits()/8, |
4229 | CCInfo.getNextStackOffset(), true)); |
4230 | |
4231 | FuncInfo->setVarArgsFrameIndex( |
4232 | MFI.CreateStackObject(Depth, Align(8), false)); |
4233 | SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); |
4234 | |
4235 | // The fixed integer arguments of a variadic function are stored to the |
4236 | // VarArgsFrameIndex on the stack so that they may be loaded by |
4237 | // dereferencing the result of va_next. |
4238 | for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) { |
4239 | // Get an existing live-in vreg, or add a new one. |
4240 | Register VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]); |
4241 | if (!VReg) |
4242 | VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass); |
4243 | |
4244 | SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); |
4245 | SDValue Store = |
4246 | DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo()); |
4247 | MemOps.push_back(Store); |
4248 | // Increment the address by four for the next argument to store |
4249 | SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT); |
4250 | FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); |
4251 | } |
4252 | |
4253 | // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6 |
4254 | // is set. |
4255 | // The double arguments are stored to the VarArgsFrameIndex |
4256 | // on the stack. |
4257 | for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) { |
4258 | // Get an existing live-in vreg, or add a new one. |
4259 | Register VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]); |
4260 | if (!VReg) |
4261 | VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass); |
4262 | |
4263 | SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64); |
4264 | SDValue Store = |
4265 | DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo()); |
4266 | MemOps.push_back(Store); |
4267 | // Increment the address by eight for the next argument to store |
4268 | SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl, |
4269 | PtrVT); |
4270 | FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); |
4271 | } |
4272 | } |
4273 | |
4274 | if (!MemOps.empty()) |
4275 | Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); |
4276 | |
4277 | return Chain; |
4278 | } |
4279 | |
4280 | // PPC64 passes i8, i16, and i32 values in i64 registers. Promote |
4281 | // value to MVT::i64 and then truncate to the correct register size. |
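     | // For example, a signext i32 argument arrives in the low half of an i64
     | // register; AssertSext records that the upper bits already match, so the
     | // final TRUNCATE costs nothing.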
4282 | SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags, |
4283 | EVT ObjectVT, SelectionDAG &DAG, |
4284 | SDValue ArgVal, |
4285 | const SDLoc &dl) const { |
4286 | if (Flags.isSExt()) |
4287 | ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal, |
4288 | DAG.getValueType(ObjectVT)); |
4289 | else if (Flags.isZExt()) |
4290 | ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal, |
4291 | DAG.getValueType(ObjectVT)); |
4292 | |
4293 | return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal); |
4294 | } |
4295 | |
4296 | SDValue PPCTargetLowering::LowerFormalArguments_64SVR4( |
4297 | SDValue Chain, CallingConv::ID CallConv, bool isVarArg, |
4298 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, |
4299 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { |
4300 | // TODO: add description of PPC stack frame format, or at least some docs. |
4301 | // |
4302 | bool isELFv2ABI = Subtarget.isELFv2ABI(); |
4303 | bool isLittleEndian = Subtarget.isLittleEndian(); |
4304 | MachineFunction &MF = DAG.getMachineFunction(); |
4305 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
4306 | PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); |
4307 | |
4308 | assert(!(CallConv == CallingConv::Fast && isVarArg) &&
4309 |        "fastcc not supported on varargs functions");
4310 | |
4311 | EVT PtrVT = getPointerTy(MF.getDataLayout()); |
4312 | // Potential tail calls could cause overwriting of argument stack slots. |
4313 | bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && |
4314 | (CallConv == CallingConv::Fast)); |
4315 | unsigned PtrByteSize = 8; |
4316 | unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); |
4317 | |
4318 | static const MCPhysReg GPR[] = { |
4319 | PPC::X3, PPC::X4, PPC::X5, PPC::X6, |
4320 | PPC::X7, PPC::X8, PPC::X9, PPC::X10, |
4321 | }; |
4322 | static const MCPhysReg VR[] = { |
4323 | PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, |
4324 | PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 |
4325 | }; |
4326 | |
4327 | const unsigned Num_GPR_Regs = std::size(GPR); |
4328 | const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13; |
4329 | const unsigned Num_VR_Regs = std::size(VR); |
4330 | |
4331 | // Do a first pass over the arguments to determine whether the ABI |
4332 | // guarantees that our caller has allocated the parameter save area |
4333 | // on its stack frame. In the ELFv1 ABI, this is always the case; |
4334 | // in the ELFv2 ABI, it is true if this is a vararg function or if |
4335 | // any parameter is located in a stack slot. |
4336 | |
4337 | bool HasParameterArea = !isELFv2ABI || isVarArg; |
4338 | unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize; |
4339 | unsigned NumBytes = LinkageSize; |
4340 | unsigned AvailableFPRs = Num_FPR_Regs; |
4341 | unsigned AvailableVRs = Num_VR_Regs; |
4342 | for (unsigned i = 0, e = Ins.size(); i != e; ++i) { |
4343 | if (Ins[i].Flags.isNest()) |
4344 | continue; |
4345 | |
4346 | if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags, |
4347 | PtrByteSize, LinkageSize, ParamAreaSize, |
4348 | NumBytes, AvailableFPRs, AvailableVRs)) |
4349 | HasParameterArea = true; |
4350 | } |
4351 | |
4352 | // Add DAG nodes to load the arguments or copy them out of registers. On |
4353 | // entry to a function on PPC, the arguments start after the linkage area, |
4354 | // although the first ones are often in registers. |
4355 | |
4356 | unsigned ArgOffset = LinkageSize; |
4357 | unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; |
4358 | SmallVector<SDValue, 8> MemOps; |
4359 | Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin(); |
4360 | unsigned CurArgIdx = 0; |
4361 | for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { |
4362 | SDValue ArgVal; |
4363 | bool needsLoad = false; |
4364 | EVT ObjectVT = Ins[ArgNo].VT; |
4365 | EVT OrigVT = Ins[ArgNo].ArgVT; |
4366 | unsigned ObjSize = ObjectVT.getStoreSize(); |
4367 | unsigned ArgSize = ObjSize; |
4368 | ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; |
4369 | if (Ins[ArgNo].isOrigArg()) { |
4370 | std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx); |
4371 | CurArgIdx = Ins[ArgNo].getOrigArgIndex(); |
4372 | } |
4373 | // We re-align the argument offset for each argument, except when using the
4374 | // fast calling convention, where we re-align only when the argument will
4375 | // actually use a stack slot.
4376 | unsigned CurArgOffset; |
4377 | Align Alignment; |
4378 | auto ComputeArgOffset = [&]() { |
4379 | /* Respect alignment of argument on the stack. */ |
4380 | Alignment = |
4381 | CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize); |
4382 | ArgOffset = alignTo(ArgOffset, Alignment); |
4383 | CurArgOffset = ArgOffset; |
4384 | }; |
4385 | |
4386 | if (CallConv != CallingConv::Fast) { |
4387 | ComputeArgOffset(); |
4388 | |
4389 | /* Compute GPR index associated with argument offset. */ |
4390 | GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize; |
4391 | GPR_idx = std::min(GPR_idx, Num_GPR_Regs); |
4392 | } |
4393 | |
4394 | // FIXME the codegen can be much improved in some cases. |
4395 | // We do not have to keep everything in memory. |
4396 | if (Flags.isByVal()) { |
4397 | assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
4398 | |
4399 | if (CallConv == CallingConv::Fast) |
4400 | ComputeArgOffset(); |
4401 | |
4402 | // ObjSize is the true size; ArgSize is that rounded up to a multiple of the pointer size.
4403 | ObjSize = Flags.getByValSize(); |
4404 | ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; |
4405 | // Empty aggregate parameters do not take up registers. Examples: |
4406 | // struct { } a; |
4407 | // union { } b; |
4408 | // int c[0]; |
4409 | // etc. However, we have to provide a place-holder in InVals, so |
4410 | // pretend we have an 8-byte item at the current address for that |
4411 | // purpose. |
4412 | if (!ObjSize) { |
4413 | int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true); |
4414 | SDValue FIN = DAG.getFrameIndex(FI, PtrVT); |
4415 | InVals.push_back(FIN); |
4416 | continue; |
4417 | } |
4418 | |
4419 | // Create a stack object covering all stack doublewords occupied |
4420 | // by the argument. If the argument is (fully or partially) on |
4421 | // the stack, or if the argument is fully in registers but the |
4422 | // caller has allocated the parameter save area anyway, we can refer
4423 | // directly to the caller's stack frame. Otherwise, create a |
4424 | // local copy in our own frame. |
4425 | int FI; |
4426 | if (HasParameterArea || |
4427 | ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize) |
4428 | FI = MFI.CreateFixedObject(ArgSize, ArgOffset, false, true); |
4429 | else |
4430 | FI = MFI.CreateStackObject(ArgSize, Alignment, false); |
4431 | SDValue FIN = DAG.getFrameIndex(FI, PtrVT); |
4432 | |
4433 | // Handle aggregates smaller than 8 bytes. |
4434 | if (ObjSize < PtrByteSize) { |
4435 | // The value of the object is its address, which differs from the |
4436 | // address of the enclosing doubleword on big-endian systems. |
4437 | SDValue Arg = FIN; |
4438 | if (!isLittleEndian) { |
4439 | SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT); |
4440 | Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff); |
4441 | } |
4442 | InVals.push_back(Arg); |
4443 | |
4444 | if (GPR_idx != Num_GPR_Regs) { |
4445 | Register VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); |
4446 | FuncInfo->addLiveInAttr(VReg, Flags); |
4447 | SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); |
4448 | EVT ObjType = EVT::getIntegerVT(*DAG.getContext(), ObjSize * 8); |
4449 | SDValue Store = |
4450 | DAG.getTruncStore(Val.getValue(1), dl, Val, Arg, |
4451 | MachinePointerInfo(&*FuncArg), ObjType); |
4452 | MemOps.push_back(Store); |
4453 | } |
4454 | // Whether we copied from a register or not, advance the offset |
4455 | // into the parameter save area by a full doubleword. |
4456 | ArgOffset += PtrByteSize; |
4457 | continue; |
4458 | } |
4459 | |
4460 | // The value of the object is its address, which is the address of |
4461 | // its first stack doubleword. |
4462 | InVals.push_back(FIN); |
4463 | |
4464 | // Store whatever pieces of the object are in registers to memory. |
4465 | for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { |
4466 | if (GPR_idx == Num_GPR_Regs) |
4467 | break; |
4468 | |
4469 | Register VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); |
4470 | FuncInfo->addLiveInAttr(VReg, Flags); |
4471 | SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); |
4472 | SDValue Addr = FIN; |
4473 | if (j) { |
4474 | SDValue Off = DAG.getConstant(j, dl, PtrVT); |
4475 | Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off); |
4476 | } |
4477 | unsigned StoreSizeInBits = std::min(PtrByteSize, (ObjSize - j)) * 8; |
4478 | EVT ObjType = EVT::getIntegerVT(*DAG.getContext(), StoreSizeInBits); |
4479 | SDValue Store = |
4480 | DAG.getTruncStore(Val.getValue(1), dl, Val, Addr, |
4481 | MachinePointerInfo(&*FuncArg, j), ObjType); |
4482 | MemOps.push_back(Store); |
4483 | ++GPR_idx; |
4484 | } |
4485 | ArgOffset += ArgSize; |
4486 | continue; |
4487 | } |
4488 | |
4489 | switch (ObjectVT.getSimpleVT().SimpleTy) { |
4490 | default: llvm_unreachable("Unhandled argument type!");
4491 | case MVT::i1: |
4492 | case MVT::i32: |
4493 | case MVT::i64: |
4494 | if (Flags.isNest()) { |
4495 | // The 'nest' parameter, if any, is passed in R11. |
4496 | Register VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass); |
4497 | ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); |
4498 | |
4499 | if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) |
4500 | ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); |
4501 | |
4502 | break; |
4503 | } |
4504 | |
4505 | // These can be scalar arguments or elements of an integer array type |
4506 | // passed directly. Clang may use those instead of "byval" aggregate |
4507 | // types to avoid forcing arguments to memory unnecessarily. |
4508 | if (GPR_idx != Num_GPR_Regs) { |
4509 | Register VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); |
4510 | FuncInfo->addLiveInAttr(VReg, Flags); |
4511 | ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); |
4512 | |
4513 | if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) |
4514 | // PPC64 passes i8, i16, and i32 values in i64 registers. Promote |
4515 | // value to MVT::i64 and then truncate to the correct register size. |
4516 | ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); |
4517 | } else { |
4518 | if (CallConv == CallingConv::Fast) |
4519 | ComputeArgOffset(); |
4520 | |
4521 | needsLoad = true; |
4522 | ArgSize = PtrByteSize; |
4523 | } |
4524 | if (CallConv != CallingConv::Fast || needsLoad) |
4525 | ArgOffset += 8; |
4526 | break; |
4527 | |
4528 | case MVT::f32: |
4529 | case MVT::f64: |
4530 | // These can be scalar arguments or elements of a float array type |
4531 | // passed directly. The latter are used to implement ELFv2 homogeneous
4532 | // float aggregates. |
4533 | if (FPR_idx != Num_FPR_Regs) { |
4534 | unsigned VReg; |
4535 | |
4536 | if (ObjectVT == MVT::f32) |
4537 | VReg = MF.addLiveIn(FPR[FPR_idx], |
4538 | Subtarget.hasP8Vector() |
4539 | ? &PPC::VSSRCRegClass |
4540 | : &PPC::F4RCRegClass); |
4541 | else |
4542 | VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX() |
4543 | ? &PPC::VSFRCRegClass |
4544 | : &PPC::F8RCRegClass); |
4545 | |
4546 | ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); |
4547 | ++FPR_idx; |
4548 | } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) { |
4549 | // FIXME: We may want to re-enable this for CallingConv::Fast on the P8 |
4550 | // once we support fp <-> gpr moves. |
4551 | |
4552 | // This can only ever happen in the presence of f32 array types, |
4553 | // since otherwise we never run out of FPRs before running out |
4554 | // of GPRs. |
4555 | Register VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); |
4556 | FuncInfo->addLiveInAttr(VReg, Flags); |
4557 | ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); |
4558 | |
4559 | if (ObjectVT == MVT::f32) { |
4560 | if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0)) |
4561 | ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal, |
4562 | DAG.getConstant(32, dl, MVT::i32)); |
4563 | ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal); |
4564 | } |
4565 | |
4566 | ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal); |
4567 | } else { |
4568 | if (CallConv == CallingConv::Fast) |
4569 | ComputeArgOffset(); |
4570 | |
4571 | needsLoad = true; |
4572 | } |
4573 | |
4574 | // When passing an array of floats, the array occupies consecutive |
4575 | // space in the argument area; only round up to the next doubleword |
4576 | // at the end of the array. Otherwise, each float takes 8 bytes. |
4577 | if (CallConv != CallingConv::Fast || needsLoad) { |
4578 | ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize; |
4579 | ArgOffset += ArgSize; |
4580 | if (Flags.isInConsecutiveRegsLast()) |
4581 | ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; |
4582 | } |
4583 | break; |
4584 | case MVT::v4f32: |
4585 | case MVT::v4i32: |
4586 | case MVT::v8i16: |
4587 | case MVT::v16i8: |
4588 | case MVT::v2f64: |
4589 | case MVT::v2i64: |
4590 | case MVT::v1i128: |
4591 | case MVT::f128: |
4592 | // These can be scalar arguments or elements of a vector array type |
4593 | // passed directly. The latter are used to implement ELFv2 homogeneous
4594 | // vector aggregates. |
4595 | if (VR_idx != Num_VR_Regs) { |
4596 | Register VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); |
4597 | ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); |
4598 | ++VR_idx; |
4599 | } else { |
4600 | if (CallConv == CallingConv::Fast) |
4601 | ComputeArgOffset(); |
4602 | needsLoad = true; |
4603 | } |
4604 | if (CallConv != CallingConv::Fast || needsLoad) |
4605 | ArgOffset += 16; |
4606 | break; |
4607 | } |
4608 | |
4609 | // We need to load the argument to a virtual register if we determined |
4610 | // above that we ran out of physical registers of the appropriate type. |
4611 | if (needsLoad) { |
4612 | if (ObjSize < ArgSize && !isLittleEndian) |
4613 | CurArgOffset += ArgSize - ObjSize; |
4614 | int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, isImmutable); |
4615 | SDValue FIN = DAG.getFrameIndex(FI, PtrVT); |
4616 | ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo()); |
4617 | } |
4618 | |
4619 | InVals.push_back(ArgVal); |
4620 | } |
4621 | |
4622 | // Area that is at least reserved in the caller of this function. |
4623 | unsigned MinReservedArea; |
4624 | if (HasParameterArea) |
4625 | MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize); |
4626 | else |
4627 | MinReservedArea = LinkageSize; |
4628 | |
4629 | // Set the size that is at least reserved in caller of this function. Tail |
4630 | // call optimized functions' reserved stack space needs to be aligned so that |
4631 | // taking the difference between two stack areas will result in an aligned |
4632 | // stack. |
4633 | MinReservedArea = |
4634 | EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea); |
4635 | FuncInfo->setMinReservedArea(MinReservedArea); |
4636 | |
4637 | // If the function takes variable number of arguments, make a frame index for |
4638 | // the start of the first vararg value... for expansion of llvm.va_start. |
4639 | // The ELFv2 ABI spec notes:
4640 | // C programs that are intended to be *portable* across different compilers |
4641 | // and architectures must use the header file <stdarg.h> to deal with variable |
4642 | // argument lists. |
4643 | if (isVarArg && MFI.hasVAStart()) { |
4644 | int Depth = ArgOffset; |
4645 | |
4646 | FuncInfo->setVarArgsFrameIndex( |
4647 | MFI.CreateFixedObject(PtrByteSize, Depth, true)); |
4648 | SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); |
4649 | |
4650 | // If this function is vararg, store any remaining integer argument regs |
4651 | // to their spots on the stack so that they may be loaded by dereferencing |
4652 | // the result of va_next. |
4653 | for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize; |
4654 | GPR_idx < Num_GPR_Regs; ++GPR_idx) { |
4655 | Register VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); |
4656 | SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); |
4657 | SDValue Store = |
4658 | DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo()); |
4659 | MemOps.push_back(Store); |
4660 | // Increment the address by eight for the next argument to store
4661 | SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT); |
4662 | FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); |
4663 | } |
4664 | } |
4665 | |
4666 | if (!MemOps.empty()) |
4667 | Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); |
4668 | |
4669 | return Chain; |
4670 | } |
4671 | |
4672 | /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be |
4673 | /// adjusted to accommodate the arguments for the tailcall. |
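     | /// A negative result means the callee needs more parameter space than the
     | /// caller reserved; e.g. 112 bytes reserved against a 144-byte parameter
     | /// area yields an SPDiff of -32.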
4674 | static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall, |
4675 | unsigned ParamSize) { |
4676 | |
4677 | if (!isTailCall) return 0; |
4678 | |
4679 | PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>(); |
4680 | unsigned CallerMinReservedArea = FI->getMinReservedArea(); |
4681 | int SPDiff = (int)CallerMinReservedArea - (int)ParamSize; |
4682 | // Remember only if the new adjustment is bigger. |
4683 | if (SPDiff < FI->getTailCallSPDelta()) |
4684 | FI->setTailCallSPDelta(SPDiff); |
4685 | |
4686 | return SPDiff; |
4687 | } |
4688 | |
4689 | static bool isFunctionGlobalAddress(const GlobalValue *CalleeGV); |
4690 | |
4691 | static bool callsShareTOCBase(const Function *Caller, |
4692 | const GlobalValue *CalleeGV, |
4693 | const TargetMachine &TM) { |
4694 | // It does not make sense to call callsShareTOCBase() with a caller that |
4695 | // is PC Relative since PC Relative callers do not have a TOC. |
4696 | #ifndef NDEBUG |
4697 | const PPCSubtarget *STICaller = &TM.getSubtarget<PPCSubtarget>(*Caller); |
4698 | assert(!STICaller->isUsingPCRelativeCalls() &&(static_cast <bool> (!STICaller->isUsingPCRelativeCalls () && "PC Relative callers do not have a TOC and cannot share a TOC Base" ) ? void (0) : __assert_fail ("!STICaller->isUsingPCRelativeCalls() && \"PC Relative callers do not have a TOC and cannot share a TOC Base\"" , "llvm/lib/Target/PowerPC/PPCISelLowering.cpp", 4699, __extension__ __PRETTY_FUNCTION__)) |
4699 | "PC Relative callers do not have a TOC and cannot share a TOC Base")(static_cast <bool> (!STICaller->isUsingPCRelativeCalls () && "PC Relative callers do not have a TOC and cannot share a TOC Base" ) ? void (0) : __assert_fail ("!STICaller->isUsingPCRelativeCalls() && \"PC Relative callers do not have a TOC and cannot share a TOC Base\"" , "llvm/lib/Target/PowerPC/PPCISelLowering.cpp", 4699, __extension__ __PRETTY_FUNCTION__)); |
4700 | #endif |
4701 | |
4702 | // Callee is either a GlobalAddress or an ExternalSymbol. ExternalSymbols |
4703 | // don't have enough information to determine if the caller and callee share |
4704 | // the same TOC base, so we have to pessimistically assume they don't for |
4705 | // correctness. |
4706 | if (!CalleeGV) |
4707 | return false; |
4708 | |
4709 | // If the callee is preemptable, then the static linker will use a plt-stub |
4710 | // which saves the toc to the stack, and needs a nop after the call |
4711 | // instruction to convert to a toc-restore. |
4712 | if (!TM.shouldAssumeDSOLocal(*Caller->getParent(), CalleeGV)) |
4713 | return false; |
4714 | |
4715 | // Functions with PC Relative enabled may clobber the TOC in the same DSO. |
4716 | // We may need a TOC restore in the situation where the caller requires a |
4717 | // valid TOC but the callee is PC Relative and does not. |
4718 | const Function *F = dyn_cast<Function>(CalleeGV); |
4719 | const GlobalAlias *Alias = dyn_cast<GlobalAlias>(CalleeGV); |
4720 | |
4721 | // If we have an Alias we can try to get the function from there. |
4722 | if (Alias) { |
4723 | const GlobalObject *GlobalObj = Alias->getAliaseeObject(); |
4724 | F = dyn_cast<Function>(GlobalObj); |
4725 | } |
4726 | |
4727 | // If we still have no valid function pointer we do not have enough |
4728 | // information to determine if the callee uses PC Relative calls so we must |
4729 | // assume that it does. |
4730 | if (!F) |
4731 | return false; |
4732 | |
4733 | // If the callee uses PC Relative we cannot guarantee that the callee won't |
4734 | // clobber the TOC of the caller and so we must assume that the two |
4735 | // functions do not share a TOC base. |
4736 | const PPCSubtarget *STICallee = &TM.getSubtarget<PPCSubtarget>(*F); |
4737 | if (STICallee->isUsingPCRelativeCalls()) |
4738 | return false; |
4739 | |
4740 | // If the GV is not a strong definition then we need to assume it can be |
4741 | // replaced by another function at link time. The function that replaces |
4742 | // it may not share the same TOC as the caller since the callee may be |
4743 | // replaced by a PC Relative version of the same function. |
4744 | if (!CalleeGV->isStrongDefinitionForLinker()) |
4745 | return false; |
4746 | |
4747 | // The medium and large code models are expected to provide a sufficiently |
4748 | // large TOC to provide all data addressing needs of a module with a |
4749 | // single TOC. |
4750 | if (CodeModel::Medium == TM.getCodeModel() || |
4751 | CodeModel::Large == TM.getCodeModel()) |
4752 | return true; |
4753 | |
4754 | // Any explicitly-specified sections and section prefixes must also match. |
4755 | // Also, if we're using -ffunction-sections, then each function is always in |
4756 | // a different section (the same is true for COMDAT functions). |
4757 | if (TM.getFunctionSections() || CalleeGV->hasComdat() || |
4758 | Caller->hasComdat() || CalleeGV->getSection() != Caller->getSection()) |
4759 | return false; |
4760 | if (const auto *F = dyn_cast<Function>(CalleeGV)) { |
4761 | if (F->getSectionPrefix() != Caller->getSectionPrefix()) |
4762 | return false; |
4763 | } |
4764 | |
4765 | return true; |
4766 | } |
4767 | |
4768 | static bool |
4769 | needStackSlotPassParameters(const PPCSubtarget &Subtarget, |
4770 | const SmallVectorImpl<ISD::OutputArg> &Outs) { |
4771 | assert(Subtarget.is64BitELFABI()); |
4772 | |
4773 | const unsigned PtrByteSize = 8; |
4774 | const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); |
4775 | |
4776 | static const MCPhysReg GPR[] = { |
4777 | PPC::X3, PPC::X4, PPC::X5, PPC::X6, |
4778 | PPC::X7, PPC::X8, PPC::X9, PPC::X10, |
4779 | }; |
4780 | static const MCPhysReg VR[] = { |
4781 | PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, |
4782 | PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 |
4783 | }; |
4784 | |
4785 | const unsigned NumGPRs = std::size(GPR); |
4786 | const unsigned NumFPRs = 13; |
4787 | const unsigned NumVRs = std::size(VR); |
4788 | const unsigned ParamAreaSize = NumGPRs * PtrByteSize; |
4789 | |
4790 | unsigned NumBytes = LinkageSize; |
4791 | unsigned AvailableFPRs = NumFPRs; |
4792 | unsigned AvailableVRs = NumVRs; |
4793 | |
4794 | for (const ISD::OutputArg& Param : Outs) { |
4795 | if (Param.Flags.isNest()) continue; |
4796 | |
4797 | if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags, PtrByteSize, |
4798 | LinkageSize, ParamAreaSize, NumBytes, |
4799 | AvailableFPRs, AvailableVRs)) |
4800 | return true; |
4801 | } |
4802 | return false; |
4803 | } |
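 | // The register counts above mirror 64-bit ELF argument passing: 8 GPRs |
 | // (X3-X10), 13 FPRs, and 13 VRs (V2-V13) are available for parameters. |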
4804 | |
4805 | static bool hasSameArgumentList(const Function *CallerFn, const CallBase &CB) { |
4806 | if (CB.arg_size() != CallerFn->arg_size()) |
4807 | return false; |
4808 | |
4809 | auto CalleeArgIter = CB.arg_begin(); |
4810 | auto CalleeArgEnd = CB.arg_end(); |
4811 | Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin(); |
4812 | |
4813 | for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) { |
4814 | const Value* CalleeArg = *CalleeArgIter; |
4815 | const Value* CallerArg = &(*CallerArgIter); |
4816 | if (CalleeArg == CallerArg) |
4817 | continue; |
4818 | |
4819 | // e.g. @caller([4 x i64] %a, [4 x i64] %b) { |
4820 | // tail call @callee([4 x i64] undef, [4 x i64] %b) |
4821 | // } |
4822 | // 1st argument of callee is undef and has the same type as caller. |
4823 | if (CalleeArg->getType() == CallerArg->getType() && |
4824 | isa<UndefValue>(CalleeArg)) |
4825 | continue; |
4826 | |
4827 | return false; |
4828 | } |
4829 | |
4830 | return true; |
4831 | } |
4832 | |
4833 | // Returns true if TCO is possible between the caller's and callee's |
4834 | // calling conventions. |
4835 | static bool |
4836 | areCallingConvEligibleForTCO_64SVR4(CallingConv::ID CallerCC, |
4837 | CallingConv::ID CalleeCC) { |
4838 | // Tail calls are possible with fastcc and ccc. |
4839 | auto isTailCallableCC = [] (CallingConv::ID CC){ |
4840 | return CC == CallingConv::C || CC == CallingConv::Fast; |
4841 | }; |
4842 | if (!isTailCallableCC(CallerCC) || !isTailCallableCC(CalleeCC)) |
4843 | return false; |
4844 | |
4845 | // We can safely tail call both fastcc and ccc callees from a c calling |
4846 | // convention caller. If the caller is fastcc, we may have less stack space |
4847 | // than a non-fastcc caller with the same signature so disable tail-calls in |
4848 | // that case. |
4849 | return CallerCC == CallingConv::C || CallerCC == CalleeCC; |
4850 | } |
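 | // Summary: a C caller may tail call C or fastcc callees; a fastcc caller |
 | // may only tail call fastcc callees, since it may have reserved less stack |
 | // than a C caller with the same signature. |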
4851 | |
4852 | bool PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4( |
4853 | const GlobalValue *CalleeGV, CallingConv::ID CalleeCC, |
4854 | CallingConv::ID CallerCC, const CallBase *CB, bool isVarArg, |
4855 | const SmallVectorImpl<ISD::OutputArg> &Outs, |
4856 | const SmallVectorImpl<ISD::InputArg> &Ins, const Function *CallerFunc, |
4857 | bool isCalleeExternalSymbol) const { |
4858 | bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt; |
4859 | |
4860 | if (DisableSCO && !TailCallOpt) return false; |
4861 | |
4862 | // Variadic argument functions are not supported. |
4863 | if (isVarArg) return false; |
4864 | |
4865 | // Check that the calling conventions are compatible for tco. |
4866 | if (!areCallingConvEligibleForTCO_64SVR4(CallerCC, CalleeCC)) |
4867 | return false; |
4868 | |
4869 | // A caller containing any byval parameter is not supported. |
4870 | if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); })) |
4871 | return false; |
4872 | |
4873 | // Likewise, a callee containing any byval parameter is not supported. |
4874 | // Note: This is a quick work around, because in some cases, e.g. |
4875 | // caller's stack size > callee's stack size, we are still able to apply |
4876 | // sibling call optimization. For example, gcc is able to do SCO for caller1 |
4877 | // in the following example, but not for caller2. |
4878 | // struct test { |
4879 | // long int a; |
4880 | // char ary[56]; |
4881 | // } gTest; |
4882 | // __attribute__((noinline)) int callee(struct test v, struct test *b) { |
4883 | // b->a = v.a; |
4884 | // return 0; |
4885 | // } |
4886 | // void caller1(struct test a, struct test c, struct test *b) { |
4887 | // callee(gTest, b); } |
4888 | // void caller2(struct test *b) { callee(gTest, b); } |
4889 | if (any_of(Outs, [](const ISD::OutputArg& OA) { return OA.Flags.isByVal(); })) |
4890 | return false; |
4891 | |
4892 | // If callee and caller use different calling conventions, we cannot pass |
4893 | // parameters on stack since offsets for the parameter area may be different. |
4894 | if (CallerCC != CalleeCC && needStackSlotPassParameters(Subtarget, Outs)) |
4895 | return false; |
4896 | |
4897 | // All variants of 64-bit ELF ABIs without PC-Relative addressing require that |
4898 | // the caller and callee share the same TOC for TCO/SCO. If the caller and |
4899 | // callee potentially have different TOC bases then we cannot tail call since |
4900 | // we need to restore the TOC pointer after the call. |
4901 | // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977 |
4902 | // We cannot guarantee this for indirect calls or calls to external functions. |
4903 | // When PC-Relative addressing is used, the concept of the TOC is no longer |
4904 | // applicable so this check is not required. |
4905 | // Check first for indirect calls. |
4906 | if (!Subtarget.isUsingPCRelativeCalls() && |
4907 | !isFunctionGlobalAddress(CalleeGV) && !isCalleeExternalSymbol) |
4908 | return false; |
4909 | |
4910 | // Check if we share the TOC base. |
4911 | if (!Subtarget.isUsingPCRelativeCalls() && |
4912 | !callsShareTOCBase(CallerFunc, CalleeGV, getTargetMachine())) |
4913 | return false; |
4914 | |
4915 | // TCO allows altering the callee's ABI, so we don't have to check further. |
4916 | if (CalleeCC == CallingConv::Fast && TailCallOpt) |
4917 | return true; |
4918 | |
4919 | if (DisableSCO) return false; |
4920 | |
4921 | // If the callee uses the same argument list as the caller, we can apply SCO |
4922 | // in this case. Otherwise, we need to check whether the callee needs stack |
4923 | // slots for passing arguments. |
4924 | // PC Relative tail calls may not have a CallBase. |
4925 | // If there is no CallBase we cannot verify if we have the same argument |
4926 | // list so assume that we don't have the same argument list. |
4927 | if (CB && !hasSameArgumentList(CallerFunc, *CB) && |
4928 | needStackSlotPassParameters(Subtarget, Outs)) |
4929 | return false; |
4930 | else if (!CB && needStackSlotPassParameters(Subtarget, Outs)) |
4931 | return false; |
4932 | |
4933 | return true; |
4934 | } |
4935 | |
4936 | /// IsEligibleForTailCallOptimization - Check whether the call is eligible |
4937 | /// for tail call optimization. Targets which want to do tail call |
4938 | /// optimization should implement this function. |
4939 | bool PPCTargetLowering::IsEligibleForTailCallOptimization( |
4940 | const GlobalValue *CalleeGV, CallingConv::ID CalleeCC, |
4941 | CallingConv::ID CallerCC, bool isVarArg, |
4942 | const SmallVectorImpl<ISD::InputArg> &Ins) const { |
4943 | if (!getTargetMachine().Options.GuaranteedTailCallOpt) |
4944 | return false; |
4945 | |
4946 | // Variable argument functions are not supported. |
4947 | if (isVarArg) |
4948 | return false; |
4949 | |
4950 | if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) { |
4951 | // Functions containing by val parameters are not supported. |
4952 | if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); })) |
4953 | return false; |
4954 | |
4955 | // Non-PIC/GOT tail calls are supported. |
4956 | if (getTargetMachine().getRelocationModel() != Reloc::PIC_) |
4957 | return true; |
4958 | |
4959 | // At the moment we can only do local tail calls (in same module, hidden |
4960 | // or protected) if we are generating PIC. |
4961 | if (CalleeGV) |
4962 | return CalleeGV->hasHiddenVisibility() || |
4963 | CalleeGV->hasProtectedVisibility(); |
4964 | } |
4965 | |
4966 | return false; |
4967 | } |
4968 | |
4969 | /// isBLACompatibleAddress - Return the immediate to use if the specified |
4970 | /// 32-bit value is representable in the immediate field of a BxA instruction. |
4971 | static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) { |
4972 | ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); |
4973 | if (!C) return nullptr; |
4974 | |
4975 | int Addr = C->getZExtValue(); |
4976 | if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero. |
4977 | SignExtend32<26>(Addr) != Addr) |
4978 | return nullptr; // Top 6 bits have to be sext of immediate. |
4979 | |
4980 | return DAG |
4981 | .getConstant( |
4982 | (int)C->getZExtValue() >> 2, SDLoc(Op), |
4983 | DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout())) |
4984 | .getNode(); |
4985 | } |
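 | // Example: 0x01FFFFFC is word-aligned and sign-extends from 26 bits, so it |
 | // is encodable as (0x01FFFFFC >> 2); 0x02000000 is aligned but exceeds the |
 | // signed 26-bit range and is rejected. |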
4986 | |
4987 | namespace { |
4988 | |
4989 | struct TailCallArgumentInfo { |
4990 | SDValue Arg; |
4991 | SDValue FrameIdxOp; |
4992 | int FrameIdx = 0; |
4993 | |
4994 | TailCallArgumentInfo() = default; |
4995 | }; |
4996 | |
4997 | } // end anonymous namespace |
4998 | |
4999 | /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot. |
5000 | static void StoreTailCallArgumentsToStackSlot( |
5001 | SelectionDAG &DAG, SDValue Chain, |
5002 | const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs, |
5003 | SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) { |
5004 | for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) { |
5005 | SDValue Arg = TailCallArgs[i].Arg; |
5006 | SDValue FIN = TailCallArgs[i].FrameIdxOp; |
5007 | int FI = TailCallArgs[i].FrameIdx; |
5008 | // Store relative to framepointer. |
5009 | MemOpChains.push_back(DAG.getStore( |
5010 | Chain, dl, Arg, FIN, |
5011 | MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI))); |
5012 | } |
5013 | } |
5014 | |
5015 | /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to |
5016 | /// the appropriate stack slot for the tail call optimized function call. |
5017 | static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain, |
5018 | SDValue OldRetAddr, SDValue OldFP, |
5019 | int SPDiff, const SDLoc &dl) { |
5020 | if (SPDiff) { |
5021 | // Calculate the new stack slot for the return address. |
5022 | MachineFunction &MF = DAG.getMachineFunction(); |
5023 | const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>(); |
5024 | const PPCFrameLowering *FL = Subtarget.getFrameLowering(); |
5025 | bool isPPC64 = Subtarget.isPPC64(); |
5026 | int SlotSize = isPPC64 ? 8 : 4; |
5027 | int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset(); |
5028 | int NewRetAddr = MF.getFrameInfo().CreateFixedObject(SlotSize, |
5029 | NewRetAddrLoc, true); |
5030 | EVT VT = isPPC64 ? MVT::i64 : MVT::i32; |
5031 | SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT); |
5032 | Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx, |
5033 | MachinePointerInfo::getFixedStack(MF, NewRetAddr)); |
5034 | } |
5035 | return Chain; |
5036 | } |
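 | // Note: OldFP is accepted alongside OldRetAddr, but as the body above |
 | // shows, only the return address is actually stored to a new slot here. |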
5037 | |
5038 | /// CalculateTailCallArgDest - Remember Argument for later processing. Calculate |
5039 | /// the position of the argument. |
5040 | static void |
5041 | CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64, |
5042 | SDValue Arg, int SPDiff, unsigned ArgOffset, |
5043 | SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) { |
5044 | int Offset = ArgOffset + SPDiff; |
5045 | uint32_t OpSize = (Arg.getValueSizeInBits() + 7) / 8; |
5046 | int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true); |
5047 | EVT VT = isPPC64 ? MVT::i64 : MVT::i32; |
5048 | SDValue FIN = DAG.getFrameIndex(FI, VT); |
5049 | TailCallArgumentInfo Info; |
5050 | Info.Arg = Arg; |
5051 | Info.FrameIdxOp = FIN; |
5052 | Info.FrameIdx = FI; |
5053 | TailCallArguments.push_back(Info); |
5054 | } |
5055 | |
5056 | /// EmitTailCallLoadFPAndRetAddr - Emit load from the return address stack |
5057 | /// slot. Returns the chain as result and the loaded return address in |
5058 | /// LROpOut (FPOpOut is unused here). Used when tail calling. |
5059 | SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr( |
5060 | SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut, |
5061 | SDValue &FPOpOut, const SDLoc &dl) const { |
5062 | if (SPDiff) { |
5063 | // Load the LR and FP stack slot for later adjusting. |
5064 | EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32; |
5065 | LROpOut = getReturnAddrFrameIndex(DAG); |
5066 | LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo()); |
5067 | Chain = SDValue(LROpOut.getNode(), 1); |
5068 | } |
5069 | return Chain; |
5070 | } |
5071 | |
5072 | /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified |
5073 | /// by "Src" to address "Dst" of size "Size". Alignment information is |
5074 | /// specified by the specific parameter attribute. The copy will be passed as |
5075 | /// a byval function parameter. |
5076 | /// Sometimes what we are copying is the end of a larger object, the part that |
5077 | /// does not fit in registers. |
5078 | static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst, |
5079 | SDValue Chain, ISD::ArgFlagsTy Flags, |
5080 | SelectionDAG &DAG, const SDLoc &dl) { |
5081 | SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32); |
5082 | return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, |
5083 | Flags.getNonZeroByValAlign(), false, false, false, |
5084 | MachinePointerInfo(), MachinePointerInfo()); |
5085 | } |
5086 | |
5087 | /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of |
5088 | /// tail calls. |
5089 | static void LowerMemOpCallTo( |
5090 | SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg, |
5091 | SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64, |
5092 | bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains, |
5093 | SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) { |
5094 | EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); |
5095 | if (!isTailCall) { |
5096 | if (isVector) { |
5097 | SDValue StackPtr; |
5098 | if (isPPC64) |
5099 | StackPtr = DAG.getRegister(PPC::X1, MVT::i64); |
5100 | else |
5101 | StackPtr = DAG.getRegister(PPC::R1, MVT::i32); |
5102 | PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, |
5103 | DAG.getConstant(ArgOffset, dl, PtrVT)); |
5104 | } |
5105 | MemOpChains.push_back( |
5106 | DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo())); |
5107 | // Calculate and remember argument location. |
5108 | } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset, |
5109 | TailCallArguments); |
5110 | } |
5111 | |
5112 | static void |
5113 | PrepareTailCall(SelectionDAG &DAG, SDValue &InGlue, SDValue &Chain, |
5114 | const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp, |
5115 | SDValue FPOp, |
5116 | SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) { |
5117 | // Emit a sequence of copyto/copyfrom virtual registers for arguments that |
5118 | // might overwrite each other in case of tail call optimization. |
5119 | SmallVector<SDValue, 8> MemOpChains2; |
5120 | // Do not flag preceding copytoreg stuff together with the following stuff. |
5121 | InGlue = SDValue(); |
5122 | StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments, |
5123 | MemOpChains2, dl); |
5124 | if (!MemOpChains2.empty()) |
5125 | Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2); |
5126 | |
5127 | // Store the return address to the appropriate stack slot. |
5128 | Chain = EmitTailCallStoreFPAndRetAddr(DAG, Chain, LROp, FPOp, SPDiff, dl); |
5129 | |
5130 | // Emit callseq_end just before tailcall node. |
5131 | Chain = DAG.getCALLSEQ_END(Chain, NumBytes, 0, InGlue, dl); |
5132 | InGlue = Chain.getValue(1); |
5133 | } |
5134 | |
5135 | // Is this global address that of a function that can be called by name (as |
5136 | // opposed to something that must hold a descriptor for an indirect call)? |
5137 | static bool isFunctionGlobalAddress(const GlobalValue *GV) { |
5138 | if (GV) { |
5139 | if (GV->isThreadLocal()) |
5140 | return false; |
5141 | |
5142 | return GV->getValueType()->isFunctionTy(); |
5143 | } |
5144 | |
5145 | return false; |
5146 | } |
5147 | |
5148 | SDValue PPCTargetLowering::LowerCallResult( |
5149 | SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool isVarArg, |
5150 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, |
5151 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { |
5152 | SmallVector<CCValAssign, 16> RVLocs; |
5153 | CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, |
5154 | *DAG.getContext()); |
5155 | |
5156 | CCRetInfo.AnalyzeCallResult( |
5157 | Ins, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold) |
5158 | ? RetCC_PPC_Cold |
5159 | : RetCC_PPC); |
5160 | |
5161 | // Copy all of the result registers out of their specified physreg. |
5162 | for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { |
5163 | CCValAssign &VA = RVLocs[i]; |
5164 | assert(VA.isRegLoc() && "Can only return in registers!"); |
5165 | |
5166 | SDValue Val; |
5167 | |
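 | // On SPE, an f64 return value arrives split across two consecutive i32 |
 | // registers and is reassembled below with BUILD_SPE64. |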
5168 | if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) { |
5169 | SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, |
5170 | InGlue); |
5171 | Chain = Lo.getValue(1); |
5172 | InGlue = Lo.getValue(2); |
5173 | VA = RVLocs[++i]; // skip ahead to next loc |
5174 | SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, |
5175 | InGlue); |
5176 | Chain = Hi.getValue(1); |
5177 | InGlue = Hi.getValue(2); |
5178 | if (!Subtarget.isLittleEndian()) |
5179 | std::swap (Lo, Hi); |
5180 | Val = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, Lo, Hi); |
5181 | } else { |
5182 | Val = DAG.getCopyFromReg(Chain, dl, |
5183 | VA.getLocReg(), VA.getLocVT(), InGlue); |
5184 | Chain = Val.getValue(1); |
5185 | InGlue = Val.getValue(2); |
5186 | } |
5187 | |
5188 | switch (VA.getLocInfo()) { |
5189 | default: llvm_unreachable("Unknown loc info!"); |
5190 | case CCValAssign::Full: break; |
5191 | case CCValAssign::AExt: |
5192 | Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); |
5193 | break; |
5194 | case CCValAssign::ZExt: |
5195 | Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val, |
5196 | DAG.getValueType(VA.getValVT())); |
5197 | Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); |
5198 | break; |
5199 | case CCValAssign::SExt: |
5200 | Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val, |
5201 | DAG.getValueType(VA.getValVT())); |
5202 | Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); |
5203 | break; |
5204 | } |
5205 | |
5206 | InVals.push_back(Val); |
5207 | } |
5208 | |
5209 | return Chain; |
5210 | } |
5211 | |
5212 | static bool isIndirectCall(const SDValue &Callee, SelectionDAG &DAG, |
5213 | const PPCSubtarget &Subtarget, bool isPatchPoint) { |
5214 | auto *G = dyn_cast<GlobalAddressSDNode>(Callee); |
5215 | const GlobalValue *GV = G ? G->getGlobal() : nullptr; |
5216 | |
5217 | // PatchPoint calls are not indirect. |
5218 | if (isPatchPoint) |
5219 | return false; |
5220 | |
5221 | if (isFunctionGlobalAddress(GV) || isa<ExternalSymbolSDNode>(Callee)) |
5222 | return false; |
5223 | |
5224 | // Darwin and 32-bit ELF can use a BLA. The descriptor-based ABIs cannot, |
5225 | // because the immediate function pointer points to a descriptor instead of |
5226 | // a function entry point. The ELFv2 ABI cannot use a BLA because the function |
5227 | // pointer immediate points to the global entry point, while the BLA would |
5228 | // need to jump to the local entry point (see rL211174). |
5229 | if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI() && |
5230 | isBLACompatibleAddress(Callee, DAG)) |
5231 | return false; |
5232 | |
5233 | return true; |
5234 | } |
5235 | |
5236 | // AIX and 64-bit ELF ABIs w/o PCRel require a TOC save/restore around calls. |
5237 | static inline bool isTOCSaveRestoreRequired(const PPCSubtarget &Subtarget) { |
5238 | return Subtarget.isAIXABI() || |
5239 | (Subtarget.is64BitELFABI() && !Subtarget.isUsingPCRelativeCalls()); |
5240 | } |
5241 | |
5242 | static unsigned getCallOpcode(PPCTargetLowering::CallFlags CFlags, |
5243 | const Function &Caller, const SDValue &Callee, |
5244 | const PPCSubtarget &Subtarget, |
5245 | const TargetMachine &TM, |
5246 | bool IsStrictFPCall = false) { |
5247 | if (CFlags.IsTailCall) |
5248 | return PPCISD::TC_RETURN; |
5249 | |
5250 | unsigned RetOpc = 0; |
5251 | // This is a call through a function pointer. |
5252 | if (CFlags.IsIndirect) { |
5253 | // AIX and the 64-bit ELF ABIs need to maintain the TOC pointer across |
5254 | // indirect calls. The save of the caller's TOC pointer to the stack will be |
5255 | // inserted into the DAG as part of call lowering. The restore of the TOC |
5256 | // pointer is modeled by using a pseudo instruction for the call opcode that |
5257 | // represents the 2 instruction sequence of an indirect branch and link, |
5258 | // immediately followed by a load of the TOC pointer from the stack save |
5259 | // slot into gpr2. For 64-bit ELFv2 ABI with PCRel, do not restore the TOC |
5260 | // as it is not saved or used. |
5261 | RetOpc = isTOCSaveRestoreRequired(Subtarget) ? PPCISD::BCTRL_LOAD_TOC |
5262 | : PPCISD::BCTRL; |
5263 | } else if (Subtarget.isUsingPCRelativeCalls()) { |
5264 | assert(Subtarget.is64BitELFABI() && "PC Relative is only on ELF ABI."); |
5265 | RetOpc = PPCISD::CALL_NOTOC; |
5266 | } else if (Subtarget.isAIXABI() || Subtarget.is64BitELFABI()) { |
5267 | // The ABIs that maintain a TOC pointer across calls need to have a nop |
5268 | // immediately following the call instruction if the caller and callee may |
5269 | // have different TOC bases. At link time if the linker determines the calls |
5270 | // may not share a TOC base, the call is redirected to a trampoline inserted |
5271 | // by the linker. The trampoline will (among other things) save the caller's |
5272 | // TOC pointer at an ABI designated offset in the linkage area and the |
5273 | // linker will rewrite the nop to be a load of the TOC pointer from the |
5274 | // linkage area into gpr2. |
5275 | auto *G = dyn_cast<GlobalAddressSDNode>(Callee); |
5276 | const GlobalValue *GV = G ? G->getGlobal() : nullptr; |
5277 | RetOpc = |
5278 | callsShareTOCBase(&Caller, GV, TM) ? PPCISD::CALL : PPCISD::CALL_NOP; |
5279 | } else |
5280 | RetOpc = PPCISD::CALL; |
5281 | if (IsStrictFPCall) { |
5282 | switch (RetOpc) { |
5283 | default: |
5284 | llvm_unreachable("Unknown call opcode"); |
5285 | case PPCISD::BCTRL_LOAD_TOC: |
5286 | RetOpc = PPCISD::BCTRL_LOAD_TOC_RM; |
5287 | break; |
5288 | case PPCISD::BCTRL: |
5289 | RetOpc = PPCISD::BCTRL_RM; |
5290 | break; |
5291 | case PPCISD::CALL_NOTOC: |
5292 | RetOpc = PPCISD::CALL_NOTOC_RM; |
5293 | break; |
5294 | case PPCISD::CALL: |
5295 | RetOpc = PPCISD::CALL_RM; |
5296 | break; |
5297 | case PPCISD::CALL_NOP: |
5298 | RetOpc = PPCISD::CALL_NOP_RM; |
5299 | break; |
5300 | } |
5301 | } |
5302 | return RetOpc; |
5303 | } |
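 | // Opcode selection summary (before the strict-FP rewrite above): |
 | //   tail call                  -> TC_RETURN |
 | //   indirect, TOC-based ABIs   -> BCTRL_LOAD_TOC |
 | //   indirect, otherwise        -> BCTRL |
 | //   direct, PC-relative        -> CALL_NOTOC |
 | //   direct, AIX/64-bit ELF     -> CALL (shared TOC base) or CALL_NOP |
 | //   direct, other ABIs         -> CALL |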
5304 | |
5305 | static SDValue transformCallee(const SDValue &Callee, SelectionDAG &DAG, |
5306 | const SDLoc &dl, const PPCSubtarget &Subtarget) { |
5307 | if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI()) |
5308 | if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) |
5309 | return SDValue(Dest, 0); |
5310 | |
5311 | // Returns true if the callee is local, and false otherwise. |
5312 | auto isLocalCallee = [&]() { |
5313 | const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee); |
5314 | const Module *Mod = DAG.getMachineFunction().getFunction().getParent(); |
5315 | const GlobalValue *GV = G ? G->getGlobal() : nullptr; |
5316 | |
5317 | return DAG.getTarget().shouldAssumeDSOLocal(*Mod, GV) && |
5318 | !isa_and_nonnull<GlobalIFunc>(GV); |
5319 | }; |
5320 | |
5321 | // The PLT is only used in 32-bit ELF PIC mode. Attempting to use the PLT in |
5322 | // a static relocation model causes some versions of GNU LD (2.17.50, at |
5323 | // least) to force BSS-PLT, instead of secure-PLT, even if all objects are |
5324 | // built with secure-PLT. |
5325 | bool UsePlt = |
5326 | Subtarget.is32BitELFABI() && !isLocalCallee() && |
5327 | Subtarget.getTargetMachine().getRelocationModel() == Reloc::PIC_; |
5328 | |
5329 | const auto getAIXFuncEntryPointSymbolSDNode = [&](const GlobalValue *GV) { |
5330 | const TargetMachine &TM = Subtarget.getTargetMachine(); |
5331 | const TargetLoweringObjectFile *TLOF = TM.getObjFileLowering(); |
5332 | MCSymbolXCOFF *S = |
5333 | cast<MCSymbolXCOFF>(TLOF->getFunctionEntryPointSymbol(GV, TM)); |
5334 | |
5335 | MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); |
5336 | return DAG.getMCSymbol(S, PtrVT); |
5337 | }; |
5338 | |
5339 | auto *G = dyn_cast<GlobalAddressSDNode>(Callee); |
5340 | const GlobalValue *GV = G ? G->getGlobal() : nullptr; |
5341 | if (isFunctionGlobalAddress(GV)) { |
5342 | const GlobalValue *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal(); |
5343 | |
5344 | if (Subtarget.isAIXABI()) { |
5345 | assert(!isa<GlobalIFunc>(GV) && "IFunc is not supported on AIX."); |
5346 | return getAIXFuncEntryPointSymbolSDNode(GV); |
5347 | } |
5348 | return DAG.getTargetGlobalAddress(GV, dl, Callee.getValueType(), 0, |
5349 | UsePlt ? PPCII::MO_PLT : 0); |
5350 | } |
5351 | |
5352 | if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { |
5353 | const char *SymName = S->getSymbol(); |
5354 | if (Subtarget.isAIXABI()) { |
5355 | // If there exists a user-declared function whose name is the same as the |
5356 | // ExternalSymbol's, then we pick up the user-declared version. |
5357 | const Module *Mod = DAG.getMachineFunction().getFunction().getParent(); |
5358 | if (const Function *F = |
5359 | dyn_cast_or_null<Function>(Mod->getNamedValue(SymName))) |
5360 | return getAIXFuncEntryPointSymbolSDNode(F); |
5361 | |
5362 | // On AIX, direct function calls reference the symbol for the function's |
5363 | // entry point, which is named by prepending a "." before the function's |
5364 | // C-linkage name. A Qualname is returned here because an external |
5365 | // function entry point is a csect with XTY_ER property. |
5366 | const auto getExternalFunctionEntryPointSymbol = [&](StringRef SymName) { |
5367 | auto &Context = DAG.getMachineFunction().getMMI().getContext(); |
5368 | MCSectionXCOFF *Sec = Context.getXCOFFSection( |
5369 | (Twine(".") + Twine(SymName)).str(), SectionKind::getMetadata(), |
5370 | XCOFF::CsectProperties(XCOFF::XMC_PR, XCOFF::XTY_ER)); |
5371 | return Sec->getQualNameSymbol(); |
5372 | }; |
5373 | |
5374 | SymName = getExternalFunctionEntryPointSymbol(SymName)->getName().data(); |
5375 | } |
5376 | return DAG.getTargetExternalSymbol(SymName, Callee.getValueType(), |
5377 | UsePlt ? PPCII::MO_PLT : 0); |
5378 | } |
5379 | |
5380 | // No transformation needed. |
5381 | assert(Callee.getNode() && "What no callee?"); |
5382 | return Callee; |
5383 | } |
5384 | |
5385 | static SDValue getOutputChainFromCallSeq(SDValue CallSeqStart) { |
5386 | assert(CallSeqStart.getOpcode() == ISD::CALLSEQ_START && |
5387 | "Expected a CALLSEQ_STARTSDNode."); |
5388 | |
5389 | // The last operand is the chain, except when the node has glue. If the node |
5390 | // has glue, then the last operand is the glue, and the chain is the second |
5391 | // last operand. |
5392 | SDValue LastValue = CallSeqStart.getValue(CallSeqStart->getNumValues() - 1); |
5393 | if (LastValue.getValueType() != MVT::Glue) |
5394 | return LastValue; |
5395 | |
5396 | return CallSeqStart.getValue(CallSeqStart->getNumValues() - 2); |
5397 | } |
5398 | |
5399 | // Creates the node that moves a function's address into the count register |
5400 | // to prepare for an indirect call instruction. |
5401 | static void prepareIndirectCall(SelectionDAG &DAG, SDValue &Callee, |
5402 | SDValue &Glue, SDValue &Chain, |
5403 | const SDLoc &dl) { |
5404 | SDValue MTCTROps[] = {Chain, Callee, Glue}; |
5405 | EVT ReturnTypes[] = {MVT::Other, MVT::Glue}; |
5406 | Chain = DAG.getNode(PPCISD::MTCTR, dl, ArrayRef(ReturnTypes, 2), |
5407 | ArrayRef(MTCTROps, Glue.getNode() ? 3 : 2)); |
5408 | // The glue is the second value produced. |
5409 | Glue = Chain.getValue(1); |
5410 | } |
5411 | |
5412 | static void prepareDescriptorIndirectCall(SelectionDAG &DAG, SDValue &Callee, |
5413 | SDValue &Glue, SDValue &Chain, |
5414 | SDValue CallSeqStart, |
5415 | const CallBase *CB, const SDLoc &dl, |
5416 | bool hasNest, |
5417 | const PPCSubtarget &Subtarget) { |
5418 | // Function pointers in the 64-bit SVR4 ABI do not point to the function |
5419 | // entry point, but to the function descriptor (the function entry point |
5420 | // address is part of the function descriptor though). |
5421 | // The function descriptor is a three doubleword structure with the |
5422 | // following fields: function entry point, TOC base address and |
5423 | // environment pointer. |
5424 | // Thus for a call through a function pointer, the following actions need |
5425 | // to be performed: |
5426 | // 1. Save the TOC of the caller in the TOC save area of its stack |
5427 | // frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()). |
5428 | // 2. Load the address of the function entry point from the function |
5429 | // descriptor. |
5430 | // 3. Load the TOC of the callee from the function descriptor into r2. |
5431 | // 4. Load the environment pointer from the function descriptor into |
5432 | // r11. |
5433 | // 5. Branch to the function entry point address. |
5434 | // 6. On return of the callee, the TOC of the caller needs to be |
5435 | // restored (this is done in FinishCall()). |
5436 | // |
5437 | // The loads are scheduled at the beginning of the call sequence, and the |
5438 | // register copies are flagged together to ensure that no other |
5439 | // operations can be scheduled in between. E.g. without flagging the |
5440 | // copies together, a TOC access in the caller could be scheduled between |
5441 | // the assignment of the callee TOC and the branch to the callee, which leads |
5442 | // to incorrect code. |
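 | // For reference (a sketch, not a type used in this file), an ELFv1 function |
 | // descriptor is laid out as three doublewords: |
 | //   struct FunctionDescriptor { |
 | //     uint64_t EntryPoint; // loaded below as LoadFuncPtr |
 | //     uint64_t TOCBase;    // loaded below as TOCPtr, copied into r2 |
 | //     uint64_t EnvPointer; // loaded below as LoadEnvPtr, copied into r11 |
 | //   }; |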
5443 | |
5444 | // Start by loading the function address from the descriptor. |
5445 | SDValue LDChain = getOutputChainFromCallSeq(CallSeqStart); |
5446 | auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors() |
5447 | ? (MachineMemOperand::MODereferenceable | |
5448 | MachineMemOperand::MOInvariant) |
5449 | : MachineMemOperand::MONone; |
5450 | |
5451 | MachinePointerInfo MPI(CB ? CB->getCalledOperand() : nullptr); |
5452 | |
5453 | // Registers used in building the DAG. |
5454 | const MCRegister EnvPtrReg = Subtarget.getEnvironmentPointerRegister(); |
5455 | const MCRegister TOCReg = Subtarget.getTOCPointerRegister(); |
5456 | |
5457 | // Offsets of descriptor members. |
5458 | const unsigned TOCAnchorOffset = Subtarget.descriptorTOCAnchorOffset(); |
5459 | const unsigned EnvPtrOffset = Subtarget.descriptorEnvironmentPointerOffset(); |
5460 | |
5461 | const MVT RegVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32; |
5462 | const Align Alignment = Subtarget.isPPC64() ? Align(8) : Align(4); |
5463 | |
5464 | // One load for the function's entry point address. |
5465 | SDValue LoadFuncPtr = DAG.getLoad(RegVT, dl, LDChain, Callee, MPI, |
5466 | Alignment, MMOFlags); |
5467 | |
5468 | // One for loading the TOC anchor for the module that contains the called |
5469 | // function. |
5470 | SDValue TOCOff = DAG.getIntPtrConstant(TOCAnchorOffset, dl); |
5471 | SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, Callee, TOCOff); |
5472 | SDValue TOCPtr = |
5473 | DAG.getLoad(RegVT, dl, LDChain, AddTOC, |
5474 | MPI.getWithOffset(TOCAnchorOffset), Alignment, MMOFlags); |
5475 | |
5476 | // One for loading the environment pointer. |
5477 | SDValue PtrOff = DAG.getIntPtrConstant(EnvPtrOffset, dl); |
5478 | SDValue AddPtr = DAG.getNode(ISD::ADD, dl, RegVT, Callee, PtrOff); |
5479 | SDValue LoadEnvPtr = |
5480 | DAG.getLoad(RegVT, dl, LDChain, AddPtr, |
5481 | MPI.getWithOffset(EnvPtrOffset), Alignment, MMOFlags); |
5482 | |
5483 | |
5484 | // Then copy the newly loaded TOC anchor to the TOC pointer. |
5485 | SDValue TOCVal = DAG.getCopyToReg(Chain, dl, TOCReg, TOCPtr, Glue); |
5486 | Chain = TOCVal.getValue(0); |
5487 | Glue = TOCVal.getValue(1); |
5488 | |
5489 | // If the function call has an explicit 'nest' parameter, it takes the |
5490 | // place of the environment pointer. |
5491 | assert((!hasNest || !Subtarget.isAIXABI()) && |
5492 | "Nest parameter is not supported on AIX."); |
5493 | if (!hasNest) { |
5494 | SDValue EnvVal = DAG.getCopyToReg(Chain, dl, EnvPtrReg, LoadEnvPtr, Glue); |
5495 | Chain = EnvVal.getValue(0); |
5496 | Glue = EnvVal.getValue(1); |
5497 | } |
5498 | |
5499 | // The rest of the indirect call sequence is the same as the non-descriptor |
5500 | // DAG. |
5501 | prepareIndirectCall(DAG, LoadFuncPtr, Glue, Chain, dl); |
5502 | } |
5503 | |
5504 | static void |
5505 | buildCallOperands(SmallVectorImpl<SDValue> &Ops, |
5506 | PPCTargetLowering::CallFlags CFlags, const SDLoc &dl, |
5507 | SelectionDAG &DAG, |
5508 | SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, |
5509 | SDValue Glue, SDValue Chain, SDValue &Callee, int SPDiff, |
5510 | const PPCSubtarget &Subtarget) { |
5511 | const bool IsPPC64 = Subtarget.isPPC64(); |
5512 | // MVT for a general purpose register. |
5513 | const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32; |
5514 | |
5515 | // First operand is always the chain. |
5516 | Ops.push_back(Chain); |
5517 | |
5518 | // If it's a direct call, pass the callee as the second operand. |
5519 | if (!CFlags.IsIndirect) |
5520 | Ops.push_back(Callee); |
5521 | else { |
5522 | assert(!CFlags.IsPatchPoint && "Patch point calls are not indirect."); |
5523 | |
5524 | // For the TOC based ABIs, we have saved the TOC pointer to the linkage area |
5525 | // on the stack (this would have been done in `LowerCall_64SVR4` or |
5526 | // `LowerCall_AIX`). The call instruction is a pseudo instruction that |
5527 | // represents both the indirect branch and a load that restores the TOC |
5528 | // pointer from the linkage area. The operand for the TOC restore is an add |
5529 | // of the TOC save offset to the stack pointer. This must be the second |
5530 | // operand: after the chain input but before any other variadic arguments. |
5531 | // For 64-bit ELFv2 ABI with PCRel, do not restore the TOC as it is not |
5532 | // saved or used. |
5533 | if (isTOCSaveRestoreRequired(Subtarget)) { |
5534 | const MCRegister StackPtrReg = Subtarget.getStackPointerRegister(); |
5535 | |
5536 | SDValue StackPtr = DAG.getRegister(StackPtrReg, RegVT); |
5537 | unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset(); |
5538 | SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl); |
5539 | SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, StackPtr, TOCOff); |
5540 | Ops.push_back(AddTOC); |
5541 | } |
5542 | |
5543 | // Add the register used for the environment pointer. |
5544 | if (Subtarget.usesFunctionDescriptors() && !CFlags.HasNest) |
5545 | Ops.push_back(DAG.getRegister(Subtarget.getEnvironmentPointerRegister(), |
5546 | RegVT)); |
5547 | |
5548 | |
5549 | // Add CTR register as callee so a bctr can be emitted later. |
5550 | if (CFlags.IsTailCall) |
5551 | Ops.push_back(DAG.getRegister(IsPPC64 ? PPC::CTR8 : PPC::CTR, RegVT)); |
5552 | } |
5553 | |
5554 | // If this is a tail call, add the stack pointer delta. |
5555 | if (CFlags.IsTailCall) |
5556 | Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32)); |
5557 | |
5558 | // Add argument registers to the end of the list so that they are known live |
5559 | // into the call. |
5560 | for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) |
5561 | Ops.push_back(DAG.getRegister(RegsToPass[i].first, |
5562 | RegsToPass[i].second.getValueType())); |
5563 | |
5564 | // We cannot add R2/X2 as an operand here for PATCHPOINT, because there is |
5565 | // no way to mark dependencies as implicit here. |
5566 | // We will add the R2/X2 dependency in EmitInstrWithCustomInserter. |
5567 | if ((Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) && |
5568 | !CFlags.IsPatchPoint && !Subtarget.isUsingPCRelativeCalls()) |
5569 | Ops.push_back(DAG.getRegister(Subtarget.getTOCPointerRegister(), RegVT)); |
5570 | |
5571 | // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls |
5572 | if (CFlags.IsVarArg && Subtarget.is32BitELFABI()) |
5573 | Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32)); |
5574 | |
5575 | // Add a register mask operand representing the call-preserved registers. |
5576 | const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); |
5577 | const uint32_t *Mask = |
5578 | TRI->getCallPreservedMask(DAG.getMachineFunction(), CFlags.CallConv); |
5579 | assert(Mask && "Missing call preserved mask for calling convention"); |
5580 | Ops.push_back(DAG.getRegisterMask(Mask)); |
5581 | |
5582 | // If the glue is valid, it is the last operand. |
5583 | if (Glue.getNode()) |
5584 | Ops.push_back(Glue); |
5585 | } |
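 | // Resulting operand order: chain, callee (direct) or TOC-restore add / env |
 | // pointer / CTR (indirect), SPDiff (tail calls), argument registers, TOC |
 | // register, CR1EQ (32-bit SVR4 vararg), register mask, and finally glue. |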
5586 | |
5587 | SDValue PPCTargetLowering::FinishCall( |
5588 | CallFlags CFlags, const SDLoc &dl, SelectionDAG &DAG, |
5589 | SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue Glue, |
5590 | SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff, |
5591 | unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins, |
5592 | SmallVectorImpl<SDValue> &InVals, const CallBase *CB) const { |
5593 | |
5594 | if ((Subtarget.is64BitELFABI() && !Subtarget.isUsingPCRelativeCalls()) || |
5595 | Subtarget.isAIXABI()) |
5596 | setUsesTOCBasePtr(DAG); |
5597 | |
5598 | unsigned CallOpc = |
5599 | getCallOpcode(CFlags, DAG.getMachineFunction().getFunction(), Callee, |
5600 | Subtarget, DAG.getTarget(), CB ? CB->isStrictFP() : false); |
5601 | |
5602 | if (!CFlags.IsIndirect) |
5603 | Callee = transformCallee(Callee, DAG, dl, Subtarget); |
5604 | else if (Subtarget.usesFunctionDescriptors()) |
5605 | prepareDescriptorIndirectCall(DAG, Callee, Glue, Chain, CallSeqStart, CB, |
5606 | dl, CFlags.HasNest, Subtarget); |
5607 | else |
5608 | prepareIndirectCall(DAG, Callee, Glue, Chain, dl); |
5609 | |
5610 | // Build the operand list for the call instruction. |
5611 | SmallVector<SDValue, 8> Ops; |
5612 | buildCallOperands(Ops, CFlags, dl, DAG, RegsToPass, Glue, Chain, Callee, |
5613 | SPDiff, Subtarget); |
5614 | |
5615 | // Emit tail call. |
5616 | if (CFlags.IsTailCall) { |
5617 | // Indirect tail calls when using PC Relative calls do not have the same |
5618 | // constraints. |
5619 | assert(((Callee.getOpcode() == ISD::Register && |
5620 | cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) || |
5621 | Callee.getOpcode() == ISD::TargetExternalSymbol || |
5622 | Callee.getOpcode() == ISD::TargetGlobalAddress || |
5623 | isa<ConstantSDNode>(Callee) || |
5624 | (CFlags.IsIndirect && Subtarget.isUsingPCRelativeCalls())) && |
5625 | "Expecting a global address, external symbol, absolute value, " |
5626 | "register or an indirect tail call when PC Relative calls are " |
5627 | "used."); |
5628 | // PC Relative calls also use TC_RETURN as the way to mark tail calls. |
5629 | assert(CallOpc == PPCISD::TC_RETURN && |
5630 | "Unexpected call opcode for a tail call."); |
5631 | DAG.getMachineFunction().getFrameInfo().setHasTailCall(); |
5632 | return DAG.getNode(CallOpc, dl, MVT::Other, Ops); |
5633 | } |
5634 | |
5635 | std::array<EVT, 2> ReturnTypes = {{MVT::Other, MVT::Glue}}; |
5636 | Chain = DAG.getNode(CallOpc, dl, ReturnTypes, Ops); |
5637 | DAG.addNoMergeSiteInfo(Chain.getNode(), CFlags.NoMerge); |
5638 | Glue = Chain.getValue(1); |
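     |   // Value 0 of the call node is the output chain; value 1 is the glue
     |   // that keeps the copies of the return values out of their physical
     |   // registers pinned directly to the call.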
5639 | |
5640 | // When performing tail call optimization the callee pops its arguments off |
5641 | // the stack. Account for this here so these bytes can be pushed back on in |
5642 | // PPCFrameLowering::eliminateCallFramePseudoInstr. |
5643 | int BytesCalleePops = (CFlags.CallConv == CallingConv::Fast && |
5644 | getTargetMachine().Options.GuaranteedTailCallOpt) |
5645 | ? NumBytes |
5646 | : 0; |
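     |   // Example with hypothetical numbers: a fastcc call that allocated 64
     |   // bytes of argument space under GuaranteedTailCallOpt reports
     |   // BytesCalleePops = 64 here, and eliminateCallFramePseudoInstr uses it
     |   // to re-adjust the caller's stack.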
5647 | |
5648 | Chain = DAG.getCALLSEQ_END(Chain, NumBytes, BytesCalleePops, Glue, dl); |
5649 | Glue = Chain.getValue(1); |
5650 | |
5651 | return LowerCallResult(Chain, Glue, CFlags.CallConv, CFlags.IsVarArg, Ins, dl, |
5652 | DAG, InVals); |
5653 | } |
5654 | |
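     | // IR-level variant of the tail-call eligibility check: unlike LowerCall
     | // below, no SelectionDAG exists yet, so the return-value info must be
     | // rebuilt with GetReturnInfo before deferring to isEligibleForTCO. A
     | // minimal usage sketch (hypothetical caller):
     | //   const PPCTargetLowering &TLI =
     | //       *MF.getSubtarget<PPCSubtarget>().getTargetLowering();
     | //   if (TLI.supportsTailCallFor(CB))
     | //     ...; // safe to shape the IR around a tail call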
5655 | bool PPCTargetLowering::supportsTailCallFor(const CallBase *CB) const { |
5656 | CallingConv::ID CalleeCC = CB->getCallingConv(); |
5657 | const Function *CallerFunc = CB->getCaller(); |
5658 | CallingConv::ID CallerCC = CallerFunc->getCallingConv(); |
5659 | const Function *CalleeFunc = CB->getCalledFunction(); |
5660 | if (!CalleeFunc) |
5661 | return false; |
5662 | const GlobalValue *CalleeGV = dyn_cast<GlobalValue>(CalleeFunc); |
5663 | |
5664 | SmallVector<ISD::OutputArg, 2> Outs; |
5665 | SmallVector<ISD::InputArg, 2> Ins; |
5666 | |
5667 | GetReturnInfo(CalleeCC, CalleeFunc->getReturnType(), |
5668 | CalleeFunc->getAttributes(), Outs, *this, |
5669 | CalleeFunc->getParent()->getDataLayout()); |
5670 | |
5671 | return isEligibleForTCO(CalleeGV, CalleeCC, CallerCC, CB, |
5672 | CalleeFunc->isVarArg(), Outs, Ins, CallerFunc, |
5673 | false /*isCalleeExternalSymbol*/); |
5674 | } |
5675 | |
5676 | bool PPCTargetLowering::isEligibleForTCO( |
5677 | const GlobalValue *CalleeGV, CallingConv::ID CalleeCC, |
5678 | CallingConv::ID CallerCC, const CallBase *CB, bool isVarArg, |
5679 | const SmallVectorImpl<ISD::OutputArg> &Outs, |
5680 | const SmallVectorImpl<ISD::InputArg> &Ins, const Function *CallerFunc, |
5681 | bool isCalleeExternalSymbol) const { |
5682 | if (Subtarget.useLongCalls() && !(CB && CB->isMustTailCall())) |
5683 | return false; |
5684 | |
5685 | if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) |
5686 | return IsEligibleForTailCallOptimization_64SVR4( |
5687 | CalleeGV, CalleeCC, CallerCC, CB, isVarArg, Outs, Ins, CallerFunc, |
5688 | isCalleeExternalSymbol); |
5689 | else |
5690 | return IsEligibleForTailCallOptimization(CalleeGV, CalleeCC, CallerCC, |
5691 | isVarArg, Ins); |
5692 | } |
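     | // The 64-bit SVR4 (ELFv1/ELFv2) path needs its own, stricter analysis
     | // because TOC-based ABIs constrain which calls may become sibling calls;
     | // the generic IsEligibleForTailCallOptimization only admits tail calls
     | // under GuaranteedTailCallOpt with matching fastcc conventions.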
5693 | |
5694 | SDValue |
5695 | PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, |
5696 | SmallVectorImpl<SDValue> &InVals) const { |
5697 | SelectionDAG &DAG = CLI.DAG; |
5698 | SDLoc &dl = CLI.DL; |
5699 | SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; |
5700 | SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; |
5701 | SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; |
5702 | SDValue Chain = CLI.Chain; |
5703 | SDValue Callee = CLI.Callee; |
5704 | bool &isTailCall = CLI.IsTailCall; |
5705 | CallingConv::ID CallConv = CLI.CallConv; |
5706 | bool isVarArg = CLI.IsVarArg; |
5707 | bool isPatchPoint = CLI.IsPatchPoint; |
5708 | const CallBase *CB = CLI.CB; |
5709 | |
5710 | if (isTailCall) { |
5711 | MachineFunction &MF = DAG.getMachineFunction(); |
5712 | CallingConv::ID CallerCC = MF.getFunction().getCallingConv(); |
5713 | auto *G = dyn_cast<GlobalAddressSDNode>(Callee); |
5714 | const GlobalValue *GV = G ? G->getGlobal() : nullptr; |
5715 | bool IsCalleeExternalSymbol = isa<ExternalSymbolSDNode>(Callee); |
5716 | |
5717 | isTailCall = |
5718 | isEligibleForTCO(GV, CallConv, CallerCC, CB, isVarArg, Outs, Ins, |
5719 | &(MF.getFunction()), IsCalleeExternalSymbol); |
5720 | if (isTailCall) { |
5721 | ++NumTailCalls; |
5722 | if (!getTargetMachine().Options.GuaranteedTailCallOpt) |
5723 | ++NumSiblingCalls; |
5724 | |
5725 |       // PC-relative calls no longer guarantee that the callee is a Global
5726 |       // Address Node. The callee could be an indirect tail call, in which
5727 |       // case the SDValue for the callee could be a load (to load the
5728 |       // address of a function pointer) or a register copy (to move the
5729 |       // address of the callee from a function parameter into a virtual
5730 |       // register). It may also be an ExternalSymbolSDNode (e.g. memcpy).
5731 |       assert((Subtarget.isUsingPCRelativeCalls() ||
5732 |               isa<GlobalAddressSDNode>(Callee)) &&
5733 |              "Callee should be an llvm::Function object.");
5734 | |
5735 |       LLVM_DEBUG(dbgs() << "TCO caller: " << DAG.getMachineFunction().getName()
5736 |                         << "\nTCO callee: ");
5737 |       LLVM_DEBUG(Callee.dump());
5738 | } |
5739 | } |
5740 | |
5741 | if (!isTailCall && CB && CB->isMustTailCall()) |
5742 | report_fatal_error("failed to perform tail call elimination on a call " |
5743 | "site marked musttail"); |
5744 | |
5745 |   // When long calls (i.e. indirect calls) are always used, every call is
5746 |   // made via a function pointer. If we have a function name, first
5747 |   // translate it into a pointer.
5748 | if (Subtarget.useLongCalls() && isa<GlobalAddressSDNode>(Callee) && |
5749 | !isTailCall) |
5750 | Callee = LowerGlobalAddress(Callee, DAG); |
5751 | |
5752 | CallFlags CFlags( |
5753 | CallConv, isTailCall, isVarArg, isPatchPoint, |
5754 | isIndirectCall(Callee, DAG, Subtarget, isPatchPoint), |
5755 | // hasNest |
5756 | Subtarget.is64BitELFABI() && |
5757 | any_of(Outs, [](ISD::OutputArg Arg) { return Arg.Flags.isNest(); }), |
5758 | CLI.NoMerge); |
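     |   // The nest flag corresponds to IR parameters marked 'nest' (the static
     |   // chain used by trampolines, e.g. a parameter written 'ptr nest
     |   // %chain'); only the 64-bit ELF ABI gives it special treatment here.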
5759 | |
5760 | if (Subtarget.isAIXABI()) |
5761 | return LowerCall_AIX(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG, |
5762 | InVals, CB); |
5763 | |
5764 |   assert(Subtarget.isSVR4ABI());
5765 | if (Subtarget.isPPC64()) |
5766 | return LowerCall_64SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG, |
5767 | InVals, CB); |
5768 | return LowerCall_32SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG, |
5769 | InVals, CB); |
5770 | } |
5771 | |
5772 | SDValue PPCTargetLowering::LowerCall_32SVR4( |
5773 | SDValue Chain, SDValue Callee, CallFlags CFlags, |
5774 | const SmallVectorImpl<ISD::OutputArg> &Outs, |
5775 | const SmallVectorImpl<SDValue> &OutVals, |
5776 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, |
5777 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, |
5778 | const CallBase *CB) const { |
5779 | // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description |
5780 | // of the 32-bit SVR4 ABI stack frame layout. |
5781 | |
5782 | const CallingConv::ID CallConv = CFlags.CallConv; |
5783 | const bool IsVarArg = CFlags.IsVarArg; |
5784 | const bool IsTailCall = CFlags.IsTailCall; |
5785 | |
5786 |   assert((CallConv == CallingConv::C ||
5787 |           CallConv == CallingConv::Cold ||
5788 |           CallConv == CallingConv::Fast) && "Unknown calling convention!");
5789 | |
5790 | const Align PtrAlign(4); |
5791 | |
5792 | MachineFunction &MF = DAG.getMachineFunction(); |
5793 | |
5794 |   // Mark this function as potentially containing a function that contains a
5795 |   // tail call. As a consequence, the frame pointer will be used for dynamic
5796 |   // allocas and for restoring the caller's stack pointer in this function's
5797 |   // epilog. This is done because the tail-called function might overwrite
5798 |   // the value in this function's (MF) stack pointer stack slot 0(SP).
5799 | if (getTargetMachine().Options.GuaranteedTailCallOpt && |
5800 | CallConv == CallingConv::Fast) |
5801 | MF.getInfo<PPCFunctionInfo>()->setHasFastCall(); |
5802 | |
5803 | // Count how many bytes are to be pushed on the stack, including the linkage |
5804 | // area, parameter list area and the part of the local variable space which |
5805 | // contains copies of aggregates which are passed by value. |
5806 | |
5807 | // Assign locations to all of the outgoing arguments. |
5808 | SmallVector<CCValAssign, 16> ArgLocs; |
5809 | PPCCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext()); |
5810 | |
5811 | // Reserve space for the linkage area on the stack. |
5812 | CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(), |
5813 | PtrAlign); |
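     |   // On 32-bit SVR4 the linkage area is 8 bytes (the back-chain word and
     |   // the LR save word), so outgoing parameters start at offset 8 from SP.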
5814 | if (useSoftFloat()) |
5815 | CCInfo.PreAnalyzeCallOperands(Outs); |
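     |   // Under soft-float, FP values reach this point already legalized into
     |   // integer pieces; the pre-analysis records the original argument types
     |   // so CC_PPC32_SVR4 (via the PPCCCState bookkeeping) can still tell
     |   // them apart when assigning locations.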
5816 | |
5817 | if (IsVarArg) { |
5818 | // Handle fixed and variable vector arguments differently. |
5819 | // Fixed vector arguments go into registers as long as registers are |
5820 | // available. Variable vector arguments always go into memory. |
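     |     // Hypothetical example: for a callee declared as
     |     //   void f(<4 x i32> %a, ...)
     |     // a fixed <4 x i32> argument may take a vector register via
     |     // CC_PPC32_SVR4, while a <4 x i32> passed through the varargs goes
     |     // to memory via CC_PPC32_SVR4_VarArg, matching the loop below.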
5821 | unsigned NumArgs = Outs.size(); |
5822 | |
5823 | for (unsigned i = 0; i != NumArgs; ++i) { |
5824 | MVT ArgVT = Outs[i].VT; |
5825 | ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; |
5826 | bool Result; |
5827 | |
5828 | if (Outs[i].IsFixed) { |
5829 | Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, |
5830 | CCInfo); |
5831 | } else { |
5832 | Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full, |
5833 | ArgFlags, CCInfo); |
5834 | } |
5835 | |
5836 | if (Result) { |
5837 | #ifndef NDEBUG |
5838 | errs() << "Call operand #" << i << " has unhandled type " |
5839 | << ArgVT << "\n"; |
5840 | #endif |
5841 |         llvm_unreachable(nullptr);
5842 | } |
5843 | } |