File: llvm/lib/Target/PowerPC/PPCISelLowering.cpp
Warning: line 15636, column 9: Assigned value is garbage or undefined
1 | //===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file implements the PPCISelLowering class. |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | |
13 | #include "PPCISelLowering.h" |
14 | #include "MCTargetDesc/PPCPredicates.h" |
15 | #include "PPC.h" |
16 | #include "PPCCCState.h" |
17 | #include "PPCCallingConv.h" |
18 | #include "PPCFrameLowering.h" |
19 | #include "PPCInstrInfo.h" |
20 | #include "PPCMachineFunctionInfo.h" |
21 | #include "PPCPerfectShuffle.h" |
22 | #include "PPCRegisterInfo.h" |
23 | #include "PPCSubtarget.h" |
24 | #include "PPCTargetMachine.h" |
25 | #include "llvm/ADT/APFloat.h" |
26 | #include "llvm/ADT/APInt.h" |
27 | #include "llvm/ADT/ArrayRef.h" |
28 | #include "llvm/ADT/DenseMap.h" |
29 | #include "llvm/ADT/None.h" |
30 | #include "llvm/ADT/STLExtras.h" |
31 | #include "llvm/ADT/SmallPtrSet.h" |
32 | #include "llvm/ADT/SmallSet.h" |
33 | #include "llvm/ADT/SmallVector.h" |
34 | #include "llvm/ADT/Statistic.h" |
35 | #include "llvm/ADT/StringRef.h" |
36 | #include "llvm/ADT/StringSwitch.h" |
37 | #include "llvm/CodeGen/CallingConvLower.h" |
38 | #include "llvm/CodeGen/ISDOpcodes.h" |
39 | #include "llvm/CodeGen/MachineBasicBlock.h" |
40 | #include "llvm/CodeGen/MachineFrameInfo.h" |
41 | #include "llvm/CodeGen/MachineFunction.h" |
42 | #include "llvm/CodeGen/MachineInstr.h" |
43 | #include "llvm/CodeGen/MachineInstrBuilder.h" |
44 | #include "llvm/CodeGen/MachineJumpTableInfo.h" |
45 | #include "llvm/CodeGen/MachineLoopInfo.h" |
46 | #include "llvm/CodeGen/MachineMemOperand.h" |
47 | #include "llvm/CodeGen/MachineModuleInfo.h" |
48 | #include "llvm/CodeGen/MachineOperand.h" |
49 | #include "llvm/CodeGen/MachineRegisterInfo.h" |
50 | #include "llvm/CodeGen/RuntimeLibcalls.h" |
51 | #include "llvm/CodeGen/SelectionDAG.h" |
52 | #include "llvm/CodeGen/SelectionDAGNodes.h" |
53 | #include "llvm/CodeGen/TargetInstrInfo.h" |
54 | #include "llvm/CodeGen/TargetLowering.h" |
55 | #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" |
56 | #include "llvm/CodeGen/TargetRegisterInfo.h" |
57 | #include "llvm/CodeGen/ValueTypes.h" |
58 | #include "llvm/IR/CallingConv.h" |
59 | #include "llvm/IR/Constant.h" |
60 | #include "llvm/IR/Constants.h" |
61 | #include "llvm/IR/DataLayout.h" |
62 | #include "llvm/IR/DebugLoc.h" |
63 | #include "llvm/IR/DerivedTypes.h" |
64 | #include "llvm/IR/Function.h" |
65 | #include "llvm/IR/GlobalValue.h" |
66 | #include "llvm/IR/IRBuilder.h" |
67 | #include "llvm/IR/Instructions.h" |
68 | #include "llvm/IR/Intrinsics.h" |
69 | #include "llvm/IR/IntrinsicsPowerPC.h" |
70 | #include "llvm/IR/Module.h" |
71 | #include "llvm/IR/Type.h" |
72 | #include "llvm/IR/Use.h" |
73 | #include "llvm/IR/Value.h" |
74 | #include "llvm/MC/MCContext.h" |
75 | #include "llvm/MC/MCExpr.h" |
76 | #include "llvm/MC/MCRegisterInfo.h" |
77 | #include "llvm/MC/MCSectionXCOFF.h" |
78 | #include "llvm/MC/MCSymbolXCOFF.h" |
79 | #include "llvm/Support/AtomicOrdering.h" |
80 | #include "llvm/Support/BranchProbability.h" |
81 | #include "llvm/Support/Casting.h" |
82 | #include "llvm/Support/CodeGen.h" |
83 | #include "llvm/Support/CommandLine.h" |
84 | #include "llvm/Support/Compiler.h" |
85 | #include "llvm/Support/Debug.h" |
86 | #include "llvm/Support/ErrorHandling.h" |
87 | #include "llvm/Support/Format.h" |
88 | #include "llvm/Support/KnownBits.h" |
89 | #include "llvm/Support/MachineValueType.h" |
90 | #include "llvm/Support/MathExtras.h" |
91 | #include "llvm/Support/raw_ostream.h" |
92 | #include "llvm/Target/TargetMachine.h" |
93 | #include "llvm/Target/TargetOptions.h" |
94 | #include <algorithm> |
95 | #include <cassert> |
96 | #include <cstdint> |
97 | #include <iterator> |
98 | #include <list> |
99 | #include <utility> |
100 | #include <vector> |
101 | |
102 | using namespace llvm; |
103 | |
104 | #define DEBUG_TYPE"ppc-lowering" "ppc-lowering" |
105 | |
106 | static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc", |
107 | cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden); |
108 | |
109 | static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref", |
110 | cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden); |
111 | |
112 | static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned", |
113 | cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden); |
114 | |
115 | static cl::opt<bool> DisableSCO("disable-ppc-sco", |
116 | cl::desc("disable sibling call optimization on ppc"), cl::Hidden); |
117 | |
118 | static cl::opt<bool> DisableInnermostLoopAlign32("disable-ppc-innermost-loop-align32", |
119 | cl::desc("don't always align innermost loop to 32 bytes on ppc"), cl::Hidden); |
120 | |
121 | static cl::opt<bool> UseAbsoluteJumpTables("ppc-use-absolute-jumptables", |
122 | cl::desc("use absolute jump tables on ppc"), cl::Hidden); |
123 | |
124 | // TODO: Remove this option once soft fp128 is fully supported. |
125 | static cl::opt<bool> |
126 | EnableSoftFP128("enable-soft-fp128", |
127 | cl::desc("temp option to enable soft fp128"), cl::Hidden); |
128 | |
129 | STATISTIC(NumTailCalls, "Number of tail calls"); |
130 | STATISTIC(NumSiblingCalls, "Number of sibling calls"); |
131 | STATISTIC(ShufflesHandledWithVPERM, "Number of shuffles lowered to a VPERM"); |
132 | STATISTIC(NumDynamicAllocaProbed, "Number of dynamic stack allocations probed"); |
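// Note: each STATISTIC(...) above expands to a static llvm::Statistic keyed by
// DEBUG_TYPE, e.g. NumTailCalls becomes
//   static llvm::Statistic NumTailCalls = {"ppc-lowering", "NumTailCalls",
//                                          "Number of tail calls"};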
133 | |
134 | static bool isNByteElemShuffleMask(ShuffleVectorSDNode *, unsigned, int); |
135 | |
136 | static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl); |
137 | |
138 | // FIXME: Remove this once the bug has been fixed! |
139 | extern cl::opt<bool> ANDIGlueBug; |
140 | |
141 | PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM, |
142 | const PPCSubtarget &STI) |
143 | : TargetLowering(TM), Subtarget(STI) { |
144 | // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all |
145 | // arguments are at least 4/8 bytes aligned. |
146 | bool isPPC64 = Subtarget.isPPC64(); |
147 | setMinStackArgumentAlignment(isPPC64 ? Align(8) : Align(4)); |
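// Illustrative (assumption): with this minimum alignment, a call such as
// void f(char c) still consumes a full 4-byte (PPC32) or 8-byte (PPC64)
// argument slot for 'c'; the byte is extended to register width first.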
148 | |
149 | // Set up the register classes. |
150 | addRegisterClass(MVT::i32, &PPC::GPRCRegClass); |
151 | if (!useSoftFloat()) { |
152 | if (hasSPE()) { |
153 | addRegisterClass(MVT::f32, &PPC::GPRCRegClass); |
154 | // EFPU2 APU only supports f32 |
155 | if (!Subtarget.hasEFPU2()) |
156 | addRegisterClass(MVT::f64, &PPC::SPERCRegClass); |
157 | } else { |
158 | addRegisterClass(MVT::f32, &PPC::F4RCRegClass); |
159 | addRegisterClass(MVT::f64, &PPC::F8RCRegClass); |
160 | } |
161 | } |
162 | |
163 | // Match BITREVERSE to customized fast code sequence in the td file. |
164 | setOperationAction(ISD::BITREVERSE, MVT::i32, Legal); |
165 | setOperationAction(ISD::BITREVERSE, MVT::i64, Legal); |
166 | |
167 | // Sub-word ATOMIC_CMP_SWAP needs to ensure that the input is zero-extended. |
168 | setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom); |
169 | |
170 | // PowerPC has an i16 but no i8 (or i1) SEXTLOAD. |
171 | for (MVT VT : MVT::integer_valuetypes()) { |
172 | setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote); |
173 | setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand); |
174 | } |
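// Illustrative (assumption): an i16 sign-extending load maps to lha, but the
// i8 load (lbz) only zero-extends, so SEXTLOAD from i8 is expanded here into
// a zero-extending load followed by an explicit sign extension (extsb).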
175 | |
176 | if (Subtarget.isISA3_0()) { |
177 | setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Legal); |
178 | setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Legal); |
179 | setTruncStoreAction(MVT::f64, MVT::f16, Legal); |
180 | setTruncStoreAction(MVT::f32, MVT::f16, Legal); |
181 | } else { |
182 | // No extending loads from f16 or HW conversions back and forth. |
183 | setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand); |
184 | setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand); |
185 | setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand); |
186 | setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand); |
187 | setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand); |
188 | setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand); |
189 | setTruncStoreAction(MVT::f64, MVT::f16, Expand); |
190 | setTruncStoreAction(MVT::f32, MVT::f16, Expand); |
191 | } |
192 | |
193 | setTruncStoreAction(MVT::f64, MVT::f32, Expand); |
194 | |
195 | // PowerPC has pre-inc loads and stores. |
196 | setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal); |
197 | setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal); |
198 | setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal); |
199 | setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal); |
200 | setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal); |
201 | setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal); |
202 | setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal); |
203 | setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal); |
204 | setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal); |
205 | setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal); |
206 | if (!Subtarget.hasSPE()) { |
207 | setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal); |
208 | setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal); |
209 | setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal); |
210 | setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal); |
211 | } |
212 | |
213 | // PowerPC uses ADDC/ADDE/SUBC/SUBE to propagate carry. |
214 | const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 }; |
215 | for (MVT VT : ScalarIntVTs) { |
216 | setOperationAction(ISD::ADDC, VT, Legal); |
217 | setOperationAction(ISD::ADDE, VT, Legal); |
218 | setOperationAction(ISD::SUBC, VT, Legal); |
219 | setOperationAction(ISD::SUBE, VT, Legal); |
220 | } |
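// Illustrative (assumption): these carry ops let a wide add (e.g. a 64-bit
// add on 32-bit PPC) lower to addc on the low word, which sets the carry bit,
// followed by adde on the high word, which consumes it.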
221 | |
222 | if (Subtarget.useCRBits()) { |
223 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); |
224 | |
225 | if (isPPC64 || Subtarget.hasFPCVT()) { |
226 | setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i1, Promote); |
227 | AddPromotedToType(ISD::STRICT_SINT_TO_FP, MVT::i1, |
228 | isPPC64 ? MVT::i64 : MVT::i32); |
229 | setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i1, Promote); |
230 | AddPromotedToType(ISD::STRICT_UINT_TO_FP, MVT::i1, |
231 | isPPC64 ? MVT::i64 : MVT::i32); |
232 | |
233 | setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote); |
234 | AddPromotedToType (ISD::SINT_TO_FP, MVT::i1, |
235 | isPPC64 ? MVT::i64 : MVT::i32); |
236 | setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote); |
237 | AddPromotedToType(ISD::UINT_TO_FP, MVT::i1, |
238 | isPPC64 ? MVT::i64 : MVT::i32); |
239 | |
240 | setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i1, Promote); |
241 | AddPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::i1, |
242 | isPPC64 ? MVT::i64 : MVT::i32); |
243 | setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i1, Promote); |
244 | AddPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::i1, |
245 | isPPC64 ? MVT::i64 : MVT::i32); |
246 | |
247 | setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote); |
248 | AddPromotedToType(ISD::FP_TO_SINT, MVT::i1, |
249 | isPPC64 ? MVT::i64 : MVT::i32); |
250 | setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote); |
251 | AddPromotedToType(ISD::FP_TO_UINT, MVT::i1, |
252 | isPPC64 ? MVT::i64 : MVT::i32); |
253 | } else { |
254 | setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i1, Custom); |
255 | setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i1, Custom); |
256 | setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom); |
257 | setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom); |
258 | } |
259 | |
260 | // PowerPC does not support direct load/store of condition registers. |
261 | setOperationAction(ISD::LOAD, MVT::i1, Custom); |
262 | setOperationAction(ISD::STORE, MVT::i1, Custom); |
263 | |
264 | // FIXME: Remove this once the ANDI glue bug is fixed: |
265 | if (ANDIGlueBug) |
266 | setOperationAction(ISD::TRUNCATE, MVT::i1, Custom); |
267 | |
268 | for (MVT VT : MVT::integer_valuetypes()) { |
269 | setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote); |
270 | setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote); |
271 | setTruncStoreAction(VT, MVT::i1, Expand); |
272 | } |
273 | |
274 | addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass); |
275 | } |
276 | |
277 | // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on |
278 | // PPC (the libcall is not available). |
279 | setOperationAction(ISD::FP_TO_SINT, MVT::ppcf128, Custom); |
280 | setOperationAction(ISD::FP_TO_UINT, MVT::ppcf128, Custom); |
281 | setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::ppcf128, Custom); |
282 | setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::ppcf128, Custom); |
283 | |
284 | // We do not currently implement these libm ops for PowerPC. |
285 | setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand); |
286 | setOperationAction(ISD::FCEIL, MVT::ppcf128, Expand); |
287 | setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand); |
288 | setOperationAction(ISD::FRINT, MVT::ppcf128, Expand); |
289 | setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand); |
290 | setOperationAction(ISD::FREM, MVT::ppcf128, Expand); |
291 | |
292 | // PowerPC has no SREM/UREM instructions unless we are on P9. |
293 | // On P9 we may use a hardware instruction to compute the remainder. |
294 | // When the result of both the remainder and the division is required, it is |
295 | // more efficient to compute the remainder from the result of the division |
296 | // rather than use the remainder instruction. The instructions are legalized |
297 | // directly because the DivRemPairsPass performs the transformation at the IR |
298 | // level (illustrated after this block). |
299 | if (Subtarget.isISA3_0()) { |
300 | setOperationAction(ISD::SREM, MVT::i32, Legal); |
301 | setOperationAction(ISD::UREM, MVT::i32, Legal); |
302 | setOperationAction(ISD::SREM, MVT::i64, Legal); |
303 | setOperationAction(ISD::UREM, MVT::i64, Legal); |
304 | } else { |
305 | setOperationAction(ISD::SREM, MVT::i32, Expand); |
306 | setOperationAction(ISD::UREM, MVT::i32, Expand); |
307 | setOperationAction(ISD::SREM, MVT::i64, Expand); |
308 | setOperationAction(ISD::UREM, MVT::i64, Expand); |
309 | } |
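// Illustrative (assumption): when both a/b and a%b are needed, DivRemPairs
// rewrites the remainder at the IR level as a - (a/b)*b, so only one divide
// is issued even though SREM/UREM are marked Legal here on P9.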
310 | |
311 | // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM. |
312 | setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand); |
313 | setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand); |
314 | setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand); |
315 | setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand); |
316 | setOperationAction(ISD::UDIVREM, MVT::i32, Expand); |
317 | setOperationAction(ISD::SDIVREM, MVT::i32, Expand); |
318 | setOperationAction(ISD::UDIVREM, MVT::i64, Expand); |
319 | setOperationAction(ISD::SDIVREM, MVT::i64, Expand); |
320 | |
321 | // Handle constrained floating-point operations on scalars. |
322 | // TODO: Handle SPE-specific operations. |
323 | setOperationAction(ISD::STRICT_FADD, MVT::f32, Legal); |
324 | setOperationAction(ISD::STRICT_FSUB, MVT::f32, Legal); |
325 | setOperationAction(ISD::STRICT_FMUL, MVT::f32, Legal); |
326 | setOperationAction(ISD::STRICT_FDIV, MVT::f32, Legal); |
327 | setOperationAction(ISD::STRICT_FMA, MVT::f32, Legal); |
328 | setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal); |
329 | |
330 | setOperationAction(ISD::STRICT_FADD, MVT::f64, Legal); |
331 | setOperationAction(ISD::STRICT_FSUB, MVT::f64, Legal); |
332 | setOperationAction(ISD::STRICT_FMUL, MVT::f64, Legal); |
333 | setOperationAction(ISD::STRICT_FDIV, MVT::f64, Legal); |
334 | setOperationAction(ISD::STRICT_FMA, MVT::f64, Legal); |
335 | if (Subtarget.hasVSX()) { |
336 | setOperationAction(ISD::STRICT_FRINT, MVT::f32, Legal); |
337 | setOperationAction(ISD::STRICT_FRINT, MVT::f64, Legal); |
338 | } |
339 | |
340 | if (Subtarget.hasFSQRT()) { |
341 | setOperationAction(ISD::STRICT_FSQRT, MVT::f32, Legal); |
342 | setOperationAction(ISD::STRICT_FSQRT, MVT::f64, Legal); |
343 | } |
344 | |
345 | if (Subtarget.hasFPRND()) { |
346 | setOperationAction(ISD::STRICT_FFLOOR, MVT::f32, Legal); |
347 | setOperationAction(ISD::STRICT_FCEIL, MVT::f32, Legal); |
348 | setOperationAction(ISD::STRICT_FTRUNC, MVT::f32, Legal); |
349 | setOperationAction(ISD::STRICT_FROUND, MVT::f32, Legal); |
350 | |
351 | setOperationAction(ISD::STRICT_FFLOOR, MVT::f64, Legal); |
352 | setOperationAction(ISD::STRICT_FCEIL, MVT::f64, Legal); |
353 | setOperationAction(ISD::STRICT_FTRUNC, MVT::f64, Legal); |
354 | setOperationAction(ISD::STRICT_FROUND, MVT::f64, Legal); |
355 | } |
356 | |
357 | // We don't support sin/cos/sqrt/fmod/pow |
358 | setOperationAction(ISD::FSIN , MVT::f64, Expand); |
359 | setOperationAction(ISD::FCOS , MVT::f64, Expand); |
360 | setOperationAction(ISD::FSINCOS, MVT::f64, Expand); |
361 | setOperationAction(ISD::FREM , MVT::f64, Expand); |
362 | setOperationAction(ISD::FPOW , MVT::f64, Expand); |
363 | setOperationAction(ISD::FSIN , MVT::f32, Expand); |
364 | setOperationAction(ISD::FCOS , MVT::f32, Expand); |
365 | setOperationAction(ISD::FSINCOS, MVT::f32, Expand); |
366 | setOperationAction(ISD::FREM , MVT::f32, Expand); |
367 | setOperationAction(ISD::FPOW , MVT::f32, Expand); |
368 | if (Subtarget.hasSPE()) { |
369 | setOperationAction(ISD::FMA , MVT::f64, Expand); |
370 | setOperationAction(ISD::FMA , MVT::f32, Expand); |
371 | } else { |
372 | setOperationAction(ISD::FMA , MVT::f64, Legal); |
373 | setOperationAction(ISD::FMA , MVT::f32, Legal); |
374 | } |
375 | |
376 | if (Subtarget.hasSPE()) |
377 | setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand); |
378 | |
379 | setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom); |
380 | |
381 | // If we're enabling GP optimizations, use hardware square root |
382 | if (!Subtarget.hasFSQRT() && |
383 | !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() && |
384 | Subtarget.hasFRE())) |
385 | setOperationAction(ISD::FSQRT, MVT::f64, Expand); |
386 | |
387 | if (!Subtarget.hasFSQRT() && |
388 | !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() && |
389 | Subtarget.hasFRES())) |
390 | setOperationAction(ISD::FSQRT, MVT::f32, Expand); |
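// Illustrative (assumption): when FSQRT itself is unavailable but the
// reciprocal estimates (FRSQRTE/FRE or their single-precision forms) are, and
// unsafe FP math is on, sqrt(x) can be formed from the reciprocal square-root
// estimate refined by Newton-Raphson iterations, so FSQRT is not Expand'ed.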
391 | |
392 | if (Subtarget.hasFCPSGN()) { |
393 | setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal); |
394 | setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal); |
395 | } else { |
396 | setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); |
397 | setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand); |
398 | } |
399 | |
400 | if (Subtarget.hasFPRND()) { |
401 | setOperationAction(ISD::FFLOOR, MVT::f64, Legal); |
402 | setOperationAction(ISD::FCEIL, MVT::f64, Legal); |
403 | setOperationAction(ISD::FTRUNC, MVT::f64, Legal); |
404 | setOperationAction(ISD::FROUND, MVT::f64, Legal); |
405 | |
406 | setOperationAction(ISD::FFLOOR, MVT::f32, Legal); |
407 | setOperationAction(ISD::FCEIL, MVT::f32, Legal); |
408 | setOperationAction(ISD::FTRUNC, MVT::f32, Legal); |
409 | setOperationAction(ISD::FROUND, MVT::f32, Legal); |
410 | } |
411 | |
412 | // PowerPC does not have BSWAP, but we can use the vector BSWAP instruction |
413 | // xxbrd to speed up scalar BSWAP64. |
414 | // CTPOP and CTTZ were introduced in P8 and P9, respectively. |
415 | setOperationAction(ISD::BSWAP, MVT::i32 , Expand); |
416 | if (Subtarget.hasP9Vector()) |
417 | setOperationAction(ISD::BSWAP, MVT::i64 , Custom); |
418 | else |
419 | setOperationAction(ISD::BSWAP, MVT::i64 , Expand); |
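// Illustrative (assumption): the Custom lowering moves the i64 into a vector
// register, byte-reverses it with xxbrd, and moves it back, which beats the
// generic shift-and-mask expansion of BSWAP when P9 vectors are available.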
420 | if (Subtarget.isISA3_0()) { |
421 | setOperationAction(ISD::CTTZ , MVT::i32 , Legal); |
422 | setOperationAction(ISD::CTTZ , MVT::i64 , Legal); |
423 | } else { |
424 | setOperationAction(ISD::CTTZ , MVT::i32 , Expand); |
425 | setOperationAction(ISD::CTTZ , MVT::i64 , Expand); |
426 | } |
427 | |
428 | if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) { |
429 | setOperationAction(ISD::CTPOP, MVT::i32 , Legal); |
430 | setOperationAction(ISD::CTPOP, MVT::i64 , Legal); |
431 | } else { |
432 | setOperationAction(ISD::CTPOP, MVT::i32 , Expand); |
433 | setOperationAction(ISD::CTPOP, MVT::i64 , Expand); |
434 | } |
435 | |
436 | // PowerPC does not have ROTR |
437 | setOperationAction(ISD::ROTR, MVT::i32 , Expand); |
438 | setOperationAction(ISD::ROTR, MVT::i64 , Expand); |
439 | |
440 | if (!Subtarget.useCRBits()) { |
441 | // PowerPC does not have Select |
442 | setOperationAction(ISD::SELECT, MVT::i32, Expand); |
443 | setOperationAction(ISD::SELECT, MVT::i64, Expand); |
444 | setOperationAction(ISD::SELECT, MVT::f32, Expand); |
445 | setOperationAction(ISD::SELECT, MVT::f64, Expand); |
446 | } |
447 | |
448 | // PowerPC wants to turn select_cc of FP into fsel when possible. |
449 | setOperationAction(ISD::SELECT_CC, MVT::f32, Custom); |
450 | setOperationAction(ISD::SELECT_CC, MVT::f64, Custom); |
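// Illustrative (assumption): the Custom hook can turn a pattern such as
//   select_cc(a, 0.0, x, y, setge)  into  fsel a, x, y
// since fsel selects its second operand when the first is >= 0.0 (unordered
// compares restrict when this is sound, so it only fires when semantics allow).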
451 | |
452 | // PowerPC wants to optimize integer setcc a bit |
453 | if (!Subtarget.useCRBits()) |
454 | setOperationAction(ISD::SETCC, MVT::i32, Custom); |
455 | |
456 | if (Subtarget.hasFPU()) { |
457 | setOperationAction(ISD::STRICT_FSETCC, MVT::f32, Legal); |
458 | setOperationAction(ISD::STRICT_FSETCC, MVT::f64, Legal); |
459 | setOperationAction(ISD::STRICT_FSETCC, MVT::f128, Legal); |
460 | |
461 | setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Legal); |
462 | setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Legal); |
463 | setOperationAction(ISD::STRICT_FSETCCS, MVT::f128, Legal); |
464 | } |
465 | |
466 | // PowerPC does not have BRCOND which requires SetCC |
467 | if (!Subtarget.useCRBits()) |
468 | setOperationAction(ISD::BRCOND, MVT::Other, Expand); |
469 | |
470 | setOperationAction(ISD::BR_JT, MVT::Other, Expand); |
471 | |
472 | if (Subtarget.hasSPE()) { |
473 | // SPE has built-in conversions |
474 | setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Legal); |
475 | setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Legal); |
476 | setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Legal); |
477 | setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal); |
478 | setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal); |
479 | setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal); |
480 | } else { |
481 | // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores. |
482 | setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom); |
483 | setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom); |
484 | |
485 | // PowerPC does not have [U|S]INT_TO_FP |
486 | setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Expand); |
487 | setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Expand); |
488 | setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand); |
489 | setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand); |
490 | } |
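// Illustrative (assumption): the classic big-endian FP_TO_SINT lowering is
//   fctiwz f0, f1        ; convert to i32, rounding toward zero
//   stfd   f0, -8(r1)    ; spill the FP register
//   lwz    r3, -4(r1)    ; reload the low word as the integer result
// which is the "FCTIWZ and some load/stores" mentioned above.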
491 | |
492 | if (Subtarget.hasDirectMove() && isPPC64) { |
493 | setOperationAction(ISD::BITCAST, MVT::f32, Legal); |
494 | setOperationAction(ISD::BITCAST, MVT::i32, Legal); |
495 | setOperationAction(ISD::BITCAST, MVT::i64, Legal); |
496 | setOperationAction(ISD::BITCAST, MVT::f64, Legal); |
497 | if (TM.Options.UnsafeFPMath) { |
498 | setOperationAction(ISD::LRINT, MVT::f64, Legal); |
499 | setOperationAction(ISD::LRINT, MVT::f32, Legal); |
500 | setOperationAction(ISD::LLRINT, MVT::f64, Legal); |
501 | setOperationAction(ISD::LLRINT, MVT::f32, Legal); |
502 | setOperationAction(ISD::LROUND, MVT::f64, Legal); |
503 | setOperationAction(ISD::LROUND, MVT::f32, Legal); |
504 | setOperationAction(ISD::LLROUND, MVT::f64, Legal); |
505 | setOperationAction(ISD::LLROUND, MVT::f32, Legal); |
506 | } |
507 | } else { |
508 | setOperationAction(ISD::BITCAST, MVT::f32, Expand); |
509 | setOperationAction(ISD::BITCAST, MVT::i32, Expand); |
510 | setOperationAction(ISD::BITCAST, MVT::i64, Expand); |
511 | setOperationAction(ISD::BITCAST, MVT::f64, Expand); |
512 | } |
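// Illustrative (assumption): with direct moves (P8), a bitcast between f64
// and i64 is a single register-to-register move (mfvsrd/mtvsrd); without
// them, the Expand action goes through a store and reload via memory.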
513 | |
514 | // We cannot sextinreg(i1). Expand to shifts. |
515 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); |
516 | |
517 | // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support |
518 | // SjLj exception handling but a light-weight setjmp/longjmp replacement to |
519 | // support continuation, user-level threading, and so on. As a result, no |
520 | // other SjLj exception interfaces are implemented; please don't build your |
521 | // own exception handling based on them. |
522 | // LLVM/Clang supports zero-cost DWARF exception handling. |
523 | setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom); |
524 | setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom); |
525 | |
526 | // We want to legalize GlobalAddress and ConstantPool nodes into the |
527 | // appropriate instructions to materialize the address. |
528 | setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); |
529 | setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom); |
530 | setOperationAction(ISD::BlockAddress, MVT::i32, Custom); |
531 | setOperationAction(ISD::ConstantPool, MVT::i32, Custom); |
532 | setOperationAction(ISD::JumpTable, MVT::i32, Custom); |
533 | setOperationAction(ISD::GlobalAddress, MVT::i64, Custom); |
534 | setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom); |
535 | setOperationAction(ISD::BlockAddress, MVT::i64, Custom); |
536 | setOperationAction(ISD::ConstantPool, MVT::i64, Custom); |
537 | setOperationAction(ISD::JumpTable, MVT::i64, Custom); |
538 | |
539 | // TRAP is legal. |
540 | setOperationAction(ISD::TRAP, MVT::Other, Legal); |
541 | |
542 | // TRAMPOLINE is custom lowered. |
543 | setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom); |
544 | setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom); |
545 | |
546 | // VASTART needs to be custom lowered to use the VarArgsFrameIndex |
547 | setOperationAction(ISD::VASTART , MVT::Other, Custom); |
548 | |
549 | if (Subtarget.is64BitELFABI()) { |
550 | // VAARG always uses double-word chunks, so promote anything smaller. |
551 | setOperationAction(ISD::VAARG, MVT::i1, Promote); |
552 | AddPromotedToType(ISD::VAARG, MVT::i1, MVT::i64); |
553 | setOperationAction(ISD::VAARG, MVT::i8, Promote); |
554 | AddPromotedToType(ISD::VAARG, MVT::i8, MVT::i64); |
555 | setOperationAction(ISD::VAARG, MVT::i16, Promote); |
556 | AddPromotedToType(ISD::VAARG, MVT::i16, MVT::i64); |
557 | setOperationAction(ISD::VAARG, MVT::i32, Promote); |
558 | AddPromotedToType(ISD::VAARG, MVT::i32, MVT::i64); |
559 | setOperationAction(ISD::VAARG, MVT::Other, Expand); |
560 | } else if (Subtarget.is32BitELFABI()) { |
561 | // VAARG is custom lowered with the 32-bit SVR4 ABI. |
562 | setOperationAction(ISD::VAARG, MVT::Other, Custom); |
563 | setOperationAction(ISD::VAARG, MVT::i64, Custom); |
564 | } else |
565 | setOperationAction(ISD::VAARG, MVT::Other, Expand); |
566 | |
567 | // VACOPY is custom lowered with the 32-bit SVR4 ABI. |
568 | if (Subtarget.is32BitELFABI()) |
569 | setOperationAction(ISD::VACOPY , MVT::Other, Custom); |
570 | else |
571 | setOperationAction(ISD::VACOPY , MVT::Other, Expand); |
572 | |
573 | // Use the default implementation. |
574 | setOperationAction(ISD::VAEND , MVT::Other, Expand); |
575 | setOperationAction(ISD::STACKSAVE , MVT::Other, Expand); |
576 | setOperationAction(ISD::STACKRESTORE , MVT::Other, Custom); |
577 | setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom); |
578 | setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64 , Custom); |
579 | setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom); |
580 | setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom); |
581 | setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom); |
582 | setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom); |
583 | |
584 | // We want to custom lower some of our intrinsics. |
585 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); |
586 | |
587 | // To handle counter-based loop conditions. |
588 | setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom); |
589 | |
590 | setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom); |
591 | setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom); |
592 | setOperationAction(ISD::INTRINSIC_VOID, MVT::i32, Custom); |
593 | setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom); |
594 | |
595 | // Comparisons that require checking two conditions. |
596 | if (Subtarget.hasSPE()) { |
597 | setCondCodeAction(ISD::SETO, MVT::f32, Expand); |
598 | setCondCodeAction(ISD::SETO, MVT::f64, Expand); |
599 | setCondCodeAction(ISD::SETUO, MVT::f32, Expand); |
600 | setCondCodeAction(ISD::SETUO, MVT::f64, Expand); |
601 | } |
602 | setCondCodeAction(ISD::SETULT, MVT::f32, Expand); |
603 | setCondCodeAction(ISD::SETULT, MVT::f64, Expand); |
604 | setCondCodeAction(ISD::SETUGT, MVT::f32, Expand); |
605 | setCondCodeAction(ISD::SETUGT, MVT::f64, Expand); |
606 | setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand); |
607 | setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand); |
608 | setCondCodeAction(ISD::SETOGE, MVT::f32, Expand); |
609 | setCondCodeAction(ISD::SETOGE, MVT::f64, Expand); |
610 | setCondCodeAction(ISD::SETOLE, MVT::f32, Expand); |
611 | setCondCodeAction(ISD::SETOLE, MVT::f64, Expand); |
612 | setCondCodeAction(ISD::SETONE, MVT::f32, Expand); |
613 | setCondCodeAction(ISD::SETONE, MVT::f64, Expand); |
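// Illustrative (assumption): an Expand'ed condition code is rewritten in
// terms of supported ones, e.g. setone (ordered not-equal) becomes
// (setolt || setogt), and setueq becomes (setoeq || setuo), at the cost of a
// second compare plus combining the condition-register results.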
614 | |
615 | setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Legal); |
616 | setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal); |
617 | |
618 | if (Subtarget.has64BitSupport()) { |
619 | // They also have instructions for converting between i64 and fp. |
620 | setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom); |
621 | setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Expand); |
622 | setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom); |
623 | setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Expand); |
624 | setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom); |
625 | setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand); |
626 | setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom); |
627 | setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand); |
628 | // This is just the low 32 bits of a (signed) fp->i64 conversion. |
629 | // We cannot do this with Promote because i64 is not a legal type. |
630 | setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom); |
631 | setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom); |
632 | |
633 | if (Subtarget.hasLFIWAX() || Subtarget.isPPC64()) { |
634 | setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom); |
635 | setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom); |
636 | } |
637 | } else { |
638 | // PowerPC does not have FP_TO_UINT on 32-bit implementations. |
639 | if (Subtarget.hasSPE()) { |
640 | setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Legal); |
641 | setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal); |
642 | } else { |
643 | setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Expand); |
644 | setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand); |
645 | } |
646 | } |
647 | |
648 | // With the instructions enabled under FPCVT, we can do everything. |
649 | if (Subtarget.hasFPCVT()) { |
650 | if (Subtarget.has64BitSupport()) { |
651 | setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom); |
652 | setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom); |
653 | setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom); |
654 | setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Custom); |
655 | setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom); |
656 | setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom); |
657 | setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom); |
658 | setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom); |
659 | } |
660 | |
661 | setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom); |
662 | setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom); |
663 | setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom); |
664 | setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Custom); |
665 | setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom); |
666 | setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom); |
667 | setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom); |
668 | setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom); |
669 | } |
670 | |
671 | if (Subtarget.use64BitRegs()) { |
672 | // 64-bit PowerPC implementations can support i64 types directly |
673 | addRegisterClass(MVT::i64, &PPC::G8RCRegClass); |
674 | // BUILD_PAIR can't be handled natively, and should be expanded to shl/or |
675 | setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand); |
676 | // 64-bit PowerPC wants to expand i128 shifts itself. |
677 | setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom); |
678 | setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom); |
679 | setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom); |
680 | } else { |
681 | // 32-bit PowerPC wants to expand i64 shifts itself. |
682 | setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom); |
683 | setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom); |
684 | setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom); |
685 | } |
686 | |
687 | // PowerPC has better expansions for funnel shifts than the generic |
688 | // TargetLowering::expandFunnelShift. |
689 | if (Subtarget.has64BitSupport()) { |
690 | setOperationAction(ISD::FSHL, MVT::i64, Custom); |
691 | setOperationAction(ISD::FSHR, MVT::i64, Custom); |
692 | } |
693 | setOperationAction(ISD::FSHL, MVT::i32, Custom); |
694 | setOperationAction(ISD::FSHR, MVT::i32, Custom); |
695 | |
696 | if (Subtarget.hasVSX()) { |
697 | setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal); |
698 | setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal); |
699 | setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal); |
700 | setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal); |
701 | } |
702 | |
703 | if (Subtarget.hasAltivec()) { |
704 | for (MVT VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) { |
705 | setOperationAction(ISD::SADDSAT, VT, Legal); |
706 | setOperationAction(ISD::SSUBSAT, VT, Legal); |
707 | setOperationAction(ISD::UADDSAT, VT, Legal); |
708 | setOperationAction(ISD::USUBSAT, VT, Legal); |
709 | } |
710 | // First set operation action for all vector types to expand. Then we |
711 | // will selectively turn on ones that can be effectively codegen'd. |
712 | for (MVT VT : MVT::fixedlen_vector_valuetypes()) { |
713 | // add/sub are legal for all supported vector VT's. |
714 | setOperationAction(ISD::ADD, VT, Legal); |
715 | setOperationAction(ISD::SUB, VT, Legal); |
716 | |
717 | // For v2i64, these are only valid with P8Vector. This is corrected after |
718 | // the loop. |
719 | if (VT.getSizeInBits() <= 128 && VT.getScalarSizeInBits() <= 64) { |
720 | setOperationAction(ISD::SMAX, VT, Legal); |
721 | setOperationAction(ISD::SMIN, VT, Legal); |
722 | setOperationAction(ISD::UMAX, VT, Legal); |
723 | setOperationAction(ISD::UMIN, VT, Legal); |
724 | } |
725 | else { |
726 | setOperationAction(ISD::SMAX, VT, Expand); |
727 | setOperationAction(ISD::SMIN, VT, Expand); |
728 | setOperationAction(ISD::UMAX, VT, Expand); |
729 | setOperationAction(ISD::UMIN, VT, Expand); |
730 | } |
731 | |
732 | if (Subtarget.hasVSX()) { |
733 | setOperationAction(ISD::FMAXNUM, VT, Legal); |
734 | setOperationAction(ISD::FMINNUM, VT, Legal); |
735 | } |
736 | |
737 | // Vector instructions introduced in P8 |
738 | if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) { |
739 | setOperationAction(ISD::CTPOP, VT, Legal); |
740 | setOperationAction(ISD::CTLZ, VT, Legal); |
741 | } |
742 | else { |
743 | setOperationAction(ISD::CTPOP, VT, Expand); |
744 | setOperationAction(ISD::CTLZ, VT, Expand); |
745 | } |
746 | |
747 | // Vector instructions introduced in P9 |
748 | if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128)) |
749 | setOperationAction(ISD::CTTZ, VT, Legal); |
750 | else |
751 | setOperationAction(ISD::CTTZ, VT, Expand); |
752 | |
753 | // We promote all shuffles to v16i8. |
754 | setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote); |
755 | AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8); |
756 | |
757 | // We promote all non-typed operations to v4i32. |
758 | setOperationAction(ISD::AND , VT, Promote); |
759 | AddPromotedToType (ISD::AND , VT, MVT::v4i32); |
760 | setOperationAction(ISD::OR , VT, Promote); |
761 | AddPromotedToType (ISD::OR , VT, MVT::v4i32); |
762 | setOperationAction(ISD::XOR , VT, Promote); |
763 | AddPromotedToType (ISD::XOR , VT, MVT::v4i32); |
764 | setOperationAction(ISD::LOAD , VT, Promote); |
765 | AddPromotedToType (ISD::LOAD , VT, MVT::v4i32); |
766 | setOperationAction(ISD::SELECT, VT, Promote); |
767 | AddPromotedToType (ISD::SELECT, VT, MVT::v4i32); |
768 | setOperationAction(ISD::VSELECT, VT, Legal); |
769 | setOperationAction(ISD::SELECT_CC, VT, Promote); |
770 | AddPromotedToType (ISD::SELECT_CC, VT, MVT::v4i32); |
771 | setOperationAction(ISD::STORE, VT, Promote); |
772 | AddPromotedToType (ISD::STORE, VT, MVT::v4i32); |
773 | |
774 | // No other operations are legal. |
775 | setOperationAction(ISD::MUL , VT, Expand); |
776 | setOperationAction(ISD::SDIV, VT, Expand); |
777 | setOperationAction(ISD::SREM, VT, Expand); |
778 | setOperationAction(ISD::UDIV, VT, Expand); |
779 | setOperationAction(ISD::UREM, VT, Expand); |
780 | setOperationAction(ISD::FDIV, VT, Expand); |
781 | setOperationAction(ISD::FREM, VT, Expand); |
782 | setOperationAction(ISD::FNEG, VT, Expand); |
783 | setOperationAction(ISD::FSQRT, VT, Expand); |
784 | setOperationAction(ISD::FLOG, VT, Expand); |
785 | setOperationAction(ISD::FLOG10, VT, Expand); |
786 | setOperationAction(ISD::FLOG2, VT, Expand); |
787 | setOperationAction(ISD::FEXP, VT, Expand); |
788 | setOperationAction(ISD::FEXP2, VT, Expand); |
789 | setOperationAction(ISD::FSIN, VT, Expand); |
790 | setOperationAction(ISD::FCOS, VT, Expand); |
791 | setOperationAction(ISD::FABS, VT, Expand); |
792 | setOperationAction(ISD::FFLOOR, VT, Expand); |
793 | setOperationAction(ISD::FCEIL, VT, Expand); |
794 | setOperationAction(ISD::FTRUNC, VT, Expand); |
795 | setOperationAction(ISD::FRINT, VT, Expand); |
796 | setOperationAction(ISD::FNEARBYINT, VT, Expand); |
797 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand); |
798 | setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand); |
799 | setOperationAction(ISD::BUILD_VECTOR, VT, Expand); |
800 | setOperationAction(ISD::MULHU, VT, Expand); |
801 | setOperationAction(ISD::MULHS, VT, Expand); |
802 | setOperationAction(ISD::UMUL_LOHI, VT, Expand); |
803 | setOperationAction(ISD::SMUL_LOHI, VT, Expand); |
804 | setOperationAction(ISD::UDIVREM, VT, Expand); |
805 | setOperationAction(ISD::SDIVREM, VT, Expand); |
806 | setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand); |
807 | setOperationAction(ISD::FPOW, VT, Expand); |
808 | setOperationAction(ISD::BSWAP, VT, Expand); |
809 | setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand); |
810 | setOperationAction(ISD::ROTL, VT, Expand); |
811 | setOperationAction(ISD::ROTR, VT, Expand); |
812 | |
813 | for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) { |
814 | setTruncStoreAction(VT, InnerVT, Expand); |
815 | setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand); |
816 | setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand); |
817 | setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand); |
818 | } |
819 | } |
820 | setOperationAction(ISD::SELECT_CC, MVT::v4i32, Expand); |
821 | if (!Subtarget.hasP8Vector()) { |
822 | setOperationAction(ISD::SMAX, MVT::v2i64, Expand); |
823 | setOperationAction(ISD::SMIN, MVT::v2i64, Expand); |
824 | setOperationAction(ISD::UMAX, MVT::v2i64, Expand); |
825 | setOperationAction(ISD::UMIN, MVT::v2i64, Expand); |
826 | } |
827 | |
828 | // We can custom expand all VECTOR_SHUFFLEs to VPERM; others we can handle |
829 | // with merges, splats, etc. |
830 | setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom); |
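// Illustrative (assumption): the Custom lowering builds a byte-permute
// control vector (often materialized from the constant pool) and emits vperm,
// falling back to cheaper merge/splat patterns when the mask matches one; the
// ShufflesHandledWithVPERM statistic counts the vperm cases.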
831 | |
832 | // Vector truncates to sub-word integers that fit in an Altivec/VSX register |
833 | // are cheap, so handle them before they get expanded to scalars. |
834 | setOperationAction(ISD::TRUNCATE, MVT::v8i8, Custom); |
835 | setOperationAction(ISD::TRUNCATE, MVT::v4i8, Custom); |
836 | setOperationAction(ISD::TRUNCATE, MVT::v2i8, Custom); |
837 | setOperationAction(ISD::TRUNCATE, MVT::v4i16, Custom); |
838 | setOperationAction(ISD::TRUNCATE, MVT::v2i16, Custom); |
839 | |
840 | setOperationAction(ISD::AND , MVT::v4i32, Legal); |
841 | setOperationAction(ISD::OR , MVT::v4i32, Legal); |
842 | setOperationAction(ISD::XOR , MVT::v4i32, Legal); |
843 | setOperationAction(ISD::LOAD , MVT::v4i32, Legal); |
844 | setOperationAction(ISD::SELECT, MVT::v4i32, |
845 | Subtarget.useCRBits() ? Legal : Expand); |
846 | setOperationAction(ISD::STORE , MVT::v4i32, Legal); |
847 | setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4i32, Legal); |
848 | setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4i32, Legal); |
849 | setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i32, Legal); |
850 | setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i32, Legal); |
851 | setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal); |
852 | setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal); |
853 | setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal); |
854 | setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal); |
855 | setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal); |
856 | setOperationAction(ISD::FCEIL, MVT::v4f32, Legal); |
857 | setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal); |
858 | setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal); |
859 | |
860 | // Custom lowering ROTL v1i128 to VECTOR_SHUFFLE v16i8. |
861 | setOperationAction(ISD::ROTL, MVT::v1i128, Custom); |
862 | // With hasAltivec set, we can lower ISD::ROTL to vrl(b|h|w). |
863 | if (Subtarget.hasAltivec()) |
864 | for (auto VT : {MVT::v4i32, MVT::v8i16, MVT::v16i8}) |
865 | setOperationAction(ISD::ROTL, VT, Legal); |
866 | // With hasP8Altivec set, we can lower ISD::ROTL to vrld. |
867 | if (Subtarget.hasP8Altivec()) |
868 | setOperationAction(ISD::ROTL, MVT::v2i64, Legal); |
869 | |
870 | addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass); |
871 | addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass); |
872 | addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass); |
873 | addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass); |
874 | |
875 | setOperationAction(ISD::MUL, MVT::v4f32, Legal); |
876 | setOperationAction(ISD::FMA, MVT::v4f32, Legal); |
877 | |
878 | if (Subtarget.hasVSX()) { |
879 | setOperationAction(ISD::FDIV, MVT::v4f32, Legal); |
880 | setOperationAction(ISD::FSQRT, MVT::v4f32, Legal); |
881 | } |
882 | |
883 | if (Subtarget.hasP8Altivec()) |
884 | setOperationAction(ISD::MUL, MVT::v4i32, Legal); |
885 | else |
886 | setOperationAction(ISD::MUL, MVT::v4i32, Custom); |
887 | |
888 | if (Subtarget.isISA3_1()) { |
889 | setOperationAction(ISD::MUL, MVT::v2i64, Legal); |
890 | setOperationAction(ISD::MULHS, MVT::v2i64, Legal); |
891 | setOperationAction(ISD::MULHU, MVT::v2i64, Legal); |
892 | setOperationAction(ISD::MULHS, MVT::v4i32, Legal); |
893 | setOperationAction(ISD::MULHU, MVT::v4i32, Legal); |
894 | setOperationAction(ISD::UDIV, MVT::v2i64, Legal); |
895 | setOperationAction(ISD::SDIV, MVT::v2i64, Legal); |
896 | setOperationAction(ISD::UDIV, MVT::v4i32, Legal); |
897 | setOperationAction(ISD::SDIV, MVT::v4i32, Legal); |
898 | setOperationAction(ISD::UREM, MVT::v2i64, Legal); |
899 | setOperationAction(ISD::SREM, MVT::v2i64, Legal); |
900 | setOperationAction(ISD::UREM, MVT::v4i32, Legal); |
901 | setOperationAction(ISD::SREM, MVT::v4i32, Legal); |
902 | setOperationAction(ISD::UREM, MVT::v1i128, Legal); |
903 | setOperationAction(ISD::SREM, MVT::v1i128, Legal); |
904 | setOperationAction(ISD::UDIV, MVT::v1i128, Legal); |
905 | setOperationAction(ISD::SDIV, MVT::v1i128, Legal); |
906 | setOperationAction(ISD::ROTL, MVT::v1i128, Legal); |
907 | } |
908 | |
909 | setOperationAction(ISD::MUL, MVT::v8i16, Legal); |
910 | setOperationAction(ISD::MUL, MVT::v16i8, Custom); |
911 | |
912 | setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom); |
913 | setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom); |
914 | |
915 | setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom); |
916 | setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom); |
917 | setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom); |
918 | setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom); |
919 | |
920 | // Altivec does not contain unordered floating-point compare instructions |
921 | setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand); |
922 | setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand); |
923 | setCondCodeAction(ISD::SETO, MVT::v4f32, Expand); |
924 | setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand); |
925 | |
926 | if (Subtarget.hasVSX()) { |
927 | setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal); |
928 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal); |
929 | if (Subtarget.hasP8Vector()) { |
930 | setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal); |
931 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal); |
932 | } |
933 | if (Subtarget.hasDirectMove() && isPPC64) { |
934 | setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal); |
935 | setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal); |
936 | setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal); |
937 | setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal); |
938 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal); |
939 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal); |
940 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal); |
941 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal); |
942 | } |
943 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal); |
944 | |
945 | // The nearbyint variants are not allowed to raise the inexact exception, |
946 | // so we can only code-gen them with unsafe math. |
947 | if (TM.Options.UnsafeFPMath) { |
948 | setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal); |
949 | setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal); |
950 | } |
951 | |
952 | setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal); |
953 | setOperationAction(ISD::FCEIL, MVT::v2f64, Legal); |
954 | setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal); |
955 | setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal); |
956 | setOperationAction(ISD::FRINT, MVT::v2f64, Legal); |
957 | setOperationAction(ISD::FROUND, MVT::v2f64, Legal); |
958 | setOperationAction(ISD::FROUND, MVT::f64, Legal); |
959 | setOperationAction(ISD::FRINT, MVT::f64, Legal); |
960 | |
961 | setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal); |
962 | setOperationAction(ISD::FRINT, MVT::v4f32, Legal); |
963 | setOperationAction(ISD::FROUND, MVT::v4f32, Legal); |
964 | setOperationAction(ISD::FROUND, MVT::f32, Legal); |
965 | setOperationAction(ISD::FRINT, MVT::f32, Legal); |
966 | |
967 | setOperationAction(ISD::MUL, MVT::v2f64, Legal); |
968 | setOperationAction(ISD::FMA, MVT::v2f64, Legal); |
969 | |
970 | setOperationAction(ISD::FDIV, MVT::v2f64, Legal); |
971 | setOperationAction(ISD::FSQRT, MVT::v2f64, Legal); |
972 | |
973 | // Share the Altivec comparison restrictions. |
974 | setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand); |
975 | setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand); |
976 | setCondCodeAction(ISD::SETO, MVT::v2f64, Expand); |
977 | setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand); |
978 | |
979 | setOperationAction(ISD::LOAD, MVT::v2f64, Legal); |
980 | setOperationAction(ISD::STORE, MVT::v2f64, Legal); |
981 | |
982 | setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal); |
983 | |
984 | if (Subtarget.hasP8Vector()) |
985 | addRegisterClass(MVT::f32, &PPC::VSSRCRegClass); |
986 | |
987 | addRegisterClass(MVT::f64, &PPC::VSFRCRegClass); |
988 | |
989 | addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass); |
990 | addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass); |
991 | addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass); |
992 | |
993 | if (Subtarget.hasP8Altivec()) { |
994 | setOperationAction(ISD::SHL, MVT::v2i64, Legal); |
995 | setOperationAction(ISD::SRA, MVT::v2i64, Legal); |
996 | setOperationAction(ISD::SRL, MVT::v2i64, Legal); |
997 | |
998 | // 128-bit shifts can be accomplished via 3 instructions for SHL and |
999 | // SRL, but not for SRA because of the instructions available: |
1000 | // VS{RL} and VS{RL}O. However, due to direct move costs, it's not worth |
1001 | // doing. |
1002 | setOperationAction(ISD::SHL, MVT::v1i128, Expand); |
1003 | setOperationAction(ISD::SRL, MVT::v1i128, Expand); |
1004 | setOperationAction(ISD::SRA, MVT::v1i128, Expand); |
1005 | |
1006 | setOperationAction(ISD::SETCC, MVT::v2i64, Legal); |
1007 | } |
1008 | else { |
1009 | setOperationAction(ISD::SHL, MVT::v2i64, Expand); |
1010 | setOperationAction(ISD::SRA, MVT::v2i64, Expand); |
1011 | setOperationAction(ISD::SRL, MVT::v2i64, Expand); |
1012 | |
1013 | setOperationAction(ISD::SETCC, MVT::v2i64, Custom); |
1014 | |
1015 | // VSX v2i64 only supports non-arithmetic operations. |
1016 | setOperationAction(ISD::ADD, MVT::v2i64, Expand); |
1017 | setOperationAction(ISD::SUB, MVT::v2i64, Expand); |
1018 | } |
1019 | |
1020 | if (Subtarget.isISA3_1()) |
1021 | setOperationAction(ISD::SETCC, MVT::v1i128, Legal); |
1022 | else |
1023 | setOperationAction(ISD::SETCC, MVT::v1i128, Expand); |
1024 | |
1025 | setOperationAction(ISD::LOAD, MVT::v2i64, Promote); |
1026 | AddPromotedToType (ISD::LOAD, MVT::v2i64, MVT::v2f64); |
1027 | setOperationAction(ISD::STORE, MVT::v2i64, Promote); |
1028 | AddPromotedToType (ISD::STORE, MVT::v2i64, MVT::v2f64); |
1029 | |
1030 | setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal); |
1031 | |
1032 | setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i64, Legal); |
1033 | setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i64, Legal); |
1034 | setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2i64, Legal); |
1035 | setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2i64, Legal); |
1036 | setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal); |
1037 | setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal); |
1038 | setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal); |
1039 | setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal); |
1040 | |
1041 | // Custom handling for partial vectors of integers converted to |
1042 | // floating point. We already have optimal handling for v2i32 through |
1043 | // the DAG combine, so those aren't necessary. |
1044 | setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i8, Custom); |
1045 | setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i8, Custom); |
1046 | setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i16, Custom); |
1047 | setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i16, Custom); |
1048 | setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i8, Custom); |
1049 | setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i8, Custom); |
1050 | setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i16, Custom); |
1051 | setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i16, Custom); |
1052 | setOperationAction(ISD::UINT_TO_FP, MVT::v2i8, Custom); |
1053 | setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom); |
1054 | setOperationAction(ISD::UINT_TO_FP, MVT::v2i16, Custom); |
1055 | setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom); |
1056 | setOperationAction(ISD::SINT_TO_FP, MVT::v2i8, Custom); |
1057 | setOperationAction(ISD::SINT_TO_FP, MVT::v4i8, Custom); |
1058 | setOperationAction(ISD::SINT_TO_FP, MVT::v2i16, Custom); |
1059 | setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom); |
1060 | |
1061 | setOperationAction(ISD::FNEG, MVT::v4f32, Legal); |
1062 | setOperationAction(ISD::FNEG, MVT::v2f64, Legal); |
1063 | setOperationAction(ISD::FABS, MVT::v4f32, Legal); |
1064 | setOperationAction(ISD::FABS, MVT::v2f64, Legal); |
1065 | setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal); |
1066 | setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Legal); |
1067 | |
1068 | if (Subtarget.hasDirectMove()) |
1069 | setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom); |
1070 | setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom); |
1071 | |
1072 | // Handle constrained floating-point operations on vectors. |
1073 | // The predicate is `hasVSX` because Altivec instructions do not raise FP |
1074 | // exceptions while VSX vector instructions do. |
1075 | setOperationAction(ISD::STRICT_FADD, MVT::v4f32, Legal); |
1076 | setOperationAction(ISD::STRICT_FSUB, MVT::v4f32, Legal); |
1077 | setOperationAction(ISD::STRICT_FMUL, MVT::v4f32, Legal); |
1078 | setOperationAction(ISD::STRICT_FDIV, MVT::v4f32, Legal); |
1079 | setOperationAction(ISD::STRICT_FMA, MVT::v4f32, Legal); |
1080 | setOperationAction(ISD::STRICT_FSQRT, MVT::v4f32, Legal); |
1081 | setOperationAction(ISD::STRICT_FMAXNUM, MVT::v4f32, Legal); |
1082 | setOperationAction(ISD::STRICT_FMINNUM, MVT::v4f32, Legal); |
1083 | setOperationAction(ISD::STRICT_FRINT, MVT::v4f32, Legal); |
1084 | setOperationAction(ISD::STRICT_FFLOOR, MVT::v4f32, Legal); |
1085 | setOperationAction(ISD::STRICT_FCEIL, MVT::v4f32, Legal); |
1086 | setOperationAction(ISD::STRICT_FTRUNC, MVT::v4f32, Legal); |
1087 | setOperationAction(ISD::STRICT_FROUND, MVT::v4f32, Legal); |
1088 | |
1089 | setOperationAction(ISD::STRICT_FADD, MVT::v2f64, Legal); |
1090 | setOperationAction(ISD::STRICT_FSUB, MVT::v2f64, Legal); |
1091 | setOperationAction(ISD::STRICT_FMUL, MVT::v2f64, Legal); |
1092 | setOperationAction(ISD::STRICT_FDIV, MVT::v2f64, Legal); |
1093 | setOperationAction(ISD::STRICT_FMA, MVT::v2f64, Legal); |
1094 | setOperationAction(ISD::STRICT_FSQRT, MVT::v2f64, Legal); |
1095 | setOperationAction(ISD::STRICT_FMAXNUM, MVT::v2f64, Legal); |
1096 | setOperationAction(ISD::STRICT_FMINNUM, MVT::v2f64, Legal); |
1097 | setOperationAction(ISD::STRICT_FRINT, MVT::v2f64, Legal); |
1098 | setOperationAction(ISD::STRICT_FFLOOR, MVT::v2f64, Legal); |
1099 | setOperationAction(ISD::STRICT_FCEIL, MVT::v2f64, Legal); |
1100 | setOperationAction(ISD::STRICT_FTRUNC, MVT::v2f64, Legal); |
1101 | setOperationAction(ISD::STRICT_FROUND, MVT::v2f64, Legal); |
1102 | |
1103 | addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass); |
1104 | } |
1105 | |
1106 | if (Subtarget.hasP8Altivec()) { |
1107 | addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass); |
1108 | addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass); |
1109 | } |
1110 | |
1111 | if (Subtarget.hasP9Vector()) { |
1112 | setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom); |
1113 | setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom); |
1114 | |
1115 | // 128-bit shifts can be accomplished via 3 instructions for SHL and |
1116 | // SRL, but not for SRA because of the instructions available: |
1117 | // VS{RL} and VS{RL}O. |
1118 | setOperationAction(ISD::SHL, MVT::v1i128, Legal); |
1119 | setOperationAction(ISD::SRL, MVT::v1i128, Legal); |
1120 | setOperationAction(ISD::SRA, MVT::v1i128, Expand); |
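// Illustrative (assumption): a v1i128 SHL splats the shift amount into a
// vector register, then uses vslo (shift left by octets) for the byte part
// and vsl for the remaining 0-7 bits; SRL mirrors this with vsro/vsr. There
// is no vsra-style 128-bit instruction, so SRA stays Expand.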
1121 | |
1122 | addRegisterClass(MVT::f128, &PPC::VRRCRegClass); |
1123 | setOperationAction(ISD::FADD, MVT::f128, Legal); |
1124 | setOperationAction(ISD::FSUB, MVT::f128, Legal); |
1125 | setOperationAction(ISD::FDIV, MVT::f128, Legal); |
1126 | setOperationAction(ISD::FMUL, MVT::f128, Legal); |
1127 | setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal); |
1128 | // No extending loads to f128 on PPC. |
1129 | for (MVT FPT : MVT::fp_valuetypes()) |
1130 | setLoadExtAction(ISD::EXTLOAD, MVT::f128, FPT, Expand); |
1131 | setOperationAction(ISD::FMA, MVT::f128, Legal); |
1132 | setCondCodeAction(ISD::SETULT, MVT::f128, Expand); |
1133 | setCondCodeAction(ISD::SETUGT, MVT::f128, Expand); |
1134 | setCondCodeAction(ISD::SETUEQ, MVT::f128, Expand); |
1135 | setCondCodeAction(ISD::SETOGE, MVT::f128, Expand); |
1136 | setCondCodeAction(ISD::SETOLE, MVT::f128, Expand); |
1137 | setCondCodeAction(ISD::SETONE, MVT::f128, Expand); |
1138 | |
1139 | setOperationAction(ISD::FTRUNC, MVT::f128, Legal); |
1140 | setOperationAction(ISD::FRINT, MVT::f128, Legal); |
1141 | setOperationAction(ISD::FFLOOR, MVT::f128, Legal); |
1142 | setOperationAction(ISD::FCEIL, MVT::f128, Legal); |
1143 | setOperationAction(ISD::FNEARBYINT, MVT::f128, Legal); |
1144 | setOperationAction(ISD::FROUND, MVT::f128, Legal); |
1145 | |
1146 | setOperationAction(ISD::SELECT, MVT::f128, Expand); |
1147 | setOperationAction(ISD::FP_ROUND, MVT::f64, Legal); |
1148 | setOperationAction(ISD::FP_ROUND, MVT::f32, Legal); |
1149 | setTruncStoreAction(MVT::f128, MVT::f64, Expand); |
1150 | setTruncStoreAction(MVT::f128, MVT::f32, Expand); |
1151 | setOperationAction(ISD::BITCAST, MVT::i128, Custom); |
1152 | // There is no PowerPC implementation for these ops. |
1153 | setOperationAction(ISD::FSIN, MVT::f128, Expand); |
1154 | setOperationAction(ISD::FCOS, MVT::f128, Expand); |
1155 | setOperationAction(ISD::FPOW, MVT::f128, Expand); |
1156 | setOperationAction(ISD::FPOWI, MVT::f128, Expand); |
1157 | setOperationAction(ISD::FREM, MVT::f128, Expand); |
1158 | |
1159 | // Handle constrained floating-point operations for fp128. |
1160 | setOperationAction(ISD::STRICT_FADD, MVT::f128, Legal); |
1161 | setOperationAction(ISD::STRICT_FSUB, MVT::f128, Legal); |
1162 | setOperationAction(ISD::STRICT_FMUL, MVT::f128, Legal); |
1163 | setOperationAction(ISD::STRICT_FDIV, MVT::f128, Legal); |
1164 | setOperationAction(ISD::STRICT_FMA, MVT::f128, Legal); |
1165 | setOperationAction(ISD::STRICT_FSQRT, MVT::f128, Legal); |
1166 | setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Legal); |
1167 | setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Legal); |
1168 | setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal); |
1169 | setOperationAction(ISD::STRICT_FRINT, MVT::f128, Legal); |
1170 | setOperationAction(ISD::STRICT_FNEARBYINT, MVT::f128, Legal); |
1171 | setOperationAction(ISD::STRICT_FFLOOR, MVT::f128, Legal); |
1172 | setOperationAction(ISD::STRICT_FCEIL, MVT::f128, Legal); |
1173 | setOperationAction(ISD::STRICT_FTRUNC, MVT::f128, Legal); |
1174 | setOperationAction(ISD::STRICT_FROUND, MVT::f128, Legal); |
1175 | setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom); |
1176 | setOperationAction(ISD::BSWAP, MVT::v8i16, Legal); |
1177 | setOperationAction(ISD::BSWAP, MVT::v4i32, Legal); |
1178 | setOperationAction(ISD::BSWAP, MVT::v2i64, Legal); |
1179 | setOperationAction(ISD::BSWAP, MVT::v1i128, Legal); |
1180 | } else if (Subtarget.hasAltivec() && EnableSoftFP128) { |
1181 | addRegisterClass(MVT::f128, &PPC::VRRCRegClass); |
1182 | |
1183 | for (MVT FPT : MVT::fp_valuetypes()) |
1184 | setLoadExtAction(ISD::EXTLOAD, MVT::f128, FPT, Expand); |
1185 | |
1186 | setOperationAction(ISD::LOAD, MVT::f128, Promote); |
1187 | setOperationAction(ISD::STORE, MVT::f128, Promote); |
1188 | |
1189 | AddPromotedToType(ISD::LOAD, MVT::f128, MVT::v4i32); |
1190 | AddPromotedToType(ISD::STORE, MVT::f128, MVT::v4i32); |
1191 | |
1192 | // Set FADD/FSUB as libcalls to keep the legalizer from expanding the |
1193 | // fp_to_uint and int_to_fp lowerings that rely on them. |
1194 | setOperationAction(ISD::FADD, MVT::f128, LibCall); |
1195 | setOperationAction(ISD::FSUB, MVT::f128, LibCall); |
1196 | |
1197 | setOperationAction(ISD::FMUL, MVT::f128, Expand); |
1198 | setOperationAction(ISD::FDIV, MVT::f128, Expand); |
1199 | setOperationAction(ISD::FNEG, MVT::f128, Expand); |
1200 | setOperationAction(ISD::FABS, MVT::f128, Expand); |
1201 | setOperationAction(ISD::FSIN, MVT::f128, Expand); |
1202 | setOperationAction(ISD::FCOS, MVT::f128, Expand); |
1203 | setOperationAction(ISD::FPOW, MVT::f128, Expand); |
1204 | setOperationAction(ISD::FPOWI, MVT::f128, Expand); |
1205 | setOperationAction(ISD::FREM, MVT::f128, Expand); |
1206 | setOperationAction(ISD::FSQRT, MVT::f128, Expand); |
1207 | setOperationAction(ISD::FMA, MVT::f128, Expand); |
1208 | setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand); |
1209 | |
1210 | setTruncStoreAction(MVT::f128, MVT::f64, Expand); |
1211 | setTruncStoreAction(MVT::f128, MVT::f32, Expand); |
1212 | |
1213 | // Expand the fp_extend if the target type is fp128. |
1214 | setOperationAction(ISD::FP_EXTEND, MVT::f128, Expand); |
1215 | setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Expand); |
1216 | |
1217 | // Custom-lower the fp_round when the source type is fp128. |
1218 | for (MVT VT : {MVT::f32, MVT::f64}) { |
1219 | setOperationAction(ISD::FP_ROUND, VT, Custom); |
1220 | setOperationAction(ISD::STRICT_FP_ROUND, VT, Custom); |
1221 | } |
1222 | } |
1223 | |
1224 | if (Subtarget.hasP9Altivec()) { |
1225 | setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom); |
1226 | setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom); |
1227 | |
1228 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Legal); |
1229 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Legal); |
1230 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Legal); |
1231 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Legal); |
1232 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Legal); |
1233 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal); |
1234 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal); |
1235 | } |
1236 | |
1237 | if (Subtarget.isISA3_1()) { |
1238 | setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom); |
1239 | setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom); |
1240 | } |
1241 | } |
1242 | |
1243 | if (Subtarget.pairedVectorMemops()) { |
1244 | addRegisterClass(MVT::v256i1, &PPC::VSRpRCRegClass); |
1245 | setOperationAction(ISD::LOAD, MVT::v256i1, Custom); |
1246 | setOperationAction(ISD::STORE, MVT::v256i1, Custom); |
1247 | } |
1248 | if (Subtarget.hasMMA()) { |
1249 | addRegisterClass(MVT::v512i1, &PPC::UACCRCRegClass); |
1250 | setOperationAction(ISD::LOAD, MVT::v512i1, Custom); |
1251 | setOperationAction(ISD::STORE, MVT::v512i1, Custom); |
1252 | setOperationAction(ISD::BUILD_VECTOR, MVT::v512i1, Custom); |
1253 | } |
1254 | |
1255 | if (Subtarget.has64BitSupport()) |
1256 | setOperationAction(ISD::PREFETCH, MVT::Other, Legal); |
1257 | |
1258 | if (Subtarget.isISA3_1()) |
1259 | setOperationAction(ISD::SRA, MVT::v1i128, Legal); |
1260 | |
1261 | setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom); |
1262 | |
1263 | if (!isPPC64) { |
1264 | setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand); |
1265 | setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand); |
1266 | } |
1267 | |
1268 | setBooleanContents(ZeroOrOneBooleanContent); |
1269 | |
1270 | if (Subtarget.hasAltivec()) { |
1271 | // Altivec instructions set fields to all zeros or all ones. |
1272 | setBooleanVectorContents(ZeroOrNegativeOneBooleanContent); |
1273 | } |
1274 | |
1275 | if (!isPPC64) { |
1276 | // These libcalls are not available in 32-bit. |
1277 | setLibcallName(RTLIB::SHL_I128, nullptr); |
1278 | setLibcallName(RTLIB::SRL_I128, nullptr); |
1279 | setLibcallName(RTLIB::SRA_I128, nullptr); |
1280 | } |
1281 | |
1282 | if (!isPPC64) |
1283 | setMaxAtomicSizeInBitsSupported(32); |
1284 | |
1285 | setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1); |
1286 | |
1287 | // We have target-specific DAG combine patterns for the following nodes: |
1288 | setTargetDAGCombine(ISD::ADD); |
1289 | setTargetDAGCombine(ISD::SHL); |
1290 | setTargetDAGCombine(ISD::SRA); |
1291 | setTargetDAGCombine(ISD::SRL); |
1292 | setTargetDAGCombine(ISD::MUL); |
1293 | setTargetDAGCombine(ISD::FMA); |
1294 | setTargetDAGCombine(ISD::SINT_TO_FP); |
1295 | setTargetDAGCombine(ISD::BUILD_VECTOR); |
1296 | if (Subtarget.hasFPCVT()) |
1297 | setTargetDAGCombine(ISD::UINT_TO_FP); |
1298 | setTargetDAGCombine(ISD::LOAD); |
1299 | setTargetDAGCombine(ISD::STORE); |
1300 | setTargetDAGCombine(ISD::BR_CC); |
1301 | if (Subtarget.useCRBits()) |
1302 | setTargetDAGCombine(ISD::BRCOND); |
1303 | setTargetDAGCombine(ISD::BSWAP); |
1304 | setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN); |
1305 | setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN); |
1306 | setTargetDAGCombine(ISD::INTRINSIC_VOID); |
1307 | |
1308 | setTargetDAGCombine(ISD::SIGN_EXTEND); |
1309 | setTargetDAGCombine(ISD::ZERO_EXTEND); |
1310 | setTargetDAGCombine(ISD::ANY_EXTEND); |
1311 | |
1312 | setTargetDAGCombine(ISD::TRUNCATE); |
1313 | setTargetDAGCombine(ISD::VECTOR_SHUFFLE); |
1314 | |
1315 | |
1316 | if (Subtarget.useCRBits()) { |
1317 | setTargetDAGCombine(ISD::TRUNCATE); |
1318 | setTargetDAGCombine(ISD::SETCC); |
1319 | setTargetDAGCombine(ISD::SELECT_CC); |
1320 | } |
1321 | |
1322 | if (Subtarget.hasP9Altivec()) { |
1323 | setTargetDAGCombine(ISD::ABS); |
1324 | setTargetDAGCombine(ISD::VSELECT); |
1325 | } |
1326 | |
1327 | setLibcallName(RTLIB::LOG_F128, "logf128"); |
1328 | setLibcallName(RTLIB::LOG2_F128, "log2f128"); |
1329 | setLibcallName(RTLIB::LOG10_F128, "log10f128"); |
1330 | setLibcallName(RTLIB::EXP_F128, "expf128"); |
1331 | setLibcallName(RTLIB::EXP2_F128, "exp2f128"); |
1332 | setLibcallName(RTLIB::SIN_F128, "sinf128"); |
1333 | setLibcallName(RTLIB::COS_F128, "cosf128"); |
1334 | setLibcallName(RTLIB::POW_F128, "powf128"); |
1335 | setLibcallName(RTLIB::FMIN_F128, "fminf128"); |
1336 | setLibcallName(RTLIB::FMAX_F128, "fmaxf128"); |
1337 | setLibcallName(RTLIB::REM_F128, "fmodf128"); |
1338 | setLibcallName(RTLIB::SQRT_F128, "sqrtf128"); |
1339 | setLibcallName(RTLIB::CEIL_F128, "ceilf128"); |
1340 | setLibcallName(RTLIB::FLOOR_F128, "floorf128"); |
1341 | setLibcallName(RTLIB::TRUNC_F128, "truncf128"); |
1342 | setLibcallName(RTLIB::ROUND_F128, "roundf128"); |
1343 | setLibcallName(RTLIB::LROUND_F128, "lroundf128"); |
1344 | setLibcallName(RTLIB::LLROUND_F128, "llroundf128"); |
1345 | setLibcallName(RTLIB::RINT_F128, "rintf128"); |
1346 | setLibcallName(RTLIB::LRINT_F128, "lrintf128"); |
1347 | setLibcallName(RTLIB::LLRINT_F128, "llrintf128"); |
1348 | setLibcallName(RTLIB::NEARBYINT_F128, "nearbyintf128"); |
1349 | setLibcallName(RTLIB::FMA_F128, "fmaf128"); |
1350 | |
1351 | // With 32 condition bits, we don't need to sink (and duplicate) compares |
1352 | // aggressively in CodeGenPrepare. |
1353 | if (Subtarget.useCRBits()) { |
1354 | setHasMultipleConditionRegisters(); |
1355 | setJumpIsExpensive(); |
1356 | } |
1357 | |
1358 | setMinFunctionAlignment(Align(4)); |
1359 | |
1360 | switch (Subtarget.getCPUDirective()) { |
1361 | default: break; |
1362 | case PPC::DIR_970: |
1363 | case PPC::DIR_A2: |
1364 | case PPC::DIR_E500: |
1365 | case PPC::DIR_E500mc: |
1366 | case PPC::DIR_E5500: |
1367 | case PPC::DIR_PWR4: |
1368 | case PPC::DIR_PWR5: |
1369 | case PPC::DIR_PWR5X: |
1370 | case PPC::DIR_PWR6: |
1371 | case PPC::DIR_PWR6X: |
1372 | case PPC::DIR_PWR7: |
1373 | case PPC::DIR_PWR8: |
1374 | case PPC::DIR_PWR9: |
1375 | case PPC::DIR_PWR10: |
1376 | case PPC::DIR_PWR_FUTURE: |
1377 | setPrefLoopAlignment(Align(16)); |
1378 | setPrefFunctionAlignment(Align(16)); |
1379 | break; |
1380 | } |
1381 | |
1382 | if (Subtarget.enableMachineScheduler()) |
1383 | setSchedulingPreference(Sched::Source); |
1384 | else |
1385 | setSchedulingPreference(Sched::Hybrid); |
1386 | |
1387 | computeRegisterProperties(STI.getRegisterInfo()); |
1388 | |
1389 | // The Freescale cores do better with aggressive inlining of memcpy and |
1390 | // friends. GCC uses the same threshold of 128 bytes (= 32 word stores). |
1391 | if (Subtarget.getCPUDirective() == PPC::DIR_E500mc || |
1392 | Subtarget.getCPUDirective() == PPC::DIR_E5500) { |
1393 | MaxStoresPerMemset = 32; |
1394 | MaxStoresPerMemsetOptSize = 16; |
1395 | MaxStoresPerMemcpy = 32; |
1396 | MaxStoresPerMemcpyOptSize = 8; |
1397 | MaxStoresPerMemmove = 32; |
1398 | MaxStoresPerMemmoveOptSize = 8; |
1399 | } else if (Subtarget.getCPUDirective() == PPC::DIR_A2) { |
1400 | // The A2 also benefits from (very) aggressive inlining of memcpy and |
1401 | // friends. The overhead of the function call, even when warm, can be |
1402 | // over one hundred cycles. |
1403 | MaxStoresPerMemset = 128; |
1404 | MaxStoresPerMemcpy = 128; |
1405 | MaxStoresPerMemmove = 128; |
1406 | MaxLoadsPerMemcmp = 128; |
1407 | } else { |
1408 | MaxLoadsPerMemcmp = 8; |
1409 | MaxLoadsPerMemcmpOptSize = 4; |
1410 | } |
1411 | |
1412 | IsStrictFPEnabled = true; |
1413 | |
1414 | // Let the subtarget (CPU) decide if a predictable select is more expensive |
1415 | // than the corresponding branch. This information is used in CGP to decide |
1416 | // when to convert selects into branches. |
1417 | PredictableSelectIsExpensive = Subtarget.isPredictableSelectIsExpensive(); |
1418 | } |
1419 | |
1420 | /// getMaxByValAlign - Helper for getByValTypeAlignment to determine |
1421 | /// the desired ByVal argument alignment. |
1422 | static void getMaxByValAlign(Type *Ty, Align &MaxAlign, Align MaxMaxAlign) { |
1423 | if (MaxAlign == MaxMaxAlign) |
1424 | return; |
1425 | if (VectorType *VTy = dyn_cast<VectorType>(Ty)) { |
1426 | if (MaxMaxAlign >= 32 && |
1427 | VTy->getPrimitiveSizeInBits().getFixedSize() >= 256) |
1428 | MaxAlign = Align(32); |
1429 | else if (VTy->getPrimitiveSizeInBits().getFixedSize() >= 128 && |
1430 | MaxAlign < 16) |
1431 | MaxAlign = Align(16); |
1432 | } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) { |
1433 | Align EltAlign; |
1434 | getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign); |
1435 | if (EltAlign > MaxAlign) |
1436 | MaxAlign = EltAlign; |
1437 | } else if (StructType *STy = dyn_cast<StructType>(Ty)) { |
1438 | for (auto *EltTy : STy->elements()) { |
1439 | Align EltAlign; |
1440 | getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign); |
1441 | if (EltAlign > MaxAlign) |
1442 | MaxAlign = EltAlign; |
1443 | if (MaxAlign == MaxMaxAlign) |
1444 | break; |
1445 | } |
1446 | } |
1447 | } |
1448 | |
1449 | /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate |
1450 | /// function arguments in the caller parameter area. |
1451 | unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty, |
1452 | const DataLayout &DL) const { |
1453 | // 16-byte and wider vectors are passed on a 16-byte boundary. |
1454 | // The rest use an 8-byte boundary on PPC64 and a 4-byte boundary on PPC32. |
1455 | Align Alignment = Subtarget.isPPC64() ? Align(8) : Align(4); |
1456 | if (Subtarget.hasAltivec()) |
1457 | getMaxByValAlign(Ty, Alignment, Align(16)); |
1458 | return Alignment.value(); |
1459 | } |
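// Editor's illustrative sketch (plain C++, guarded out of the build with
// #if 0; the values are hypothetical): how the rules above combine for a
// PPC64 aggregate containing a 128-bit AltiVec vector member.
#if 0
#include <algorithm>
#include <cassert>
int main() {
  unsigned DefaultByValAlign = 8;  // PPC64 default boundary (4 on PPC32)
  unsigned VectorMemberAlign = 16; // a >=128-bit vector member forces 16
  // getMaxByValAlign effectively takes the maximum over all members.
  unsigned ByValAlign = std::max(DefaultByValAlign, VectorMemberAlign);
  assert(ByValAlign == 16);
  return 0;
}
#endif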
1460 | |
1461 | bool PPCTargetLowering::useSoftFloat() const { |
1462 | return Subtarget.useSoftFloat(); |
1463 | } |
1464 | |
1465 | bool PPCTargetLowering::hasSPE() const { |
1466 | return Subtarget.hasSPE(); |
1467 | } |
1468 | |
1469 | bool PPCTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const { |
1470 | return VT.isScalarInteger(); |
1471 | } |
1472 | |
1473 | const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const { |
1474 | switch ((PPCISD::NodeType)Opcode) { |
1475 | case PPCISD::FIRST_NUMBER: break; |
1476 | case PPCISD::FSEL: return "PPCISD::FSEL"; |
1477 | case PPCISD::XSMAXCDP: return "PPCISD::XSMAXCDP"; |
1478 | case PPCISD::XSMINCDP: return "PPCISD::XSMINCDP"; |
1479 | case PPCISD::FCFID: return "PPCISD::FCFID"; |
1480 | case PPCISD::FCFIDU: return "PPCISD::FCFIDU"; |
1481 | case PPCISD::FCFIDS: return "PPCISD::FCFIDS"; |
1482 | case PPCISD::FCFIDUS: return "PPCISD::FCFIDUS"; |
1483 | case PPCISD::FCTIDZ: return "PPCISD::FCTIDZ"; |
1484 | case PPCISD::FCTIWZ: return "PPCISD::FCTIWZ"; |
1485 | case PPCISD::FCTIDUZ: return "PPCISD::FCTIDUZ"; |
1486 | case PPCISD::FCTIWUZ: return "PPCISD::FCTIWUZ"; |
1487 | case PPCISD::FP_TO_UINT_IN_VSR: |
1488 | return "PPCISD::FP_TO_UINT_IN_VSR,"; |
1489 | case PPCISD::FP_TO_SINT_IN_VSR: |
1490 | return "PPCISD::FP_TO_SINT_IN_VSR"; |
1491 | case PPCISD::FRE: return "PPCISD::FRE"; |
1492 | case PPCISD::FRSQRTE: return "PPCISD::FRSQRTE"; |
1493 | case PPCISD::FTSQRT: |
1494 | return "PPCISD::FTSQRT"; |
1495 | case PPCISD::FSQRT: |
1496 | return "PPCISD::FSQRT"; |
1497 | case PPCISD::STFIWX: return "PPCISD::STFIWX"; |
1498 | case PPCISD::VPERM: return "PPCISD::VPERM"; |
1499 | case PPCISD::XXSPLT: return "PPCISD::XXSPLT"; |
1500 | case PPCISD::XXSPLTI_SP_TO_DP: |
1501 | return "PPCISD::XXSPLTI_SP_TO_DP"; |
1502 | case PPCISD::XXSPLTI32DX: |
1503 | return "PPCISD::XXSPLTI32DX"; |
1504 | case PPCISD::VECINSERT: return "PPCISD::VECINSERT"; |
1505 | case PPCISD::XXPERMDI: return "PPCISD::XXPERMDI"; |
1506 | case PPCISD::VECSHL: return "PPCISD::VECSHL"; |
1507 | case PPCISD::CMPB: return "PPCISD::CMPB"; |
1508 | case PPCISD::Hi: return "PPCISD::Hi"; |
1509 | case PPCISD::Lo: return "PPCISD::Lo"; |
1510 | case PPCISD::TOC_ENTRY: return "PPCISD::TOC_ENTRY"; |
1511 | case PPCISD::ATOMIC_CMP_SWAP_8: return "PPCISD::ATOMIC_CMP_SWAP_8"; |
1512 | case PPCISD::ATOMIC_CMP_SWAP_16: return "PPCISD::ATOMIC_CMP_SWAP_16"; |
1513 | case PPCISD::DYNALLOC: return "PPCISD::DYNALLOC"; |
1514 | case PPCISD::DYNAREAOFFSET: return "PPCISD::DYNAREAOFFSET"; |
1515 | case PPCISD::PROBED_ALLOCA: return "PPCISD::PROBED_ALLOCA"; |
1516 | case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg"; |
1517 | case PPCISD::SRL: return "PPCISD::SRL"; |
1518 | case PPCISD::SRA: return "PPCISD::SRA"; |
1519 | case PPCISD::SHL: return "PPCISD::SHL"; |
1520 | case PPCISD::SRA_ADDZE: return "PPCISD::SRA_ADDZE"; |
1521 | case PPCISD::CALL: return "PPCISD::CALL"; |
1522 | case PPCISD::CALL_NOP: return "PPCISD::CALL_NOP"; |
1523 | case PPCISD::CALL_NOTOC: return "PPCISD::CALL_NOTOC"; |
1524 | case PPCISD::MTCTR: return "PPCISD::MTCTR"; |
1525 | case PPCISD::BCTRL: return "PPCISD::BCTRL"; |
1526 | case PPCISD::BCTRL_LOAD_TOC: return "PPCISD::BCTRL_LOAD_TOC"; |
1527 | case PPCISD::RET_FLAG: return "PPCISD::RET_FLAG"; |
1528 | case PPCISD::READ_TIME_BASE: return "PPCISD::READ_TIME_BASE"; |
1529 | case PPCISD::EH_SJLJ_SETJMP: return "PPCISD::EH_SJLJ_SETJMP"; |
1530 | case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP"; |
1531 | case PPCISD::MFOCRF: return "PPCISD::MFOCRF"; |
1532 | case PPCISD::MFVSR: return "PPCISD::MFVSR"; |
1533 | case PPCISD::MTVSRA: return "PPCISD::MTVSRA"; |
1534 | case PPCISD::MTVSRZ: return "PPCISD::MTVSRZ"; |
1535 | case PPCISD::SINT_VEC_TO_FP: return "PPCISD::SINT_VEC_TO_FP"; |
1536 | case PPCISD::UINT_VEC_TO_FP: return "PPCISD::UINT_VEC_TO_FP"; |
1537 | case PPCISD::SCALAR_TO_VECTOR_PERMUTED: |
1538 | return "PPCISD::SCALAR_TO_VECTOR_PERMUTED"; |
1539 | case PPCISD::ANDI_rec_1_EQ_BIT: |
1540 | return "PPCISD::ANDI_rec_1_EQ_BIT"; |
1541 | case PPCISD::ANDI_rec_1_GT_BIT: |
1542 | return "PPCISD::ANDI_rec_1_GT_BIT"; |
1543 | case PPCISD::VCMP: return "PPCISD::VCMP"; |
1544 | case PPCISD::VCMP_rec: return "PPCISD::VCMP_rec"; |
1545 | case PPCISD::LBRX: return "PPCISD::LBRX"; |
1546 | case PPCISD::STBRX: return "PPCISD::STBRX"; |
1547 | case PPCISD::LFIWAX: return "PPCISD::LFIWAX"; |
1548 | case PPCISD::LFIWZX: return "PPCISD::LFIWZX"; |
1549 | case PPCISD::LXSIZX: return "PPCISD::LXSIZX"; |
1550 | case PPCISD::STXSIX: return "PPCISD::STXSIX"; |
1551 | case PPCISD::VEXTS: return "PPCISD::VEXTS"; |
1552 | case PPCISD::LXVD2X: return "PPCISD::LXVD2X"; |
1553 | case PPCISD::STXVD2X: return "PPCISD::STXVD2X"; |
1554 | case PPCISD::LOAD_VEC_BE: return "PPCISD::LOAD_VEC_BE"; |
1555 | case PPCISD::STORE_VEC_BE: return "PPCISD::STORE_VEC_BE"; |
1556 | case PPCISD::ST_VSR_SCAL_INT: |
1557 | return "PPCISD::ST_VSR_SCAL_INT"; |
1558 | case PPCISD::COND_BRANCH: return "PPCISD::COND_BRANCH"; |
1559 | case PPCISD::BDNZ: return "PPCISD::BDNZ"; |
1560 | case PPCISD::BDZ: return "PPCISD::BDZ"; |
1561 | case PPCISD::MFFS: return "PPCISD::MFFS"; |
1562 | case PPCISD::FADDRTZ: return "PPCISD::FADDRTZ"; |
1563 | case PPCISD::TC_RETURN: return "PPCISD::TC_RETURN"; |
1564 | case PPCISD::CR6SET: return "PPCISD::CR6SET"; |
1565 | case PPCISD::CR6UNSET: return "PPCISD::CR6UNSET"; |
1566 | case PPCISD::PPC32_GOT: return "PPCISD::PPC32_GOT"; |
1567 | case PPCISD::PPC32_PICGOT: return "PPCISD::PPC32_PICGOT"; |
1568 | case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA"; |
1569 | case PPCISD::LD_GOT_TPREL_L: return "PPCISD::LD_GOT_TPREL_L"; |
1570 | case PPCISD::ADD_TLS: return "PPCISD::ADD_TLS"; |
1571 | case PPCISD::ADDIS_TLSGD_HA: return "PPCISD::ADDIS_TLSGD_HA"; |
1572 | case PPCISD::ADDI_TLSGD_L: return "PPCISD::ADDI_TLSGD_L"; |
1573 | case PPCISD::GET_TLS_ADDR: return "PPCISD::GET_TLS_ADDR"; |
1574 | case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR"; |
1575 | case PPCISD::ADDIS_TLSLD_HA: return "PPCISD::ADDIS_TLSLD_HA"; |
1576 | case PPCISD::ADDI_TLSLD_L: return "PPCISD::ADDI_TLSLD_L"; |
1577 | case PPCISD::GET_TLSLD_ADDR: return "PPCISD::GET_TLSLD_ADDR"; |
1578 | case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR"; |
1579 | case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA"; |
1580 | case PPCISD::ADDI_DTPREL_L: return "PPCISD::ADDI_DTPREL_L"; |
1581 | case PPCISD::PADDI_DTPREL: |
1582 | return "PPCISD::PADDI_DTPREL"; |
1583 | case PPCISD::VADD_SPLAT: return "PPCISD::VADD_SPLAT"; |
1584 | case PPCISD::SC: return "PPCISD::SC"; |
1585 | case PPCISD::CLRBHRB: return "PPCISD::CLRBHRB"; |
1586 | case PPCISD::MFBHRBE: return "PPCISD::MFBHRBE"; |
1587 | case PPCISD::RFEBB: return "PPCISD::RFEBB"; |
1588 | case PPCISD::XXSWAPD: return "PPCISD::XXSWAPD"; |
1589 | case PPCISD::SWAP_NO_CHAIN: return "PPCISD::SWAP_NO_CHAIN"; |
1590 | case PPCISD::VABSD: return "PPCISD::VABSD"; |
1591 | case PPCISD::BUILD_FP128: return "PPCISD::BUILD_FP128"; |
1592 | case PPCISD::BUILD_SPE64: return "PPCISD::BUILD_SPE64"; |
1593 | case PPCISD::EXTRACT_SPE: return "PPCISD::EXTRACT_SPE"; |
1594 | case PPCISD::EXTSWSLI: return "PPCISD::EXTSWSLI"; |
1595 | case PPCISD::LD_VSX_LH: return "PPCISD::LD_VSX_LH"; |
1596 | case PPCISD::FP_EXTEND_HALF: return "PPCISD::FP_EXTEND_HALF"; |
1597 | case PPCISD::MAT_PCREL_ADDR: return "PPCISD::MAT_PCREL_ADDR"; |
1598 | case PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR: |
1599 | return "PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR"; |
1600 | case PPCISD::TLS_LOCAL_EXEC_MAT_ADDR: |
1601 | return "PPCISD::TLS_LOCAL_EXEC_MAT_ADDR"; |
1602 | case PPCISD::ACC_BUILD: return "PPCISD::ACC_BUILD"; |
1603 | case PPCISD::PAIR_BUILD: return "PPCISD::PAIR_BUILD"; |
1604 | case PPCISD::EXTRACT_VSX_REG: return "PPCISD::EXTRACT_VSX_REG"; |
1605 | case PPCISD::XXMFACC: return "PPCISD::XXMFACC"; |
1606 | case PPCISD::LD_SPLAT: return "PPCISD::LD_SPLAT"; |
1607 | case PPCISD::FNMSUB: return "PPCISD::FNMSUB"; |
1608 | case PPCISD::STRICT_FADDRTZ: |
1609 | return "PPCISD::STRICT_FADDRTZ"; |
1610 | case PPCISD::STRICT_FCTIDZ: |
1611 | return "PPCISD::STRICT_FCTIDZ"; |
1612 | case PPCISD::STRICT_FCTIWZ: |
1613 | return "PPCISD::STRICT_FCTIWZ"; |
1614 | case PPCISD::STRICT_FCTIDUZ: |
1615 | return "PPCISD::STRICT_FCTIDUZ"; |
1616 | case PPCISD::STRICT_FCTIWUZ: |
1617 | return "PPCISD::STRICT_FCTIWUZ"; |
1618 | case PPCISD::STRICT_FCFID: |
1619 | return "PPCISD::STRICT_FCFID"; |
1620 | case PPCISD::STRICT_FCFIDU: |
1621 | return "PPCISD::STRICT_FCFIDU"; |
1622 | case PPCISD::STRICT_FCFIDS: |
1623 | return "PPCISD::STRICT_FCFIDS"; |
1624 | case PPCISD::STRICT_FCFIDUS: |
1625 | return "PPCISD::STRICT_FCFIDUS"; |
1626 | case PPCISD::LXVRZX: return "PPCISD::LXVRZX"; |
1627 | } |
1628 | return nullptr; |
1629 | } |
1630 | |
1631 | EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C, |
1632 | EVT VT) const { |
1633 | if (!VT.isVector()) |
1634 | return Subtarget.useCRBits() ? MVT::i1 : MVT::i32; |
1635 | |
1636 | return VT.changeVectorElementTypeToInteger(); |
1637 | } |
1638 | |
1639 | bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const { |
1640 | assert(VT.isFloatingPoint() && "Non-floating-point FMA?"); |
1641 | return true; |
1642 | } |
1643 | |
1644 | //===----------------------------------------------------------------------===// |
1645 | // Node matching predicates, for use by the tblgen matching code. |
1646 | //===----------------------------------------------------------------------===// |
1647 | |
1648 | /// isFloatingPointZero - Return true if this is 0.0 or -0.0. |
1649 | static bool isFloatingPointZero(SDValue Op) { |
1650 | if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) |
1651 | return CFP->getValueAPF().isZero(); |
1652 | else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) { |
1653 | // Maybe this has already been legalized into the constant pool? |
1654 | if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1))) |
1655 | if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal())) |
1656 | return CFP->getValueAPF().isZero(); |
1657 | } |
1658 | return false; |
1659 | } |
1660 | |
1661 | /// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return |
1662 | /// true if Op is undef or if it matches the specified value. |
1663 | static bool isConstantOrUndef(int Op, int Val) { |
1664 | return Op < 0 || Op == Val; |
1665 | } |
1666 | |
1667 | /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a |
1668 | /// VPKUHUM instruction. |
1669 | /// The ShuffleKind distinguishes between big-endian operations with |
1670 | /// two different inputs (0), either-endian operations with two identical |
1671 | /// inputs (1), and little-endian operations with two different inputs (2). |
1672 | /// For the latter, the input operands are swapped (see PPCInstrAltivec.td). |
1673 | bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, |
1674 | SelectionDAG &DAG) { |
1675 | bool IsLE = DAG.getDataLayout().isLittleEndian(); |
1676 | if (ShuffleKind == 0) { |
1677 | if (IsLE) |
1678 | return false; |
1679 | for (unsigned i = 0; i != 16; ++i) |
1680 | if (!isConstantOrUndef(N->getMaskElt(i), i*2+1)) |
1681 | return false; |
1682 | } else if (ShuffleKind == 2) { |
1683 | if (!IsLE) |
1684 | return false; |
1685 | for (unsigned i = 0; i != 16; ++i) |
1686 | if (!isConstantOrUndef(N->getMaskElt(i), i*2)) |
1687 | return false; |
1688 | } else if (ShuffleKind == 1) { |
1689 | unsigned j = IsLE ? 0 : 1; |
1690 | for (unsigned i = 0; i != 8; ++i) |
1691 | if (!isConstantOrUndef(N->getMaskElt(i), i*2+j) || |
1692 | !isConstantOrUndef(N->getMaskElt(i+8), i*2+j)) |
1693 | return false; |
1694 | } |
1695 | return true; |
1696 | } |
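// Editor's illustrative sketch (plain C++, guarded out of the build with
// #if 0; the names are made up): for ShuffleKind 0 on big-endian, the mask
// accepted above picks the odd (low-order) byte of every halfword across
// both inputs, i.e. getMaskElt(i) == i*2+1 -- exactly what vpkuhum does.
#if 0
#include <cassert>
#include <vector>
int main() {
  std::vector<int> Mask;
  for (int i = 0; i != 16; ++i)
    Mask.push_back(i * 2 + 1); // {1,3,5,...,31}: odd bytes of both inputs
  for (int i = 0; i != 16; ++i)
    assert(Mask[i] < 0 || Mask[i] == i * 2 + 1); // mirrors isConstantOrUndef
  return 0;
}
#endif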
1697 | |
1698 | /// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a |
1699 | /// VPKUWUM instruction. |
1700 | /// The ShuffleKind distinguishes between big-endian operations with |
1701 | /// two different inputs (0), either-endian operations with two identical |
1702 | /// inputs (1), and little-endian operations with two different inputs (2). |
1703 | /// For the latter, the input operands are swapped (see PPCInstrAltivec.td). |
1704 | bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, |
1705 | SelectionDAG &DAG) { |
1706 | bool IsLE = DAG.getDataLayout().isLittleEndian(); |
1707 | if (ShuffleKind == 0) { |
1708 | if (IsLE) |
1709 | return false; |
1710 | for (unsigned i = 0; i != 16; i += 2) |
1711 | if (!isConstantOrUndef(N->getMaskElt(i ), i*2+2) || |
1712 | !isConstantOrUndef(N->getMaskElt(i+1), i*2+3)) |
1713 | return false; |
1714 | } else if (ShuffleKind == 2) { |
1715 | if (!IsLE) |
1716 | return false; |
1717 | for (unsigned i = 0; i != 16; i += 2) |
1718 | if (!isConstantOrUndef(N->getMaskElt(i ), i*2) || |
1719 | !isConstantOrUndef(N->getMaskElt(i+1), i*2+1)) |
1720 | return false; |
1721 | } else if (ShuffleKind == 1) { |
1722 | unsigned j = IsLE ? 0 : 2; |
1723 | for (unsigned i = 0; i != 8; i += 2) |
1724 | if (!isConstantOrUndef(N->getMaskElt(i ), i*2+j) || |
1725 | !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) || |
1726 | !isConstantOrUndef(N->getMaskElt(i+8), i*2+j) || |
1727 | !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1)) |
1728 | return false; |
1729 | } |
1730 | return true; |
1731 | } |
1732 | |
1733 | /// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a |
1734 | /// VPKUDUM instruction, AND the VPKUDUM instruction exists for the |
1735 | /// current subtarget. |
1736 | /// |
1737 | /// The ShuffleKind distinguishes between big-endian operations with |
1738 | /// two different inputs (0), either-endian operations with two identical |
1739 | /// inputs (1), and little-endian operations with two different inputs (2). |
1740 | /// For the latter, the input operands are swapped (see PPCInstrAltivec.td). |
1741 | bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, |
1742 | SelectionDAG &DAG) { |
1743 | const PPCSubtarget& Subtarget = |
1744 | static_cast<const PPCSubtarget&>(DAG.getSubtarget()); |
1745 | if (!Subtarget.hasP8Vector()) |
1746 | return false; |
1747 | |
1748 | bool IsLE = DAG.getDataLayout().isLittleEndian(); |
1749 | if (ShuffleKind == 0) { |
1750 | if (IsLE) |
1751 | return false; |
1752 | for (unsigned i = 0; i != 16; i += 4) |
1753 | if (!isConstantOrUndef(N->getMaskElt(i ), i*2+4) || |
1754 | !isConstantOrUndef(N->getMaskElt(i+1), i*2+5) || |
1755 | !isConstantOrUndef(N->getMaskElt(i+2), i*2+6) || |
1756 | !isConstantOrUndef(N->getMaskElt(i+3), i*2+7)) |
1757 | return false; |
1758 | } else if (ShuffleKind == 2) { |
1759 | if (!IsLE) |
1760 | return false; |
1761 | for (unsigned i = 0; i != 16; i += 4) |
1762 | if (!isConstantOrUndef(N->getMaskElt(i ), i*2) || |
1763 | !isConstantOrUndef(N->getMaskElt(i+1), i*2+1) || |
1764 | !isConstantOrUndef(N->getMaskElt(i+2), i*2+2) || |
1765 | !isConstantOrUndef(N->getMaskElt(i+3), i*2+3)) |
1766 | return false; |
1767 | } else if (ShuffleKind == 1) { |
1768 | unsigned j = IsLE ? 0 : 4; |
1769 | for (unsigned i = 0; i != 8; i += 4) |
1770 | if (!isConstantOrUndef(N->getMaskElt(i ), i*2+j) || |
1771 | !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) || |
1772 | !isConstantOrUndef(N->getMaskElt(i+2), i*2+j+2) || |
1773 | !isConstantOrUndef(N->getMaskElt(i+3), i*2+j+3) || |
1774 | !isConstantOrUndef(N->getMaskElt(i+8), i*2+j) || |
1775 | !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1) || |
1776 | !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) || |
1777 | !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3)) |
1778 | return false; |
1779 | } |
1780 | return true; |
1781 | } |
1782 | |
1783 | /// isVMerge - Common function, used to match vmrg* shuffles. |
1784 | /// |
1785 | static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize, |
1786 | unsigned LHSStart, unsigned RHSStart) { |
1787 | if (N->getValueType(0) != MVT::v16i8) |
1788 | return false; |
1789 | assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) && |
1790 | "Unsupported merge size!"); |
1791 | |
1792 | for (unsigned i = 0; i != 8/UnitSize; ++i) // Step over units |
1793 | for (unsigned j = 0; j != UnitSize; ++j) { // Step over bytes within unit |
1794 | if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j), |
1795 | LHSStart+j+i*UnitSize) || |
1796 | !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j), |
1797 | RHSStart+j+i*UnitSize)) |
1798 | return false; |
1799 | } |
1800 | return true; |
1801 | } |
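// Editor's illustrative sketch (plain C++, guarded out of the build with
// #if 0; names are made up): the byte pattern isVMerge accepts for a
// big-endian vmrglb with two different inputs (UnitSize = 1, LHSStart = 8,
// RHSStart = 24): bytes of the two low halves, interleaved.
#if 0
#include <cassert>
int main() {
  int Mask[16];
  for (int i = 0; i != 8; ++i) {
    Mask[i * 2] = 8 + i;      // byte i of the low half of the LHS
    Mask[i * 2 + 1] = 24 + i; // byte i of the low half of the RHS
  }
  for (int i = 0; i != 8; ++i) // mirrors the check above with UnitSize = 1
    assert(Mask[i * 2] == 8 + i && Mask[i * 2 + 1] == 24 + i);
  return 0;
}
#endif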
1802 | |
1803 | /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for |
1804 | /// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes). |
1805 | /// The ShuffleKind distinguishes between big-endian merges with two |
1806 | /// different inputs (0), either-endian merges with two identical inputs (1), |
1807 | /// and little-endian merges with two different inputs (2). For the latter, |
1808 | /// the input operands are swapped (see PPCInstrAltivec.td). |
1809 | bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, |
1810 | unsigned ShuffleKind, SelectionDAG &DAG) { |
1811 | if (DAG.getDataLayout().isLittleEndian()) { |
1812 | if (ShuffleKind == 1) // unary |
1813 | return isVMerge(N, UnitSize, 0, 0); |
1814 | else if (ShuffleKind == 2) // swapped |
1815 | return isVMerge(N, UnitSize, 0, 16); |
1816 | else |
1817 | return false; |
1818 | } else { |
1819 | if (ShuffleKind == 1) // unary |
1820 | return isVMerge(N, UnitSize, 8, 8); |
1821 | else if (ShuffleKind == 0) // normal |
1822 | return isVMerge(N, UnitSize, 8, 24); |
1823 | else |
1824 | return false; |
1825 | } |
1826 | } |
1827 | |
1828 | /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for |
1829 | /// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes). |
1830 | /// The ShuffleKind distinguishes between big-endian merges with two |
1831 | /// different inputs (0), either-endian merges with two identical inputs (1), |
1832 | /// and little-endian merges with two different inputs (2). For the latter, |
1833 | /// the input operands are swapped (see PPCInstrAltivec.td). |
1834 | bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, |
1835 | unsigned ShuffleKind, SelectionDAG &DAG) { |
1836 | if (DAG.getDataLayout().isLittleEndian()) { |
1837 | if (ShuffleKind == 1) // unary |
1838 | return isVMerge(N, UnitSize, 8, 8); |
1839 | else if (ShuffleKind == 2) // swapped |
1840 | return isVMerge(N, UnitSize, 8, 24); |
1841 | else |
1842 | return false; |
1843 | } else { |
1844 | if (ShuffleKind == 1) // unary |
1845 | return isVMerge(N, UnitSize, 0, 0); |
1846 | else if (ShuffleKind == 0) // normal |
1847 | return isVMerge(N, UnitSize, 0, 16); |
1848 | else |
1849 | return false; |
1850 | } |
1851 | } |
1852 | |
1853 | /** |
1854 | * Common function used to match vmrgew and vmrgow shuffles |
1855 | * |
1856 | * The indexOffset determines whether to look for even or odd words in |
1857 | * the shuffle mask. This is based on the endianness of the target |
1858 | * machine. |
1859 | * - Little Endian: |
1860 | * - Use offset of 0 to check for odd elements |
1861 | * - Use offset of 4 to check for even elements |
1862 | * - Big Endian: |
1863 | * - Use offset of 0 to check for even elements |
1864 | * - Use offset of 4 to check for odd elements |
1865 | * A detailed description of the vector element ordering for little endian and |
1866 | * big endian can be found at |
1867 | * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html |
1868 | * Targeting your applications - what little endian and big endian IBM XL C/C++ |
1869 | * compiler differences mean to you |
1870 | * |
1871 | * The mask to the shuffle vector instruction specifies the indices of the |
1872 | * elements from the two input vectors to place in the result. The elements are |
1873 | * numbered in array-access order, starting with the first vector. These vectors |
1874 | * are always of type v16i8, thus each vector will contain 16 elements of |
1875 | * 8 bits each. More info on the shuffle vector can be found in the |
1876 | * http://llvm.org/docs/LangRef.html#shufflevector-instruction |
1877 | * Language Reference. |
1878 | * |
1879 | * The RHSStartValue indicates whether the same input vectors are used (unary) |
1880 | * or two different input vectors are used, based on the following: |
1881 | * - If the instruction uses the same vector for both inputs, the range of the |
1882 | * indices will be 0 to 15. In this case, the RHSStart value passed should |
1883 | * be 0. |
1884 | * - If the instruction has two different vectors then the range of the |
1885 | * indices will be 0 to 31. In this case, the RHSStart value passed should |
1886 | * be 16 (indices 0-15 specify elements in the first vector while indices 16 |
1887 | * to 31 specify elements in the second vector). |
1888 | * |
1889 | * \param[in] N The shuffle vector SD Node to analyze |
1890 | * \param[in] IndexOffset Specifies whether to look for even or odd elements |
1891 | * \param[in] RHSStartValue Specifies the starting index for the righthand input |
1892 | * vector to the shuffle_vector instruction |
1893 | * \return true iff this shuffle vector represents an even or odd word merge |
1894 | */ |
1895 | static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset, |
1896 | unsigned RHSStartValue) { |
1897 | if (N->getValueType(0) != MVT::v16i8) |
1898 | return false; |
1899 | |
1900 | for (unsigned i = 0; i < 2; ++i) |
1901 | for (unsigned j = 0; j < 4; ++j) |
1902 | if (!isConstantOrUndef(N->getMaskElt(i*4+j), |
1903 | i*RHSStartValue+j+IndexOffset) || |
1904 | !isConstantOrUndef(N->getMaskElt(i*4+j+8), |
1905 | i*RHSStartValue+j+IndexOffset+8)) |
1906 | return false; |
1907 | return true; |
1908 | } |
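// Editor's illustrative sketch (plain C++, guarded out of the build with
// #if 0; names are made up): the byte mask the word-merge check above
// accepts for a big-endian vmrgew with two inputs (IndexOffset = 0,
// RHSStartValue = 16): the even words of each input, interleaved, which is
// {0,1,2,3, 16,17,18,19, 8,9,10,11, 24,25,26,27}.
#if 0
#include <cassert>
int main() {
  int Mask[16];
  for (int i = 0; i != 2; ++i)
    for (int j = 0; j != 4; ++j) {
      Mask[i * 4 + j] = i * 16 + j;         // word 0 of LHS, then of RHS
      Mask[i * 4 + j + 8] = i * 16 + j + 8; // word 2 of LHS, then of RHS
    }
  for (int i = 0; i != 2; ++i) // mirrors the loop in isVMerge above
    for (int j = 0; j != 4; ++j)
      assert(Mask[i * 4 + j] == i * 16 + j &&
             Mask[i * 4 + j + 8] == i * 16 + j + 8);
  return 0;
}
#endif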
1909 | |
1910 | /** |
1911 | * Determine if the specified shuffle mask is suitable for the vmrgew or |
1912 | * vmrgow instructions. |
1913 | * |
1914 | * \param[in] N The shuffle vector SD Node to analyze |
1915 | * \param[in] CheckEven Check for an even merge (true) or an odd merge (false) |
1916 | * \param[in] ShuffleKind Identify the type of merge: |
1917 | * - 0 = big-endian merge with two different inputs; |
1918 | * - 1 = either-endian merge with two identical inputs; |
1919 | * - 2 = little-endian merge with two different inputs (inputs are swapped for |
1920 | * little-endian merges). |
1921 | * \param[in] DAG The current SelectionDAG |
1922 | * \return true iff this shuffle mask is a vmrgew/vmrgow merge mask |
1923 | */ |
1924 | bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven, |
1925 | unsigned ShuffleKind, SelectionDAG &DAG) { |
1926 | if (DAG.getDataLayout().isLittleEndian()) { |
1927 | unsigned indexOffset = CheckEven ? 4 : 0; |
1928 | if (ShuffleKind == 1) // Unary |
1929 | return isVMerge(N, indexOffset, 0); |
1930 | else if (ShuffleKind == 2) // swapped |
1931 | return isVMerge(N, indexOffset, 16); |
1932 | else |
1933 | return false; |
1934 | } |
1935 | else { |
1936 | unsigned indexOffset = CheckEven ? 0 : 4; |
1937 | if (ShuffleKind == 1) // Unary |
1938 | return isVMerge(N, indexOffset, 0); |
1939 | else if (ShuffleKind == 0) // Normal |
1940 | return isVMerge(N, indexOffset, 16); |
1941 | else |
1942 | return false; |
1943 | } |
1944 | return false; |
1945 | } |
1946 | |
1947 | /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift |
1948 | /// amount, otherwise return -1. |
1949 | /// The ShuffleKind distinguishes between big-endian operations with two |
1950 | /// different inputs (0), either-endian operations with two identical inputs |
1951 | /// (1), and little-endian operations with two different inputs (2). For the |
1952 | /// latter, the input operands are swapped (see PPCInstrAltivec.td). |
1953 | int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind, |
1954 | SelectionDAG &DAG) { |
1955 | if (N->getValueType(0) != MVT::v16i8) |
1956 | return -1; |
1957 | |
1958 | ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); |
1959 | |
1960 | // Find the first non-undef value in the shuffle mask. |
1961 | unsigned i; |
1962 | for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i) |
1963 | /*search*/; |
1964 | |
1965 | if (i == 16) return -1; // all undef. |
1966 | |
1967 | // Otherwise, check to see if the rest of the elements are consecutively |
1968 | // numbered from this value. |
1969 | unsigned ShiftAmt = SVOp->getMaskElt(i); |
1970 | if (ShiftAmt < i) return -1; |
1971 | |
1972 | ShiftAmt -= i; |
1973 | bool isLE = DAG.getDataLayout().isLittleEndian(); |
1974 | |
1975 | if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) { |
1976 | // Check the rest of the elements to see if they are consecutive. |
1977 | for (++i; i != 16; ++i) |
1978 | if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i)) |
1979 | return -1; |
1980 | } else if (ShuffleKind == 1) { |
1981 | // Check the rest of the elements to see if they are consecutive. |
1982 | for (++i; i != 16; ++i) |
1983 | if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15)) |
1984 | return -1; |
1985 | } else |
1986 | return -1; |
1987 | |
1988 | if (isLE) |
1989 | ShiftAmt = 16 - ShiftAmt; |
1990 | |
1991 | return ShiftAmt; |
1992 | } |
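// Editor's illustrative sketch (plain C++, guarded out of the build with
// #if 0; names are made up): a big-endian two-input vsldoi shifting by 3
// bytes is simply the consecutive mask {3,4,...,18}; the routine above
// recovers ShiftAmt = 3 (and would report 16-3 = 13 for the corresponding
// little-endian mask).
#if 0
#include <cassert>
int main() {
  int Mask[16];
  for (int i = 0; i != 16; ++i)
    Mask[i] = 3 + i; // consecutive bytes starting at 3
  unsigned ShiftAmt = Mask[0]; // first non-undef element is at i = 0
  for (int i = 1; i != 16; ++i)
    assert(Mask[i] == int(ShiftAmt) + i); // consecutive check (BE, kind 0)
  assert(ShiftAmt == 3);
  return 0;
}
#endif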
1993 | |
1994 | /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand |
1995 | /// specifies a splat of a single element that is suitable for input to |
1996 | /// one of the splat operations (VSPLTB/VSPLTH/VSPLTW/XXSPLTW/LXVDSX/etc.). |
1997 | bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) { |
1998 | assert(N->getValueType(0) == MVT::v16i8 && isPowerOf2_32(EltSize) && |
1999 | EltSize <= 8 && "Can only handle 1,2,4,8 byte element sizes"); |
2000 | |
2001 | // The consecutive indices need to specify an element, not part of two |
2002 | // different elements. So abandon ship early if this isn't the case. |
2003 | if (N->getMaskElt(0) % EltSize != 0) |
2004 | return false; |
2005 | |
2006 | // This is a splat operation if each element of the permute is the same, and |
2007 | // if the value doesn't reference the second vector. |
2008 | unsigned ElementBase = N->getMaskElt(0); |
2009 | |
2010 | // FIXME: Handle UNDEF elements too! |
2011 | if (ElementBase >= 16) |
2012 | return false; |
2013 | |
2014 | // Check that the indices are consecutive, in the case of a multi-byte element |
2015 | // splatted with a v16i8 mask. |
2016 | for (unsigned i = 1; i != EltSize; ++i) |
2017 | if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase)) |
2018 | return false; |
2019 | |
2020 | for (unsigned i = EltSize, e = 16; i != e; i += EltSize) { |
2021 | if (N->getMaskElt(i) < 0) continue; |
2022 | for (unsigned j = 0; j != EltSize; ++j) |
2023 | if (N->getMaskElt(i+j) != N->getMaskElt(j)) |
2024 | return false; |
2025 | } |
2026 | return true; |
2027 | } |
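// Editor's illustrative sketch (plain C++, guarded out of the build with
// #if 0; names are made up): a v16i8 mask splatting word element 2
// (EltSize = 4) repeats the byte group {8,9,10,11} four times, which is
// exactly the shape the checks above accept.
#if 0
#include <cassert>
int main() {
  int Mask[16];
  for (int i = 0; i != 16; ++i)
    Mask[i] = 8 + (i % 4); // {8,9,10,11, 8,9,10,11, ...}
  assert(Mask[0] % 4 == 0); // starts on an element boundary
  assert(Mask[0] < 16);     // references only the first vector
  for (int i = 4; i != 16; i += 4)
    for (int j = 0; j != 4; ++j)
      assert(Mask[i + j] == Mask[j]); // every group matches the first
  return 0;
}
#endif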
2028 | |
2029 | /// Check that the mask is shuffling N byte elements. Within each N byte |
2030 | /// element of the mask, the indices could be either in increasing or |
2031 | /// decreasing order as long as they are consecutive. |
2032 | /// \param[in] N the shuffle vector SD Node to analyze |
2033 | /// \param[in] Width the element width in bytes, could be 2/4/8/16 (HalfWord/ |
2034 | /// Word/DoubleWord/QuadWord). |
2035 | /// \param[in] StepLen the step between adjacent indices within an element: |
2036 | /// 1 if the mask is in increasing order, -1 if in decreasing order. |
2037 | /// \return true iff the mask is shuffling N byte elements. |
2038 | static bool isNByteElemShuffleMask(ShuffleVectorSDNode *N, unsigned Width, |
2039 | int StepLen) { |
2040 | assert((Width == 2 || Width == 4 || Width == 8 || Width == 16) && |
2041 | "Unexpected element width."); |
2042 | assert((StepLen == 1 || StepLen == -1) && "Unexpected step length."); |
2043 | |
2044 | unsigned NumOfElem = 16 / Width; |
2045 | unsigned MaskVal[16]; // Width is never greater than 16 |
2046 | for (unsigned i = 0; i < NumOfElem; ++i) { |
2047 | MaskVal[0] = N->getMaskElt(i * Width); |
2048 | if ((StepLen == 1) && (MaskVal[0] % Width)) { |
2049 | return false; |
2050 | } else if ((StepLen == -1) && ((MaskVal[0] + 1) % Width)) { |
2051 | return false; |
2052 | } |
2053 | |
2054 | for (unsigned int j = 1; j < Width; ++j) { |
2055 | MaskVal[j] = N->getMaskElt(i * Width + j); |
2056 | if (MaskVal[j] != MaskVal[j-1] + StepLen) { |
2057 | return false; |
2058 | } |
2059 | } |
2060 | } |
2061 | |
2062 | return true; |
2063 | } |
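// Editor's illustrative sketch (plain C++, guarded out of the build with
// #if 0; names are made up): with Width = 4 and StepLen = -1 the check
// above accepts the byte-reversed-word mask used by XXBRW:
// {3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12}.
#if 0
#include <cassert>
int main() {
  int Mask[16];
  for (int i = 0; i != 16; i += 4)
    for (int j = 0; j != 4; ++j)
      Mask[i + j] = i + 3 - j; // descending within each word
  for (int i = 0; i != 16; i += 4) {
    assert((Mask[i] + 1) % 4 == 0); // leading index ends an element
    for (int j = 1; j != 4; ++j)
      assert(Mask[i + j] == Mask[i + j - 1] - 1); // step of -1
  }
  return 0;
}
#endif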
2064 | |
2065 | bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, |
2066 | unsigned &InsertAtByte, bool &Swap, bool IsLE) { |
2067 | if (!isNByteElemShuffleMask(N, 4, 1)) |
2068 | return false; |
2069 | |
2070 | // Now we look at mask elements 0,4,8,12 |
2071 | unsigned M0 = N->getMaskElt(0) / 4; |
2072 | unsigned M1 = N->getMaskElt(4) / 4; |
2073 | unsigned M2 = N->getMaskElt(8) / 4; |
2074 | unsigned M3 = N->getMaskElt(12) / 4; |
2075 | unsigned LittleEndianShifts[] = { 2, 1, 0, 3 }; |
2076 | unsigned BigEndianShifts[] = { 3, 0, 1, 2 }; |
2077 | |
2078 | // Below, let H and L be arbitrary elements of the shuffle mask |
2079 | // where H is in the range [4,7] and L is in the range [0,3]. |
2080 | // H, 1, 2, 3 or L, 5, 6, 7 |
2081 | if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) || |
2082 | (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) { |
2083 | ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3]; |
2084 | InsertAtByte = IsLE ? 12 : 0; |
2085 | Swap = M0 < 4; |
2086 | return true; |
2087 | } |
2088 | // 0, H, 2, 3 or 4, L, 6, 7 |
2089 | if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) || |
2090 | (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) { |
2091 | ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3]; |
2092 | InsertAtByte = IsLE ? 8 : 4; |
2093 | Swap = M1 < 4; |
2094 | return true; |
2095 | } |
2096 | // 0, 1, H, 3 or 4, 5, L, 7 |
2097 | if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) || |
2098 | (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) { |
2099 | ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3]; |
2100 | InsertAtByte = IsLE ? 4 : 8; |
2101 | Swap = M2 < 4; |
2102 | return true; |
2103 | } |
2104 | // 0, 1, 2, H or 4, 5, 6, L |
2105 | if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) || |
2106 | (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) { |
2107 | ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3]; |
2108 | InsertAtByte = IsLE ? 0 : 12; |
2109 | Swap = M3 < 4; |
2110 | return true; |
2111 | } |
2112 | |
2113 | // If both vector operands for the shuffle are the same vector, the mask will |
2114 | // contain only elements from the first one and the second one will be undef. |
2115 | if (N->getOperand(1).isUndef()) { |
2116 | ShiftElts = 0; |
2117 | Swap = true; |
2118 | unsigned XXINSERTWSrcElem = IsLE ? 2 : 1; |
2119 | if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) { |
2120 | InsertAtByte = IsLE ? 12 : 0; |
2121 | return true; |
2122 | } |
2123 | if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) { |
2124 | InsertAtByte = IsLE ? 8 : 4; |
2125 | return true; |
2126 | } |
2127 | if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) { |
2128 | InsertAtByte = IsLE ? 4 : 8; |
2129 | return true; |
2130 | } |
2131 | if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) { |
2132 | InsertAtByte = IsLE ? 0 : 12; |
2133 | return true; |
2134 | } |
2135 | } |
2136 | |
2137 | return false; |
2138 | } |
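// Editor's illustrative sketch (plain C++, guarded out of the build with
// #if 0; names are made up): the word mask {5,1,2,3} on little-endian
// matches the "H, 1, 2, 3" pattern above: insert word 5 & 3 = 1 of the
// other input, so ShiftElts = LittleEndianShifts[1] = 1, InsertAtByte = 12,
// and no swap (the inserted word already comes from the second vector).
#if 0
#include <cassert>
int main() {
  unsigned M0 = 5, M1 = 1, M2 = 2, M3 = 3; // word-level mask elements
  unsigned LittleEndianShifts[] = {2, 1, 0, 3};
  assert(M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3); // "H, 1, 2, 3" case
  unsigned ShiftElts = LittleEndianShifts[M0 & 0x3];
  unsigned InsertAtByte = 12; // IsLE ? 12 : 0
  bool Swap = M0 < 4;
  assert(ShiftElts == 1 && InsertAtByte == 12 && !Swap);
  return 0;
}
#endif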
2139 | |
2140 | bool PPC::isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, |
2141 | bool &Swap, bool IsLE) { |
2142 | assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8"); |
2143 | // Ensure each byte index of the word is consecutive. |
2144 | if (!isNByteElemShuffleMask(N, 4, 1)) |
2145 | return false; |
2146 | |
2147 | // Now we look at mask elements 0,4,8,12, which are the beginning of words. |
2148 | unsigned M0 = N->getMaskElt(0) / 4; |
2149 | unsigned M1 = N->getMaskElt(4) / 4; |
2150 | unsigned M2 = N->getMaskElt(8) / 4; |
2151 | unsigned M3 = N->getMaskElt(12) / 4; |
2152 | |
2153 | // If both vector operands for the shuffle are the same vector, the mask will |
2154 | // contain only elements from the first one and the second one will be undef. |
2155 | if (N->getOperand(1).isUndef()) { |
2156 | assert(M0 < 4 && "Indexing into an undef vector?"); |
2157 | if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4) |
2158 | return false; |
2159 | |
2160 | ShiftElts = IsLE ? (4 - M0) % 4 : M0; |
2161 | Swap = false; |
2162 | return true; |
2163 | } |
2164 | |
2165 | // Ensure each word index of the ShuffleVector Mask is consecutive. |
2166 | if (M1 != (M0 + 1) % 8 || M2 != (M1 + 1) % 8 || M3 != (M2 + 1) % 8) |
2167 | return false; |
2168 | |
2169 | if (IsLE) { |
2170 | if (M0 == 0 || M0 == 7 || M0 == 6 || M0 == 5) { |
2171 | // Input vectors don't need to be swapped if the leading element |
2172 | // of the result is one of the 3 left elements of the second vector |
2173 | // (or if there is no shift to be done at all). |
2174 | Swap = false; |
2175 | ShiftElts = (8 - M0) % 8; |
2176 | } else if (M0 == 4 || M0 == 3 || M0 == 2 || M0 == 1) { |
2177 | // Input vectors need to be swapped if the leading element |
2178 | // of the result is one of the 3 left elements of the first vector |
2179 | // (or if we're shifting by 4 - thereby simply swapping the vectors). |
2180 | Swap = true; |
2181 | ShiftElts = (4 - M0) % 4; |
2182 | } |
2183 | |
2184 | return true; |
2185 | } else { // BE |
2186 | if (M0 == 0 || M0 == 1 || M0 == 2 || M0 == 3) { |
2187 | // Input vectors don't need to be swapped if the leading element |
2188 | // of the result is one of the 4 elements of the first vector. |
2189 | Swap = false; |
2190 | ShiftElts = M0; |
2191 | } else if (M0 == 4 || M0 == 5 || M0 == 6 || M0 == 7) { |
2192 | // Input vectors need to be swapped if the leading element |
2193 | // of the result is one of the 4 elements of the right vector. |
2194 | Swap = true; |
2195 | ShiftElts = M0 - 4; |
2196 | } |
2197 | |
2198 | return true; |
2199 | } |
2200 | } |
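// Editor's illustrative sketch (plain C++, guarded out of the build with
// #if 0; names are made up): on big-endian, the word mask {1,2,3,4} is a
// left shift by one word across the two concatenated inputs, so the routine
// above reports Swap = false and ShiftElts = 1.
#if 0
#include <cassert>
int main() {
  unsigned M0 = 1, M1 = 2, M2 = 3, M3 = 4; // word-level mask elements
  // Consecutiveness check, as in the two-input path above.
  assert(M1 == (M0 + 1) % 8 && M2 == (M1 + 1) % 8 && M3 == (M2 + 1) % 8);
  bool Swap = !(M0 <= 3); // leading word from the first input: no swap
  unsigned ShiftElts = M0;
  assert(!Swap && ShiftElts == 1);
  return 0;
}
#endif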
2201 | |
2202 | bool static isXXBRShuffleMaskHelper(ShuffleVectorSDNode *N, int Width) { |
2203 | assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8"); |
2204 | |
2205 | if (!isNByteElemShuffleMask(N, Width, -1)) |
2206 | return false; |
2207 | |
2208 | for (int i = 0; i < 16; i += Width) |
2209 | if (N->getMaskElt(i) != i + Width - 1) |
2210 | return false; |
2211 | |
2212 | return true; |
2213 | } |
2214 | |
2215 | bool PPC::isXXBRHShuffleMask(ShuffleVectorSDNode *N) { |
2216 | return isXXBRShuffleMaskHelper(N, 2); |
2217 | } |
2218 | |
2219 | bool PPC::isXXBRWShuffleMask(ShuffleVectorSDNode *N) { |
2220 | return isXXBRShuffleMaskHelper(N, 4); |
2221 | } |
2222 | |
2223 | bool PPC::isXXBRDShuffleMask(ShuffleVectorSDNode *N) { |
2224 | return isXXBRShuffleMaskHelper(N, 8); |
2225 | } |
2226 | |
2227 | bool PPC::isXXBRQShuffleMask(ShuffleVectorSDNode *N) { |
2228 | return isXXBRShuffleMaskHelper(N, 16); |
2229 | } |
2230 | |
2231 | /// Can node \p N be lowered to an XXPERMDI instruction? If so, set \p Swap |
2232 | /// if the inputs to the instruction should be swapped and set \p DM to the |
2233 | /// value for the immediate. |
2234 | /// Specifically, set \p Swap to true only if \p N can be lowered to XXPERMDI |
2235 | /// AND element 0 of the result comes from the first input (LE) or second input |
2236 | /// (BE). Set \p DM to the calculated result (0-3) only if \p N can be lowered. |
2237 | /// \return true iff the given mask of shuffle node \p N is a XXPERMDI shuffle |
2238 | /// mask. |
2239 | bool PPC::isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &DM, |
2240 | bool &Swap, bool IsLE) { |
2241 | assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8"); |
2242 | |
2243 | // Ensure each byte index of the double word is consecutive. |
2244 | if (!isNByteElemShuffleMask(N, 8, 1)) |
2245 | return false; |
2246 | |
2247 | unsigned M0 = N->getMaskElt(0) / 8; |
2248 | unsigned M1 = N->getMaskElt(8) / 8; |
2249 | assert(((M0 | M1) < 4) && "A mask element out of bounds?"); |
2250 | |
2251 | // If both vector operands for the shuffle are the same vector, the mask will |
2252 | // contain only elements from the first one and the second one will be undef. |
2253 | if (N->getOperand(1).isUndef()) { |
2254 | if ((M0 | M1) < 2) { |
2255 | DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1); |
2256 | Swap = false; |
2257 | return true; |
2258 | } else |
2259 | return false; |
2260 | } |
2261 | |
2262 | if (IsLE) { |
2263 | if (M0 > 1 && M1 < 2) { |
2264 | Swap = false; |
2265 | } else if (M0 < 2 && M1 > 1) { |
2266 | M0 = (M0 + 2) % 4; |
2267 | M1 = (M1 + 2) % 4; |
2268 | Swap = true; |
2269 | } else |
2270 | return false; |
2271 | |
2272 | // Note: if control flow comes here that means Swap is already set above |
2273 | DM = (((~M1) & 1) << 1) + ((~M0) & 1); |
2274 | return true; |
2275 | } else { // BE |
2276 | if (M0 < 2 && M1 > 1) { |
2277 | Swap = false; |
2278 | } else if (M0 > 1 && M1 < 2) { |
2279 | M0 = (M0 + 2) % 4; |
2280 | M1 = (M1 + 2) % 4; |
2281 | Swap = true; |
2282 | } else |
2283 | return false; |
2284 | |
2285 | // Note: if control flow comes here that means Swap is already set above |
2286 | DM = (M0 << 1) + (M1 & 1); |
2287 | return true; |
2288 | } |
2289 | } |
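// Editor's illustrative sketch (plain C++, guarded out of the build with
// #if 0; names are made up): on big-endian, a mask whose first doubleword
// comes from the LHS (M0 = 0) and whose second comes from the RHS (M1 = 3)
// needs no swap and encodes the XXPERMDI immediate
// DM = (M0 << 1) + (M1 & 1) = 1.
#if 0
#include <cassert>
int main() {
  unsigned M0 = 0, M1 = 3; // doubleword indices into the 32-byte concat
  assert(M0 < 2 && M1 > 1); // BE case: no operand swap required
  bool Swap = false;
  unsigned DM = (M0 << 1) + (M1 & 1);
  assert(DM == 1 && !Swap);
  return 0;
}
#endif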
2290 | |
2291 | |
2292 | /// getSplatIdxForPPCMnemonics - Return the splat index as a value that is |
2293 | /// appropriate for PPC mnemonics (which have a big endian bias - namely |
2294 | /// elements are counted from the left of the vector register). |
2295 | unsigned PPC::getSplatIdxForPPCMnemonics(SDNode *N, unsigned EltSize, |
2296 | SelectionDAG &DAG) { |
2297 | ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); |
2298 | assert(isSplatShuffleMask(SVOp, EltSize)); |
2299 | if (DAG.getDataLayout().isLittleEndian()) |
2300 | return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize); |
2301 | else |
2302 | return SVOp->getMaskElt(0) / EltSize; |
2303 | } |
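// Editor's illustrative sketch (plain C++, guarded out of the build with
// #if 0; names are made up): for a word splat (EltSize = 4) whose mask
// starts at byte 8 (array element 2), the big-endian-biased mnemonic index
// is 2 on BE but (16/4) - 1 - 2 = 1 on LE, as computed above.
#if 0
#include <cassert>
int main() {
  unsigned EltSize = 4, FirstMaskByte = 8;
  unsigned BEIdx = FirstMaskByte / EltSize;                      // 2
  unsigned LEIdx = (16 / EltSize) - 1 - FirstMaskByte / EltSize; // 1
  assert(BEIdx == 2 && LEIdx == 1);
  return 0;
}
#endif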
2304 | |
2305 | /// get_VSPLTI_elt - If this is a build_vector of constants which can be formed |
2306 | /// by using a vspltis[bhw] instruction of the specified element size, return |
2307 | /// the constant being splatted. The ByteSize field indicates the number of |
2308 | /// bytes of each element [124] -> [bhw]. |
2309 | SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) { |
2310 | SDValue OpVal(nullptr, 0); |
2311 | |
2312 | // If ByteSize of the splat is bigger than the element size of the |
2313 | // build_vector, then we have a case where we are checking for a splat where |
2314 | // multiple elements of the buildvector are folded together into a single |
2315 | // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
2316 | unsigned EltSize = 16/N->getNumOperands(); |
2317 | if (EltSize < ByteSize) { |
2318 | unsigned Multiple = ByteSize/EltSize; // Number of BV entries per spltval. |
2319 | SDValue UniquedVals[4]; |
2320 | assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");
2321 | |
2322 | // See if all of the elements in the buildvector agree across each chunk.
2323 | for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { |
2324 | if (N->getOperand(i).isUndef()) continue; |
2325 | // If the element isn't a constant, bail fully out. |
2326 | if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue(); |
2327 | |
2328 | if (!UniquedVals[i&(Multiple-1)].getNode()) |
2329 | UniquedVals[i&(Multiple-1)] = N->getOperand(i); |
2330 | else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i)) |
2331 | return SDValue(); // no match. |
2332 | } |
2333 | |
2334 | // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains |
2335 | // either constant or undef values that are identical for each chunk. See |
2336 | // if these chunks can form into a larger vspltis*. |
2337 | |
2338 | // Check to see if all of the leading entries are either 0 or -1. If |
2339 | // neither, then this won't fit into the immediate field. |
2340 | bool LeadingZero = true; |
2341 | bool LeadingOnes = true; |
2342 | for (unsigned i = 0; i != Multiple-1; ++i) { |
2343 | if (!UniquedVals[i].getNode()) continue; // Must have been undefs. |
2344 | |
2345 | LeadingZero &= isNullConstant(UniquedVals[i]); |
2346 | LeadingOnes &= isAllOnesConstant(UniquedVals[i]); |
2347 | } |
2348 | // Finally, check the least significant entry. |
2349 | if (LeadingZero) { |
2350 | if (!UniquedVals[Multiple-1].getNode()) |
2351 | return DAG.getTargetConstant(0, SDLoc(N), MVT::i32); // 0,0,0,undef |
2352 | int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue(); |
2353 | if (Val < 16) // 0,0,0,4 -> vspltisw(4) |
2354 | return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32); |
2355 | } |
2356 | if (LeadingOnes) { |
2357 | if (!UniquedVals[Multiple-1].getNode()) |
2358 | return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef |
2359 | int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
2360 | if (Val >= -16) // -1,-1,-1,-2 -> vspltisw(-2) |
2361 | return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32); |
2362 | } |
2363 | |
2364 | return SDValue(); |
2365 | } |
2366 | |
2367 | // Check to see if this buildvec has a single non-undef value in its elements. |
2368 | for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { |
2369 | if (N->getOperand(i).isUndef()) continue; |
2370 | if (!OpVal.getNode()) |
2371 | OpVal = N->getOperand(i); |
2372 | else if (OpVal != N->getOperand(i)) |
2373 | return SDValue(); |
2374 | } |
2375 | |
2376 | if (!OpVal.getNode()) return SDValue(); // All UNDEF: use implicit def. |
2377 | |
2378 | unsigned ValSizeInBytes = EltSize; |
2379 | uint64_t Value = 0; |
2380 | if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) { |
2381 | Value = CN->getZExtValue(); |
2382 | } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) { |
2383 | assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
2384 | Value = FloatToBits(CN->getValueAPF().convertToFloat()); |
2385 | } |
2386 | |
2387 | // If the splat value is larger than the element value, then we can never do
2388 | // this splat. The only case where we could fit the replicated bits into our
2389 | // immediate field would be zero, and we prefer to use vxor for that.
2390 | if (ValSizeInBytes < ByteSize) return SDValue(); |
2391 | |
2392 | // If the element value is larger than the splat value, check if it consists |
2393 | // of a repeated bit pattern of size ByteSize. |
2394 | if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8)) |
2395 | return SDValue(); |
2396 | |
2397 | // Properly sign extend the value. |
2398 | int MaskVal = SignExtend32(Value, ByteSize * 8); |
2399 | |
2400 | // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros. |
2401 | if (MaskVal == 0) return SDValue(); |
2402 | |
2403 | // Finally, if this value fits in a 5-bit sext field, return it.
2404 | if (SignExtend32<5>(MaskVal) == MaskVal) |
2405 | return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32); |
2406 | return SDValue(); |
2407 | } |
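     |
     | // Worked example (editor's sketch): a v8i16 build_vector whose lanes are
     | // all 0x0101, queried with ByteSize = 1 (vspltisb). Here ValSizeInBytes
     | // is 2 > ByteSize, APInt(16, 0x0101).isSplat(8) holds, and MaskVal =
     | // SignExtend32(0x0101, 8) = 1 fits the 5-bit signed field, so the routine
     | // returns 1; the splat can be materialized as "vspltisb 1".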
2408 | |
2409 | //===----------------------------------------------------------------------===// |
2410 | // Addressing Mode Selection |
2411 | //===----------------------------------------------------------------------===// |
2412 | |
2413 | /// isIntS16Immediate - This method tests to see if the node is either a 32-bit
2414 | /// or 64-bit immediate, and if the value can be accurately represented as a
2415 | /// sign extension from a 16-bit value. If so, this returns true and sets \p Imm
2416 | /// to the immediate.
2417 | bool llvm::isIntS16Immediate(SDNode *N, int16_t &Imm) { |
2418 | if (!isa<ConstantSDNode>(N)) |
2419 | return false; |
2420 | |
2421 | Imm = (int16_t)cast<ConstantSDNode>(N)->getZExtValue(); |
2422 | if (N->getValueType(0) == MVT::i32) |
2423 | return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue(); |
2424 | else |
2425 | return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue(); |
2426 | } |
2427 | bool llvm::isIntS16Immediate(SDValue Op, int16_t &Imm) { |
2428 | return isIntS16Immediate(Op.getNode(), Imm); |
2429 | } |
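     |
     | // Example (illustrative constants): for an i32 node holding 0xFFFF8000,
     | // Imm = (int16_t)0x8000 = -32768 and (int32_t)0xFFFF8000 == -32768, so
     | // this is a valid S16 immediate. For 0x00008000 the truncation also gives
     | // -32768, but (int32_t)0x8000 == 32768, so the round-trip check fails and
     | // the function returns false.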
2430 | |
2431 | |
2432 | /// SelectAddressEVXRegReg - Given the specified address, check to see if it can |
2433 | /// be represented as an indexed [r+r] operation. |
2434 | bool PPCTargetLowering::SelectAddressEVXRegReg(SDValue N, SDValue &Base, |
2435 | SDValue &Index, |
2436 | SelectionDAG &DAG) const { |
2437 | for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end(); |
2438 | UI != E; ++UI) { |
2439 | if (MemSDNode *Memop = dyn_cast<MemSDNode>(*UI)) { |
2440 | if (Memop->getMemoryVT() == MVT::f64) { |
2441 | Base = N.getOperand(0); |
2442 | Index = N.getOperand(1); |
2443 | return true; |
2444 | } |
2445 | } |
2446 | } |
2447 | return false; |
2448 | } |
2449 | |
2450 | /// isIntS34Immediate - This method tests whether the value of the given node
2451 | /// can be accurately represented as a sign extension from a 34-bit value. If
2452 | /// so, this returns true and sets \p Imm to the immediate.
2453 | bool llvm::isIntS34Immediate(SDNode *N, int64_t &Imm) { |
2454 | if (!isa<ConstantSDNode>(N)) |
2455 | return false; |
2456 | |
2457 | Imm = (int64_t)cast<ConstantSDNode>(N)->getZExtValue(); |
2458 | return isInt<34>(Imm); |
2459 | } |
2460 | bool llvm::isIntS34Immediate(SDValue Op, int64_t &Imm) { |
2461 | return isIntS34Immediate(Op.getNode(), Imm); |
2462 | } |
2463 | |
2464 | /// SelectAddressRegReg - Given the specified address, check to see if it
2465 | /// can be represented as an indexed [r+r] operation. Returns false if it |
2466 | /// can be more efficiently represented as [r+imm]. If \p EncodingAlignment is |
2467 | /// non-zero and N can be represented by a base register plus a signed 16-bit |
2468 | /// displacement, make a more precise judgement by checking (displacement % \p |
2469 | /// EncodingAlignment). |
2470 | bool PPCTargetLowering::SelectAddressRegReg( |
2471 | SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG, |
2472 | MaybeAlign EncodingAlignment) const { |
2473 | // If we have a PC Relative target flag don't select as [reg+reg]. It will be |
2474 | // a [pc+imm]. |
2475 | if (SelectAddressPCRel(N, Base)) |
2476 | return false; |
2477 | |
2478 | int16_t Imm = 0; |
2479 | if (N.getOpcode() == ISD::ADD) { |
2480 | // SPE f64 loads/stores cannot handle a 16-bit offset; they support only
2481 | // 8-bit offsets, so check for the EVX [r+r] form first.
2482 | if (hasSPE() && SelectAddressEVXRegReg(N, Base, Index, DAG)) |
2483 | return true; |
2484 | if (isIntS16Immediate(N.getOperand(1), Imm) && |
2485 | (!EncodingAlignment || isAligned(*EncodingAlignment, Imm))) |
2486 | return false; // r+i |
2487 | if (N.getOperand(1).getOpcode() == PPCISD::Lo) |
2488 | return false; // r+i |
2489 | |
2490 | Base = N.getOperand(0); |
2491 | Index = N.getOperand(1); |
2492 | return true; |
2493 | } else if (N.getOpcode() == ISD::OR) { |
2494 | if (isIntS16Immediate(N.getOperand(1), Imm) && |
2495 | (!EncodingAlignment || isAligned(*EncodingAlignment, Imm))) |
2496 | return false; // Prefer r+i if we can fold it.
2497 | |
2498 | // If this is an or of disjoint bitfields, we can codegen this as an add |
2499 | // (for better address arithmetic) if the LHS and RHS of the OR are provably |
2500 | // disjoint. |
2501 | KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0)); |
2502 | |
2503 | if (LHSKnown.Zero.getBoolValue()) { |
2504 | KnownBits RHSKnown = DAG.computeKnownBits(N.getOperand(1)); |
2505 | // If all of the bits are known zero on the LHS or RHS, the add won't |
2506 | // carry. |
2507 | if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) { |
2508 | Base = N.getOperand(0); |
2509 | Index = N.getOperand(1); |
2510 | return true; |
2511 | } |
2512 | } |
2513 | } |
2514 | |
2515 | return false; |
2516 | } |
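     |
     | // Sketch of the disjoint-OR case above (assumed values): if every bit is
     | // known zero on at least one side, e.g. the LHS is known to be a multiple
     | // of 16 and the RHS is known to fit in the low four bits, then no bit
     | // column can carry, OR and ADD produce identical results, and the address
     | // is safely selected as an indexed [r+r] form.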
2517 | |
2518 | // If we happen to be doing an i64 load or store into a stack slot that has |
2519 | // less than a 4-byte alignment, then the frame-index elimination may need to |
2520 | // use an indexed load or store instruction (because the offset may not be a |
2521 | // multiple of 4). The extra register needed to hold the offset comes from the |
2522 | // register scavenger, and it is possible that the scavenger will need to use |
2523 | // an emergency spill slot. As a result, we need to make sure that a spill slot |
2524 | // is allocated when doing an i64 load/store into a less-than-4-byte-aligned |
2525 | // stack slot. |
2526 | static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) { |
2527 | // FIXME: This does not handle the LWA case. |
2528 | if (VT != MVT::i64) |
2529 | return; |
2530 | |
2531 | // NOTE: We'll exclude negative FIs here, which come from argument |
2532 | // lowering, because there are no known test cases triggering this problem |
2533 | // using packed structures (or similar). We can remove this exclusion if |
2534 | // we find such a test case. The reason why this is so test-case driven is |
2535 | // because this entire 'fixup' is only to prevent crashes (from the |
2536 | // register scavenger) on not-really-valid inputs. For example, if we have: |
2537 | // %a = alloca i1 |
2538 | // %b = bitcast i1* %a to i64* |
2539 | // store i64 0, i64* %b
2540 | // then the store should really be marked as 'align 1', but is not. If it |
2541 | // were marked as 'align 1' then the indexed form would have been |
2542 | // instruction-selected initially, and the problem this 'fixup' is preventing |
2543 | // won't happen regardless. |
2544 | if (FrameIdx < 0) |
2545 | return; |
2546 | |
2547 | MachineFunction &MF = DAG.getMachineFunction(); |
2548 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
2549 | |
2550 | if (MFI.getObjectAlign(FrameIdx) >= Align(4)) |
2551 | return; |
2552 | |
2553 | PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); |
2554 | FuncInfo->setHasNonRISpills(); |
2555 | } |
2556 | |
2557 | /// Returns true if the address N can be represented by a base register plus |
2558 | /// a signed 16-bit displacement [r+imm], and if it is not better |
2559 | /// represented as reg+reg. If \p EncodingAlignment is non-zero, only accept |
2560 | /// displacements that are multiples of that value. |
2561 | bool PPCTargetLowering::SelectAddressRegImm( |
2562 | SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG, |
2563 | MaybeAlign EncodingAlignment) const { |
2564 | // FIXME dl should come from parent load or store, not from address |
2565 | SDLoc dl(N); |
2566 | |
2567 | // If we have a PC Relative target flag don't select as [reg+imm]. It will be |
2568 | // a [pc+imm]. |
2569 | if (SelectAddressPCRel(N, Base)) |
2570 | return false; |
2571 | |
2572 | // If this can be more profitably realized as r+r, fail. |
2573 | if (SelectAddressRegReg(N, Disp, Base, DAG, EncodingAlignment)) |
2574 | return false; |
2575 | |
2576 | if (N.getOpcode() == ISD::ADD) { |
2577 | int16_t imm = 0; |
2578 | if (isIntS16Immediate(N.getOperand(1), imm) && |
2579 | (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) { |
2580 | Disp = DAG.getTargetConstant(imm, dl, N.getValueType()); |
2581 | if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { |
2582 | Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); |
2583 | fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); |
2584 | } else { |
2585 | Base = N.getOperand(0); |
2586 | } |
2587 | return true; // [r+i] |
2588 | } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) { |
2589 | // Match LOAD (ADD (X, Lo(G))). |
2590 | assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue() &&
2591 | "Cannot handle constant offsets yet!");
2592 | Disp = N.getOperand(1).getOperand(0); // The global address. |
2593 | assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
2594 | Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
2595 | Disp.getOpcode() == ISD::TargetConstantPool ||
2596 | Disp.getOpcode() == ISD::TargetJumpTable);
2597 | Base = N.getOperand(0); |
2598 | return true; // [&g+r] |
2599 | } |
2600 | } else if (N.getOpcode() == ISD::OR) { |
2601 | int16_t imm = 0; |
2602 | if (isIntS16Immediate(N.getOperand(1), imm) && |
2603 | (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) { |
2604 | // If this is an or of disjoint bitfields, we can codegen this as an add |
2605 | // (for better address arithmetic) if the LHS and RHS of the OR are |
2606 | // provably disjoint. |
2607 | KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0)); |
2608 | |
2609 | if ((LHSKnown.Zero.getZExtValue()|~(uint64_t)imm) == ~0ULL) { |
2610 | // If all of the bits are known zero on the LHS or RHS, the add won't |
2611 | // carry. |
2612 | if (FrameIndexSDNode *FI = |
2613 | dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { |
2614 | Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); |
2615 | fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); |
2616 | } else { |
2617 | Base = N.getOperand(0); |
2618 | } |
2619 | Disp = DAG.getTargetConstant(imm, dl, N.getValueType()); |
2620 | return true; |
2621 | } |
2622 | } |
2623 | } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) { |
2624 | // Loading from a constant address. |
2625 | |
2626 | // If this address fits entirely in a 16-bit sext immediate field, codegen |
2627 | // this as "d, 0" |
2628 | int16_t Imm; |
2629 | if (isIntS16Immediate(CN, Imm) && |
2630 | (!EncodingAlignment || isAligned(*EncodingAlignment, Imm))) { |
2631 | Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0)); |
2632 | Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO, |
2633 | CN->getValueType(0)); |
2634 | return true; |
2635 | } |
2636 | |
2637 | // Handle 32-bit sext immediates with LIS + addr mode. |
2638 | if ((CN->getValueType(0) == MVT::i32 || |
2639 | (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) && |
2640 | (!EncodingAlignment || |
2641 | isAligned(*EncodingAlignment, CN->getZExtValue()))) { |
2642 | int Addr = (int)CN->getZExtValue(); |
2643 | |
2644 | // Otherwise, break this down into an LIS + disp. |
2645 | Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32); |
2646 | |
2647 | Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl, |
2648 | MVT::i32); |
2649 | unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8; |
2650 | Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0); |
2651 | return true; |
2652 | } |
2653 | } |
2654 | |
2655 | Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout())); |
2656 | if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) { |
2657 | Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); |
2658 | fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); |
2659 | } else |
2660 | Base = N; |
2661 | return true; // [r+0] |
2662 | } |
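     |
     | // Worked decomposition (hypothetical address): for Addr = 0x12348004 the
     | // low half is (short)0x8004 = -32764, so Disp = -32764 and Base becomes
     | // (0x12348004 - (-32764)) >> 16 = 0x1235. An "lis" of 0x1235 yields
     | // 0x12350000, and adding the -32764 displacement reconstructs 0x12348004.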
2663 | |
2664 | /// Similar to the 16-bit case but for instructions that take a 34-bit |
2665 | /// displacement field (prefixed loads/stores). |
2666 | bool PPCTargetLowering::SelectAddressRegImm34(SDValue N, SDValue &Disp, |
2667 | SDValue &Base, |
2668 | SelectionDAG &DAG) const { |
2669 | // Only on 64-bit targets. |
2670 | if (N.getValueType() != MVT::i64) |
2671 | return false; |
2672 | |
2673 | SDLoc dl(N); |
2674 | int64_t Imm = 0; |
2675 | |
2676 | if (N.getOpcode() == ISD::ADD) { |
2677 | if (!isIntS34Immediate(N.getOperand(1), Imm)) |
2678 | return false; |
2679 | Disp = DAG.getTargetConstant(Imm, dl, N.getValueType()); |
2680 | if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) |
2681 | Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); |
2682 | else |
2683 | Base = N.getOperand(0); |
2684 | return true; |
2685 | } |
2686 | |
2687 | if (N.getOpcode() == ISD::OR) { |
2688 | if (!isIntS34Immediate(N.getOperand(1), Imm)) |
2689 | return false; |
2690 | // If this is an or of disjoint bitfields, we can codegen this as an add |
2691 | // (for better address arithmetic) if the LHS and RHS of the OR are |
2692 | // provably disjoint. |
2693 | KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0)); |
2694 | if ((LHSKnown.Zero.getZExtValue() | ~(uint64_t)Imm) != ~0ULL) |
2695 | return false; |
2696 | if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) |
2697 | Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); |
2698 | else |
2699 | Base = N.getOperand(0); |
2700 | Disp = DAG.getTargetConstant(Imm, dl, N.getValueType()); |
2701 | return true; |
2702 | } |
2703 | |
2704 | if (isIntS34Immediate(N, Imm)) { // If the address is a 34-bit const. |
2705 | Disp = DAG.getTargetConstant(Imm, dl, N.getValueType()); |
2706 | Base = DAG.getRegister(PPC::ZERO8, N.getValueType()); |
2707 | return true; |
2708 | } |
2709 | |
2710 | return false; |
2711 | } |
2712 | |
2713 | /// SelectAddressRegRegOnly - Given the specified address, force it to be
2714 | /// represented as an indexed [r+r] operation. |
2715 | bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base, |
2716 | SDValue &Index, |
2717 | SelectionDAG &DAG) const { |
2718 | // Check to see if we can easily represent this as an [r+r] address. This |
2719 | // will fail if it thinks that the address is more profitably represented as |
2720 | // reg+imm, e.g. where imm = 0. |
2721 | if (SelectAddressRegReg(N, Base, Index, DAG)) |
2722 | return true; |
2723 | |
2724 | // If the address is the result of an add, we will utilize the fact that the |
2725 | // address calculation includes an implicit add. However, we can reduce |
2726 | // register pressure if we do not materialize a constant just for use as the |
2727 | // index register. We therefore only split the add when it is not an add of
2728 | // a value and a 16-bit signed constant where both operands have a single use.
2729 | int16_t imm = 0; |
2730 | if (N.getOpcode() == ISD::ADD && |
2731 | (!isIntS16Immediate(N.getOperand(1), imm) || |
2732 | !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) { |
2733 | Base = N.getOperand(0); |
2734 | Index = N.getOperand(1); |
2735 | return true; |
2736 | } |
2737 | |
2738 | // Otherwise, do it the hard way, using R0 as the base register. |
2739 | Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO, |
2740 | N.getValueType()); |
2741 | Index = N; |
2742 | return true; |
2743 | } |
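     |
     | // Illustration of the heuristic above (editor's example): for an address
     | // "add x, 12" where the add and both operands have single uses, splitting
     | // into Base = x, Index = 12 would force a "li rI, 12" just to occupy the
     | // index register; instead the whole add is kept as the index and the
     | // hard-wired zero register serves as the base.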
2744 | |
2745 | template <typename Ty> static bool isValidPCRelNode(SDValue N) { |
2746 | Ty *PCRelCand = dyn_cast<Ty>(N); |
2747 | return PCRelCand && (PCRelCand->getTargetFlags() & PPCII::MO_PCREL_FLAG); |
2748 | } |
2749 | |
2750 | /// Returns true if this address is a PC Relative address. |
2751 | /// PC Relative addresses are marked with the flag PPCII::MO_PCREL_FLAG |
2752 | /// or if the node opcode is PPCISD::MAT_PCREL_ADDR. |
2753 | bool PPCTargetLowering::SelectAddressPCRel(SDValue N, SDValue &Base) const { |
2754 | // This is a materialize PC Relative node. Always select this as PC Relative. |
2755 | Base = N; |
2756 | if (N.getOpcode() == PPCISD::MAT_PCREL_ADDR) |
2757 | return true; |
2758 | if (isValidPCRelNode<ConstantPoolSDNode>(N) || |
2759 | isValidPCRelNode<GlobalAddressSDNode>(N) || |
2760 | isValidPCRelNode<JumpTableSDNode>(N) || |
2761 | isValidPCRelNode<BlockAddressSDNode>(N)) |
2762 | return true; |
2763 | return false; |
2764 | } |
2765 | |
2766 | /// Returns true if we should use a direct load into vector instruction |
2767 | /// (such as lxsd or lfd), instead of a load into gpr + direct move sequence. |
2768 | static bool usePartialVectorLoads(SDNode *N, const PPCSubtarget& ST) { |
2769 | |
2770 | // If there are any uses other than scalar-to-vector, then we should keep
2771 | // it as a scalar load -> direct move pattern to prevent multiple
2772 | // loads.
2773 | LoadSDNode *LD = dyn_cast<LoadSDNode>(N); |
2774 | if (!LD) |
2775 | return false; |
2776 | |
2777 | EVT MemVT = LD->getMemoryVT(); |
2778 | if (!MemVT.isSimple()) |
2779 | return false; |
2780 | switch(MemVT.getSimpleVT().SimpleTy) { |
2781 | case MVT::i64: |
2782 | break; |
2783 | case MVT::i32: |
2784 | if (!ST.hasP8Vector()) |
2785 | return false; |
2786 | break; |
2787 | case MVT::i16: |
2788 | case MVT::i8: |
2789 | if (!ST.hasP9Vector()) |
2790 | return false; |
2791 | break; |
2792 | default: |
2793 | return false; |
2794 | } |
2795 | |
2796 | SDValue LoadedVal(N, 0); |
2797 | if (!LoadedVal.hasOneUse()) |
2798 | return false; |
2799 | |
2800 | for (SDNode::use_iterator UI = LD->use_begin(), UE = LD->use_end(); |
2801 | UI != UE; ++UI) |
2802 | if (UI.getUse().get().getResNo() == 0 && |
2803 | UI->getOpcode() != ISD::SCALAR_TO_VECTOR && |
2804 | UI->getOpcode() != PPCISD::SCALAR_TO_VECTOR_PERMUTED) |
2805 | return false; |
2806 | |
2807 | return true; |
2808 | } |
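     |
     | // Assumed IR shape that benefits from this (editor's sketch):
     | //   %v   = load i64, i64* %p
     | //   %vec = insertelement <2 x i64> undef, i64 %v, i32 0
     | // When %v has no other users, the load can be selected directly as lxsd
     | // into a vector register instead of an ld followed by a direct move.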
2809 | |
2810 | /// getPreIndexedAddressParts - returns true, and sets the base pointer,
2811 | /// offset pointer, and addressing mode by reference, if the node's address
2812 | /// can be legally represented as a pre-indexed load/store address.
2813 | bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, |
2814 | SDValue &Offset, |
2815 | ISD::MemIndexedMode &AM, |
2816 | SelectionDAG &DAG) const { |
2817 | if (DisablePPCPreinc) return false; |
2818 | |
2819 | bool isLoad = true; |
2820 | SDValue Ptr; |
2821 | EVT VT; |
2822 | unsigned Alignment; |
2823 | if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { |
2824 | Ptr = LD->getBasePtr(); |
2825 | VT = LD->getMemoryVT(); |
2826 | Alignment = LD->getAlignment(); |
2827 | } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { |
2828 | Ptr = ST->getBasePtr(); |
2829 | VT = ST->getMemoryVT(); |
2830 | Alignment = ST->getAlignment(); |
2831 | isLoad = false; |
2832 | } else |
2833 | return false; |
2834 | |
2835 | // Do not generate pre-inc forms for specific loads that feed scalar_to_vector
2836 | // instructions, because we can fold these into a more efficient instruction
2837 | // instead (such as LXSD).
2838 | if (isLoad && usePartialVectorLoads(N, Subtarget)) { |
2839 | return false; |
2840 | } |
2841 | |
2842 | // PowerPC doesn't have preinc load/store instructions for vectors |
2843 | if (VT.isVector()) |
2844 | return false; |
2845 | |
2846 | if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) { |
2847 | // Common code will reject creating a pre-inc form if the base pointer |
2848 | // is a frame index, or if N is a store and the base pointer is either |
2849 | // the same as or a predecessor of the value being stored. Check for |
2850 | // those situations here, and try with swapped Base/Offset instead. |
2851 | bool Swap = false; |
2852 | |
2853 | if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base)) |
2854 | Swap = true; |
2855 | else if (!isLoad) { |
2856 | SDValue Val = cast<StoreSDNode>(N)->getValue(); |
2857 | if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode())) |
2858 | Swap = true; |
2859 | } |
2860 | |
2861 | if (Swap) |
2862 | std::swap(Base, Offset); |
2863 | |
2864 | AM = ISD::PRE_INC; |
2865 | return true; |
2866 | } |
2867 | |
2868 | // LDU/STU can only handle immediates that are a multiple of 4. |
2869 | if (VT != MVT::i64) { |
2870 | if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, None)) |
2871 | return false; |
2872 | } else { |
2873 | // LDU/STU need an address with at least 4-byte alignment. |
2874 | if (Alignment < 4) |
2875 | return false; |
2876 | |
2877 | if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, Align(4))) |
2878 | return false; |
2879 | } |
2880 | |
2881 | if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { |
2882 | // PPC64 doesn't have lwau, but it does have lwaux. Reject preinc load of |
2883 | // sext i32 to i64 when addr mode is r+i. |
2884 | if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 && |
2885 | LD->getExtensionType() == ISD::SEXTLOAD && |
2886 | isa<ConstantSDNode>(Offset)) |
2887 | return false; |
2888 | } |
2889 | |
2890 | AM = ISD::PRE_INC; |
2891 | return true; |
2892 | } |
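     |
     | // Shape of the selected form (illustrative): a load whose address is also
     | // updated, e.g. "val = *(p + 8); p += 8;", becomes a single
     | //   ldu rD, 8(rA)
     | // loading the doubleword and writing the incremented effective address
     | // back into rA, which is exactly what ISD::PRE_INC models.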
2893 | |
2894 | //===----------------------------------------------------------------------===// |
2895 | // LowerOperation implementation |
2896 | //===----------------------------------------------------------------------===// |
2897 | |
2898 | /// Set HiOpFlags and LoOpFlags to the target MO flags used for label
2899 | /// references, adding the PIC flag when the PIC relocation model is in use.
2900 | static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget, |
2901 | unsigned &HiOpFlags, unsigned &LoOpFlags, |
2902 | const GlobalValue *GV = nullptr) { |
2903 | HiOpFlags = PPCII::MO_HA; |
2904 | LoOpFlags = PPCII::MO_LO; |
2905 | |
2906 | // Don't use the pic base if not in PIC relocation model. |
2907 | if (IsPIC) { |
2908 | HiOpFlags |= PPCII::MO_PIC_FLAG; |
2909 | LoOpFlags |= PPCII::MO_PIC_FLAG; |
2910 | } |
2911 | } |
2912 | |
2913 | static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC, |
2914 | SelectionDAG &DAG) { |
2915 | SDLoc DL(HiPart); |
2916 | EVT PtrVT = HiPart.getValueType(); |
2917 | SDValue Zero = DAG.getConstant(0, DL, PtrVT); |
2918 | |
2919 | SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero); |
2920 | SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero); |
2921 | |
2922 | // With PIC, the first instruction is actually "GR+hi(&G)". |
2923 | if (isPIC) |
2924 | Hi = DAG.getNode(ISD::ADD, DL, PtrVT, |
2925 | DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi); |
2926 | |
2927 | // Generate non-pic code that has direct accesses to the constant pool. |
2928 | // The address of the global is just (hi(&g)+lo(&g)). |
2929 | return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo); |
2930 | } |
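     |
     | // The non-PIC result corresponds to the usual two-instruction
     | // materialization (sketch, relocation syntax abbreviated):
     | //   lis  rD, g@ha
     | //   addi rD, rD, g@l
     | // The PIC form instead adds the high part to the global base register
     | // before applying the low part.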
2931 | |
2932 | static void setUsesTOCBasePtr(MachineFunction &MF) { |
2933 | PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); |
2934 | FuncInfo->setUsesTOCBasePtr(); |
2935 | } |
2936 | |
2937 | static void setUsesTOCBasePtr(SelectionDAG &DAG) { |
2938 | setUsesTOCBasePtr(DAG.getMachineFunction()); |
2939 | } |
2940 | |
2941 | SDValue PPCTargetLowering::getTOCEntry(SelectionDAG &DAG, const SDLoc &dl, |
2942 | SDValue GA) const { |
2943 | const bool Is64Bit = Subtarget.isPPC64(); |
2944 | EVT VT = Is64Bit ? MVT::i64 : MVT::i32; |
2945 | SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT) |
2946 | : Subtarget.isAIXABI() |
2947 | ? DAG.getRegister(PPC::R2, VT) |
2948 | : DAG.getNode(PPCISD::GlobalBaseReg, dl, VT); |
2949 | SDValue Ops[] = { GA, Reg }; |
2950 | return DAG.getMemIntrinsicNode( |
2951 | PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT, |
2952 | MachinePointerInfo::getGOT(DAG.getMachineFunction()), None, |
2953 | MachineMemOperand::MOLoad); |
2954 | } |
2955 | |
2956 | SDValue PPCTargetLowering::LowerConstantPool(SDValue Op, |
2957 | SelectionDAG &DAG) const { |
2958 | EVT PtrVT = Op.getValueType(); |
2959 | ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); |
2960 | const Constant *C = CP->getConstVal(); |
2961 | |
2962 | // 64-bit SVR4 ABI and AIX ABI code are always position-independent. |
2963 | // The actual address of the GlobalValue is stored in the TOC. |
2964 | if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) { |
2965 | if (Subtarget.isUsingPCRelativeCalls()) { |
2966 | SDLoc DL(CP); |
2967 | EVT Ty = getPointerTy(DAG.getDataLayout()); |
2968 | SDValue ConstPool = DAG.getTargetConstantPool( |
2969 | C, Ty, CP->getAlign(), CP->getOffset(), PPCII::MO_PCREL_FLAG); |
2970 | return DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, ConstPool); |
2971 | } |
2972 | setUsesTOCBasePtr(DAG); |
2973 | SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0); |
2974 | return getTOCEntry(DAG, SDLoc(CP), GA); |
2975 | } |
2976 | |
2977 | unsigned MOHiFlag, MOLoFlag; |
2978 | bool IsPIC = isPositionIndependent(); |
2979 | getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); |
2980 | |
2981 | if (IsPIC && Subtarget.isSVR4ABI()) { |
2982 | SDValue GA = |
2983 | DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), PPCII::MO_PIC_FLAG); |
2984 | return getTOCEntry(DAG, SDLoc(CP), GA); |
2985 | } |
2986 | |
2987 | SDValue CPIHi = |
2988 | DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0, MOHiFlag); |
2989 | SDValue CPILo = |
2990 | DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0, MOLoFlag); |
2991 | return LowerLabelRef(CPIHi, CPILo, IsPIC, DAG); |
2992 | } |
2993 | |
2994 | // For 64-bit PowerPC, prefer the more compact relative encodings. |
2995 | // This trades 32 bits per jump table entry for one or two instructions |
2996 | // on the jump site. |
2997 | unsigned PPCTargetLowering::getJumpTableEncoding() const { |
2998 | if (isJumpTableRelative()) |
2999 | return MachineJumpTableInfo::EK_LabelDifference32; |
3000 | |
3001 | return TargetLowering::getJumpTableEncoding(); |
3002 | } |
3003 | |
3004 | bool PPCTargetLowering::isJumpTableRelative() const { |
3005 | if (UseAbsoluteJumpTables) |
3006 | return false; |
3007 | if (Subtarget.isPPC64() || Subtarget.isAIXABI()) |
3008 | return true; |
3009 | return TargetLowering::isJumpTableRelative(); |
3010 | } |
3011 | |
3012 | SDValue PPCTargetLowering::getPICJumpTableRelocBase(SDValue Table, |
3013 | SelectionDAG &DAG) const { |
3014 | if (!Subtarget.isPPC64() || Subtarget.isAIXABI()) |
3015 | return TargetLowering::getPICJumpTableRelocBase(Table, DAG); |
3016 | |
3017 | switch (getTargetMachine().getCodeModel()) { |
3018 | case CodeModel::Small: |
3019 | case CodeModel::Medium: |
3020 | return TargetLowering::getPICJumpTableRelocBase(Table, DAG); |
3021 | default: |
3022 | return DAG.getNode(PPCISD::GlobalBaseReg, SDLoc(), |
3023 | getPointerTy(DAG.getDataLayout())); |
3024 | } |
3025 | } |
3026 | |
3027 | const MCExpr * |
3028 | PPCTargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF, |
3029 | unsigned JTI, |
3030 | MCContext &Ctx) const { |
3031 | if (!Subtarget.isPPC64() || Subtarget.isAIXABI()) |
3032 | return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx); |
3033 | |
3034 | switch (getTargetMachine().getCodeModel()) { |
3035 | case CodeModel::Small: |
3036 | case CodeModel::Medium: |
3037 | return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx); |
3038 | default: |
3039 | return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx); |
3040 | } |
3041 | } |
3042 | |
3043 | SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { |
3044 | EVT PtrVT = Op.getValueType(); |
3045 | JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); |
3046 | |
3047 | // isUsingPCRelativeCalls() returns true when PCRelative is enabled |
3048 | if (Subtarget.isUsingPCRelativeCalls()) { |
3049 | SDLoc DL(JT); |
3050 | EVT Ty = getPointerTy(DAG.getDataLayout()); |
3051 | SDValue GA = |
3052 | DAG.getTargetJumpTable(JT->getIndex(), Ty, PPCII::MO_PCREL_FLAG); |
3053 | SDValue MatAddr = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA); |
3054 | return MatAddr; |
3055 | } |
3056 | |
3057 | // 64-bit SVR4 ABI and AIX ABI code are always position-independent. |
3058 | // The actual address of the GlobalValue is stored in the TOC. |
3059 | if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) { |
3060 | setUsesTOCBasePtr(DAG); |
3061 | SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); |
3062 | return getTOCEntry(DAG, SDLoc(JT), GA); |
3063 | } |
3064 | |
3065 | unsigned MOHiFlag, MOLoFlag; |
3066 | bool IsPIC = isPositionIndependent(); |
3067 | getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); |
3068 | |
3069 | if (IsPIC && Subtarget.isSVR4ABI()) { |
3070 | SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, |
3071 | PPCII::MO_PIC_FLAG); |
3072 | return getTOCEntry(DAG, SDLoc(GA), GA); |
3073 | } |
3074 | |
3075 | SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag); |
3076 | SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag); |
3077 | return LowerLabelRef(JTIHi, JTILo, IsPIC, DAG); |
3078 | } |
3079 | |
3080 | SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op, |
3081 | SelectionDAG &DAG) const { |
3082 | EVT PtrVT = Op.getValueType(); |
3083 | BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op); |
3084 | const BlockAddress *BA = BASDN->getBlockAddress(); |
3085 | |
3086 | // isUsingPCRelativeCalls() returns true when PCRelative is enabled |
3087 | if (Subtarget.isUsingPCRelativeCalls()) { |
3088 | SDLoc DL(BASDN); |
3089 | EVT Ty = getPointerTy(DAG.getDataLayout()); |
3090 | SDValue GA = DAG.getTargetBlockAddress(BA, Ty, BASDN->getOffset(), |
3091 | PPCII::MO_PCREL_FLAG); |
3092 | SDValue MatAddr = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA); |
3093 | return MatAddr; |
3094 | } |
3095 | |
3096 | // 64-bit SVR4 ABI and AIX ABI code are always position-independent. |
3097 | // The actual BlockAddress is stored in the TOC. |
3098 | if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) { |
3099 | setUsesTOCBasePtr(DAG); |
3100 | SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset()); |
3101 | return getTOCEntry(DAG, SDLoc(BASDN), GA); |
3102 | } |
3103 | |
3104 | // 32-bit position-independent ELF stores the BlockAddress in the .got. |
3105 | if (Subtarget.is32BitELFABI() && isPositionIndependent()) |
3106 | return getTOCEntry( |
3107 | DAG, SDLoc(BASDN), |
3108 | DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset())); |
3109 | |
3110 | unsigned MOHiFlag, MOLoFlag; |
3111 | bool IsPIC = isPositionIndependent(); |
3112 | getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); |
3113 | SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag); |
3114 | SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag); |
3115 | return LowerLabelRef(TgtBAHi, TgtBALo, IsPIC, DAG); |
3116 | } |
3117 | |
3118 | SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op, |
3119 | SelectionDAG &DAG) const { |
3120 | // FIXME: TLS addresses currently use medium model code sequences, |
3121 | // which is the most useful form. Eventually support for small and |
3122 | // large models could be added if users need it, at the cost of |
3123 | // additional complexity. |
3124 | GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); |
3125 | if (DAG.getTarget().useEmulatedTLS()) |
3126 | return LowerToTLSEmulatedModel(GA, DAG); |
3127 | |
3128 | SDLoc dl(GA); |
3129 | const GlobalValue *GV = GA->getGlobal(); |
3130 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
3131 | bool is64bit = Subtarget.isPPC64(); |
3132 | const Module *M = DAG.getMachineFunction().getFunction().getParent(); |
3133 | PICLevel::Level picLevel = M->getPICLevel(); |
3134 | |
3135 | const TargetMachine &TM = getTargetMachine(); |
3136 | TLSModel::Model Model = TM.getTLSModel(GV); |
3137 | |
3138 | if (Model == TLSModel::LocalExec) { |
3139 | if (Subtarget.isUsingPCRelativeCalls()) { |
3140 | SDValue TLSReg = DAG.getRegister(PPC::X13, MVT::i64); |
3141 | SDValue TGA = DAG.getTargetGlobalAddress( |
3142 | GV, dl, PtrVT, 0, (PPCII::MO_PCREL_FLAG | PPCII::MO_TPREL_FLAG)); |
3143 | SDValue MatAddr = |
3144 | DAG.getNode(PPCISD::TLS_LOCAL_EXEC_MAT_ADDR, dl, PtrVT, TGA); |
3145 | return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TLSReg, MatAddr); |
3146 | } |
3147 | |
3148 | SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, |
3149 | PPCII::MO_TPREL_HA); |
3150 | SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, |
3151 | PPCII::MO_TPREL_LO); |
3152 | SDValue TLSReg = is64bit ? DAG.getRegister(PPC::X13, MVT::i64) |
3153 | : DAG.getRegister(PPC::R2, MVT::i32); |
3154 | |
3155 | SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg); |
3156 | return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi); |
3157 | } |
3158 | |
3159 | if (Model == TLSModel::InitialExec) { |
3160 | bool IsPCRel = Subtarget.isUsingPCRelativeCalls(); |
3161 | SDValue TGA = DAG.getTargetGlobalAddress( |
3162 | GV, dl, PtrVT, 0, IsPCRel ? PPCII::MO_GOT_TPREL_PCREL_FLAG : 0); |
3163 | SDValue TGATLS = DAG.getTargetGlobalAddress( |
3164 | GV, dl, PtrVT, 0, |
3165 | IsPCRel ? (PPCII::MO_TLS | PPCII::MO_PCREL_FLAG) : PPCII::MO_TLS); |
3166 | SDValue TPOffset; |
3167 | if (IsPCRel) { |
3168 | SDValue MatPCRel = DAG.getNode(PPCISD::MAT_PCREL_ADDR, dl, PtrVT, TGA); |
3169 | TPOffset = DAG.getLoad(MVT::i64, dl, DAG.getEntryNode(), MatPCRel, |
3170 | MachinePointerInfo()); |
3171 | } else { |
3172 | SDValue GOTPtr; |
3173 | if (is64bit) { |
3174 | setUsesTOCBasePtr(DAG); |
3175 | SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); |
3176 | GOTPtr = |
3177 | DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl, PtrVT, GOTReg, TGA); |
3178 | } else { |
3179 | if (!TM.isPositionIndependent()) |
3180 | GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT); |
3181 | else if (picLevel == PICLevel::SmallPIC) |
3182 | GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); |
3183 | else |
3184 | GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); |
3185 | } |
3186 | TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl, PtrVT, TGA, GOTPtr); |
3187 | } |
3188 | return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS); |
3189 | } |
3190 | |
3191 | if (Model == TLSModel::GeneralDynamic) { |
3192 | if (Subtarget.isUsingPCRelativeCalls()) { |
3193 | SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, |
3194 | PPCII::MO_GOT_TLSGD_PCREL_FLAG); |
3195 | return DAG.getNode(PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR, dl, PtrVT, TGA); |
3196 | } |
3197 | |
3198 | SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); |
3199 | SDValue GOTPtr; |
3200 | if (is64bit) { |
3201 | setUsesTOCBasePtr(DAG); |
3202 | SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); |
3203 | GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT, |
3204 | GOTReg, TGA); |
3205 | } else { |
3206 | if (picLevel == PICLevel::SmallPIC) |
3207 | GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); |
3208 | else |
3209 | GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); |
3210 | } |
3211 | return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT, |
3212 | GOTPtr, TGA, TGA); |
3213 | } |
3214 | |
3215 | if (Model == TLSModel::LocalDynamic) { |
3216 | if (Subtarget.isUsingPCRelativeCalls()) { |
3217 | SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, |
3218 | PPCII::MO_GOT_TLSLD_PCREL_FLAG); |
3219 | SDValue MatPCRel = |
3220 | DAG.getNode(PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR, dl, PtrVT, TGA); |
3221 | return DAG.getNode(PPCISD::PADDI_DTPREL, dl, PtrVT, MatPCRel, TGA); |
3222 | } |
3223 | |
3224 | SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); |
3225 | SDValue GOTPtr; |
3226 | if (is64bit) { |
3227 | setUsesTOCBasePtr(DAG); |
3228 | SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); |
3229 | GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT, |
3230 | GOTReg, TGA); |
3231 | } else { |
3232 | if (picLevel == PICLevel::SmallPIC) |
3233 | GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); |
3234 | else |
3235 | GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); |
3236 | } |
3237 | SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl, |
3238 | PtrVT, GOTPtr, TGA, TGA); |
3239 | SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl, |
3240 | PtrVT, TLSAddr, TGA); |
3241 | return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA); |
3242 | } |
3243 | |
3244 | llvm_unreachable("Unknown TLS model!")::llvm::llvm_unreachable_internal("Unknown TLS model!", "/build/llvm-toolchain-snapshot-13~++20210223111116+16ede0956cb1/llvm/lib/Target/PowerPC/PPCISelLowering.cpp" , 3244); |
3245 | } |
3246 | |
3247 | SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op, |
3248 | SelectionDAG &DAG) const { |
3249 | EVT PtrVT = Op.getValueType(); |
3250 | GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op); |
3251 | SDLoc DL(GSDN); |
3252 | const GlobalValue *GV = GSDN->getGlobal(); |
3253 | |
3254 | // 64-bit SVR4 ABI & AIX ABI code is always position-independent. |
3255 | // The actual address of the GlobalValue is stored in the TOC. |
3256 | if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) { |
3257 | if (Subtarget.isUsingPCRelativeCalls()) { |
3258 | EVT Ty = getPointerTy(DAG.getDataLayout()); |
3259 | if (isAccessedAsGotIndirect(Op)) { |
3260 | SDValue GA = DAG.getTargetGlobalAddress(GV, DL, Ty, GSDN->getOffset(), |
3261 | PPCII::MO_PCREL_FLAG | |
3262 | PPCII::MO_GOT_FLAG); |
3263 | SDValue MatPCRel = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA); |
3264 | SDValue Load = DAG.getLoad(MVT::i64, DL, DAG.getEntryNode(), MatPCRel, |
3265 | MachinePointerInfo()); |
3266 | return Load; |
3267 | } else { |
3268 | SDValue GA = DAG.getTargetGlobalAddress(GV, DL, Ty, GSDN->getOffset(), |
3269 | PPCII::MO_PCREL_FLAG); |
3270 | return DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA); |
3271 | } |
3272 | } |
3273 | setUsesTOCBasePtr(DAG); |
3274 | SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset()); |
3275 | return getTOCEntry(DAG, DL, GA); |
3276 | } |
3277 | |
3278 | unsigned MOHiFlag, MOLoFlag; |
3279 | bool IsPIC = isPositionIndependent(); |
3280 | getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag, GV); |
3281 | |
3282 | if (IsPIC && Subtarget.isSVR4ABI()) { |
3283 | SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, |
3284 | GSDN->getOffset(), |
3285 | PPCII::MO_PIC_FLAG); |
3286 | return getTOCEntry(DAG, DL, GA); |
3287 | } |
3288 | |
3289 | SDValue GAHi = |
3290 | DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag); |
3291 | SDValue GALo = |
3292 | DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag); |
3293 | |
3294 | return LowerLabelRef(GAHi, GALo, IsPIC, DAG); |
3295 | } |
3296 | |
3297 | SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { |
3298 | ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); |
3299 | SDLoc dl(Op); |
3300 | |
3301 | if (Op.getValueType() == MVT::v2i64) { |
3302 | // When the operands themselves are v2i64 values, we need to do something |
3303 | // special because VSX has no underlying comparison operations for these. |
3304 | if (Op.getOperand(0).getValueType() == MVT::v2i64) { |
3305 | // Equality can be handled by casting to the legal type for Altivec |
3306 | // comparisons, everything else needs to be expanded. |
3307 | if (CC == ISD::SETEQ || CC == ISD::SETNE) { |
3308 | return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, |
3309 | DAG.getSetCC(dl, MVT::v4i32, |
3310 | DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)), |
3311 | DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)), |
3312 | CC)); |
3313 | } |
3314 | |
3315 | return SDValue(); |
3316 | } |
3317 | |
3318 | // We handle most of these in the usual way. |
3319 | return Op; |
3320 | } |
3321 | |
3322 | // If we're comparing for equality to zero, expose the fact that this is |
3323 | // implemented as a ctlz/srl pair on ppc, so that the dag combiner can |
3324 | // fold the new nodes. |
3325 | if (SDValue V = lowerCmpEqZeroToCtlzSrl(Op, DAG)) |
3326 | return V; |
3327 | |
3328 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { |
3329 | // Leave comparisons against 0 and -1 alone for now, since they're usually |
3330 | // optimized. FIXME: revisit this when we can custom lower all setcc |
3331 | // optimizations. |
3332 | if (C->isAllOnesValue() || C->isNullValue()) |
3333 | return SDValue(); |
3334 | } |
3335 | |
3336 | // If we have an integer seteq/setne, turn it into a compare against zero |
3337 | // by xor'ing the rhs with the lhs, which is faster than setting a |
3338 | // condition register, reading it back out, and masking the correct bit. The |
3339 | // normal approach here uses sub to do this instead of xor. Using xor exposes |
3340 | // the result to other bit-twiddling opportunities. |
3341 | EVT LHSVT = Op.getOperand(0).getValueType(); |
3342 | if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) { |
3343 | EVT VT = Op.getValueType(); |
3344 | SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0), |
3345 | Op.getOperand(1)); |
3346 | return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC); |
3347 | } |
3348 | return SDValue(); |
3349 | } |
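     |
     | // Illustrative rewrite (editor's example): (seteq %a, %b) becomes
     | // (seteq (xor %a, %b), 0), and the compare against zero then lowers to a
     | // ctlz/shift pair, roughly:
     | //   xor    r3, r3, r4
     | //   cntlzw r3, r3
     | //   srwi   r3, r3, 5
     | // cntlzw returns 32 only for zero, so the shift leaves 1 exactly when the
     | // inputs were equal, and the visible xor can feed further combines.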
3350 | |
3351 | SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const { |
3352 | SDNode *Node = Op.getNode(); |
3353 | EVT VT = Node->getValueType(0); |
3354 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
3355 | SDValue InChain = Node->getOperand(0); |
3356 | SDValue VAListPtr = Node->getOperand(1); |
3357 | const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); |
3358 | SDLoc dl(Node); |
3359 | |
3360 | assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");
3361 | |
3362 | // gpr_index |
3363 | SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain, |
3364 | VAListPtr, MachinePointerInfo(SV), MVT::i8); |
3365 | InChain = GprIndex.getValue(1); |
3366 | |
3367 | if (VT == MVT::i64) { |
3368 | // Check if GprIndex is even |
3369 | SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex, |
3370 | DAG.getConstant(1, dl, MVT::i32)); |
3371 | SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd, |
3372 | DAG.getConstant(0, dl, MVT::i32), ISD::SETNE); |
3373 | SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex, |
3374 | DAG.getConstant(1, dl, MVT::i32)); |
3375 | // Align GprIndex to be even if it isn't |
3376 | GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne, |
3377 | GprIndex); |
3378 | } |
3379 | |
3380 | // fpr index is 1 byte after gpr |
3381 | SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, |
3382 | DAG.getConstant(1, dl, MVT::i32)); |
3383 | |
3384 | // fpr |
3385 | SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain, |
3386 | FprPtr, MachinePointerInfo(SV), MVT::i8); |
3387 | InChain = FprIndex.getValue(1); |
3388 | |
3389 | SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, |
3390 | DAG.getConstant(8, dl, MVT::i32)); |
3391 | |
3392 | SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, |
3393 | DAG.getConstant(4, dl, MVT::i32)); |
3394 | |
3395 | // areas |
3396 | SDValue OverflowArea = |
3397 | DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, MachinePointerInfo()); |
3398 | InChain = OverflowArea.getValue(1); |
3399 | |
3400 | SDValue RegSaveArea = |
3401 | DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, MachinePointerInfo()); |
3402 | InChain = RegSaveArea.getValue(1); |
3403 | |
3404 | // select overflow_area if index >= 8
3405 | SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex, |
3406 | DAG.getConstant(8, dl, MVT::i32), ISD::SETLT); |
3407 | |
3408 | // adjustment constant gpr_index * 4/8 |
3409 | SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32, |
3410 | VT.isInteger() ? GprIndex : FprIndex, |
3411 | DAG.getConstant(VT.isInteger() ? 4 : 8, dl, |
3412 | MVT::i32)); |
3413 | |
3414 | // OurReg = RegSaveArea + RegConstant |
3415 | SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea, |
3416 | RegConstant); |
3417 | |
3418 | // Floating types are 32 bytes into RegSaveArea |
3419 | if (VT.isFloatingPoint()) |
3420 | OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg, |
3421 | DAG.getConstant(32, dl, MVT::i32)); |
3422 | |
3423 | // increase {f,g}pr_index by 1 (or 2 if VT is i64) |
3424 | SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32, |
3425 | VT.isInteger() ? GprIndex : FprIndex, |
3426 | DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl, |
3427 | MVT::i32)); |
3428 | |
3429 | InChain = DAG.getTruncStore(InChain, dl, IndexPlus1, |
3430 | VT.isInteger() ? VAListPtr : FprPtr, |
3431 | MachinePointerInfo(SV), MVT::i8); |
3432 | |
3433 | // determine if we should load from reg_save_area or overflow_area |
3434 | SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea); |
3435 | |
3436 | // increase overflow_area by 4/8 if gpr/fpr index >= 8
3437 | SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea, |
3438 | DAG.getConstant(VT.isInteger() ? 4 : 8, |
3439 | dl, MVT::i32)); |
3440 | |
3441 | OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea, |
3442 | OverflowAreaPlusN); |
3443 | |
3444 | InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr, |
3445 | MachinePointerInfo(), MVT::i32); |
3446 | |
3447 | return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo()); |
3448 | } |
3449 | |
3450 | SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const { |
3451 | assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");
3452 | |
3453 | // We have to copy the entire va_list struct: |
3454 | // 2*sizeof(char) + 2 bytes of padding + 2*sizeof(char*) = 12 bytes
3455 | return DAG.getMemcpy(Op.getOperand(0), Op, Op.getOperand(1), Op.getOperand(2), |
3456 | DAG.getConstant(12, SDLoc(Op), MVT::i32), Align(8), |
3457 | false, true, false, MachinePointerInfo(), |
3458 | MachinePointerInfo()); |
3459 | } |
3460 | |
3461 | SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op, |
3462 | SelectionDAG &DAG) const { |
3463 | if (Subtarget.isAIXABI()) |
3464 | report_fatal_error("ADJUST_TRAMPOLINE operation is not supported on AIX."); |
3465 | |
3466 | return Op.getOperand(0); |
3467 | } |
3468 | |
3469 | SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op, |
3470 | SelectionDAG &DAG) const { |
3471 | if (Subtarget.isAIXABI()) |
3472 | report_fatal_error("INIT_TRAMPOLINE operation is not supported on AIX."); |
3473 | |
3474 | SDValue Chain = Op.getOperand(0); |
3475 | SDValue Trmp = Op.getOperand(1); // trampoline |
3476 | SDValue FPtr = Op.getOperand(2); // nested function |
3477 | SDValue Nest = Op.getOperand(3); // 'nest' parameter value |
3478 | SDLoc dl(Op); |
3479 | |
3480 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
3481 | bool isPPC64 = (PtrVT == MVT::i64); |
3482 | Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext()); |
3483 | |
3484 | TargetLowering::ArgListTy Args; |
3485 | TargetLowering::ArgListEntry Entry; |
3486 | |
3487 | Entry.Ty = IntPtrTy; |
3488 | Entry.Node = Trmp; Args.push_back(Entry); |
3489 | |
3490 | // TrampSize == (isPPC64 ? 48 : 40); |
3491 | Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl, |
3492 | isPPC64 ? MVT::i64 : MVT::i32); |
3493 | Args.push_back(Entry); |
3494 | |
3495 | Entry.Node = FPtr; Args.push_back(Entry); |
3496 | Entry.Node = Nest; Args.push_back(Entry); |
3497 | |
3498 | // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg) |
3499 | TargetLowering::CallLoweringInfo CLI(DAG); |
3500 | CLI.setDebugLoc(dl).setChain(Chain).setLibCallee( |
3501 | CallingConv::C, Type::getVoidTy(*DAG.getContext()), |
3502 | DAG.getExternalSymbol("__trampoline_setup", PtrVT), std::move(Args)); |
3503 | |
3504 | std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); |
3505 | return CallResult.second; |
3506 | } |
3507 | |
3508 | SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const { |
3509 | MachineFunction &MF = DAG.getMachineFunction(); |
3510 | PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); |
3511 | EVT PtrVT = getPointerTy(MF.getDataLayout()); |
3512 | |
3513 | SDLoc dl(Op); |
3514 | |
3515 | if (Subtarget.isPPC64() || Subtarget.isAIXABI()) { |
3516 | // vastart just stores the address of the VarArgsFrameIndex slot into the |
3517 | // memory location argument. |
3518 | SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); |
3519 | const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); |
3520 | return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), |
3521 | MachinePointerInfo(SV)); |
3522 | } |
3523 | |
3524 | // For the 32-bit SVR4 ABI we follow the layout of the va_list struct. |
3525 |   // We assume the given va_list has already been allocated.
3526 | // |
3527 | // typedef struct { |
3528 | // char gpr; /* index into the array of 8 GPRs |
3529 | // * stored in the register save area |
3530 | // * gpr=0 corresponds to r3, |
3531 | // * gpr=1 to r4, etc. |
3532 | // */ |
3533 | // char fpr; /* index into the array of 8 FPRs |
3534 | // * stored in the register save area |
3535 | // * fpr=0 corresponds to f1, |
3536 | // * fpr=1 to f2, etc. |
3537 | // */ |
3538 | // char *overflow_arg_area; |
3539 | // /* location on stack that holds |
3540 | // * the next overflow argument |
3541 | // */ |
3542 | // char *reg_save_area; |
3543 | // /* where r3:r10 and f1:f8 (if saved) |
3544 | // * are stored |
3545 | // */ |
3546 | // } va_list[1]; |
3547 | |
3548 | SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32); |
3549 | SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32); |
3550 | SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(), |
3551 | PtrVT); |
3552 | SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), |
3553 | PtrVT); |
3554 | |
3555 | uint64_t FrameOffset = PtrVT.getSizeInBits()/8; |
3556 | SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT); |
3557 | |
3558 | uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1; |
3559 | SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT); |
3560 | |
3561 | uint64_t FPROffset = 1; |
3562 | SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT); |
3563 | |
3564 | const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); |
3565 | |
3566 |   // Store the first byte: the number of integer registers used.
3567 | SDValue firstStore = |
3568 | DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, Op.getOperand(1), |
3569 | MachinePointerInfo(SV), MVT::i8); |
3570 | uint64_t nextOffset = FPROffset; |
3571 | SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1), |
3572 | ConstFPROffset); |
3573 | |
3574 |   // Store the second byte: the number of floating-point registers used.
3575 | SDValue secondStore = |
3576 | DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr, |
3577 | MachinePointerInfo(SV, nextOffset), MVT::i8); |
3578 | nextOffset += StackOffset; |
3579 | nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset); |
3580 | |
3581 |   // Store the second word: a pointer to the arguments passed on the stack.
3582 | SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr, |
3583 | MachinePointerInfo(SV, nextOffset)); |
3584 | nextOffset += FrameOffset; |
3585 | nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset); |
3586 | |
3587 |   // Store the third word: a pointer to the arguments passed in registers.
3588 | return DAG.getStore(thirdStore, dl, FR, nextPtr, |
3589 | MachinePointerInfo(SV, nextOffset)); |
3590 | } |
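     | 
     | // Worked example (sketch) of the stores above on PPC32, where PtrVT is i32,
     | // so FrameOffset == 4, StackOffset == 3, and FPROffset == 1:
     | //   offset 0: gpr               (i8 truncating store, nextOffset -> 1)
     | //   offset 1: fpr               (i8 truncating store, nextOffset -> 4)
     | //   offset 4: overflow_arg_area (i32 store,           nextOffset -> 8)
     | //   offset 8: reg_save_area     (i32 store)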
3591 | |
3592 | /// FPR - The set of FP registers that should be allocated for arguments |
3593 | /// on Darwin and AIX. |
3594 | static const MCPhysReg FPR[] = {PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, |
3595 | PPC::F6, PPC::F7, PPC::F8, PPC::F9, PPC::F10, |
3596 | PPC::F11, PPC::F12, PPC::F13}; |
3597 | |
3598 | /// CalculateStackSlotSize - Calculates the size reserved for this argument on |
3599 | /// the stack. |
3600 | static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags, |
3601 | unsigned PtrByteSize) { |
3602 | unsigned ArgSize = ArgVT.getStoreSize(); |
3603 | if (Flags.isByVal()) |
3604 | ArgSize = Flags.getByValSize(); |
3605 | |
3606 | // Round up to multiples of the pointer size, except for array members, |
3607 | // which are always packed. |
3608 | if (!Flags.isInConsecutiveRegs()) |
3609 | ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; |
3610 | |
3611 | return ArgSize; |
3612 | } |
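     | 
     | // Worked example (sketch): with PtrByteSize == 8, a 12-byte byval aggregate
     | // that is not an array member reserves ((12 + 8 - 1) / 8) * 8 == 16 bytes,
     | // while a 12-byte array member (isInConsecutiveRegs) stays packed at 12.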
3613 | |
3614 | /// CalculateStackSlotAlignment - Calculates the alignment of this argument |
3615 | /// on the stack. |
3616 | static Align CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT, |
3617 | ISD::ArgFlagsTy Flags, |
3618 | unsigned PtrByteSize) { |
3619 | Align Alignment(PtrByteSize); |
3620 | |
3621 | // Altivec parameters are padded to a 16 byte boundary. |
3622 | if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || |
3623 | ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || |
3624 | ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 || |
3625 | ArgVT == MVT::v1i128 || ArgVT == MVT::f128) |
3626 | Alignment = Align(16); |
3627 | |
3628 | // ByVal parameters are aligned as requested. |
3629 | if (Flags.isByVal()) { |
3630 | auto BVAlign = Flags.getNonZeroByValAlign(); |
3631 | if (BVAlign > PtrByteSize) { |
3632 | if (BVAlign.value() % PtrByteSize != 0) |
3633 |         llvm_unreachable(
3634 |             "ByVal alignment is not a multiple of the pointer size");
3635 | |
3636 | Alignment = BVAlign; |
3637 | } |
3638 | } |
3639 | |
3640 | // Array members are always packed to their original alignment. |
3641 | if (Flags.isInConsecutiveRegs()) { |
3642 | // If the array member was split into multiple registers, the first |
3643 | // needs to be aligned to the size of the full type. (Except for |
3644 | // ppcf128, which is only aligned as its f64 components.) |
3645 | if (Flags.isSplit() && OrigVT != MVT::ppcf128) |
3646 | Alignment = Align(OrigVT.getStoreSize()); |
3647 | else |
3648 | Alignment = Align(ArgVT.getStoreSize()); |
3649 | } |
3650 | |
3651 | return Alignment; |
3652 | } |
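     | 
     | // Worked example (sketch): a v4i32 argument is padded to Align(16); a byval
     | // with a requested 32-byte alignment keeps Align(32) when PtrByteSize == 8;
     | // and an i128 array member split into two i64 pieces aligns its first piece
     | // to Align(16), the store size of the full type.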
3653 | |
3654 | /// CalculateStackSlotUsed - Return whether this argument will use its |
3655 | /// stack slot (instead of being passed in registers). ArgOffset, |
3656 | /// AvailableFPRs, and AvailableVRs must hold the current argument |
3657 | /// position, and will be updated to account for this argument. |
3658 | static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT, ISD::ArgFlagsTy Flags, |
3659 | unsigned PtrByteSize, unsigned LinkageSize, |
3660 | unsigned ParamAreaSize, unsigned &ArgOffset, |
3661 | unsigned &AvailableFPRs, |
3662 | unsigned &AvailableVRs) { |
3663 | bool UseMemory = false; |
3664 | |
3665 | // Respect alignment of argument on the stack. |
3666 | Align Alignment = |
3667 | CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize); |
3668 | ArgOffset = alignTo(ArgOffset, Alignment); |
3669 | // If there's no space left in the argument save area, we must |
3670 | // use memory (this check also catches zero-sized arguments). |
3671 | if (ArgOffset >= LinkageSize + ParamAreaSize) |
3672 | UseMemory = true; |
3673 | |
3674 | // Allocate argument on the stack. |
3675 | ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize); |
3676 | if (Flags.isInConsecutiveRegsLast()) |
3677 | ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; |
3678 | // If we overran the argument save area, we must use memory |
3679 | // (this check catches arguments passed partially in memory) |
3680 | if (ArgOffset > LinkageSize + ParamAreaSize) |
3681 | UseMemory = true; |
3682 | |
3683 | // However, if the argument is actually passed in an FPR or a VR, |
3684 | // we don't use memory after all. |
3685 | if (!Flags.isByVal()) { |
3686 | if (ArgVT == MVT::f32 || ArgVT == MVT::f64) |
3687 | if (AvailableFPRs > 0) { |
3688 | --AvailableFPRs; |
3689 | return false; |
3690 | } |
3691 | if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || |
3692 | ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || |
3693 | ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 || |
3694 | ArgVT == MVT::v1i128 || ArgVT == MVT::f128) |
3695 | if (AvailableVRs > 0) { |
3696 | --AvailableVRs; |
3697 | return false; |
3698 | } |
3699 | } |
3700 | |
3701 | return UseMemory; |
3702 | } |
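     | 
     | // Worked example (sketch), using ELFv2-style numbers LinkageSize == 32,
     | // ParamAreaSize == 64, PtrByteSize == 8: the ninth i64 argument starts at
     | // ArgOffset == 96 == LinkageSize + ParamAreaSize, so it returns true; an
     | // f64 at the same offset still returns false while an FPR remains free.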
3703 | |
3704 | /// EnsureStackAlignment - Round stack frame size up from NumBytes to |
3705 | /// ensure minimum alignment required for target. |
3706 | static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering, |
3707 | unsigned NumBytes) { |
3708 | return alignTo(NumBytes, Lowering->getStackAlign()); |
3709 | } |
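     | 
     | // E.g., assuming getStackAlign() returns 16, EnsureStackAlignment(FL, 100)
     | // yields 112: alignTo rounds 100 up to the next multiple of 16.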
3710 | |
3711 | SDValue PPCTargetLowering::LowerFormalArguments( |
3712 | SDValue Chain, CallingConv::ID CallConv, bool isVarArg, |
3713 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, |
3714 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { |
3715 | if (Subtarget.isAIXABI()) |
3716 | return LowerFormalArguments_AIX(Chain, CallConv, isVarArg, Ins, dl, DAG, |
3717 | InVals); |
3718 | if (Subtarget.is64BitELFABI()) |
3719 | return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG, |
3720 | InVals); |
3721 |   assert(Subtarget.is32BitELFABI());
3722 | return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG, |
3723 | InVals); |
3724 | } |
3725 | |
3726 | SDValue PPCTargetLowering::LowerFormalArguments_32SVR4( |
3727 | SDValue Chain, CallingConv::ID CallConv, bool isVarArg, |
3728 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, |
3729 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { |
3730 | |
3731 | // 32-bit SVR4 ABI Stack Frame Layout: |
3732 | // +-----------------------------------+ |
3733 | // +--> | Back chain | |
3734 | // | +-----------------------------------+ |
3735 | // | | Floating-point register save area | |
3736 | // | +-----------------------------------+ |
3737 | // | | General register save area | |
3738 | // | +-----------------------------------+ |
3739 | // | | CR save word | |
3740 | // | +-----------------------------------+ |
3741 | // | | VRSAVE save word | |
3742 | // | +-----------------------------------+ |
3743 | // | | Alignment padding | |
3744 | // | +-----------------------------------+ |
3745 | // | | Vector register save area | |
3746 | // | +-----------------------------------+ |
3747 | // | | Local variable space | |
3748 | // | +-----------------------------------+ |
3749 | // | | Parameter list area | |
3750 | // | +-----------------------------------+ |
3751 | // | | LR save word | |
3752 | // | +-----------------------------------+ |
3753 | // SP--> +--- | Back chain | |
3754 | // +-----------------------------------+ |
3755 | // |
3756 | // Specifications: |
3757 | // System V Application Binary Interface PowerPC Processor Supplement |
3758 | // AltiVec Technology Programming Interface Manual |
3759 | |
3760 | MachineFunction &MF = DAG.getMachineFunction(); |
3761 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
3762 | PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); |
3763 | |
3764 | EVT PtrVT = getPointerTy(MF.getDataLayout()); |
3765 | // Potential tail calls could cause overwriting of argument stack slots. |
3766 | bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && |
3767 | (CallConv == CallingConv::Fast)); |
3768 | const Align PtrAlign(4); |
3769 | |
3770 | // Assign locations to all of the incoming arguments. |
3771 | SmallVector<CCValAssign, 16> ArgLocs; |
3772 | PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, |
3773 | *DAG.getContext()); |
3774 | |
3775 | // Reserve space for the linkage area on the stack. |
3776 | unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); |
3777 | CCInfo.AllocateStack(LinkageSize, PtrAlign); |
3778 | if (useSoftFloat()) |
3779 | CCInfo.PreAnalyzeFormalArguments(Ins); |
3780 | |
3781 | CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4); |
3782 | CCInfo.clearWasPPCF128(); |
3783 | |
3784 | for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { |
3785 | CCValAssign &VA = ArgLocs[i]; |
3786 | |
3787 | // Arguments stored in registers. |
3788 | if (VA.isRegLoc()) { |
3789 | const TargetRegisterClass *RC; |
3790 | EVT ValVT = VA.getValVT(); |
3791 | |
3792 | switch (ValVT.getSimpleVT().SimpleTy) { |
3793 | default: |
3794 | llvm_unreachable("ValVT not supported by formal arguments Lowering")::llvm::llvm_unreachable_internal("ValVT not supported by formal arguments Lowering" , "/build/llvm-toolchain-snapshot-13~++20210223111116+16ede0956cb1/llvm/lib/Target/PowerPC/PPCISelLowering.cpp" , 3794); |
3795 | case MVT::i1: |
3796 | case MVT::i32: |
3797 | RC = &PPC::GPRCRegClass; |
3798 | break; |
3799 | case MVT::f32: |
3800 | if (Subtarget.hasP8Vector()) |
3801 | RC = &PPC::VSSRCRegClass; |
3802 | else if (Subtarget.hasSPE()) |
3803 | RC = &PPC::GPRCRegClass; |
3804 | else |
3805 | RC = &PPC::F4RCRegClass; |
3806 | break; |
3807 | case MVT::f64: |
3808 | if (Subtarget.hasVSX()) |
3809 | RC = &PPC::VSFRCRegClass; |
3810 | else if (Subtarget.hasSPE()) |
3811 | // SPE passes doubles in GPR pairs. |
3812 | RC = &PPC::GPRCRegClass; |
3813 | else |
3814 | RC = &PPC::F8RCRegClass; |
3815 | break; |
3816 | case MVT::v16i8: |
3817 | case MVT::v8i16: |
3818 | case MVT::v4i32: |
3819 | RC = &PPC::VRRCRegClass; |
3820 | break; |
3821 | case MVT::v4f32: |
3822 | RC = &PPC::VRRCRegClass; |
3823 | break; |
3824 | case MVT::v2f64: |
3825 | case MVT::v2i64: |
3826 | RC = &PPC::VRRCRegClass; |
3827 | break; |
3828 | } |
3829 | |
3830 | SDValue ArgValue; |
3831 | // Transform the arguments stored in physical registers into |
3832 | // virtual ones. |
3833 | if (VA.getLocVT() == MVT::f64 && Subtarget.hasSPE()) { |
3834 |         assert(i + 1 < e && "No second half of double precision argument");
3835 | unsigned RegLo = MF.addLiveIn(VA.getLocReg(), RC); |
3836 | unsigned RegHi = MF.addLiveIn(ArgLocs[++i].getLocReg(), RC); |
3837 | SDValue ArgValueLo = DAG.getCopyFromReg(Chain, dl, RegLo, MVT::i32); |
3838 | SDValue ArgValueHi = DAG.getCopyFromReg(Chain, dl, RegHi, MVT::i32); |
3839 | if (!Subtarget.isLittleEndian()) |
3840 |           std::swap(ArgValueLo, ArgValueHi);
3841 | ArgValue = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, ArgValueLo, |
3842 | ArgValueHi); |
3843 | } else { |
3844 | unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); |
3845 | ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, |
3846 | ValVT == MVT::i1 ? MVT::i32 : ValVT); |
3847 | if (ValVT == MVT::i1) |
3848 | ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue); |
3849 | } |
3850 | |
3851 | InVals.push_back(ArgValue); |
3852 | } else { |
3853 | // Argument stored in memory. |
3854 |       assert(VA.isMemLoc());
3855 | |
3856 |       // Get the extended size of the argument type on the stack.
3857 | unsigned ArgSize = VA.getLocVT().getStoreSize(); |
3858 | // Get the actual size of the argument type |
3859 | unsigned ObjSize = VA.getValVT().getStoreSize(); |
3860 | unsigned ArgOffset = VA.getLocMemOffset(); |
3861 |       // Stack objects in PPC32 are right-justified.
3862 | ArgOffset += ArgSize - ObjSize; |
3863 | int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, isImmutable); |
3864 | |
3865 | // Create load nodes to retrieve arguments from the stack. |
3866 | SDValue FIN = DAG.getFrameIndex(FI, PtrVT); |
3867 | InVals.push_back( |
3868 | DAG.getLoad(VA.getValVT(), dl, Chain, FIN, MachinePointerInfo())); |
3869 | } |
3870 | } |
3871 | |
3872 | // Assign locations to all of the incoming aggregate by value arguments. |
3873 | // Aggregates passed by value are stored in the local variable space of the |
3874 | // caller's stack frame, right above the parameter list area. |
3875 | SmallVector<CCValAssign, 16> ByValArgLocs; |
3876 | CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(), |
3877 | ByValArgLocs, *DAG.getContext()); |
3878 | |
3879 | // Reserve stack space for the allocations in CCInfo. |
3880 | CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrAlign); |
3881 | |
3882 | CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal); |
3883 | |
3884 | // Area that is at least reserved in the caller of this function. |
3885 | unsigned MinReservedArea = CCByValInfo.getNextStackOffset(); |
3886 | MinReservedArea = std::max(MinReservedArea, LinkageSize); |
3887 | |
3888 |   // Set the size that is at least reserved in the caller of this function. Tail
3889 | // call optimized function's reserved stack space needs to be aligned so that |
3890 | // taking the difference between two stack areas will result in an aligned |
3891 | // stack. |
3892 | MinReservedArea = |
3893 | EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea); |
3894 | FuncInfo->setMinReservedArea(MinReservedArea); |
3895 | |
3896 | SmallVector<SDValue, 8> MemOps; |
3897 | |
3898 |   // If the function takes a variable number of arguments, make a frame index for
3899 | // the start of the first vararg value... for expansion of llvm.va_start. |
3900 | if (isVarArg) { |
3901 | static const MCPhysReg GPArgRegs[] = { |
3902 | PPC::R3, PPC::R4, PPC::R5, PPC::R6, |
3903 | PPC::R7, PPC::R8, PPC::R9, PPC::R10, |
3904 | }; |
3905 | const unsigned NumGPArgRegs = array_lengthof(GPArgRegs); |
3906 | |
3907 | static const MCPhysReg FPArgRegs[] = { |
3908 | PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, |
3909 | PPC::F8 |
3910 | }; |
3911 | unsigned NumFPArgRegs = array_lengthof(FPArgRegs); |
3912 | |
3913 | if (useSoftFloat() || hasSPE()) |
3914 | NumFPArgRegs = 0; |
3915 | |
3916 | FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs)); |
3917 | FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs)); |
3918 | |
3919 | // Make room for NumGPArgRegs and NumFPArgRegs. |
3920 | int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 + |
3921 | NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8; |
3922 | |
3923 | FuncInfo->setVarArgsStackOffset( |
3924 | MFI.CreateFixedObject(PtrVT.getSizeInBits()/8, |
3925 | CCInfo.getNextStackOffset(), true)); |
3926 | |
3927 | FuncInfo->setVarArgsFrameIndex( |
3928 | MFI.CreateStackObject(Depth, Align(8), false)); |
3929 | SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); |
3930 | |
3931 | // The fixed integer arguments of a variadic function are stored to the |
3932 | // VarArgsFrameIndex on the stack so that they may be loaded by |
3933 | // dereferencing the result of va_next. |
3934 | for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) { |
3935 | // Get an existing live-in vreg, or add a new one. |
3936 | unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]); |
3937 | if (!VReg) |
3938 | VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass); |
3939 | |
3940 | SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); |
3941 | SDValue Store = |
3942 | DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo()); |
3943 | MemOps.push_back(Store); |
3944 | // Increment the address by four for the next argument to store |
3945 | SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT); |
3946 | FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); |
3947 | } |
3948 | |
3949 | // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6 |
3950 | // is set. |
3951 | // The double arguments are stored to the VarArgsFrameIndex |
3952 | // on the stack. |
3953 | for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) { |
3954 | // Get an existing live-in vreg, or add a new one. |
3955 | unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]); |
3956 | if (!VReg) |
3957 | VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass); |
3958 | |
3959 | SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64); |
3960 | SDValue Store = |
3961 | DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo()); |
3962 | MemOps.push_back(Store); |
3963 | // Increment the address by eight for the next argument to store |
3964 | SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl, |
3965 | PtrVT); |
3966 | FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); |
3967 | } |
3968 | } |
3969 | |
3970 | if (!MemOps.empty()) |
3971 | Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); |
3972 | |
3973 | return Chain; |
3974 | } |
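     | 
     | // Sketch of the vararg register save area sizing above, assuming hard-float
     | // PPC32 (PtrVT is i32):
     | //   Depth = NumGPArgRegs * 4 + NumFPArgRegs * 8 = 8*4 + 8*8 = 96 bytes,
     | // i.e. room for r3-r10 followed by f1-f8; with soft-float or SPE the FPR
     | // portion is dropped and Depth is 32 bytes.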
3975 | |
3976 | // PPC64 passes i8, i16, and i32 values in i64 registers. Promote |
3977 | // value to MVT::i64 and then truncate to the correct register size. |
3978 | SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags, |
3979 | EVT ObjectVT, SelectionDAG &DAG, |
3980 | SDValue ArgVal, |
3981 | const SDLoc &dl) const { |
3982 | if (Flags.isSExt()) |
3983 | ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal, |
3984 | DAG.getValueType(ObjectVT)); |
3985 | else if (Flags.isZExt()) |
3986 | ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal, |
3987 | DAG.getValueType(ObjectVT)); |
3988 | |
3989 | return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal); |
3990 | } |
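     | 
     | // Illustrative sketch: for a sign-extended i32 argument arriving in an i64
     | // register, this emits
     | //   (i32 (truncate (i64 (AssertSext ArgVal, ValueType:i32))))
     | // so later combines may assume the high 32 bits of ArgVal were sign bits.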
3991 | |
3992 | SDValue PPCTargetLowering::LowerFormalArguments_64SVR4( |
3993 | SDValue Chain, CallingConv::ID CallConv, bool isVarArg, |
3994 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, |
3995 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { |
3996 | // TODO: add description of PPC stack frame format, or at least some docs. |
3997 | // |
3998 | bool isELFv2ABI = Subtarget.isELFv2ABI(); |
3999 | bool isLittleEndian = Subtarget.isLittleEndian(); |
4000 | MachineFunction &MF = DAG.getMachineFunction(); |
4001 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
4002 | PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); |
4003 | |
4004 |   assert(!(CallConv == CallingConv::Fast && isVarArg) &&
4005 |          "fastcc not supported on varargs functions");
4006 | |
4007 | EVT PtrVT = getPointerTy(MF.getDataLayout()); |
4008 | // Potential tail calls could cause overwriting of argument stack slots. |
4009 | bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && |
4010 | (CallConv == CallingConv::Fast)); |
4011 | unsigned PtrByteSize = 8; |
4012 | unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); |
4013 | |
4014 | static const MCPhysReg GPR[] = { |
4015 | PPC::X3, PPC::X4, PPC::X5, PPC::X6, |
4016 | PPC::X7, PPC::X8, PPC::X9, PPC::X10, |
4017 | }; |
4018 | static const MCPhysReg VR[] = { |
4019 | PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, |
4020 | PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 |
4021 | }; |
4022 | |
4023 | const unsigned Num_GPR_Regs = array_lengthof(GPR); |
4024 | const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13; |
4025 | const unsigned Num_VR_Regs = array_lengthof(VR); |
4026 | |
4027 | // Do a first pass over the arguments to determine whether the ABI |
4028 | // guarantees that our caller has allocated the parameter save area |
4029 | // on its stack frame. In the ELFv1 ABI, this is always the case; |
4030 | // in the ELFv2 ABI, it is true if this is a vararg function or if |
4031 | // any parameter is located in a stack slot. |
4032 | |
4033 | bool HasParameterArea = !isELFv2ABI || isVarArg; |
4034 | unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize; |
4035 | unsigned NumBytes = LinkageSize; |
4036 | unsigned AvailableFPRs = Num_FPR_Regs; |
4037 | unsigned AvailableVRs = Num_VR_Regs; |
4038 | for (unsigned i = 0, e = Ins.size(); i != e; ++i) { |
4039 | if (Ins[i].Flags.isNest()) |
4040 | continue; |
4041 | |
4042 | if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags, |
4043 | PtrByteSize, LinkageSize, ParamAreaSize, |
4044 | NumBytes, AvailableFPRs, AvailableVRs)) |
4045 | HasParameterArea = true; |
4046 | } |
4047 | |
4048 | // Add DAG nodes to load the arguments or copy them out of registers. On |
4049 | // entry to a function on PPC, the arguments start after the linkage area, |
4050 | // although the first ones are often in registers. |
4051 | |
4052 | unsigned ArgOffset = LinkageSize; |
4053 | unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; |
4054 | SmallVector<SDValue, 8> MemOps; |
4055 | Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin(); |
4056 | unsigned CurArgIdx = 0; |
4057 | for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { |
4058 | SDValue ArgVal; |
4059 | bool needsLoad = false; |
4060 | EVT ObjectVT = Ins[ArgNo].VT; |
4061 | EVT OrigVT = Ins[ArgNo].ArgVT; |
4062 | unsigned ObjSize = ObjectVT.getStoreSize(); |
4063 | unsigned ArgSize = ObjSize; |
4064 | ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; |
4065 | if (Ins[ArgNo].isOrigArg()) { |
4066 | std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx); |
4067 | CurArgIdx = Ins[ArgNo].getOrigArgIndex(); |
4068 | } |
4069 | // We re-align the argument offset for each argument, except when using the |
4070 | // fast calling convention, when we need to make sure we do that only when |
4071 | // we'll actually use a stack slot. |
4072 | unsigned CurArgOffset; |
4073 | Align Alignment; |
4074 | auto ComputeArgOffset = [&]() { |
4075 | /* Respect alignment of argument on the stack. */ |
4076 | Alignment = |
4077 | CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize); |
4078 | ArgOffset = alignTo(ArgOffset, Alignment); |
4079 | CurArgOffset = ArgOffset; |
4080 | }; |
4081 | |
4082 | if (CallConv != CallingConv::Fast) { |
4083 | ComputeArgOffset(); |
4084 | |
4085 | /* Compute GPR index associated with argument offset. */ |
4086 | GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize; |
4087 | GPR_idx = std::min(GPR_idx, Num_GPR_Regs); |
4088 | } |
4089 | |
4090 | // FIXME the codegen can be much improved in some cases. |
4091 | // We do not have to keep everything in memory. |
4092 | if (Flags.isByVal()) { |
4093 |       assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
4094 | |
4095 | if (CallConv == CallingConv::Fast) |
4096 | ComputeArgOffset(); |
4097 | |
4098 |       // ObjSize is the true size; ArgSize is ObjSize rounded up to a multiple
4099 |       // of the register size.
4099 | ObjSize = Flags.getByValSize(); |
4100 | ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; |
4101 | // Empty aggregate parameters do not take up registers. Examples: |
4102 | // struct { } a; |
4103 | // union { } b; |
4104 | // int c[0]; |
4105 | // etc. However, we have to provide a place-holder in InVals, so |
4106 | // pretend we have an 8-byte item at the current address for that |
4107 | // purpose. |
4108 | if (!ObjSize) { |
4109 | int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true); |
4110 | SDValue FIN = DAG.getFrameIndex(FI, PtrVT); |
4111 | InVals.push_back(FIN); |
4112 | continue; |
4113 | } |
4114 | |
4115 | // Create a stack object covering all stack doublewords occupied |
4116 | // by the argument. If the argument is (fully or partially) on |
4117 | // the stack, or if the argument is fully in registers but the |
4118 |       // caller has allocated the parameter save area anyway, we can refer
4119 | // directly to the caller's stack frame. Otherwise, create a |
4120 | // local copy in our own frame. |
4121 | int FI; |
4122 | if (HasParameterArea || |
4123 | ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize) |
4124 | FI = MFI.CreateFixedObject(ArgSize, ArgOffset, false, true); |
4125 | else |
4126 | FI = MFI.CreateStackObject(ArgSize, Alignment, false); |
4127 | SDValue FIN = DAG.getFrameIndex(FI, PtrVT); |
4128 | |
4129 | // Handle aggregates smaller than 8 bytes. |
4130 | if (ObjSize < PtrByteSize) { |
4131 | // The value of the object is its address, which differs from the |
4132 | // address of the enclosing doubleword on big-endian systems. |
4133 | SDValue Arg = FIN; |
4134 | if (!isLittleEndian) { |
4135 | SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT); |
4136 | Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff); |
4137 | } |
4138 | InVals.push_back(Arg); |
4139 | |
4140 | if (GPR_idx != Num_GPR_Regs) { |
4141 | unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); |
4142 | FuncInfo->addLiveInAttr(VReg, Flags); |
4143 | SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); |
4144 | SDValue Store; |
4145 | |
4146 | if (ObjSize==1 || ObjSize==2 || ObjSize==4) { |
4147 | EVT ObjType = (ObjSize == 1 ? MVT::i8 : |
4148 | (ObjSize == 2 ? MVT::i16 : MVT::i32)); |
4149 | Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg, |
4150 | MachinePointerInfo(&*FuncArg), ObjType); |
4151 | } else { |
4152 | // For sizes that don't fit a truncating store (3, 5, 6, 7), |
4153 | // store the whole register as-is to the parameter save area |
4154 | // slot. |
4155 | Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, |
4156 | MachinePointerInfo(&*FuncArg)); |
4157 | } |
4158 | |
4159 | MemOps.push_back(Store); |
4160 | } |
4161 | // Whether we copied from a register or not, advance the offset |
4162 | // into the parameter save area by a full doubleword. |
4163 | ArgOffset += PtrByteSize; |
4164 | continue; |
4165 | } |
4166 | |
4167 | // The value of the object is its address, which is the address of |
4168 | // its first stack doubleword. |
4169 | InVals.push_back(FIN); |
4170 | |
4171 | // Store whatever pieces of the object are in registers to memory. |
4172 | for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { |
4173 | if (GPR_idx == Num_GPR_Regs) |
4174 | break; |
4175 | |
4176 | unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); |
4177 | FuncInfo->addLiveInAttr(VReg, Flags); |
4178 | SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); |
4179 | SDValue Addr = FIN; |
4180 | if (j) { |
4181 | SDValue Off = DAG.getConstant(j, dl, PtrVT); |
4182 | Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off); |
4183 | } |
4184 | SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr, |
4185 | MachinePointerInfo(&*FuncArg, j)); |
4186 | MemOps.push_back(Store); |
4187 | ++GPR_idx; |
4188 | } |
4189 | ArgOffset += ArgSize; |
4190 | continue; |
4191 | } |
4192 | |
4193 | switch (ObjectVT.getSimpleVT().SimpleTy) { |
4194 | default: llvm_unreachable("Unhandled argument type!")::llvm::llvm_unreachable_internal("Unhandled argument type!", "/build/llvm-toolchain-snapshot-13~++20210223111116+16ede0956cb1/llvm/lib/Target/PowerPC/PPCISelLowering.cpp" , 4194); |
4195 | case MVT::i1: |
4196 | case MVT::i32: |
4197 | case MVT::i64: |
4198 | if (Flags.isNest()) { |
4199 | // The 'nest' parameter, if any, is passed in R11. |
4200 | unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass); |
4201 | ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); |
4202 | |
4203 | if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) |
4204 | ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); |
4205 | |
4206 | break; |
4207 | } |
4208 | |
4209 | // These can be scalar arguments or elements of an integer array type |
4210 | // passed directly. Clang may use those instead of "byval" aggregate |
4211 | // types to avoid forcing arguments to memory unnecessarily. |
4212 | if (GPR_idx != Num_GPR_Regs) { |
4213 | unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); |
4214 | FuncInfo->addLiveInAttr(VReg, Flags); |
4215 | ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); |
4216 | |
4217 | if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) |
4218 | // PPC64 passes i8, i16, and i32 values in i64 registers. Promote |
4219 | // value to MVT::i64 and then truncate to the correct register size. |
4220 | ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); |
4221 | } else { |
4222 | if (CallConv == CallingConv::Fast) |
4223 | ComputeArgOffset(); |
4224 | |
4225 | needsLoad = true; |
4226 | ArgSize = PtrByteSize; |
4227 | } |
4228 | if (CallConv != CallingConv::Fast || needsLoad) |
4229 | ArgOffset += 8; |
4230 | break; |
4231 | |
4232 | case MVT::f32: |
4233 | case MVT::f64: |
4234 | // These can be scalar arguments or elements of a float array type |
4235 |       // passed directly. The latter are used to implement ELFv2 homogeneous
4236 | // float aggregates. |
4237 | if (FPR_idx != Num_FPR_Regs) { |
4238 | unsigned VReg; |
4239 | |
4240 | if (ObjectVT == MVT::f32) |
4241 | VReg = MF.addLiveIn(FPR[FPR_idx], |
4242 | Subtarget.hasP8Vector() |
4243 | ? &PPC::VSSRCRegClass |
4244 | : &PPC::F4RCRegClass); |
4245 | else |
4246 | VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX() |
4247 | ? &PPC::VSFRCRegClass |
4248 | : &PPC::F8RCRegClass); |
4249 | |
4250 | ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); |
4251 | ++FPR_idx; |
4252 | } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) { |
4253 | // FIXME: We may want to re-enable this for CallingConv::Fast on the P8 |
4254 | // once we support fp <-> gpr moves. |
4255 | |
4256 | // This can only ever happen in the presence of f32 array types, |
4257 | // since otherwise we never run out of FPRs before running out |
4258 | // of GPRs. |
4259 | unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); |
4260 | FuncInfo->addLiveInAttr(VReg, Flags); |
4261 | ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); |
4262 | |
4263 | if (ObjectVT == MVT::f32) { |
4264 | if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0)) |
4265 | ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal, |
4266 | DAG.getConstant(32, dl, MVT::i32)); |
4267 | ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal); |
4268 | } |
4269 | |
4270 | ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal); |
4271 | } else { |
4272 | if (CallConv == CallingConv::Fast) |
4273 | ComputeArgOffset(); |
4274 | |
4275 | needsLoad = true; |
4276 | } |
4277 | |
4278 | // When passing an array of floats, the array occupies consecutive |
4279 | // space in the argument area; only round up to the next doubleword |
4280 | // at the end of the array. Otherwise, each float takes 8 bytes. |
4281 | if (CallConv != CallingConv::Fast || needsLoad) { |
4282 | ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize; |
4283 | ArgOffset += ArgSize; |
4284 | if (Flags.isInConsecutiveRegsLast()) |
4285 | ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; |
4286 | } |
4287 | break; |
4288 | case MVT::v4f32: |
4289 | case MVT::v4i32: |
4290 | case MVT::v8i16: |
4291 | case MVT::v16i8: |
4292 | case MVT::v2f64: |
4293 | case MVT::v2i64: |
4294 | case MVT::v1i128: |
4295 | case MVT::f128: |
4296 | // These can be scalar arguments or elements of a vector array type |
4297 |       // passed directly. The latter are used to implement ELFv2 homogeneous
4298 | // vector aggregates. |
4299 | if (VR_idx != Num_VR_Regs) { |
4300 | unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); |
4301 | ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); |
4302 | ++VR_idx; |
4303 | } else { |
4304 | if (CallConv == CallingConv::Fast) |
4305 | ComputeArgOffset(); |
4306 | needsLoad = true; |
4307 | } |
4308 | if (CallConv != CallingConv::Fast || needsLoad) |
4309 | ArgOffset += 16; |
4310 | break; |
4311 | } |
4312 | |
4313 | // We need to load the argument to a virtual register if we determined |
4314 | // above that we ran out of physical registers of the appropriate type. |
4315 | if (needsLoad) { |
4316 | if (ObjSize < ArgSize && !isLittleEndian) |
4317 | CurArgOffset += ArgSize - ObjSize; |
4318 | int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, isImmutable); |
4319 | SDValue FIN = DAG.getFrameIndex(FI, PtrVT); |
4320 | ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo()); |
4321 | } |
4322 | |
4323 | InVals.push_back(ArgVal); |
4324 | } |
4325 | |
4326 | // Area that is at least reserved in the caller of this function. |
4327 | unsigned MinReservedArea; |
4328 | if (HasParameterArea) |
4329 | MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize); |
4330 | else |
4331 | MinReservedArea = LinkageSize; |
4332 | |
4333 |   // Set the size that is at least reserved in the caller of this function. Tail
4334 | // call optimized functions' reserved stack space needs to be aligned so that |
4335 | // taking the difference between two stack areas will result in an aligned |
4336 | // stack. |
4337 | MinReservedArea = |
4338 | EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea); |
4339 | FuncInfo->setMinReservedArea(MinReservedArea); |
4340 | |
4341 |   // If the function takes a variable number of arguments, make a frame index for
4342 | // the start of the first vararg value... for expansion of llvm.va_start. |
4343 |   // The ELFv2 ABI spec notes:
4344 | // C programs that are intended to be *portable* across different compilers |
4345 | // and architectures must use the header file <stdarg.h> to deal with variable |
4346 | // argument lists. |
4347 | if (isVarArg && MFI.hasVAStart()) { |
4348 | int Depth = ArgOffset; |
4349 | |
4350 | FuncInfo->setVarArgsFrameIndex( |
4351 | MFI.CreateFixedObject(PtrByteSize, Depth, true)); |
4352 | SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); |
4353 | |
4354 | // If this function is vararg, store any remaining integer argument regs |
4355 | // to their spots on the stack so that they may be loaded by dereferencing |
4356 | // the result of va_next. |
4357 | for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize; |
4358 | GPR_idx < Num_GPR_Regs; ++GPR_idx) { |
4359 | unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); |
4360 | SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); |
4361 | SDValue Store = |
4362 | DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo()); |
4363 | MemOps.push_back(Store); |
4364 | // Increment the address by four for the next argument to store |
4365 | SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT); |
4366 | FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); |
4367 | } |
4368 | } |
4369 | |
4370 | if (!MemOps.empty()) |
4371 | Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); |
4372 | |
4373 | return Chain; |
4374 | } |
4375 | |
4376 | /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be |
4377 | /// adjusted to accommodate the arguments for the tail call.
4378 | static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall, |
4379 | unsigned ParamSize) { |
4380 | |
4381 | if (!isTailCall) return 0; |
4382 | |
4383 | PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>(); |
4384 | unsigned CallerMinReservedArea = FI->getMinReservedArea(); |
4385 | int SPDiff = (int)CallerMinReservedArea - (int)ParamSize; |
4386 | // Remember only if the new adjustment is bigger. |
4387 | if (SPDiff < FI->getTailCallSPDelta()) |
4388 | FI->setTailCallSPDelta(SPDiff); |
4389 | |
4390 | return SPDiff; |
4391 | } |
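     | 
     | // Worked example (sketch): if the caller's MinReservedArea is 112 bytes but
     | // the tail call's parameter area needs 144, SPDiff == 112 - 144 == -32, so
     | // the stack pointer must be moved down by 32 bytes before the jump (and the
     | // more negative of the two deltas is remembered in TailCallSPDelta).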
4392 | |
4393 | static bool isFunctionGlobalAddress(SDValue Callee); |
4394 | |
4395 | static bool callsShareTOCBase(const Function *Caller, SDValue Callee, |
4396 | const TargetMachine &TM) { |
4397 | // It does not make sense to call callsShareTOCBase() with a caller that |
4398 | // is PC Relative since PC Relative callers do not have a TOC. |
4399 | #ifndef NDEBUG |
4400 | const PPCSubtarget *STICaller = &TM.getSubtarget<PPCSubtarget>(*Caller); |
4401 |   assert(!STICaller->isUsingPCRelativeCalls() &&
4402 |          "PC Relative callers do not have a TOC and cannot share a TOC Base");
4403 | #endif |
4404 | |
4405 | // Callee is either a GlobalAddress or an ExternalSymbol. ExternalSymbols |
4406 | // don't have enough information to determine if the caller and callee share |
4407 | // the same TOC base, so we have to pessimistically assume they don't for |
4408 | // correctness. |
4409 | GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee); |
4410 | if (!G) |
4411 | return false; |
4412 | |
4413 | const GlobalValue *GV = G->getGlobal(); |
4414 | |
4415 |   // If the callee is preemptable, then the static linker will use a PLT stub,
4416 |   // which saves the TOC to the stack, and a nop is needed after the call
4417 |   // instruction so it can be converted into a TOC restore.
4418 | if (!TM.shouldAssumeDSOLocal(*Caller->getParent(), GV)) |
4419 | return false; |
4420 | |
4421 | // Functions with PC Relative enabled may clobber the TOC in the same DSO. |
4422 | // We may need a TOC restore in the situation where the caller requires a |
4423 | // valid TOC but the callee is PC Relative and does not. |
4424 | const Function *F = dyn_cast<Function>(GV); |
4425 | const GlobalAlias *Alias = dyn_cast<GlobalAlias>(GV); |
4426 | |
4427 | // If we have an Alias we can try to get the function from there. |
4428 | if (Alias) { |
4429 | const GlobalObject *GlobalObj = Alias->getBaseObject(); |
4430 | F = dyn_cast<Function>(GlobalObj); |
4431 | } |
4432 | |
4433 | // If we still have no valid function pointer we do not have enough |
4434 | // information to determine if the callee uses PC Relative calls so we must |
4435 | // assume that it does. |
4436 | if (!F) |
4437 | return false; |
4438 | |
4439 | // If the callee uses PC Relative we cannot guarantee that the callee won't |
4440 | // clobber the TOC of the caller and so we must assume that the two |
4441 | // functions do not share a TOC base. |
4442 | const PPCSubtarget *STICallee = &TM.getSubtarget<PPCSubtarget>(*F); |
4443 | if (STICallee->isUsingPCRelativeCalls()) |
4444 | return false; |
4445 | |
4446 | // If the GV is not a strong definition then we need to assume it can be |
4447 | // replaced by another function at link time. The function that replaces |
4448 | // it may not share the same TOC as the caller since the callee may be |
4449 | // replaced by a PC Relative version of the same function. |
4450 | if (!GV->isStrongDefinitionForLinker()) |
4451 | return false; |
4452 | |
4453 | // The medium and large code models are expected to provide a sufficiently |
4454 | // large TOC to provide all data addressing needs of a module with a |
4455 | // single TOC. |
4456 | if (CodeModel::Medium == TM.getCodeModel() || |
4457 | CodeModel::Large == TM.getCodeModel()) |
4458 | return true; |
4459 | |
4460 | // Any explicitly-specified sections and section prefixes must also match. |
4461 | // Also, if we're using -ffunction-sections, then each function is always in |
4462 | // a different section (the same is true for COMDAT functions). |
4463 | if (TM.getFunctionSections() || GV->hasComdat() || Caller->hasComdat() || |
4464 | GV->getSection() != Caller->getSection()) |
4465 | return false; |
4466 | if (const auto *F = dyn_cast<Function>(GV)) { |
4467 | if (F->getSectionPrefix() != Caller->getSectionPrefix()) |
4468 | return false; |
4469 | } |
4470 | |
4471 | return true; |
4472 | } |
4473 | |
4474 | static bool |
4475 | needStackSlotPassParameters(const PPCSubtarget &Subtarget, |
4476 | const SmallVectorImpl<ISD::OutputArg> &Outs) { |
4477 |   assert(Subtarget.is64BitELFABI());
4478 | |
4479 | const unsigned PtrByteSize = 8; |
4480 | const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); |
4481 | |
4482 | static const MCPhysReg GPR[] = { |
4483 | PPC::X3, PPC::X4, PPC::X5, PPC::X6, |
4484 | PPC::X7, PPC::X8, PPC::X9, PPC::X10, |
4485 | }; |
4486 | static const MCPhysReg VR[] = { |
4487 | PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, |
4488 | PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 |
4489 | }; |
4490 | |
4491 | const unsigned NumGPRs = array_lengthof(GPR); |
4492 | const unsigned NumFPRs = 13; |
4493 | const unsigned NumVRs = array_lengthof(VR); |
4494 | const unsigned ParamAreaSize = NumGPRs * PtrByteSize; |
4495 | |
4496 | unsigned NumBytes = LinkageSize; |
4497 | unsigned AvailableFPRs = NumFPRs; |
4498 | unsigned AvailableVRs = NumVRs; |
4499 | |
4500 | for (const ISD::OutputArg& Param : Outs) { |
4501 | if (Param.Flags.isNest()) continue; |
4502 | |
4503 | if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags, PtrByteSize, |
4504 | LinkageSize, ParamAreaSize, NumBytes, |
4505 | AvailableFPRs, AvailableVRs)) |
4506 | return true; |
4507 | } |
4508 | return false; |
4509 | } |
4510 | |
4511 | static bool hasSameArgumentList(const Function *CallerFn, const CallBase &CB) { |
4512 | if (CB.arg_size() != CallerFn->arg_size()) |
4513 | return false; |
4514 | |
4515 | auto CalleeArgIter = CB.arg_begin(); |
4516 | auto CalleeArgEnd = CB.arg_end(); |
4517 | Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin(); |
4518 | |
4519 | for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) { |
4520 | const Value* CalleeArg = *CalleeArgIter; |
4521 | const Value* CallerArg = &(*CallerArgIter); |
4522 | if (CalleeArg == CallerArg) |
4523 | continue; |
4524 | |
4525 | // e.g. @caller([4 x i64] %a, [4 x i64] %b) { |
4526 | // tail call @callee([4 x i64] undef, [4 x i64] %b) |
4527 | // } |
4528 |     // The 1st argument of the callee is undef and has the same type as the
4529 |     // caller's.
4529 | if (CalleeArg->getType() == CallerArg->getType() && |
4530 | isa<UndefValue>(CalleeArg)) |
4531 | continue; |
4532 | |
4533 | return false; |
4534 | } |
4535 | |
4536 | return true; |
4537 | } |
4538 | |
4539 | // Returns true if TCO is possible between the caller's and callee's
4540 | // calling conventions.
4541 | static bool |
4542 | areCallingConvEligibleForTCO_64SVR4(CallingConv::ID CallerCC, |
4543 | CallingConv::ID CalleeCC) { |
4544 | // Tail calls are possible with fastcc and ccc. |
4545 | auto isTailCallableCC = [] (CallingConv::ID CC){ |
4546 | return CC == CallingConv::C || CC == CallingConv::Fast; |
4547 | }; |
4548 | if (!isTailCallableCC(CallerCC) || !isTailCallableCC(CalleeCC)) |
4549 | return false; |
4550 | |
4551 | // We can safely tail call both fastcc and ccc callees from a c calling |
4552 | // convention caller. If the caller is fastcc, we may have less stack space |
4553 | // than a non-fastcc caller with the same signature so disable tail-calls in |
4554 | // that case. |
4555 | return CallerCC == CallingConv::C || CallerCC == CalleeCC; |
4556 | } |
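     | 
     | // Sketch of the resulting compatibility matrix:
     | //   caller \ callee | ccc | fastcc | other
     | //   ccc             | yes |  yes   |  no
     | //   fastcc          | no  |  yes   |  no
     | //   other           | no  |  no    |  no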
4557 | |
4558 | bool PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4( |
4559 | SDValue Callee, CallingConv::ID CalleeCC, const CallBase *CB, bool isVarArg, |
4560 | const SmallVectorImpl<ISD::OutputArg> &Outs, |
4561 | const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const { |
4562 | bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt; |
4563 | |
4564 | if (DisableSCO && !TailCallOpt) return false; |
4565 | |
4566 | // Variadic argument functions are not supported. |
4567 | if (isVarArg) return false; |
4568 | |
4569 | auto &Caller = DAG.getMachineFunction().getFunction(); |
4570 |   // Check that the calling conventions are compatible for TCO.
4571 | if (!areCallingConvEligibleForTCO_64SVR4(Caller.getCallingConv(), CalleeCC)) |
4572 | return false; |
4573 | |
4574 |   // A caller that contains any byval parameter is not supported.
4575 | if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); })) |
4576 | return false; |
4577 | |
4578 |   // Likewise, a callee that contains any byval parameter is not supported.
4579 |   // Note: This is a quick workaround, because in some cases, e.g.
4580 | // caller's stack size > callee's stack size, we are still able to apply |
4581 | // sibling call optimization. For example, gcc is able to do SCO for caller1 |
4582 | // in the following example, but not for caller2. |
4583 | // struct test { |
4584 | // long int a; |
4585 | // char ary[56]; |
4586 | // } gTest; |
4587 | // __attribute__((noinline)) int callee(struct test v, struct test *b) { |
4588 | // b->a = v.a; |
4589 | // return 0; |
4590 | // } |
4591 | // void caller1(struct test a, struct test c, struct test *b) { |
4592 | // callee(gTest, b); } |
4593 | // void caller2(struct test *b) { callee(gTest, b); } |
4594 | if (any_of(Outs, [](const ISD::OutputArg& OA) { return OA.Flags.isByVal(); })) |
4595 | return false; |
4596 | |
4597 |   // If the callee and caller use different calling conventions, we cannot pass
4598 |   // parameters on the stack since offsets for the parameter area may differ.
4599 | if (Caller.getCallingConv() != CalleeCC && |
4600 | needStackSlotPassParameters(Subtarget, Outs)) |
4601 | return false; |
4602 | |
4603 | // All variants of 64-bit ELF ABIs without PC-Relative addressing require that |
4604 | // the caller and callee share the same TOC for TCO/SCO. If the caller and |
4605 | // callee potentially have different TOC bases then we cannot tail call since |
4606 | // we need to restore the TOC pointer after the call. |
4607 | // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977 |
4608 | // We cannot guarantee this for indirect calls or calls to external functions. |
4609 | // When PC-Relative addressing is used, the concept of the TOC is no longer |
4610 | // applicable so this check is not required. |
4611 | // Check first for indirect calls. |
4612 | if (!Subtarget.isUsingPCRelativeCalls() && |
4613 | !isFunctionGlobalAddress(Callee) && !isa<ExternalSymbolSDNode>(Callee)) |
4614 | return false; |
4615 | |
4616 | // Check if we share the TOC base. |
4617 | if (!Subtarget.isUsingPCRelativeCalls() && |
4618 | !callsShareTOCBase(&Caller, Callee, getTargetMachine())) |
4619 | return false; |
4620 | |
4621 | // TCO allows altering callee ABI, so we don't have to check further. |
4622 | if (CalleeCC == CallingConv::Fast && TailCallOpt) |
4623 | return true; |
4624 | |
4625 | if (DisableSCO) return false; |
4626 | |
4627 |   // If the callee uses the same argument list as the caller, then we can
4628 |   // apply SCO in this case. If not, then we need to check whether the callee
4629 |   // needs stack for passing arguments.
4630 | // PC Relative tail calls may not have a CallBase. |
4631 | // If there is no CallBase we cannot verify if we have the same argument |
4632 | // list so assume that we don't have the same argument list. |
4633 | if (CB && !hasSameArgumentList(&Caller, *CB) && |
4634 | needStackSlotPassParameters(Subtarget, Outs)) |
4635 | return false; |
4636 | else if (!CB && needStackSlotPassParameters(Subtarget, Outs)) |
4637 | return false; |
4638 | |
4639 | return true; |
4640 | } |
4641 | |
4642 | /// IsEligibleForTailCallOptimization - Check whether the call is eligible |
4643 | /// for tail call optimization. Targets which want to do tail call |
4644 | /// optimization should implement this function. |
4645 | bool |
4646 | PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, |
4647 | CallingConv::ID CalleeCC, |
4648 | bool isVarArg, |
4649 | const SmallVectorImpl<ISD::InputArg> &Ins, |
4650 | SelectionDAG& DAG) const { |
4651 | if (!getTargetMachine().Options.GuaranteedTailCallOpt) |
4652 | return false; |
4653 | |
4654 | // Variable argument functions are not supported. |
4655 | if (isVarArg) |
4656 | return false; |
4657 | |
4658 | MachineFunction &MF = DAG.getMachineFunction(); |
4659 | CallingConv::ID CallerCC = MF.getFunction().getCallingConv(); |
4660 | if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) { |
4661 |     // Functions containing byval parameters are not supported.
4662 | for (unsigned i = 0; i != Ins.size(); i++) { |
4663 | ISD::ArgFlagsTy Flags = Ins[i].Flags; |
4664 | if (Flags.isByVal()) return false; |
4665 | } |
4666 | |
4667 | // Non-PIC/GOT tail calls are supported. |
4668 | if (getTargetMachine().getRelocationModel() != Reloc::PIC_) |
4669 | return true; |
4670 | |
4671 | // At the moment we can only do local tail calls (in same module, hidden |
4672 | // or protected) if we are generating PIC. |
4673 | if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) |
4674 | return G->getGlobal()->hasHiddenVisibility() |
4675 | || G->getGlobal()->hasProtectedVisibility(); |
4676 | } |
4677 | |
4678 | return false; |
4679 | } |
4680 | |
4681 | /// isBLACompatibleAddress - Return the immediate to use if the specified
4682 | /// 32-bit value is representable in the immediate field of a BxA instruction. |
4683 | static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) { |
4684 | ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); |
4685 | if (!C) return nullptr; |
4686 | |
4687 | int Addr = C->getZExtValue(); |
4688 | if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero. |
4689 | SignExtend32<26>(Addr) != Addr) |
4690 | return nullptr; // Top 6 bits have to be sext of immediate. |
4691 | |
4692 | return DAG |
4693 | .getConstant( |
4694 | (int)C->getZExtValue() >> 2, SDLoc(Op), |
4695 | DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout())) |
4696 | .getNode(); |
4697 | } |
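     | 
     | // Worked example (sketch): Addr == 0x01FFFFFC passes (word-aligned and
     | // unchanged by SignExtend32<26>) and is returned as 0x01FFFFFC >> 2 ==
     | // 0x007FFFFF; Addr == 0x02000000 fails because bit 25 is set, so the
     | // 26-bit sign extension would alter the value.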
4698 | |
4699 | namespace { |
4700 | |
4701 | struct TailCallArgumentInfo { |
4702 | SDValue Arg; |
4703 | SDValue FrameIdxOp; |
4704 | int FrameIdx = 0; |
4705 | |
4706 | TailCallArgumentInfo() = default; |
4707 | }; |
4708 | |
4709 | } // end anonymous namespace |
4710 | |
4711 | /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot. |
4712 | static void StoreTailCallArgumentsToStackSlot( |
4713 | SelectionDAG &DAG, SDValue Chain, |
4714 | const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs, |
4715 | SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) { |
4716 | for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) { |
4717 | SDValue Arg = TailCallArgs[i].Arg; |
4718 | SDValue FIN = TailCallArgs[i].FrameIdxOp; |
4719 | int FI = TailCallArgs[i].FrameIdx; |
4720 |     // Store relative to the frame pointer.
4721 | MemOpChains.push_back(DAG.getStore( |
4722 | Chain, dl, Arg, FIN, |
4723 | MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI))); |
4724 | } |
4725 | } |
4726 | |
4727 | /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to |
4728 | /// the appropriate stack slot for the tail call optimized function call. |
4729 | static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain, |
4730 | SDValue OldRetAddr, SDValue OldFP, |
4731 | int SPDiff, const SDLoc &dl) { |
4732 | if (SPDiff) { |
4733 | // Calculate the new stack slot for the return address. |
4734 | MachineFunction &MF = DAG.getMachineFunction(); |
4735 | const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>(); |
4736 | const PPCFrameLowering *FL = Subtarget.getFrameLowering(); |
4737 | bool isPPC64 = Subtarget.isPPC64(); |
4738 | int SlotSize = isPPC64 ? 8 : 4; |
4739 | int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset(); |
4740 | int NewRetAddr = MF.getFrameInfo().CreateFixedObject(SlotSize, |
4741 | NewRetAddrLoc, true); |
4742 | EVT VT = isPPC64 ? MVT::i64 : MVT::i32; |
4743 | SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT); |
4744 | Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx, |
4745 | MachinePointerInfo::getFixedStack(MF, NewRetAddr)); |
4746 | } |
4747 | return Chain; |
4748 | } |
4749 | |
4750 | /// CalculateTailCallArgDest - Remember the argument for later processing, and |
4751 | /// calculate the position of the argument. |
4752 | static void |
4753 | CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64, |
4754 | SDValue Arg, int SPDiff, unsigned ArgOffset, |
4755 | SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) { |
4756 | int Offset = ArgOffset + SPDiff; |
4757 | uint32_t OpSize = (Arg.getValueSizeInBits() + 7) / 8; |
4758 | int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true); |
4759 | EVT VT = isPPC64 ? MVT::i64 : MVT::i32; |
4760 | SDValue FIN = DAG.getFrameIndex(FI, VT); |
4761 | TailCallArgumentInfo Info; |
4762 | Info.Arg = Arg; |
4763 | Info.FrameIdxOp = FIN; |
4764 | Info.FrameIdx = FI; |
4765 | TailCallArguments.push_back(Info); |
4766 | } |
4767 | |
4768 | /// EmitTailCallLoadFPAndRetAddr - Emit a load of the return address from its |
4769 | /// stack slot. Returns the chain as result and the loaded return address in |
4770 | /// LROpOut (FPOpOut is currently left untouched). Used when tail calling. |
4771 | SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr( |
4772 | SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut, |
4773 | SDValue &FPOpOut, const SDLoc &dl) const { |
4774 | if (SPDiff) { |
4775 | // Load the LR and FP stack slot for later adjusting. |
4776 | EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32; |
4777 | LROpOut = getReturnAddrFrameIndex(DAG); |
4778 | LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo()); |
4779 | Chain = SDValue(LROpOut.getNode(), 1); |
4780 | } |
4781 | return Chain; |
4782 | } |
4783 | |
4784 | /// CreateCopyOfByValArgument - Make a copy of an aggregate at the address |
4785 | /// specified by "Src" to the address "Dst" of size "Size". Alignment is |
4786 | /// specified by the specific parameter attribute. The copy will be passed as |
4787 | /// a byval function parameter. |
4788 | /// Sometimes what we are copying is the end of a larger object, the part that |
4789 | /// does not fit in registers. |
4790 | static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst, |
4791 | SDValue Chain, ISD::ArgFlagsTy Flags, |
4792 | SelectionDAG &DAG, const SDLoc &dl) { |
4793 | SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32); |
4794 | return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, |
4795 | Flags.getNonZeroByValAlign(), false, false, false, |
4796 | MachinePointerInfo(), MachinePointerInfo()); |
4797 | } |
4798 | |
4799 | /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of |
4800 | /// tail calls. |
4801 | static void LowerMemOpCallTo( |
4802 | SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg, |
4803 | SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64, |
4804 | bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains, |
4805 | SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) { |
4806 | EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); |
4807 | if (!isTailCall) { |
4808 | if (isVector) { |
4809 | SDValue StackPtr; |
4810 | if (isPPC64) |
4811 | StackPtr = DAG.getRegister(PPC::X1, MVT::i64); |
4812 | else |
4813 | StackPtr = DAG.getRegister(PPC::R1, MVT::i32); |
4814 | PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, |
4815 | DAG.getConstant(ArgOffset, dl, PtrVT)); |
4816 | } |
4817 | MemOpChains.push_back( |
4818 | DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo())); |
4819 | // Calculate and remember argument location. |
4820 | } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset, |
4821 | TailCallArguments); |
4822 | } |
4823 | |
4824 | static void |
4825 | PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain, |
4826 | const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp, |
4827 | SDValue FPOp, |
4828 | SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) { |
4829 | // Emit a sequence of copyto/copyfrom virtual registers for arguments that |
4830 | // might overwrite each other in case of tail call optimization. |
4831 | SmallVector<SDValue, 8> MemOpChains2; |
4832 | // Do not flag preceding copytoreg stuff together with the following stuff. |
4833 | InFlag = SDValue(); |
4834 | StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments, |
4835 | MemOpChains2, dl); |
4836 | if (!MemOpChains2.empty()) |
4837 | Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2); |
4838 | |
4839 | // Store the return address to the appropriate stack slot. |
4840 | Chain = EmitTailCallStoreFPAndRetAddr(DAG, Chain, LROp, FPOp, SPDiff, dl); |
4841 | |
4842 | // Emit callseq_end just before tailcall node. |
4843 | Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), |
4844 | DAG.getIntPtrConstant(0, dl, true), InFlag, dl); |
4845 | InFlag = Chain.getValue(1); |
4846 | } |
4847 | |
4848 | // Is this global address that of a function that can be called by name (as |
4849 | // opposed to something that must hold a descriptor for an indirect call)? |
4850 | static bool isFunctionGlobalAddress(SDValue Callee) { |
4851 | if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { |
4852 | if (Callee.getOpcode() == ISD::GlobalTLSAddress || |
4853 | Callee.getOpcode() == ISD::TargetGlobalTLSAddress) |
4854 | return false; |
4855 | |
4856 | return G->getGlobal()->getValueType()->isFunctionTy(); |
4857 | } |
4858 | |
4859 | return false; |
4860 | } |
4861 | |
4862 | SDValue PPCTargetLowering::LowerCallResult( |
4863 | SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, |
4864 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, |
4865 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { |
4866 | SmallVector<CCValAssign, 16> RVLocs; |
4867 | CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, |
4868 | *DAG.getContext()); |
4869 | |
4870 | CCRetInfo.AnalyzeCallResult( |
4871 | Ins, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold) |
4872 | ? RetCC_PPC_Cold |
4873 | : RetCC_PPC); |
4874 | |
4875 | // Copy all of the result registers out of their specified physreg. |
4876 | for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { |
4877 | CCValAssign &VA = RVLocs[i]; |
4878 | assert(VA.isRegLoc() && "Can only return in registers!"); |
4879 | |
4880 | SDValue Val; |
4881 | |
4882 | if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) { |
4883 | SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, |
4884 | InFlag); |
4885 | Chain = Lo.getValue(1); |
4886 | InFlag = Lo.getValue(2); |
4887 | VA = RVLocs[++i]; // skip ahead to next loc |
4888 | SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, |
4889 | InFlag); |
4890 | Chain = Hi.getValue(1); |
4891 | InFlag = Hi.getValue(2); |
4892 | if (!Subtarget.isLittleEndian()) |
4893 | std::swap(Lo, Hi); |
4894 | Val = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, Lo, Hi); |
4895 | } else { |
4896 | Val = DAG.getCopyFromReg(Chain, dl, |
4897 | VA.getLocReg(), VA.getLocVT(), InFlag); |
4898 | Chain = Val.getValue(1); |
4899 | InFlag = Val.getValue(2); |
4900 | } |
4901 | |
4902 | switch (VA.getLocInfo()) { |
4903 | default: llvm_unreachable("Unknown loc info!"); |
4904 | case CCValAssign::Full: break; |
4905 | case CCValAssign::AExt: |
4906 | Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); |
4907 | break; |
4908 | case CCValAssign::ZExt: |
4909 | Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val, |
4910 | DAG.getValueType(VA.getValVT())); |
4911 | Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); |
4912 | break; |
4913 | case CCValAssign::SExt: |
4914 | Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val, |
4915 | DAG.getValueType(VA.getValVT())); |
4916 | Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); |
4917 | break; |
4918 | } |
4919 | |
4920 | InVals.push_back(Val); |
4921 | } |
4922 | |
4923 | return Chain; |
4924 | } |
4925 | |
4926 | static bool isIndirectCall(const SDValue &Callee, SelectionDAG &DAG, |
4927 | const PPCSubtarget &Subtarget, bool isPatchPoint) { |
4928 | // PatchPoint calls are not indirect. |
4929 | if (isPatchPoint) |
4930 | return false; |
4931 | |
4932 | if (isFunctionGlobalAddress(Callee) || isa<ExternalSymbolSDNode>(Callee)) |
4933 | return false; |
4934 | |
4935 | // Darwin and 32-bit ELF can use a BLA. The descriptor-based ABIs cannot |
4936 | // because the immediate function pointer points to a descriptor instead of |
4937 | // a function entry point. The ELFv2 ABI cannot use a BLA because the function |
4938 | // pointer immediate points to the global entry point, while the BLA would |
4939 | // need to jump to the local entry point (see rL211174). |
4940 | if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI() && |
4941 | isBLACompatibleAddress(Callee, DAG)) |
4942 | return false; |
4943 | |
4944 | return true; |
4945 | } |
4946 | |
4947 | // AIX and 64-bit ELF ABIs w/o PCRel require a TOC save/restore around calls. |
4948 | static inline bool isTOCSaveRestoreRequired(const PPCSubtarget &Subtarget) { |
4949 | return Subtarget.isAIXABI() || |
4950 | (Subtarget.is64BitELFABI() && !Subtarget.isUsingPCRelativeCalls()); |
4951 | } |
4952 | |
4953 | static unsigned getCallOpcode(PPCTargetLowering::CallFlags CFlags, |
4954 | const Function &Caller, |
4955 | const SDValue &Callee, |
4956 | const PPCSubtarget &Subtarget, |
4957 | const TargetMachine &TM) { |
4958 | if (CFlags.IsTailCall) |
4959 | return PPCISD::TC_RETURN; |
4960 | |
4961 | // This is a call through a function pointer. |
4962 | if (CFlags.IsIndirect) { |
4963 | // AIX and the 64-bit ELF ABIs need to maintain the TOC pointer across |
4964 | // indirect calls. The save of the caller's TOC pointer to the stack will be |
4965 | // inserted into the DAG as part of call lowering. The restore of the TOC |
4966 | // pointer is modeled by using a pseudo instruction for the call opcode that |
4967 | // represents the 2-instruction sequence of an indirect branch and link, |
4968 | // immediately followed by a load of the TOC pointer from the stack save |
4969 | // slot into gpr2. For the 64-bit ELFv2 ABI with PCRel, do not restore the |
4970 | // TOC as it is not saved or used. |
4971 | return isTOCSaveRestoreRequired(Subtarget) ? PPCISD::BCTRL_LOAD_TOC |
4972 | : PPCISD::BCTRL; |
4973 | } |
4974 | |
4975 | if (Subtarget.isUsingPCRelativeCalls()) { |
4976 | assert(Subtarget.is64BitELFABI() && "PC Relative is only on ELF ABI."); |
4977 | return PPCISD::CALL_NOTOC; |
4978 | } |
4979 | |
4980 | // The ABIs that maintain a TOC pointer across calls need to have a nop |
4981 | // immediately following the call instruction if the caller and callee may |
4982 | // have different TOC bases. At link time, if the linker determines the calls |
4983 | // may not share a TOC base, the call is redirected to a trampoline inserted |
4984 | // by the linker. The trampoline will (among other things) save the caller's |
4985 | // TOC pointer at an ABI-designated offset in the linkage area, and the linker |
4986 | // will rewrite the nop to be a load of the TOC pointer from the linkage area |
4987 | // into gpr2. |
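| //
| // Conceptually (illustrative; the TOC save offset is ABI-specific, e.g. 24 |
| // bytes on ELFv2 and 40 on ELFv1), a cross-TOC call site becomes: |
| //   bl callee |
| //   nop            ; linker may rewrite to: ld r2, <TOC save offset>(r1) |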
4988 | if (Subtarget.isAIXABI() || Subtarget.is64BitELFABI()) |
4989 | return callsShareTOCBase(&Caller, Callee, TM) ? PPCISD::CALL |
4990 | : PPCISD::CALL_NOP; |
4991 | |
4992 | return PPCISD::CALL; |
4993 | } |
4994 | |
4995 | static SDValue transformCallee(const SDValue &Callee, SelectionDAG &DAG, |
4996 | const SDLoc &dl, const PPCSubtarget &Subtarget) { |
4997 | if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI()) |
4998 | if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) |
4999 | return SDValue(Dest, 0); |
5000 | |
5001 | // Returns true if the callee is local, and false otherwise. |
5002 | auto isLocalCallee = [&]() { |
5003 | const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee); |
5004 | const Module *Mod = DAG.getMachineFunction().getFunction().getParent(); |
5005 | const GlobalValue *GV = G ? G->getGlobal() : nullptr; |
5006 | |
5007 | return DAG.getTarget().shouldAssumeDSOLocal(*Mod, GV) && |
5008 | !dyn_cast_or_null<GlobalIFunc>(GV); |
5009 | }; |
5010 | |
5011 | // The PLT is only used in 32-bit ELF PIC mode. Attempting to use the PLT in |
5012 | // a static relocation model causes some versions of GNU LD (2.17.50, at |
5013 | // least) to force BSS-PLT, instead of secure-PLT, even if all objects are |
5014 | // built with secure-PLT. |
5015 | bool UsePlt = |
5016 | Subtarget.is32BitELFABI() && !isLocalCallee() && |
5017 | Subtarget.getTargetMachine().getRelocationModel() == Reloc::PIC_; |
5018 | |
5019 | const auto getAIXFuncEntryPointSymbolSDNode = [&](const GlobalValue *GV) { |
5020 | const TargetMachine &TM = Subtarget.getTargetMachine(); |
5021 | const TargetLoweringObjectFile *TLOF = TM.getObjFileLowering(); |
5022 | MCSymbolXCOFF *S = |
5023 | cast<MCSymbolXCOFF>(TLOF->getFunctionEntryPointSymbol(GV, TM)); |
5024 | |
5025 | MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); |
5026 | return DAG.getMCSymbol(S, PtrVT); |
5027 | }; |
5028 | |
5029 | if (isFunctionGlobalAddress(Callee)) { |
5030 | const GlobalValue *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal(); |
5031 | |
5032 | if (Subtarget.isAIXABI()) { |
5033 | assert(!isa<GlobalIFunc>(GV) && "IFunc is not supported on AIX."); |
5034 | return getAIXFuncEntryPointSymbolSDNode(GV); |
5035 | } |
5036 | return DAG.getTargetGlobalAddress(GV, dl, Callee.getValueType(), 0, |
5037 | UsePlt ? PPCII::MO_PLT : 0); |
5038 | } |
5039 | |
5040 | if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { |
5041 | const char *SymName = S->getSymbol(); |
5042 | if (Subtarget.isAIXABI()) { |
5043 | // If there exists a user-declared function whose name is the same as the |
5044 | // ExternalSymbol's, then we pick up the user-declared version. |
5045 | const Module *Mod = DAG.getMachineFunction().getFunction().getParent(); |
5046 | if (const Function *F = |
5047 | dyn_cast_or_null<Function>(Mod->getNamedValue(SymName))) |
5048 | return getAIXFuncEntryPointSymbolSDNode(F); |
5049 | |
5050 | // On AIX, direct function calls reference the symbol for the function's |
5051 | // entry point, which is named by prepending a "." before the function's |
5052 | // C-linkage name. A Qualname is returned here because an external |
5053 | // function entry point is a csect with XTY_ER property. |
5054 | const auto getExternalFunctionEntryPointSymbol = [&](StringRef SymName) { |
5055 | auto &Context = DAG.getMachineFunction().getMMI().getContext(); |
5056 | MCSectionXCOFF *Sec = Context.getXCOFFSection( |
5057 | (Twine(".") + Twine(SymName)).str(), SectionKind::getMetadata(), |
5058 | XCOFF::CsectProperties(XCOFF::XMC_PR, XCOFF::XTY_ER)); |
5059 | return Sec->getQualNameSymbol(); |
5060 | }; |
5061 | |
5062 | SymName = getExternalFunctionEntryPointSymbol(SymName)->getName().data(); |
5063 | } |
5064 | return DAG.getTargetExternalSymbol(SymName, Callee.getValueType(), |
5065 | UsePlt ? PPCII::MO_PLT : 0); |
5066 | } |
5067 | |
5068 | // No transformation needed. |
5069 | assert(Callee.getNode() && "What no callee?"); |
5070 | return Callee; |
5071 | } |
5072 | |
5073 | static SDValue getOutputChainFromCallSeq(SDValue CallSeqStart) { |
5074 | assert(CallSeqStart.getOpcode() == ISD::CALLSEQ_START && |
5075 | "Expected a CALLSEQ_STARTSDNode."); |
5076 | |
5077 | // The last operand is the chain, except when the node has glue. If the node |
5078 | // has glue, then the last operand is the glue, and the chain is the second |
5079 | // last operand. |
5080 | SDValue LastValue = CallSeqStart.getValue(CallSeqStart->getNumValues() - 1); |
5081 | if (LastValue.getValueType() != MVT::Glue) |
5082 | return LastValue; |
5083 | |
5084 | return CallSeqStart.getValue(CallSeqStart->getNumValues() - 2); |
5085 | } |
5086 | |
5087 | // Creates the node that moves a function's address into the count register |
5088 | // to prepare for an indirect call instruction. |
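| //
| // Conceptually, the machine code eventually emitted for the indirect call |
| // is the pair: |
| //   mtctr rX       ; move the callee's address into the count register |
| //   bctrl          ; branch to CTR and link |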
5089 | static void prepareIndirectCall(SelectionDAG &DAG, SDValue &Callee, |
5090 | SDValue &Glue, SDValue &Chain, |
5091 | const SDLoc &dl) { |
5092 | SDValue MTCTROps[] = {Chain, Callee, Glue}; |
5093 | EVT ReturnTypes[] = {MVT::Other, MVT::Glue}; |
5094 | Chain = DAG.getNode(PPCISD::MTCTR, dl, makeArrayRef(ReturnTypes, 2), |
5095 | makeArrayRef(MTCTROps, Glue.getNode() ? 3 : 2)); |
5096 | // The glue is the second value produced. |
5097 | Glue = Chain.getValue(1); |
5098 | } |
5099 | |
5100 | static void prepareDescriptorIndirectCall(SelectionDAG &DAG, SDValue &Callee, |
5101 | SDValue &Glue, SDValue &Chain, |
5102 | SDValue CallSeqStart, |
5103 | const CallBase *CB, const SDLoc &dl, |
5104 | bool hasNest, |
5105 | const PPCSubtarget &Subtarget) { |
5106 | // Function pointers in the 64-bit SVR4 ABI do not point to the function |
5107 | // entry point, but to the function descriptor (the function entry point |
5108 | // address is part of the function descriptor though). |
5109 | // The function descriptor is a three doubleword structure with the |
5110 | // following fields: function entry point, TOC base address and |
5111 | // environment pointer. |
5112 | // Thus for a call through a function pointer, the following actions need |
5113 | // to be performed: |
5114 | // 1. Save the TOC of the caller in the TOC save area of its stack |
5115 | // frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()). |
5116 | // 2. Load the address of the function entry point from the function |
5117 | // descriptor. |
5118 | // 3. Load the TOC of the callee from the function descriptor into r2. |
5119 | // 4. Load the environment pointer from the function descriptor into |
5120 | // r11. |
5121 | // 5. Branch to the function entry point address. |
5122 | // 6. On return of the callee, the TOC of the caller needs to be |
5123 | // restored (this is done in FinishCall()). |
5124 | // |
5125 | // The loads are scheduled at the beginning of the call sequence, and the |
5126 | // register copies are flagged together to ensure that no other |
5127 | // operations can be scheduled in between. E.g. without flagging the |
5128 | // copies together, a TOC access in the caller could be scheduled between |
5129 | // the assignment of the callee TOC and the branch to the callee, which leads |
5130 | // to incorrect code. |
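| //
| // Illustrative 64-bit descriptor layout (offsets come from the Subtarget |
| // queries below): |
| //   [Callee + 0]   function entry point |
| //   [Callee + 8]   TOC base address     -> copied into the TOC register |
| //   [Callee + 16]  environment pointer  -> copied into r11 (unless nest) |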
5131 | |
5132 | // Start by loading the function address from the descriptor. |
5133 | SDValue LDChain = getOutputChainFromCallSeq(CallSeqStart); |
5134 | auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors() |
5135 | ? (MachineMemOperand::MODereferenceable | |
5136 | MachineMemOperand::MOInvariant) |
5137 | : MachineMemOperand::MONone; |
5138 | |
5139 | MachinePointerInfo MPI(CB ? CB->getCalledOperand() : nullptr); |
5140 | |
5141 | // Registers used in building the DAG. |
5142 | const MCRegister EnvPtrReg = Subtarget.getEnvironmentPointerRegister(); |
5143 | const MCRegister TOCReg = Subtarget.getTOCPointerRegister(); |
5144 | |
5145 | // Offsets of descriptor members. |
5146 | const unsigned TOCAnchorOffset = Subtarget.descriptorTOCAnchorOffset(); |
5147 | const unsigned EnvPtrOffset = Subtarget.descriptorEnvironmentPointerOffset(); |
5148 | |
5149 | const MVT RegVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32; |
5150 | const unsigned Alignment = Subtarget.isPPC64() ? 8 : 4; |
5151 | |
5152 | // One load for the function's entry point address. |
5153 | SDValue LoadFuncPtr = DAG.getLoad(RegVT, dl, LDChain, Callee, MPI, |
5154 | Alignment, MMOFlags); |
5155 | |
5156 | // One for loading the TOC anchor for the module that contains the called |
5157 | // function. |
5158 | SDValue TOCOff = DAG.getIntPtrConstant(TOCAnchorOffset, dl); |
5159 | SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, Callee, TOCOff); |
5160 | SDValue TOCPtr = |
5161 | DAG.getLoad(RegVT, dl, LDChain, AddTOC, |
5162 | MPI.getWithOffset(TOCAnchorOffset), Alignment, MMOFlags); |
5163 | |
5164 | // One for loading the environment pointer. |
5165 | SDValue PtrOff = DAG.getIntPtrConstant(EnvPtrOffset, dl); |
5166 | SDValue AddPtr = DAG.getNode(ISD::ADD, dl, RegVT, Callee, PtrOff); |
5167 | SDValue LoadEnvPtr = |
5168 | DAG.getLoad(RegVT, dl, LDChain, AddPtr, |
5169 | MPI.getWithOffset(EnvPtrOffset), Alignment, MMOFlags); |
5170 | |
5172 | // Then copy the newly loaded TOC anchor to the TOC pointer. |
5173 | SDValue TOCVal = DAG.getCopyToReg(Chain, dl, TOCReg, TOCPtr, Glue); |
5174 | Chain = TOCVal.getValue(0); |
5175 | Glue = TOCVal.getValue(1); |
5176 | |
5177 | // If the function call has an explicit 'nest' parameter, it takes the |
5178 | // place of the environment pointer. |
5179 | assert((!hasNest || !Subtarget.isAIXABI()) && |
5180 | "Nest parameter is not supported on AIX."); |
5181 | if (!hasNest) { |
5182 | SDValue EnvVal = DAG.getCopyToReg(Chain, dl, EnvPtrReg, LoadEnvPtr, Glue); |
5183 | Chain = EnvVal.getValue(0); |
5184 | Glue = EnvVal.getValue(1); |
5185 | } |
5186 | |
5187 | // The rest of the indirect call sequence is the same as the non-descriptor |
5188 | // DAG. |
5189 | prepareIndirectCall(DAG, LoadFuncPtr, Glue, Chain, dl); |
5190 | } |
5191 | |
5192 | static void |
5193 | buildCallOperands(SmallVectorImpl<SDValue> &Ops, |
5194 | PPCTargetLowering::CallFlags CFlags, const SDLoc &dl, |
5195 | SelectionDAG &DAG, |
5196 | SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, |
5197 | SDValue Glue, SDValue Chain, SDValue &Callee, int SPDiff, |
5198 | const PPCSubtarget &Subtarget) { |
5199 | const bool IsPPC64 = Subtarget.isPPC64(); |
5200 | // MVT for a general purpose register. |
5201 | const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32; |
5202 | |
5203 | // First operand is always the chain. |
5204 | Ops.push_back(Chain); |
5205 | |
5206 | // If it's a direct call pass the callee as the second operand. |
5207 | if (!CFlags.IsIndirect) |
5208 | Ops.push_back(Callee); |
5209 | else { |
5210 | assert(!CFlags.IsPatchPoint && "Patch point calls are not indirect."); |
5211 | |
5212 | // For the TOC based ABIs, we have saved the TOC pointer to the linkage area |
5213 | // on the stack (this would have been done in `LowerCall_64SVR4` or |
5214 | // `LowerCall_AIX`). The call instruction is a pseudo instruction that |
5215 | // represents both the indirect branch and a load that restores the TOC |
5216 | // pointer from the linkage area. The operand for the TOC restore is an add |
5217 | // of the TOC save offset to the stack pointer. This must be the second |
5218 | // operand: after the chain input but before any other variadic arguments. |
5219 | // For 64-bit ELFv2 ABI with PCRel, do not restore the TOC as it is not |
5220 | // saved or used. |
5221 | if (isTOCSaveRestoreRequired(Subtarget)) { |
5222 | const MCRegister StackPtrReg = Subtarget.getStackPointerRegister(); |
5223 | |
5224 | SDValue StackPtr = DAG.getRegister(StackPtrReg, RegVT); |
5225 | unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset(); |
5226 | SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl); |
5227 | SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, StackPtr, TOCOff); |
5228 | Ops.push_back(AddTOC); |
5229 | } |
5230 | |
5231 | // Add the register used for the environment pointer. |
5232 | if (Subtarget.usesFunctionDescriptors() && !CFlags.HasNest) |
5233 | Ops.push_back(DAG.getRegister(Subtarget.getEnvironmentPointerRegister(), |
5234 | RegVT)); |
5235 | |
5237 | // Add CTR register as callee so a bctr can be emitted later. |
5238 | if (CFlags.IsTailCall) |
5239 | Ops.push_back(DAG.getRegister(IsPPC64 ? PPC::CTR8 : PPC::CTR, RegVT)); |
5240 | } |
5241 | |
5242 | // If this is a tail call add stack pointer delta. |
5243 | if (CFlags.IsTailCall) |
5244 | Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32)); |
5245 | |
5246 | // Add argument registers to the end of the list so that they are known live |
5247 | // into the call. |
5248 | for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) |
5249 | Ops.push_back(DAG.getRegister(RegsToPass[i].first, |
5250 | RegsToPass[i].second.getValueType())); |
5251 | |
5252 | // We cannot add R2/X2 as an operand here for PATCHPOINT, because there is |
5253 | // no way to mark dependencies as implicit here. |
5254 | // We will add the R2/X2 dependency in EmitInstrWithCustomInserter. |
5255 | if ((Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) && |
5256 | !CFlags.IsPatchPoint && !Subtarget.isUsingPCRelativeCalls()) |
5257 | Ops.push_back(DAG.getRegister(Subtarget.getTOCPointerRegister(), RegVT)); |
5258 | |
5259 | // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls |
5260 | if (CFlags.IsVarArg && Subtarget.is32BitELFABI()) |
5261 | Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32)); |
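| // (CR1EQ is bit 6 of the condition register; on 32-bit SVR4 it is assumed |
| // to tell a vararg callee whether floating-point arguments were passed in |
| // registers.) |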
5262 | |
5263 | // Add a register mask operand representing the call-preserved registers. |
5264 | const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); |
5265 | const uint32_t *Mask = |
5266 | TRI->getCallPreservedMask(DAG.getMachineFunction(), CFlags.CallConv); |
5267 | assert(Mask && "Missing call preserved mask for calling convention"); |
5268 | Ops.push_back(DAG.getRegisterMask(Mask)); |
5269 | |
5270 | // If the glue is valid, it is the last operand. |
5271 | if (Glue.getNode()) |
5272 | Ops.push_back(Glue); |
5273 | } |
5274 | |
5275 | SDValue PPCTargetLowering::FinishCall( |
5276 | CallFlags CFlags, const SDLoc &dl, SelectionDAG &DAG, |
5277 | SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue Glue, |
5278 | SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff, |
5279 | unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins, |
5280 | SmallVectorImpl<SDValue> &InVals, const CallBase *CB) const { |
5281 | |
5282 | if ((Subtarget.is64BitELFABI() && !Subtarget.isUsingPCRelativeCalls()) || |
5283 | Subtarget.isAIXABI()) |
5284 | setUsesTOCBasePtr(DAG); |
5285 | |
5286 | unsigned CallOpc = |
5287 | getCallOpcode(CFlags, DAG.getMachineFunction().getFunction(), Callee, |
5288 | Subtarget, DAG.getTarget()); |
5289 | |
5290 | if (!CFlags.IsIndirect) |
5291 | Callee = transformCallee(Callee, DAG, dl, Subtarget); |
5292 | else if (Subtarget.usesFunctionDescriptors()) |
5293 | prepareDescriptorIndirectCall(DAG, Callee, Glue, Chain, CallSeqStart, CB, |
5294 | dl, CFlags.HasNest, Subtarget); |
5295 | else |
5296 | prepareIndirectCall(DAG, Callee, Glue, Chain, dl); |
5297 | |
5298 | // Build the operand list for the call instruction. |
5299 | SmallVector<SDValue, 8> Ops; |
5300 | buildCallOperands(Ops, CFlags, dl, DAG, RegsToPass, Glue, Chain, Callee, |
5301 | SPDiff, Subtarget); |
5302 | |
5303 | // Emit tail call. |
5304 | if (CFlags.IsTailCall) { |
5305 | // Indirect tail calls, when using PC Relative calls, do not have the same |
5306 | // constraints. |
5307 | assert(((Callee.getOpcode() == ISD::Register && |
5308 | cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) || |
5309 | Callee.getOpcode() == ISD::TargetExternalSymbol || |
5310 | Callee.getOpcode() == ISD::TargetGlobalAddress || |
5311 | isa<ConstantSDNode>(Callee) || |
5312 | (CFlags.IsIndirect && Subtarget.isUsingPCRelativeCalls())) && |
5313 | "Expecting a global address, external symbol, absolute value, " |
5314 | "register or an indirect tail call when PC Relative calls are " |
5315 | "used."); |
5316 | // PC Relative calls also use TC_RETURN as the way to mark tail calls. |
5317 | assert(CallOpc == PPCISD::TC_RETURN && |
5318 | "Unexpected call opcode for a tail call."); |
5319 | DAG.getMachineFunction().getFrameInfo().setHasTailCall(); |
5320 | return DAG.getNode(CallOpc, dl, MVT::Other, Ops); |
5321 | } |
5322 | |
5323 | std::array<EVT, 2> ReturnTypes = {{MVT::Other, MVT::Glue}}; |
5324 | Chain = DAG.getNode(CallOpc, dl, ReturnTypes, Ops); |
5325 | DAG.addNoMergeSiteInfo(Chain.getNode(), CFlags.NoMerge); |
5326 | Glue = Chain.getValue(1); |
5327 | |
5328 | // When performing tail call optimization the callee pops its arguments off |
5329 | // the stack. Account for this here so these bytes can be pushed back on in |
5330 | // PPCFrameLowering::eliminateCallFramePseudoInstr. |
5331 | int BytesCalleePops = (CFlags.CallConv == CallingConv::Fast && |
5332 | getTargetMachine().Options.GuaranteedTailCallOpt) |
5333 | ? NumBytes |
5334 | : 0; |
5335 | |
5336 | Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), |
5337 | DAG.getIntPtrConstant(BytesCalleePops, dl, true), |
5338 | Glue, dl); |
5339 | Glue = Chain.getValue(1); |
5340 | |
5341 | return LowerCallResult(Chain, Glue, CFlags.CallConv, CFlags.IsVarArg, Ins, dl, |
5342 | DAG, InVals); |
5343 | } |
5344 | |
5345 | SDValue |
5346 | PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, |
5347 | SmallVectorImpl<SDValue> &InVals) const { |
5348 | SelectionDAG &DAG = CLI.DAG; |
5349 | SDLoc &dl = CLI.DL; |
5350 | SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; |
5351 | SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; |
5352 | SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; |
5353 | SDValue Chain = CLI.Chain; |
5354 | SDValue Callee = CLI.Callee; |
5355 | bool &isTailCall = CLI.IsTailCall; |
5356 | CallingConv::ID CallConv = CLI.CallConv; |
5357 | bool isVarArg = CLI.IsVarArg; |
5358 | bool isPatchPoint = CLI.IsPatchPoint; |
5359 | const CallBase *CB = CLI.CB; |
5360 | |
5361 | if (isTailCall) { |
5362 | if (Subtarget.useLongCalls() && !(CB && CB->isMustTailCall())) |
5363 | isTailCall = false; |
5364 | else if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) |
5365 | isTailCall = IsEligibleForTailCallOptimization_64SVR4( |
5366 | Callee, CallConv, CB, isVarArg, Outs, Ins, DAG); |
5367 | else |
5368 | isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg, |
5369 | Ins, DAG); |
5370 | if (isTailCall) { |
5371 | ++NumTailCalls; |
5372 | if (!getTargetMachine().Options.GuaranteedTailCallOpt) |
5373 | ++NumSiblingCalls; |
5374 | |
5375 | // PC Relative calls no longer guarantee that the callee is a Global |
5376 | // Address Node. The callee could be an indirect tail call in which |
5377 | // case the SDValue for the callee could be a load (to load the address |
5378 | // of a function pointer) or it may be a register copy (to move the |
5379 | // address of the callee from a function parameter into a virtual |
5380 | // register). It may also be an ExternalSymbolSDNode (e.g. memcpy). |
5381 | assert((Subtarget.isUsingPCRelativeCalls() || |
5382 | isa<GlobalAddressSDNode>(Callee)) && |
5383 | "Callee should be an llvm::Function object."); |
5384 | |
5385 | LLVM_DEBUG(dbgs() << "TCO caller: " << DAG.getMachineFunction().getName() |
5386 | << "\nTCO callee: "); |
5387 | LLVM_DEBUG(Callee.dump()); |
5388 | } |
5389 | } |
5390 | |
5391 | if (!isTailCall && CB && CB->isMustTailCall()) |
5392 | report_fatal_error("failed to perform tail call elimination on a call " |
5393 | "site marked musttail"); |
5394 | |
5395 | // When long calls (i.e. indirect calls) are always used, calls are always |
5396 | // made via function pointer. If we have a function name, first translate it |
5397 | // into a pointer. |
5398 | if (Subtarget.useLongCalls() && isa<GlobalAddressSDNode>(Callee) && |
5399 | !isTailCall) |
5400 | Callee = LowerGlobalAddress(Callee, DAG); |
5401 | |
5402 | CallFlags CFlags( |
5403 | CallConv, isTailCall, isVarArg, isPatchPoint, |
5404 | isIndirectCall(Callee, DAG, Subtarget, isPatchPoint), |
5405 | // hasNest |
5406 | Subtarget.is64BitELFABI() && |
5407 | any_of(Outs, [](ISD::OutputArg Arg) { return Arg.Flags.isNest(); }), |
5408 | CLI.NoMerge); |
5409 | |
5410 | if (Subtarget.isAIXABI()) |
5411 | return LowerCall_AIX(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG, |
5412 | InVals, CB); |
5413 | |
5414 | assert(Subtarget.isSVR4ABI()); |
5415 | if (Subtarget.isPPC64()) |
5416 | return LowerCall_64SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG, |
5417 | InVals, CB); |
5418 | return LowerCall_32SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG, |
5419 | InVals, CB); |
5420 | } |
5421 | |
5422 | SDValue PPCTargetLowering::LowerCall_32SVR4( |
5423 | SDValue Chain, SDValue Callee, CallFlags CFlags, |
5424 | const SmallVectorImpl<ISD::OutputArg> &Outs, |
5425 | const SmallVectorImpl<SDValue> &OutVals, |
5426 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, |
5427 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, |
5428 | const CallBase *CB) const { |
5429 | // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description |
5430 | // of the 32-bit SVR4 ABI stack frame layout. |
5431 | |
5432 | const CallingConv::ID CallConv = CFlags.CallConv; |
5433 | const bool IsVarArg = CFlags.IsVarArg; |
5434 | const bool IsTailCall = CFlags.IsTailCall; |
5435 | |
5436 | assert((CallConv == CallingConv::C || |
5437 | CallConv == CallingConv::Cold || |
5438 | CallConv == CallingConv::Fast) && "Unknown calling convention!"); |
5439 | |
5440 | const Align PtrAlign(4); |
5441 | |
5442 | MachineFunction &MF = DAG.getMachineFunction(); |
5443 | |
5444 | // Mark this function as potentially containing a tail call. As a |
5445 | // consequence, the frame pointer will be used for dynamic allocation and for |
5446 | // restoring the caller's stack pointer in this function's epilogue. This is |
5447 | // done because, by tail calling, the called function might overwrite the |
5448 | // value in this function's (MF) stack pointer stack slot 0(SP). |
5449 | if (getTargetMachine().Options.GuaranteedTailCallOpt && |
5450 | CallConv == CallingConv::Fast) |
5451 | MF.getInfo<PPCFunctionInfo>()->setHasFastCall(); |
5452 | |
5453 | // Count how many bytes are to be pushed on the stack, including the linkage |
5454 | // area, parameter list area and the part of the local variable space which |
5455 | // contains copies of aggregates which are passed by value. |
5456 | |
5457 | // Assign locations to all of the outgoing arguments. |
5458 | SmallVector<CCValAssign, 16> ArgLocs; |
5459 | PPCCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext()); |
5460 | |
5461 | // Reserve space for the linkage area on the stack. |
5462 | CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(), |
5463 | PtrAlign); |
5464 | if (useSoftFloat()) |
5465 | CCInfo.PreAnalyzeCallOperands(Outs); |
5466 | |
5467 | if (IsVarArg) { |
5468 | // Handle fixed and variable vector arguments differently. |
5469 | // Fixed vector arguments go into registers as long as registers are |
5470 | // available. Variable vector arguments always go into memory. |
5471 | unsigned NumArgs = Outs.size(); |
5472 | |
5473 | for (unsigned i = 0; i != NumArgs; ++i) { |
5474 | MVT ArgVT = Outs[i].VT; |
5475 | ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; |
5476 | bool Result; |
5477 | |
5478 | if (Outs[i].IsFixed) { |
5479 | Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, |
5480 | CCInfo); |
5481 | } else { |
5482 | Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full, |
5483 | ArgFlags, CCInfo); |
5484 | } |
5485 | |
5486 | if (Result) { |
5487 | #ifndef NDEBUG |
5488 | errs() << "Call operand #" << i << " has unhandled type " |
5489 | << EVT(ArgVT).getEVTString() << "\n"; |
5490 | #endif |
5491 | llvm_unreachable(nullptr); |
5492 | } |
5493 | } |
5494 | } else { |
5495 | // All arguments are treated the same. |
5496 | CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4); |
5497 | } |
5498 | CCInfo.clearWasPPCF128(); |
5499 | |
5500 | // Assign locations to all of the outgoing aggregate by value arguments. |
5501 | SmallVector<CCValAssign, 16> ByValArgLocs; |
5502 | CCState CCByValInfo(CallConv, IsVarArg, MF, ByValArgLocs, *DAG.getContext()); |
5503 | |
5504 | // Reserve stack space for the allocations in CCInfo. |
5505 | CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrAlign); |
5506 | |
5507 | CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal); |
5508 | |
5509 | // Size of the linkage area, parameter list area, and the part of the local |
5510 | // variable space where copies of aggregates which are passed by value are |
5511 | // stored. |
5512 | unsigned NumBytes = CCByValInfo.getNextStackOffset(); |
5513 | |
5514 | // Calculate by how many bytes the stack has to be adjusted in case of tail |
5515 | // call optimization. |
5516 | int SPDiff = CalculateTailCallSPDiff(DAG, IsTailCall, NumBytes); |
5517 | |
5518 | // Adjust the stack pointer for the new arguments... |
5519 | // These operations are automatically eliminated by the prolog/epilog pass |
5520 | Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl); |
5521 | SDValue CallSeqStart = Chain; |
5522 | |
5523 | // Load the return address and frame pointer so they can be moved somewhere |
5524 | // else later. |
5525 | SDValue LROp, FPOp; |
5526 | Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl); |
5527 | |
5528 | // Set up a copy of the stack pointer for use loading and storing any |
5529 | // arguments that may not fit in the registers available for argument |
5530 | // passing. |
5531 | SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32); |
5532 | |
5533 | SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; |
5534 | SmallVector<TailCallArgumentInfo, 8> TailCallArguments; |
5535 | SmallVector<SDValue, 8> MemOpChains; |
5536 | |
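| // Tracks whether any floating-point argument was assigned to a register; |
| // on 32-bit SVR4 this is assumed to feed the setting of CR bit 6 for |
| // vararg callees later in call lowering. |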
5537 | bool seenFloatArg = false; |
5538 | // Walk the register/memloc assignments, inserting copies/loads. |
5539 | // i - Tracks the index into the list of registers allocated for the call |
5540 | // RealArgIdx - Tracks the index into the list of actual function arguments |
5541 | // j - Tracks the index into the list of byval arguments |
5542 | for (unsigned i = 0, RealArgIdx = 0, j = 0, e = ArgLocs.size(); |
5543 | i != e; |
5544 | ++i, ++RealArgIdx) { |
5545 | CCValAssign &VA = ArgLocs[i]; |
5546 | SDValue Arg = OutVals[RealArgIdx]; |
5547 | ISD::ArgFlagsTy Flags = Outs[RealArgIdx].Flags; |
5548 | |
5549 | if (Flags.isByVal()) { |
5550 | // Argument is an aggregate which is passed by value, thus we need to |
5551 | // create a copy of it in the local variable space of the current stack |
5552 | // frame (which is the stack frame of the caller) and pass the address of |
5553 | // this copy to the callee. |
5554 | assert((j < ByValArgLocs.size()) && "Index out of bounds!"); |
5555 | CCValAssign &ByValVA = ByValArgLocs[j++]; |
5556 | assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!"); |
5557 | |
5558 | // Memory reserved in the local variable space of the caller's stack frame. |
5559 | unsigned LocMemOffset = ByValVA.getLocMemOffset(); |
5560 | |
5561 | SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl); |
5562 | PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()), |
5563 | StackPtr, PtrOff); |
5564 | |
5565 | // Create a copy of the argument in the local area of the current |
5566 | // stack frame. |
5567 | SDValue MemcpyCall = |
5568 | CreateCopyOfByValArgument(Arg, PtrOff, |
5569 | CallSeqStart.getNode()->getOperand(0), |
5570 | Flags, DAG, dl); |
5571 | |
5572 | // This must go outside the CALLSEQ_START..END. |
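     | // (Call sequences must not nest, and the byval copy may itself be
     | // lowered to a memcpy libcall, so the copy is chained in above the
     | // existing CALLSEQ_START and the node is recreated on top of it.)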
5573 | SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, NumBytes, 0, |
5574 | SDLoc(MemcpyCall)); |
5575 | DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), |
5576 | NewCallSeqStart.getNode()); |
5577 | Chain = CallSeqStart = NewCallSeqStart; |
5578 | |
5579 | // Pass the address of the aggregate copy on the stack either in a |
5580 | // physical register or in the parameter list area of the current stack |
5581 | // frame to the callee. |
5582 | Arg = PtrOff; |
5583 | } |
5584 | |
5585 | // When useCRBits() is true, there can be i1 arguments. |
5586 | // This is because getRegisterType(MVT::i1) => MVT::i1,
5587 | // and for other integer types getRegisterType() => MVT::i32. |
5588 | // Extend i1 and ensure callee will get i32. |
5589 | if (Arg.getValueType() == MVT::i1) |
5590 | Arg = DAG.getNode(Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, |
5591 | dl, MVT::i32, Arg); |
5592 | |
5593 | if (VA.isRegLoc()) { |
5594 | seenFloatArg |= VA.getLocVT().isFloatingPoint(); |
5595 | // Put argument in a physical register. |
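     | // With SPE, an f64 lives in a single 64-bit SPE register, but the
     | // 32-bit calling convention assigns it two GPRs, so extract the two
     | // i32 halves (low word first on little-endian) and pass each half in
     | // its own register; the second register is the next ArgLocs entry,
     | // hence the extra ++i.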
5596 | if (Subtarget.hasSPE() && Arg.getValueType() == MVT::f64) { |
5597 | bool IsLE = Subtarget.isLittleEndian(); |
5598 | SDValue SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg, |
5599 | DAG.getIntPtrConstant(IsLE ? 0 : 1, dl)); |
5600 | RegsToPass.push_back(std::make_pair(VA.getLocReg(), SVal.getValue(0))); |
5601 | SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg, |
5602 | DAG.getIntPtrConstant(IsLE ? 1 : 0, dl)); |
5603 | RegsToPass.push_back(std::make_pair(ArgLocs[++i].getLocReg(), |
5604 | SVal.getValue(0))); |
5605 | } else |
5606 | RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); |
5607 | } else { |
5608 | // Put argument in the parameter list area of the current stack frame. |
5609 | assert(VA.isMemLoc());
5610 | unsigned LocMemOffset = VA.getLocMemOffset(); |
5611 | |
5612 | if (!IsTailCall) { |
5613 | SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl); |
5614 | PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()), |
5615 | StackPtr, PtrOff); |
5616 | |
5617 | MemOpChains.push_back( |
5618 | DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo())); |
5619 | } else { |
5620 | // Calculate and remember argument location. |
5621 | CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset, |
5622 | TailCallArguments); |
5623 | } |
5624 | } |
5625 | } |
5626 | |
5627 | if (!MemOpChains.empty()) |
5628 | Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); |
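     | // (TokenFactor merges the independent store chains so that everything
     | // after this point depends on all of the argument stores, without
     | // imposing an order among the stores themselves.)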
5629 | |
5630 | // Build a sequence of copy-to-reg nodes chained together with token chain |
5631 | // and flag operands which copy the outgoing args into the appropriate regs. |
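     | // The glue (InFlag) ties each copy to the next and ultimately to the
     | // call node, so the scheduler cannot separate them and let anything
     | // clobber the argument registers in between.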
5632 | SDValue InFlag; |
5633 | for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { |
5634 | Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, |
5635 | RegsToPass[i].second, InFlag); |
5636 | InFlag = Chain.getValue(1); |
5637 | } |
5638 | |
5639 | // Set CR bit 6 to true if this is a vararg call with floating args passed in |
5640 | // registers. |
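     | // (The SVR4 ABI uses CR bit 6 to tell a varargs callee whether any FP
     | // arguments arrived in registers, i.e. whether va_start needs to save
     | // the FPR argument registers.)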
5641 | if (IsVarArg) { |
5642 | SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue); |
5643 | SDValue Ops[] = { Chain, InFlag }; |
5644 | |
5645 | Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET, |
5646 | dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1)); |
5647 | |
5648 | InFlag = Chain.getValue(1); |
5649 | } |
5650 | |
5651 | if (IsTailCall) |
5652 | PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp, |
5653 | TailCallArguments); |
5654 | |
5655 | return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart, |
5656 | Callee, SPDiff, NumBytes, Ins, InVals, CB); |
5657 | } |
5658 | |
5659 | // Copy an argument into memory, being careful to do this outside the |
5660 | // call sequence for the call to which the argument belongs. |
5661 | SDValue PPCTargetLowering::createMemcpyOutsideCallSeq( |
5662 | SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags, |
5663 | SelectionDAG &DAG, const SDLoc &dl) const { |
5664 | SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff, |
5665 | CallSeqStart.getNode()->getOperand(0), |
5666 | Flags, DAG, dl); |
5667 | // The MEMCPY must go outside the CALLSEQ_START..END. |
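     | // (CALLSEQ nodes cannot nest, and the memcpy may be lowered to a
     | // libcall, so splice it in before the CALLSEQ_START and rebuild the
     | // CALLSEQ_START node on top of it.)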
5668 | int64_t FrameSize = CallSeqStart.getConstantOperandVal(1); |
5669 | SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, FrameSize, 0, |
5670 | SDLoc(MemcpyCall)); |
5671 | DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), |
5672 | NewCallSeqStart.getNode()); |
5673 | return NewCallSeqStart; |
5674 | } |
5675 | |
5676 | SDValue PPCTargetLowering::LowerCall_64SVR4( |
5677 | SDValue Chain, SDValue Callee, CallFlags CFlags, |
5678 | const SmallVectorImpl<ISD::OutputArg> &Outs, |
5679 | const SmallVectorImpl<SDValue> &OutVals, |
5680 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, |
5681 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, |
5682 | const CallBase *CB) const { |
5683 | bool isELFv2ABI = Subtarget.isELFv2ABI(); |
5684 | bool isLittleEndian = Subtarget.isLittleEndian(); |
5685 | unsigned NumOps = Outs.size(); |
5686 | bool IsSibCall = false; |
5687 | bool IsFastCall = CFlags.CallConv == CallingConv::Fast; |
5688 | |
5689 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
5690 | unsigned PtrByteSize = 8; |
5691 | |
5692 | MachineFunction &MF = DAG.getMachineFunction(); |
5693 | |
5694 | if (CFlags.IsTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt) |
5695 | IsSibCall = true; |
5696 | |
5697 | // Mark this function as potentially containing a tail call. As a
5698 | // consequence, the frame pointer will be used for dynamic allocation and
5699 | // for restoring the caller's stack pointer in this function's epilogue.
5700 | // This is done because, by tail calling, the called function might
5701 | // overwrite the value in this function's (MF) stack pointer slot 0(SP).
5702 | if (getTargetMachine().Options.GuaranteedTailCallOpt && IsFastCall) |
5703 | MF.getInfo<PPCFunctionInfo>()->setHasFastCall(); |
5704 | |
5705 | assert(!(IsFastCall && CFlags.IsVarArg) &&
5706 |        "fastcc not supported on varargs functions");
5707 | |
5708 | // Count how many bytes are to be pushed on the stack, including the linkage |
5709 | // area and the parameter passing area. On ELFv1, the linkage area is 48 bytes
5710 | // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage |
5711 | // area is 32 bytes reserved space for [SP][CR][LR][TOC]. |
5712 | unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); |
5713 | unsigned NumBytes = LinkageSize; |
5714 | unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; |
5715 | |
5716 | static const MCPhysReg GPR[] = { |
5717 | PPC::X3, PPC::X4, PPC::X5, PPC::X6, |
5718 | PPC::X7, PPC::X8, PPC::X9, PPC::X10, |
5719 | }; |
5720 | static const MCPhysReg VR[] = { |
5721 | PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, |
5722 | PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 |
5723 | }; |
5724 | |
5725 | const unsigned NumGPRs = array_lengthof(GPR); |
5726 | const unsigned NumFPRs = useSoftFloat() ? 0 : 13; |
5727 | const unsigned NumVRs = array_lengthof(VR); |
5728 | |
5729 | // On ELFv2, we can avoid allocating the parameter area if all the arguments |
5730 | // can be passed to the callee in registers. |
5731 | // For the fast calling convention, there is another check below. |
5732 | // Note: We should keep consistent with LowerFormalArguments_64SVR4() |
5733 | bool HasParameterArea = !isELFv2ABI || CFlags.IsVarArg || IsFastCall; |
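     | // Dry-run the stack-slot computation for each argument: if any of them
     | // would fail to fit in the available GPRs/FPRs/VRs, it needs a home in
     | // memory and the parameter area must be allocated after all.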
5734 | if (!HasParameterArea) { |
5735 | unsigned ParamAreaSize = NumGPRs * PtrByteSize; |
5736 | unsigned AvailableFPRs = NumFPRs; |
5737 | unsigned AvailableVRs = NumVRs; |
5738 | unsigned NumBytesTmp = NumBytes; |
5739 | for (unsigned i = 0; i != NumOps; ++i) { |
5740 | if (Outs[i].Flags.isNest()) continue; |
5741 | if (CalculateStackSlotUsed(Outs[i].VT, Outs[i].ArgVT, Outs[i].Flags, |
5742 | PtrByteSize, LinkageSize, ParamAreaSize, |
5743 | NumBytesTmp, AvailableFPRs, AvailableVRs)) |
5744 | HasParameterArea = true; |
5745 | } |
5746 | } |
5747 | |
5748 | // When using the fast calling convention, we don't provide backing for |
5749 | // arguments that will be in registers. |
5750 | unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0; |
5751 | |
5752 | // Avoid allocating parameter area for fastcc functions if all the arguments |
5753 | // can be passed in the registers. |
5754 | if (IsFastCall) |
5755 | HasParameterArea = false; |
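     | // (The loop below turns it back on as soon as any argument is found
     | // that cannot be passed in registers.)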
5756 | |
5757 | // Add up all the space actually used. |
5758 | for (unsigned i = 0; i != NumOps; ++i) { |
5759 | ISD::ArgFlagsTy Flags = Outs[i].Flags; |
5760 | EVT ArgVT = Outs[i].VT; |
5761 | EVT OrigVT = Outs[i].ArgVT; |
5762 | |
5763 | if (Flags.isNest()) |
5764 | continue; |
5765 | |
5766 | if (IsFastCall) { |
5767 | if (Flags.isByVal()) { |
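     | // A byval aggregate consumes one GPR per 8 bytes, rounded up.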
5768 | NumGPRsUsed += (Flags.getByValSize()+7)/8; |
5769 | if (NumGPRsUsed > NumGPRs) |
5770 | HasParameterArea = true; |
5771 | } else { |
5772 | switch (ArgVT.getSimpleVT().SimpleTy) { |
5773 | default: llvm_unreachable("Unexpected ValueType for argument!");
5774 | case MVT::i1: |
5775 | case MVT::i32: |
5776 | case MVT::i64: |
5777 | if (++NumGPRsUsed <= NumGPRs) |
5778 | continue; |
5779 | break; |
5780 | case MVT::v4i32: |
5781 | case MVT::v8i16: |
5782 | case MVT::v16i8: |
5783 | case MVT::v2f64: |
5784 | case MVT::v2i64: |
5785 | case MVT::v1i128: |
5786 | case MVT::f128: |
5787 | if (++NumVRsUsed <= NumVRs) |
5788 | continue; |
5789 | break; |
5790 | case MVT::v4f32: |
5791 | if (++NumVRsUsed <= NumVRs) |
5792 | continue; |
5793 | break; |
5794 | case MVT::f32: |
5795 | case MVT::f64: |
5796 | if (++NumFPRsUsed <= NumFPRs) |
5797 | continue; |
5798 | break; |
5799 | } |
5800 | HasParameterArea = true; |
5801 | } |
5802 | } |
5803 | |
5804 | /* Respect alignment of argument on the stack. */ |
5805 | auto Alignment =
5806 | CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
5807 | NumBytes = alignTo(NumBytes, Alignment);
5808 | |
5809 | NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize); |
5810 | if (Flags.isInConsecutiveRegsLast()) |
5811 | NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; |
5812 | } |
5813 | |
5814 | unsigned NumBytesActuallyUsed = NumBytes; |
5815 | |
5816 | // In the old ELFv1 ABI, the prolog code of the callee may store up to 8
5817 | // GPR argument registers to the stack, allowing va_start to index over
5818 | // them in memory if it is varargs.
5819 | // Because we cannot tell if this is needed on the caller side, we have to |
5820 | // conservatively assume that it is needed. As such, make sure we have at |
5821 | // least enough stack space for the caller to store the 8 GPRs. |
5822 | // In the ELFv2 ABI, we allocate the parameter area iff a callee |
5823 | // really requires memory operands, e.g. a vararg function. |
5824 | if (HasParameterArea) |
5825 | NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize); |
5826 | else |
5827 | NumBytes = LinkageSize; |
5828 | |
5829 | // Tail call needs the stack to be aligned. |
5830 | if (getTargetMachine().Options.GuaranteedTailCallOpt && IsFastCall) |
5831 | NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes); |
5832 | |
5833 | int SPDiff = 0; |
5834 | |
5835 | // Calculate by how many bytes the stack has to be adjusted in case of tail |
5836 | // call optimization. |
5837 | if (!IsSibCall) |
5838 | SPDiff = CalculateTailCallSPDiff(DAG, CFlags.IsTailCall, NumBytes); |
5839 | |
5840 | // To protect arguments on the stack from being clobbered in a tail call, |
5841 | // force all the loads to happen before doing any other lowering. |
5842 | if (CFlags.IsTailCall) |
5843 | Chain = DAG.getStackArgumentTokenFactor(Chain); |
5844 | |
5845 | // Adjust the stack pointer for the new arguments... |
5846 | // These operations are automatically eliminated by the prolog/epilog pass.
5847 | if (!IsSibCall) |
5848 | Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl); |
5849 | SDValue CallSeqStart = Chain; |
5850 | |
5851 | // Load the return address and frame pointer so they can be moved somewhere
5852 | // else later.
5853 | SDValue LROp, FPOp; |
5854 | Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl); |
5855 | |
5856 | // Set up a copy of the stack pointer for use in loading and storing any
5857 | // arguments that may not fit in the registers available for argument |
5858 | // passing. |
5859 | SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64); |
5860 | |
5861 | // Figure out which arguments are going to go in registers, and which in |
5862 | // memory. Also, if this is a vararg function, floating point arguments
5863 | // must be stored to our stack, and loaded into integer regs as well, if |
5864 | // any integer regs are available for argument passing. |
5865 | unsigned ArgOffset = LinkageSize; |
5866 | |
5867 | SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; |
5868 | SmallVector<TailCallArgumentInfo, 8> TailCallArguments; |
5869 | |
5870 | SmallVector<SDValue, 8> MemOpChains; |
5871 | for (unsigned i = 0; i != NumOps; ++i) { |
5872 | SDValue Arg = OutVals[i]; |
5873 | ISD::ArgFlagsTy Flags = Outs[i].Flags; |
5874 | EVT ArgVT = Outs[i].VT; |
5875 | EVT OrigVT = Outs[i].ArgVT; |
5876 | |
5877 | // PtrOff will be used to store the current argument to the stack if a |
5878 | // register cannot be found for it. |
5879 | SDValue PtrOff; |
5880 | |
5881 | // We re-align the argument offset for each argument, except when using the
5882 | // fast calling convention, where we must ensure the re-alignment happens
5883 | // only when the argument will actually use a stack slot.
5884 | auto ComputePtrOff = [&]() { |
5885 | /* Respect alignment of argument on the stack. */ |
5886 | auto Alignment = |
5887 | CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize); |
5888 | ArgOffset = alignTo(ArgOffset, Alignment); |
5889 | |
5890 | PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType()); |
5891 | |
5892 | PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); |
5893 | }; |
5894 | |
5895 | if (!IsFastCall) { |
5896 | ComputePtrOff(); |
5897 | |
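     | // In the 64-bit SVR4 ABIs every doubleword of the parameter save area
     | // shadows one of the first eight GPRs, so the register index can be
     | // derived directly from the argument offset.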
5898 | /* Compute GPR index associated with argument offset. */ |
5899 | GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize; |
5900 | GPR_idx = std::min(GPR_idx, NumGPRs); |
5901 | } |
5902 | |
5903 | // Promote integers to 64-bit values. |
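     | // (The 64-bit ABIs pass every integer argument as a full doubleword,
     | // extending sub-word values according to their sext/zext flags.)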
5904 | if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) { |
5905 | // FIXME: Should this use ANY_EXTEND if neither sext nor zext? |
5906 | unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; |
5907 | Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); |
5908 |