Bug Summary

File: llvm/lib/Target/PowerPC/PPCISelLowering.cpp
Warning: line 17387, column 15
Called C++ object pointer is null
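
This message (typically produced by the core.CallAndMessage checker) means a member function is invoked through a pointer that is null on at least one feasible path. Line 17387 itself is outside this excerpt; the following is only a hypothetical sketch of the defect class, with every name invented for illustration:

    // Hypothetical illustration -- not the code at PPCISelLowering.cpp:17387.
    struct Node {
      int value() const { return V; }
      int V = 0;
    };

    int valueOrZero(Node *N, bool HasNode) {
      Node *P = nullptr;
      if (HasNode)
        P = N;           // P stays null on the HasNode == false path...
      return P->value(); // ...so this call is "Called C++ object pointer is null".
    }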

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name PPCISelLowering.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/Target/PowerPC -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/Target/PowerPC -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/Target/PowerPC -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/include -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include -D NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/Target/PowerPC -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e=. -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-09-04-040900-46481-1 -x c++ /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
1//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the PPCISelLowering class.
10//
11//===----------------------------------------------------------------------===//
12
13#include "PPCISelLowering.h"
14#include "MCTargetDesc/PPCPredicates.h"
15#include "PPC.h"
16#include "PPCCCState.h"
17#include "PPCCallingConv.h"
18#include "PPCFrameLowering.h"
19#include "PPCInstrInfo.h"
20#include "PPCMachineFunctionInfo.h"
21#include "PPCPerfectShuffle.h"
22#include "PPCRegisterInfo.h"
23#include "PPCSubtarget.h"
24#include "PPCTargetMachine.h"
25#include "llvm/ADT/APFloat.h"
26#include "llvm/ADT/APInt.h"
27#include "llvm/ADT/ArrayRef.h"
28#include "llvm/ADT/DenseMap.h"
29#include "llvm/ADT/None.h"
30#include "llvm/ADT/STLExtras.h"
31#include "llvm/ADT/SmallPtrSet.h"
32#include "llvm/ADT/SmallSet.h"
33#include "llvm/ADT/SmallVector.h"
34#include "llvm/ADT/Statistic.h"
35#include "llvm/ADT/StringRef.h"
36#include "llvm/ADT/StringSwitch.h"
37#include "llvm/CodeGen/CallingConvLower.h"
38#include "llvm/CodeGen/ISDOpcodes.h"
39#include "llvm/CodeGen/MachineBasicBlock.h"
40#include "llvm/CodeGen/MachineFrameInfo.h"
41#include "llvm/CodeGen/MachineFunction.h"
42#include "llvm/CodeGen/MachineInstr.h"
43#include "llvm/CodeGen/MachineInstrBuilder.h"
44#include "llvm/CodeGen/MachineJumpTableInfo.h"
45#include "llvm/CodeGen/MachineLoopInfo.h"
46#include "llvm/CodeGen/MachineMemOperand.h"
47#include "llvm/CodeGen/MachineModuleInfo.h"
48#include "llvm/CodeGen/MachineOperand.h"
49#include "llvm/CodeGen/MachineRegisterInfo.h"
50#include "llvm/CodeGen/RuntimeLibcalls.h"
51#include "llvm/CodeGen/SelectionDAG.h"
52#include "llvm/CodeGen/SelectionDAGNodes.h"
53#include "llvm/CodeGen/TargetInstrInfo.h"
54#include "llvm/CodeGen/TargetLowering.h"
55#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
56#include "llvm/CodeGen/TargetRegisterInfo.h"
57#include "llvm/CodeGen/ValueTypes.h"
58#include "llvm/IR/CallingConv.h"
59#include "llvm/IR/Constant.h"
60#include "llvm/IR/Constants.h"
61#include "llvm/IR/DataLayout.h"
62#include "llvm/IR/DebugLoc.h"
63#include "llvm/IR/DerivedTypes.h"
64#include "llvm/IR/Function.h"
65#include "llvm/IR/GlobalValue.h"
66#include "llvm/IR/IRBuilder.h"
67#include "llvm/IR/Instructions.h"
68#include "llvm/IR/Intrinsics.h"
69#include "llvm/IR/IntrinsicsPowerPC.h"
70#include "llvm/IR/Module.h"
71#include "llvm/IR/Type.h"
72#include "llvm/IR/Use.h"
73#include "llvm/IR/Value.h"
74#include "llvm/MC/MCContext.h"
75#include "llvm/MC/MCExpr.h"
76#include "llvm/MC/MCRegisterInfo.h"
77#include "llvm/MC/MCSectionXCOFF.h"
78#include "llvm/MC/MCSymbolXCOFF.h"
79#include "llvm/Support/AtomicOrdering.h"
80#include "llvm/Support/BranchProbability.h"
81#include "llvm/Support/Casting.h"
82#include "llvm/Support/CodeGen.h"
83#include "llvm/Support/CommandLine.h"
84#include "llvm/Support/Compiler.h"
85#include "llvm/Support/Debug.h"
86#include "llvm/Support/ErrorHandling.h"
87#include "llvm/Support/Format.h"
88#include "llvm/Support/KnownBits.h"
89#include "llvm/Support/MachineValueType.h"
90#include "llvm/Support/MathExtras.h"
91#include "llvm/Support/raw_ostream.h"
92#include "llvm/Target/TargetMachine.h"
93#include "llvm/Target/TargetOptions.h"
94#include <algorithm>
95#include <cassert>
96#include <cstdint>
97#include <iterator>
98#include <list>
99#include <utility>
100#include <vector>
101
102using namespace llvm;
103
104#define DEBUG_TYPE "ppc-lowering"
105
106static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
107cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);
108
109static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
110cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden);
111
112static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
113cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);
114
115static cl::opt<bool> DisableSCO("disable-ppc-sco",
116cl::desc("disable sibling call optimization on ppc"), cl::Hidden);
117
118static cl::opt<bool> DisableInnermostLoopAlign32("disable-ppc-innermost-loop-align32",
119cl::desc("don't always align innermost loop to 32 bytes on ppc"), cl::Hidden);
120
121static cl::opt<bool> UseAbsoluteJumpTables("ppc-use-absolute-jumptables",
122cl::desc("use absolute jump tables on ppc"), cl::Hidden);
123
124static cl::opt<bool> EnableQuadwordAtomics(
125 "ppc-quadword-atomics",
126 cl::desc("enable quadword lock-free atomic operations"), cl::init(false),
127 cl::Hidden);
128
129STATISTIC(NumTailCalls, "Number of tail calls");
130STATISTIC(NumSiblingCalls, "Number of sibling calls");
131STATISTIC(ShufflesHandledWithVPERM, "Number of shuffles lowered to a VPERM");
132STATISTIC(NumDynamicAllocaProbed, "Number of dynamic stack allocation probed");
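
For reference, each STATISTIC above expands to a static llvm::Statistic keyed by DEBUG_TYPE, the counter name, and its description, e.g.:

    static llvm::Statistic NumTailCalls = {"ppc-lowering", "NumTailCalls",
                                           "Number of tail calls"};
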
133
134static bool isNByteElemShuffleMask(ShuffleVectorSDNode *, unsigned, int);
135
136static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl);
137
138static const char AIXSSPCanaryWordName[] = "__ssp_canary_word";
139
140// FIXME: Remove this once the bug has been fixed!
141extern cl::opt<bool> ANDIGlueBug;
142
143PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
144 const PPCSubtarget &STI)
145 : TargetLowering(TM), Subtarget(STI) {
146 // Initialize map that relates the PPC addressing modes to the computed flags
147 // of a load/store instruction. The map is used to determine the optimal
 148 // addressing mode when selecting loads and stores.
149 initializeAddrModeMap();
150 // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
151 // arguments are at least 4/8 bytes aligned.
152 bool isPPC64 = Subtarget.isPPC64();
153 setMinStackArgumentAlignment(isPPC64 ? Align(8) : Align(4));
154
155 // Set up the register classes.
156 addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
157 if (!useSoftFloat()) {
158 if (hasSPE()) {
159 addRegisterClass(MVT::f32, &PPC::GPRCRegClass);
160 // EFPU2 APU only supports f32
161 if (!Subtarget.hasEFPU2())
162 addRegisterClass(MVT::f64, &PPC::SPERCRegClass);
163 } else {
164 addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
165 addRegisterClass(MVT::f64, &PPC::F8RCRegClass);
166 }
167 }
168
169 // Match BITREVERSE to customized fast code sequence in the td file.
170 setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
171 setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);
172
 173 // Sub-word ATOMIC_CMP_SWAP needs to ensure that the input is zero-extended.
174 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
175
176 // Custom lower inline assembly to check for special registers.
177 setOperationAction(ISD::INLINEASM, MVT::Other, Custom);
178 setOperationAction(ISD::INLINEASM_BR, MVT::Other, Custom);
179
180 // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
181 for (MVT VT : MVT::integer_valuetypes()) {
182 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
183 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
184 }
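
"Expand" for the i8 SEXTLOAD above means the load is legalized as a zero-extending load followed by an in-register sign-extension. A scalar C++ sketch of the equivalent computation; the PPC mnemonics in the comments are the customary lowering, noted here as an assumption:

    #include <cstdint>
    int32_t loadSExt8(const uint8_t *P) {
      uint32_t Z = *P;           // zero-extending byte load (lbz)
      return int32_t(int8_t(Z)); // sign-extend in register (extsb)
    }
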
185
186 if (Subtarget.isISA3_0()) {
187 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Legal);
188 setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Legal);
189 setTruncStoreAction(MVT::f64, MVT::f16, Legal);
190 setTruncStoreAction(MVT::f32, MVT::f16, Legal);
191 } else {
192 // No extending loads from f16 or HW conversions back and forth.
193 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
194 setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
195 setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
196 setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
197 setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
198 setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
199 setTruncStoreAction(MVT::f64, MVT::f16, Expand);
200 setTruncStoreAction(MVT::f32, MVT::f16, Expand);
201 }
202
203 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
204
 205 // PowerPC has pre-inc loads and stores.
206 setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
207 setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
208 setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
209 setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
210 setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
211 setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
212 setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
213 setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
214 setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
215 setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
216 if (!Subtarget.hasSPE()) {
217 setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
218 setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
219 setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
220 setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);
221 }
222
223 // PowerPC uses ADDC/ADDE/SUBC/SUBE to propagate carry.
224 const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
225 for (MVT VT : ScalarIntVTs) {
226 setOperationAction(ISD::ADDC, VT, Legal);
227 setOperationAction(ISD::ADDE, VT, Legal);
228 setOperationAction(ISD::SUBC, VT, Legal);
229 setOperationAction(ISD::SUBE, VT, Legal);
230 }
231
232 if (Subtarget.useCRBits()) {
233 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
234
235 if (isPPC64 || Subtarget.hasFPCVT()) {
236 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i1, Promote);
237 AddPromotedToType(ISD::STRICT_SINT_TO_FP, MVT::i1,
238 isPPC64 ? MVT::i64 : MVT::i32);
239 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i1, Promote);
240 AddPromotedToType(ISD::STRICT_UINT_TO_FP, MVT::i1,
241 isPPC64 ? MVT::i64 : MVT::i32);
242
243 setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
244 AddPromotedToType (ISD::SINT_TO_FP, MVT::i1,
245 isPPC64 ? MVT::i64 : MVT::i32);
246 setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
247 AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
248 isPPC64 ? MVT::i64 : MVT::i32);
249
250 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i1, Promote);
251 AddPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::i1,
252 isPPC64 ? MVT::i64 : MVT::i32);
253 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i1, Promote);
254 AddPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::i1,
255 isPPC64 ? MVT::i64 : MVT::i32);
256
257 setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
258 AddPromotedToType(ISD::FP_TO_SINT, MVT::i1,
259 isPPC64 ? MVT::i64 : MVT::i32);
260 setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
261 AddPromotedToType(ISD::FP_TO_UINT, MVT::i1,
262 isPPC64 ? MVT::i64 : MVT::i32);
263 } else {
264 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i1, Custom);
265 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i1, Custom);
266 setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
267 setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
268 }
269
270 // PowerPC does not support direct load/store of condition registers.
271 setOperationAction(ISD::LOAD, MVT::i1, Custom);
272 setOperationAction(ISD::STORE, MVT::i1, Custom);
273
274 // FIXME: Remove this once the ANDI glue bug is fixed:
275 if (ANDIGlueBug)
276 setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);
277
278 for (MVT VT : MVT::integer_valuetypes()) {
279 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
280 setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
281 setTruncStoreAction(VT, MVT::i1, Expand);
282 }
283
284 addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
285 }
286
287 // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
288 // PPC (the libcall is not available).
289 setOperationAction(ISD::FP_TO_SINT, MVT::ppcf128, Custom);
290 setOperationAction(ISD::FP_TO_UINT, MVT::ppcf128, Custom);
291 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::ppcf128, Custom);
292 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::ppcf128, Custom);
293
294 // We do not currently implement these libm ops for PowerPC.
295 setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
296 setOperationAction(ISD::FCEIL, MVT::ppcf128, Expand);
297 setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
298 setOperationAction(ISD::FRINT, MVT::ppcf128, Expand);
299 setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
300 setOperationAction(ISD::FREM, MVT::ppcf128, Expand);
301
 302 // PowerPC has no SREM/UREM instructions unless we are on P9.
 303 // On P9 we may use a hardware instruction to compute the remainder.
 304 // When the result of both the remainder and the division is required, it is
 305 // more efficient to compute the remainder from the result of the division
 306 // rather than use the remainder instruction. The instructions are legalized
 307 // directly because the DivRemPairsPass performs the transformation at the IR
 308 // level.
309 if (Subtarget.isISA3_0()) {
310 setOperationAction(ISD::SREM, MVT::i32, Legal);
311 setOperationAction(ISD::UREM, MVT::i32, Legal);
312 setOperationAction(ISD::SREM, MVT::i64, Legal);
313 setOperationAction(ISD::UREM, MVT::i64, Legal);
314 } else {
315 setOperationAction(ISD::SREM, MVT::i32, Expand);
316 setOperationAction(ISD::UREM, MVT::i32, Expand);
317 setOperationAction(ISD::SREM, MVT::i64, Expand);
318 setOperationAction(ISD::UREM, MVT::i64, Expand);
319 }
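
The rewrite described in the comment above happens at the IR level in DivRemPairs rather than here; as a sketch, when both results are needed the remainder is derived from the quotient:

    // Both quotient and remainder of A/B with a single divide:
    void divRem(int A, int B, int &Q, int &R) {
      Q = A / B;
      R = A - Q * B; // one multiply-subtract instead of a second rem/div
    }
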
320
321 // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
322 setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
323 setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
324 setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
325 setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
326 setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
327 setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
328 setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
329 setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
330
 331 // Handle constrained floating-point operations on scalars.
 332 // TODO: Handle SPE-specific operations.
333 setOperationAction(ISD::STRICT_FADD, MVT::f32, Legal);
334 setOperationAction(ISD::STRICT_FSUB, MVT::f32, Legal);
335 setOperationAction(ISD::STRICT_FMUL, MVT::f32, Legal);
336 setOperationAction(ISD::STRICT_FDIV, MVT::f32, Legal);
337 setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
338
339 setOperationAction(ISD::STRICT_FADD, MVT::f64, Legal);
340 setOperationAction(ISD::STRICT_FSUB, MVT::f64, Legal);
341 setOperationAction(ISD::STRICT_FMUL, MVT::f64, Legal);
342 setOperationAction(ISD::STRICT_FDIV, MVT::f64, Legal);
343
344 if (!Subtarget.hasSPE()) {
345 setOperationAction(ISD::STRICT_FMA, MVT::f32, Legal);
346 setOperationAction(ISD::STRICT_FMA, MVT::f64, Legal);
347 }
348
349 if (Subtarget.hasVSX()) {
350 setOperationAction(ISD::STRICT_FRINT, MVT::f32, Legal);
351 setOperationAction(ISD::STRICT_FRINT, MVT::f64, Legal);
352 }
353
354 if (Subtarget.hasFSQRT()) {
355 setOperationAction(ISD::STRICT_FSQRT, MVT::f32, Legal);
356 setOperationAction(ISD::STRICT_FSQRT, MVT::f64, Legal);
357 }
358
359 if (Subtarget.hasFPRND()) {
360 setOperationAction(ISD::STRICT_FFLOOR, MVT::f32, Legal);
361 setOperationAction(ISD::STRICT_FCEIL, MVT::f32, Legal);
362 setOperationAction(ISD::STRICT_FTRUNC, MVT::f32, Legal);
363 setOperationAction(ISD::STRICT_FROUND, MVT::f32, Legal);
364
365 setOperationAction(ISD::STRICT_FFLOOR, MVT::f64, Legal);
366 setOperationAction(ISD::STRICT_FCEIL, MVT::f64, Legal);
367 setOperationAction(ISD::STRICT_FTRUNC, MVT::f64, Legal);
368 setOperationAction(ISD::STRICT_FROUND, MVT::f64, Legal);
369 }
370
371 // We don't support sin/cos/sqrt/fmod/pow
372 setOperationAction(ISD::FSIN , MVT::f64, Expand);
373 setOperationAction(ISD::FCOS , MVT::f64, Expand);
374 setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
375 setOperationAction(ISD::FREM , MVT::f64, Expand);
376 setOperationAction(ISD::FPOW , MVT::f64, Expand);
377 setOperationAction(ISD::FSIN , MVT::f32, Expand);
378 setOperationAction(ISD::FCOS , MVT::f32, Expand);
379 setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
380 setOperationAction(ISD::FREM , MVT::f32, Expand);
381 setOperationAction(ISD::FPOW , MVT::f32, Expand);
382 if (Subtarget.hasSPE()) {
383 setOperationAction(ISD::FMA , MVT::f64, Expand);
384 setOperationAction(ISD::FMA , MVT::f32, Expand);
385 } else {
386 setOperationAction(ISD::FMA , MVT::f64, Legal);
387 setOperationAction(ISD::FMA , MVT::f32, Legal);
388 }
389
390 if (Subtarget.hasSPE())
391 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
392
393 setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
394
395 // If we're enabling GP optimizations, use hardware square root
396 if (!Subtarget.hasFSQRT() &&
397 !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
398 Subtarget.hasFRE()))
399 setOperationAction(ISD::FSQRT, MVT::f64, Expand);
400
401 if (!Subtarget.hasFSQRT() &&
402 !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
403 Subtarget.hasFRES()))
404 setOperationAction(ISD::FSQRT, MVT::f32, Expand);
405
406 if (Subtarget.hasFCPSGN()) {
407 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
408 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
409 } else {
410 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
411 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
412 }
413
414 if (Subtarget.hasFPRND()) {
415 setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
416 setOperationAction(ISD::FCEIL, MVT::f64, Legal);
417 setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
418 setOperationAction(ISD::FROUND, MVT::f64, Legal);
419
420 setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
421 setOperationAction(ISD::FCEIL, MVT::f32, Legal);
422 setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
423 setOperationAction(ISD::FROUND, MVT::f32, Legal);
424 }
425
 426 // PowerPC does not have BSWAP, but we can use the vector BSWAP instruction
 427 // xxbrd to speed up scalar BSWAP64.
 428 // CTPOP and CTTZ were introduced in P8 and P9, respectively.
429 setOperationAction(ISD::BSWAP, MVT::i32 , Expand);
430 if (Subtarget.hasP9Vector() && Subtarget.isPPC64())
431 setOperationAction(ISD::BSWAP, MVT::i64 , Custom);
432 else
433 setOperationAction(ISD::BSWAP, MVT::i64 , Expand);
434 if (Subtarget.isISA3_0()) {
435 setOperationAction(ISD::CTTZ , MVT::i32 , Legal);
436 setOperationAction(ISD::CTTZ , MVT::i64 , Legal);
437 } else {
438 setOperationAction(ISD::CTTZ , MVT::i32 , Expand);
439 setOperationAction(ISD::CTTZ , MVT::i64 , Expand);
440 }
441
442 if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) {
443 setOperationAction(ISD::CTPOP, MVT::i32 , Legal);
444 setOperationAction(ISD::CTPOP, MVT::i64 , Legal);
445 } else {
446 setOperationAction(ISD::CTPOP, MVT::i32 , Expand);
447 setOperationAction(ISD::CTPOP, MVT::i64 , Expand);
448 }
449
450 // PowerPC does not have ROTR
451 setOperationAction(ISD::ROTR, MVT::i32 , Expand);
452 setOperationAction(ISD::ROTR, MVT::i64 , Expand);
453
454 if (!Subtarget.useCRBits()) {
455 // PowerPC does not have Select
456 setOperationAction(ISD::SELECT, MVT::i32, Expand);
457 setOperationAction(ISD::SELECT, MVT::i64, Expand);
458 setOperationAction(ISD::SELECT, MVT::f32, Expand);
459 setOperationAction(ISD::SELECT, MVT::f64, Expand);
460 }
461
462 // PowerPC wants to turn select_cc of FP into fsel when possible.
463 setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
464 setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
465
466 // PowerPC wants to optimize integer setcc a bit
467 if (!Subtarget.useCRBits())
468 setOperationAction(ISD::SETCC, MVT::i32, Custom);
469
470 if (Subtarget.hasFPU()) {
471 setOperationAction(ISD::STRICT_FSETCC, MVT::f32, Legal);
472 setOperationAction(ISD::STRICT_FSETCC, MVT::f64, Legal);
473 setOperationAction(ISD::STRICT_FSETCC, MVT::f128, Legal);
474
475 setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Legal);
476 setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Legal);
477 setOperationAction(ISD::STRICT_FSETCCS, MVT::f128, Legal);
478 }
479
 480 // PowerPC does not have BRCOND, which requires SetCC.
481 if (!Subtarget.useCRBits())
482 setOperationAction(ISD::BRCOND, MVT::Other, Expand);
483
484 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
485
486 if (Subtarget.hasSPE()) {
487 // SPE has built-in conversions
488 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Legal);
489 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Legal);
490 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Legal);
491 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
492 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
493 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
494
495 // SPE supports signaling compare of f32/f64.
496 setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Legal);
497 setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Legal);
498 } else {
499 // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
500 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
501 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
502
503 // PowerPC does not have [U|S]INT_TO_FP
504 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Expand);
505 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Expand);
506 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
507 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
508 }
509
510 if (Subtarget.hasDirectMove() && isPPC64) {
511 setOperationAction(ISD::BITCAST, MVT::f32, Legal);
512 setOperationAction(ISD::BITCAST, MVT::i32, Legal);
513 setOperationAction(ISD::BITCAST, MVT::i64, Legal);
514 setOperationAction(ISD::BITCAST, MVT::f64, Legal);
515 if (TM.Options.UnsafeFPMath) {
516 setOperationAction(ISD::LRINT, MVT::f64, Legal);
517 setOperationAction(ISD::LRINT, MVT::f32, Legal);
518 setOperationAction(ISD::LLRINT, MVT::f64, Legal);
519 setOperationAction(ISD::LLRINT, MVT::f32, Legal);
520 setOperationAction(ISD::LROUND, MVT::f64, Legal);
521 setOperationAction(ISD::LROUND, MVT::f32, Legal);
522 setOperationAction(ISD::LLROUND, MVT::f64, Legal);
523 setOperationAction(ISD::LLROUND, MVT::f32, Legal);
524 }
525 } else {
526 setOperationAction(ISD::BITCAST, MVT::f32, Expand);
527 setOperationAction(ISD::BITCAST, MVT::i32, Expand);
528 setOperationAction(ISD::BITCAST, MVT::i64, Expand);
529 setOperationAction(ISD::BITCAST, MVT::f64, Expand);
530 }
531
532 // We cannot sextinreg(i1). Expand to shifts.
533 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
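
The shift expansion the comment refers to, sketched for an i1 value held in a 32-bit register (C++20 semantics assumed for the signed conversion and shift):

    #include <cstdint>
    int32_t sextInRegI1(uint32_t X) {
      return int32_t(X << 31) >> 31; // 0 -> 0, 1 -> -1
    }
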
534
 535 // NOTE: The EH_SJLJ_SETJMP/_LONGJMP support here is NOT intended for SjLj
 536 // exception handling; it is a light-weight setjmp/longjmp replacement for
 537 // continuations, user-level threading, etc. As a result, no other SjLj
 538 // exception interfaces are implemented, so please don't build your own
 539 // exception handling on top of them.
 540 // LLVM/Clang supports zero-cost DWARF exception handling.
541 setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
542 setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
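
These nodes back Clang's __builtin_setjmp/__builtin_longjmp (reached via the llvm.eh.sjlj.setjmp/longjmp intrinsics); a minimal sketch of the light-weight usage the note describes:

    static void *Buf[5]; // the builtins require a five-pointer buffer

    static void mayBailOut(bool Bail) {
      if (Bail)
        __builtin_longjmp(Buf, 1); // the value argument must be 1
    }

    int run(bool Bail) {
      if (__builtin_setjmp(Buf))
        return 1;      // re-entered here via __builtin_longjmp
      mayBailOut(Bail);
      return 0;
    }
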
543
544 // We want to legalize GlobalAddress and ConstantPool nodes into the
545 // appropriate instructions to materialize the address.
546 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
547 setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
548 setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
549 setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
550 setOperationAction(ISD::JumpTable, MVT::i32, Custom);
551 setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
552 setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
553 setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
554 setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
555 setOperationAction(ISD::JumpTable, MVT::i64, Custom);
556
557 // TRAP is legal.
558 setOperationAction(ISD::TRAP, MVT::Other, Legal);
559
560 // TRAMPOLINE is custom lowered.
561 setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
562 setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);
563
564 // VASTART needs to be custom lowered to use the VarArgsFrameIndex
565 setOperationAction(ISD::VASTART , MVT::Other, Custom);
566
567 if (Subtarget.is64BitELFABI()) {
568 // VAARG always uses double-word chunks, so promote anything smaller.
569 setOperationAction(ISD::VAARG, MVT::i1, Promote);
570 AddPromotedToType(ISD::VAARG, MVT::i1, MVT::i64);
571 setOperationAction(ISD::VAARG, MVT::i8, Promote);
572 AddPromotedToType(ISD::VAARG, MVT::i8, MVT::i64);
573 setOperationAction(ISD::VAARG, MVT::i16, Promote);
574 AddPromotedToType(ISD::VAARG, MVT::i16, MVT::i64);
575 setOperationAction(ISD::VAARG, MVT::i32, Promote);
576 AddPromotedToType(ISD::VAARG, MVT::i32, MVT::i64);
577 setOperationAction(ISD::VAARG, MVT::Other, Expand);
578 } else if (Subtarget.is32BitELFABI()) {
579 // VAARG is custom lowered with the 32-bit SVR4 ABI.
580 setOperationAction(ISD::VAARG, MVT::Other, Custom);
581 setOperationAction(ISD::VAARG, MVT::i64, Custom);
582 } else
583 setOperationAction(ISD::VAARG, MVT::Other, Expand);
584
585 // VACOPY is custom lowered with the 32-bit SVR4 ABI.
586 if (Subtarget.is32BitELFABI())
587 setOperationAction(ISD::VACOPY , MVT::Other, Custom);
588 else
589 setOperationAction(ISD::VACOPY , MVT::Other, Expand);
590
591 // Use the default implementation.
592 setOperationAction(ISD::VAEND , MVT::Other, Expand);
593 setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
594 setOperationAction(ISD::STACKRESTORE , MVT::Other, Custom);
595 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom);
596 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64 , Custom);
597 setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom);
598 setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom);
599 setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
600 setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);
601
602 // We want to custom lower some of our intrinsics.
603 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
604
605 // To handle counter-based loop conditions.
606 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);
607
608 setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);
609 setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
610 setOperationAction(ISD::INTRINSIC_VOID, MVT::i32, Custom);
611 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
612
613 // Comparisons that require checking two conditions.
614 if (Subtarget.hasSPE()) {
615 setCondCodeAction(ISD::SETO, MVT::f32, Expand);
616 setCondCodeAction(ISD::SETO, MVT::f64, Expand);
617 setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
618 setCondCodeAction(ISD::SETUO, MVT::f64, Expand);
619 }
620 setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
621 setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
622 setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
623 setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
624 setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
625 setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
626 setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
627 setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
628 setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
629 setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
630 setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
631 setCondCodeAction(ISD::SETONE, MVT::f64, Expand);
632
633 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Legal);
634 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);
635
636 if (Subtarget.has64BitSupport()) {
637 // They also have instructions for converting between i64 and fp.
638 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
639 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Expand);
640 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);
641 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Expand);
642 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
643 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
644 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
645 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
646 // This is just the low 32 bits of a (signed) fp->i64 conversion.
647 // We cannot do this with Promote because i64 is not a legal type.
648 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
649 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
650
651 if (Subtarget.hasLFIWAX() || Subtarget.isPPC64()) {
652 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
653 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
654 }
655 } else {
656 // PowerPC does not have FP_TO_UINT on 32-bit implementations.
657 if (Subtarget.hasSPE()) {
658 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Legal);
659 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
660 } else {
661 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Expand);
662 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
663 }
664 }
665
666 // With the instructions enabled under FPCVT, we can do everything.
667 if (Subtarget.hasFPCVT()) {
668 if (Subtarget.has64BitSupport()) {
669 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
670 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom);
671 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);
672 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Custom);
673 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
674 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
675 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
676 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
677 }
678
679 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
680 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
681 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
682 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Custom);
683 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
684 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
685 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
686 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
687 }
688
689 if (Subtarget.use64BitRegs()) {
690 // 64-bit PowerPC implementations can support i64 types directly
691 addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
692 // BUILD_PAIR can't be handled natively, and should be expanded to shl/or
693 setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
694 // 64-bit PowerPC wants to expand i128 shifts itself.
695 setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
696 setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
697 setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
698 } else {
699 // 32-bit PowerPC wants to expand i64 shifts itself.
700 setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
701 setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
702 setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
703 }
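
The *_PARTS nodes express a double-width shift over a register pair; the defined behavior for the SHL case on 32-bit parts can be sketched as follows (a reference model, not the PPC lowering itself):

    #include <cstdint>
    void shl64(uint32_t Lo, uint32_t Hi, unsigned Amt, // Amt < 64
               uint32_t &OutLo, uint32_t &OutHi) {
      if (Amt == 0) {
        OutLo = Lo;
        OutHi = Hi;
      } else if (Amt < 32) {
        OutHi = (Hi << Amt) | (Lo >> (32 - Amt));
        OutLo = Lo << Amt;
      } else { // 32 <= Amt < 64: all remaining bits come from the low word
        OutHi = Lo << (Amt - 32);
        OutLo = 0;
      }
    }
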
704
705 // PowerPC has better expansions for funnel shifts than the generic
706 // TargetLowering::expandFunnelShift.
707 if (Subtarget.has64BitSupport()) {
708 setOperationAction(ISD::FSHL, MVT::i64, Custom);
709 setOperationAction(ISD::FSHR, MVT::i64, Custom);
710 }
711 setOperationAction(ISD::FSHL, MVT::i32, Custom);
712 setOperationAction(ISD::FSHR, MVT::i32, Custom);
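
For reference, the funnel-shift node's defined behavior, sketched for ISD::FSHL on i32 independently of how PPC actually expands it (ISD::FSHR is the mirror image):

    #include <cstdint>
    uint32_t fshl32(uint32_t Hi, uint32_t Lo, unsigned Amt) {
      Amt &= 31; // the shift amount is taken modulo the bit width
      if (Amt == 0)
        return Hi;
      return (Hi << Amt) | (Lo >> (32 - Amt));
    }
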
713
714 if (Subtarget.hasVSX()) {
715 setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal);
716 setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal);
717 setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal);
718 setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal);
719 }
720
721 if (Subtarget.hasAltivec()) {
722 for (MVT VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
723 setOperationAction(ISD::SADDSAT, VT, Legal);
724 setOperationAction(ISD::SSUBSAT, VT, Legal);
725 setOperationAction(ISD::UADDSAT, VT, Legal);
726 setOperationAction(ISD::USUBSAT, VT, Legal);
727 }
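
The saturating operations marked Legal above clamp instead of wrapping; sketched for one signed 8-bit lane (Altivec's vaddsbs behaves this way per lane — background knowledge, not taken from this file):

    #include <cstdint>
    int8_t saddsat8(int8_t A, int8_t B) {
      int S = int(A) + int(B);   // widen so the sum cannot overflow
      if (S > 127)  return 127;  // clamp to INT8_MAX
      if (S < -128) return -128; // clamp to INT8_MIN
      return int8_t(S);
    }
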
728 // First set operation action for all vector types to expand. Then we
729 // will selectively turn on ones that can be effectively codegen'd.
730 for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
731 // add/sub are legal for all supported vector VT's.
732 setOperationAction(ISD::ADD, VT, Legal);
733 setOperationAction(ISD::SUB, VT, Legal);
734
735 // For v2i64, these are only valid with P8Vector. This is corrected after
736 // the loop.
737 if (VT.getSizeInBits() <= 128 && VT.getScalarSizeInBits() <= 64) {
738 setOperationAction(ISD::SMAX, VT, Legal);
739 setOperationAction(ISD::SMIN, VT, Legal);
740 setOperationAction(ISD::UMAX, VT, Legal);
741 setOperationAction(ISD::UMIN, VT, Legal);
742 }
743 else {
744 setOperationAction(ISD::SMAX, VT, Expand);
745 setOperationAction(ISD::SMIN, VT, Expand);
746 setOperationAction(ISD::UMAX, VT, Expand);
747 setOperationAction(ISD::UMIN, VT, Expand);
748 }
749
750 if (Subtarget.hasVSX()) {
751 setOperationAction(ISD::FMAXNUM, VT, Legal);
752 setOperationAction(ISD::FMINNUM, VT, Legal);
753 }
754
755 // Vector instructions introduced in P8
756 if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
757 setOperationAction(ISD::CTPOP, VT, Legal);
758 setOperationAction(ISD::CTLZ, VT, Legal);
759 }
760 else {
761 setOperationAction(ISD::CTPOP, VT, Expand);
762 setOperationAction(ISD::CTLZ, VT, Expand);
763 }
764
765 // Vector instructions introduced in P9
766 if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128))
767 setOperationAction(ISD::CTTZ, VT, Legal);
768 else
769 setOperationAction(ISD::CTTZ, VT, Expand);
770
771 // We promote all shuffles to v16i8.
772 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
773 AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);
774
775 // We promote all non-typed operations to v4i32.
776 setOperationAction(ISD::AND , VT, Promote);
777 AddPromotedToType (ISD::AND , VT, MVT::v4i32);
778 setOperationAction(ISD::OR , VT, Promote);
779 AddPromotedToType (ISD::OR , VT, MVT::v4i32);
780 setOperationAction(ISD::XOR , VT, Promote);
781 AddPromotedToType (ISD::XOR , VT, MVT::v4i32);
782 setOperationAction(ISD::LOAD , VT, Promote);
783 AddPromotedToType (ISD::LOAD , VT, MVT::v4i32);
784 setOperationAction(ISD::SELECT, VT, Promote);
785 AddPromotedToType (ISD::SELECT, VT, MVT::v4i32);
786 setOperationAction(ISD::VSELECT, VT, Legal);
787 setOperationAction(ISD::SELECT_CC, VT, Promote);
788 AddPromotedToType (ISD::SELECT_CC, VT, MVT::v4i32);
789 setOperationAction(ISD::STORE, VT, Promote);
790 AddPromotedToType (ISD::STORE, VT, MVT::v4i32);
791
792 // No other operations are legal.
793 setOperationAction(ISD::MUL , VT, Expand);
794 setOperationAction(ISD::SDIV, VT, Expand);
795 setOperationAction(ISD::SREM, VT, Expand);
796 setOperationAction(ISD::UDIV, VT, Expand);
797 setOperationAction(ISD::UREM, VT, Expand);
798 setOperationAction(ISD::FDIV, VT, Expand);
799 setOperationAction(ISD::FREM, VT, Expand);
800 setOperationAction(ISD::FNEG, VT, Expand);
801 setOperationAction(ISD::FSQRT, VT, Expand);
802 setOperationAction(ISD::FLOG, VT, Expand);
803 setOperationAction(ISD::FLOG10, VT, Expand);
804 setOperationAction(ISD::FLOG2, VT, Expand);
805 setOperationAction(ISD::FEXP, VT, Expand);
806 setOperationAction(ISD::FEXP2, VT, Expand);
807 setOperationAction(ISD::FSIN, VT, Expand);
808 setOperationAction(ISD::FCOS, VT, Expand);
809 setOperationAction(ISD::FABS, VT, Expand);
810 setOperationAction(ISD::FFLOOR, VT, Expand);
811 setOperationAction(ISD::FCEIL, VT, Expand);
812 setOperationAction(ISD::FTRUNC, VT, Expand);
813 setOperationAction(ISD::FRINT, VT, Expand);
814 setOperationAction(ISD::FNEARBYINT, VT, Expand);
815 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
816 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
817 setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
818 setOperationAction(ISD::MULHU, VT, Expand);
819 setOperationAction(ISD::MULHS, VT, Expand);
820 setOperationAction(ISD::UMUL_LOHI, VT, Expand);
821 setOperationAction(ISD::SMUL_LOHI, VT, Expand);
822 setOperationAction(ISD::UDIVREM, VT, Expand);
823 setOperationAction(ISD::SDIVREM, VT, Expand);
824 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
825 setOperationAction(ISD::FPOW, VT, Expand);
826 setOperationAction(ISD::BSWAP, VT, Expand);
827 setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
828 setOperationAction(ISD::ROTL, VT, Expand);
829 setOperationAction(ISD::ROTR, VT, Expand);
830
831 for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
832 setTruncStoreAction(VT, InnerVT, Expand);
833 setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
834 setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
835 setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
836 }
837 }
838 setOperationAction(ISD::SELECT_CC, MVT::v4i32, Expand);
839 if (!Subtarget.hasP8Vector()) {
840 setOperationAction(ISD::SMAX, MVT::v2i64, Expand);
841 setOperationAction(ISD::SMIN, MVT::v2i64, Expand);
842 setOperationAction(ISD::UMAX, MVT::v2i64, Expand);
843 setOperationAction(ISD::UMIN, MVT::v2i64, Expand);
844 }
845
 846 // We can custom-expand all VECTOR_SHUFFLEs to VPERM; others we can handle
 847 // with merges, splats, etc.
848 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);
849
 850 // Vector truncates to sub-word integers that fit in an Altivec/VSX register
 851 // are cheap, so handle them before they get expanded to scalar code.
852 setOperationAction(ISD::TRUNCATE, MVT::v8i8, Custom);
853 setOperationAction(ISD::TRUNCATE, MVT::v4i8, Custom);
854 setOperationAction(ISD::TRUNCATE, MVT::v2i8, Custom);
855 setOperationAction(ISD::TRUNCATE, MVT::v4i16, Custom);
856 setOperationAction(ISD::TRUNCATE, MVT::v2i16, Custom);
857
858 setOperationAction(ISD::AND , MVT::v4i32, Legal);
859 setOperationAction(ISD::OR , MVT::v4i32, Legal);
860 setOperationAction(ISD::XOR , MVT::v4i32, Legal);
861 setOperationAction(ISD::LOAD , MVT::v4i32, Legal);
862 setOperationAction(ISD::SELECT, MVT::v4i32,
863 Subtarget.useCRBits() ? Legal : Expand);
864 setOperationAction(ISD::STORE , MVT::v4i32, Legal);
865 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4i32, Legal);
866 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4i32, Legal);
867 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i32, Legal);
868 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i32, Legal);
869 setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
870 setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
871 setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
872 setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
873 setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
874 setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
875 setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
876 setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
877
 878 // Custom lower ROTL v1i128 to VECTOR_SHUFFLE v16i8.
879 setOperationAction(ISD::ROTL, MVT::v1i128, Custom);
880 // With hasAltivec set, we can lower ISD::ROTL to vrl(b|h|w).
881 if (Subtarget.hasAltivec())
882 for (auto VT : {MVT::v4i32, MVT::v8i16, MVT::v16i8})
883 setOperationAction(ISD::ROTL, VT, Legal);
884 // With hasP8Altivec set, we can lower ISD::ROTL to vrld.
885 if (Subtarget.hasP8Altivec())
886 setOperationAction(ISD::ROTL, MVT::v2i64, Legal);
887
888 addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
889 addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
890 addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
891 addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);
892
893 setOperationAction(ISD::MUL, MVT::v4f32, Legal);
894 setOperationAction(ISD::FMA, MVT::v4f32, Legal);
895
896 if (Subtarget.hasVSX()) {
897 setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
898 setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
899 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
900 }
901
902 if (Subtarget.hasP8Altivec())
903 setOperationAction(ISD::MUL, MVT::v4i32, Legal);
904 else
905 setOperationAction(ISD::MUL, MVT::v4i32, Custom);
906
907 if (Subtarget.isISA3_1()) {
908 setOperationAction(ISD::MUL, MVT::v2i64, Legal);
909 setOperationAction(ISD::MULHS, MVT::v2i64, Legal);
910 setOperationAction(ISD::MULHU, MVT::v2i64, Legal);
911 setOperationAction(ISD::MULHS, MVT::v4i32, Legal);
912 setOperationAction(ISD::MULHU, MVT::v4i32, Legal);
913 setOperationAction(ISD::UDIV, MVT::v2i64, Legal);
914 setOperationAction(ISD::SDIV, MVT::v2i64, Legal);
915 setOperationAction(ISD::UDIV, MVT::v4i32, Legal);
916 setOperationAction(ISD::SDIV, MVT::v4i32, Legal);
917 setOperationAction(ISD::UREM, MVT::v2i64, Legal);
918 setOperationAction(ISD::SREM, MVT::v2i64, Legal);
919 setOperationAction(ISD::UREM, MVT::v4i32, Legal);
920 setOperationAction(ISD::SREM, MVT::v4i32, Legal);
921 setOperationAction(ISD::UREM, MVT::v1i128, Legal);
922 setOperationAction(ISD::SREM, MVT::v1i128, Legal);
923 setOperationAction(ISD::UDIV, MVT::v1i128, Legal);
924 setOperationAction(ISD::SDIV, MVT::v1i128, Legal);
925 setOperationAction(ISD::ROTL, MVT::v1i128, Legal);
926 }
927
928 setOperationAction(ISD::MUL, MVT::v8i16, Legal);
929 setOperationAction(ISD::MUL, MVT::v16i8, Custom);
930
931 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
932 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);
933
934 setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
935 setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
936 setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
937 setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
938
939 // Altivec does not contain unordered floating-point compare instructions
940 setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
941 setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
942 setCondCodeAction(ISD::SETO, MVT::v4f32, Expand);
943 setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);
944
945 if (Subtarget.hasVSX()) {
946 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
947 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
948 if (Subtarget.hasP8Vector()) {
949 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
950 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
951 }
952 if (Subtarget.hasDirectMove() && isPPC64) {
953 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);
954 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);
955 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);
956 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal);
957 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal);
958 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal);
959 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
960 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
961 }
962 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
963
 964 // The nearbyint variants are not allowed to raise the inexact exception,
 965 // so we can only code-gen them with unsafe math.
966 if (TM.Options.UnsafeFPMath) {
967 setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
968 setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
969 }
970
971 setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
972 setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
973 setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
974 setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
975 setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
976 setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
977 setOperationAction(ISD::FROUND, MVT::f64, Legal);
978 setOperationAction(ISD::FRINT, MVT::f64, Legal);
979
980 setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
981 setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
982 setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
983 setOperationAction(ISD::FROUND, MVT::f32, Legal);
984 setOperationAction(ISD::FRINT, MVT::f32, Legal);
985
986 setOperationAction(ISD::MUL, MVT::v2f64, Legal);
987 setOperationAction(ISD::FMA, MVT::v2f64, Legal);
988
989 setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
990 setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
991
992 // Share the Altivec comparison restrictions.
993 setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
994 setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
995 setCondCodeAction(ISD::SETO, MVT::v2f64, Expand);
996 setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);
997
998 setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
999 setOperationAction(ISD::STORE, MVT::v2f64, Legal);
1000
1001 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);
1002
1003 if (Subtarget.hasP8Vector())
1004 addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);
1005
1006 addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);
1007
1008 addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
1009 addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
1010 addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);
1011
1012 if (Subtarget.hasP8Altivec()) {
1013 setOperationAction(ISD::SHL, MVT::v2i64, Legal);
1014 setOperationAction(ISD::SRA, MVT::v2i64, Legal);
1015 setOperationAction(ISD::SRL, MVT::v2i64, Legal);
1016
 1017 // 128-bit shifts can be accomplished via 3 instructions for SHL and
 1018 // SRL, but not for SRA because of the instructions available:
 1019 // VS{RL} and VS{RL}O. However, due to direct-move costs, it's not worth
 1020 // doing.
1021 setOperationAction(ISD::SHL, MVT::v1i128, Expand);
1022 setOperationAction(ISD::SRL, MVT::v1i128, Expand);
1023 setOperationAction(ISD::SRA, MVT::v1i128, Expand);
1024
1025 setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
1026 }
1027 else {
1028 setOperationAction(ISD::SHL, MVT::v2i64, Expand);
1029 setOperationAction(ISD::SRA, MVT::v2i64, Expand);
1030 setOperationAction(ISD::SRL, MVT::v2i64, Expand);
1031
1032 setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
1033
1034 // VSX v2i64 only supports non-arithmetic operations.
1035 setOperationAction(ISD::ADD, MVT::v2i64, Expand);
1036 setOperationAction(ISD::SUB, MVT::v2i64, Expand);
1037 }
1038
1039 if (Subtarget.isISA3_1())
1040 setOperationAction(ISD::SETCC, MVT::v1i128, Legal);
1041 else
1042 setOperationAction(ISD::SETCC, MVT::v1i128, Expand);
1043
1044 setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
1045 AddPromotedToType (ISD::LOAD, MVT::v2i64, MVT::v2f64);
1046 setOperationAction(ISD::STORE, MVT::v2i64, Promote);
1047 AddPromotedToType (ISD::STORE, MVT::v2i64, MVT::v2f64);
1048
1049 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);
1050
1051 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i64, Legal);
1052 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i64, Legal);
1053 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2i64, Legal);
1054 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2i64, Legal);
1055 setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
1056 setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
1057 setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
1058 setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);
1059
1060 // Custom handling for partial vectors of integers converted to
1061 // floating point. We already have optimal handling for v2i32 through
1062 // the DAG combine, so those aren't necessary.
1063 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i8, Custom);
1064 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i8, Custom);
1065 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i16, Custom);
1066 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i16, Custom);
1067 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i8, Custom);
1068 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i8, Custom);
1069 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i16, Custom);
1070 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i16, Custom);
1071 setOperationAction(ISD::UINT_TO_FP, MVT::v2i8, Custom);
1072 setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
1073 setOperationAction(ISD::UINT_TO_FP, MVT::v2i16, Custom);
1074 setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
1075 setOperationAction(ISD::SINT_TO_FP, MVT::v2i8, Custom);
1076 setOperationAction(ISD::SINT_TO_FP, MVT::v4i8, Custom);
1077 setOperationAction(ISD::SINT_TO_FP, MVT::v2i16, Custom);
1078 setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
1079
1080 setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
1081 setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
1082 setOperationAction(ISD::FABS, MVT::v4f32, Legal);
1083 setOperationAction(ISD::FABS, MVT::v2f64, Legal);
1084 setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
1085 setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Legal);
1086
1087 setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
1088 setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
1089
 1090 // Handle constrained floating-point operations on vectors.
 1091 // The predicate is `hasVSX` because Altivec instructions do not raise
 1092 // floating-point exceptions but VSX vector instructions do.
1093 setOperationAction(ISD::STRICT_FADD, MVT::v4f32, Legal);
1094 setOperationAction(ISD::STRICT_FSUB, MVT::v4f32, Legal);
1095 setOperationAction(ISD::STRICT_FMUL, MVT::v4f32, Legal);
1096 setOperationAction(ISD::STRICT_FDIV, MVT::v4f32, Legal);
1097 setOperationAction(ISD::STRICT_FMA, MVT::v4f32, Legal);
1098 setOperationAction(ISD::STRICT_FSQRT, MVT::v4f32, Legal);
1099 setOperationAction(ISD::STRICT_FMAXNUM, MVT::v4f32, Legal);
1100 setOperationAction(ISD::STRICT_FMINNUM, MVT::v4f32, Legal);
1101 setOperationAction(ISD::STRICT_FRINT, MVT::v4f32, Legal);
1102 setOperationAction(ISD::STRICT_FFLOOR, MVT::v4f32, Legal);
1103 setOperationAction(ISD::STRICT_FCEIL, MVT::v4f32, Legal);
1104 setOperationAction(ISD::STRICT_FTRUNC, MVT::v4f32, Legal);
1105 setOperationAction(ISD::STRICT_FROUND, MVT::v4f32, Legal);
1106
1107 setOperationAction(ISD::STRICT_FADD, MVT::v2f64, Legal);
1108 setOperationAction(ISD::STRICT_FSUB, MVT::v2f64, Legal);
1109 setOperationAction(ISD::STRICT_FMUL, MVT::v2f64, Legal);
1110 setOperationAction(ISD::STRICT_FDIV, MVT::v2f64, Legal);
1111 setOperationAction(ISD::STRICT_FMA, MVT::v2f64, Legal);
1112 setOperationAction(ISD::STRICT_FSQRT, MVT::v2f64, Legal);
1113 setOperationAction(ISD::STRICT_FMAXNUM, MVT::v2f64, Legal);
1114 setOperationAction(ISD::STRICT_FMINNUM, MVT::v2f64, Legal);
1115 setOperationAction(ISD::STRICT_FRINT, MVT::v2f64, Legal);
1116 setOperationAction(ISD::STRICT_FFLOOR, MVT::v2f64, Legal);
1117 setOperationAction(ISD::STRICT_FCEIL, MVT::v2f64, Legal);
1118 setOperationAction(ISD::STRICT_FTRUNC, MVT::v2f64, Legal);
1119 setOperationAction(ISD::STRICT_FROUND, MVT::v2f64, Legal);
1120
1121 addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
1122 addRegisterClass(MVT::f128, &PPC::VRRCRegClass);
1123
1124 for (MVT FPT : MVT::fp_valuetypes())
1125 setLoadExtAction(ISD::EXTLOAD, MVT::f128, FPT, Expand);
1126
1127 // Expand the SELECT to SELECT_CC
1128 setOperationAction(ISD::SELECT, MVT::f128, Expand);
1129
1130 setTruncStoreAction(MVT::f128, MVT::f64, Expand);
1131 setTruncStoreAction(MVT::f128, MVT::f32, Expand);
1132
1133 // No implementation for these ops for PowerPC.
1134 setOperationAction(ISD::FSIN, MVT::f128, Expand);
1135 setOperationAction(ISD::FCOS, MVT::f128, Expand);
1136 setOperationAction(ISD::FPOW, MVT::f128, Expand);
1137 setOperationAction(ISD::FPOWI, MVT::f128, Expand);
1138 setOperationAction(ISD::FREM, MVT::f128, Expand);
1139 }
1140
1141 if (Subtarget.hasP8Altivec()) {
1142 addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
1143 addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
1144 }
1145
1146 if (Subtarget.hasP9Vector()) {
1147 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
1148 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
1149
 1150 // 128-bit shifts can be accomplished via 3 instructions for SHL and
 1151 // SRL, but not for SRA because of the instructions available:
 1152 // VS{RL} and VS{RL}O.
1153 setOperationAction(ISD::SHL, MVT::v1i128, Legal);
1154 setOperationAction(ISD::SRL, MVT::v1i128, Legal);
1155 setOperationAction(ISD::SRA, MVT::v1i128, Expand);
1156
1157 setOperationAction(ISD::FADD, MVT::f128, Legal);
1158 setOperationAction(ISD::FSUB, MVT::f128, Legal);
1159 setOperationAction(ISD::FDIV, MVT::f128, Legal);
1160 setOperationAction(ISD::FMUL, MVT::f128, Legal);
1161 setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
1162
1163 setOperationAction(ISD::FMA, MVT::f128, Legal);
1164 setCondCodeAction(ISD::SETULT, MVT::f128, Expand);
1165 setCondCodeAction(ISD::SETUGT, MVT::f128, Expand);
1166 setCondCodeAction(ISD::SETUEQ, MVT::f128, Expand);
1167 setCondCodeAction(ISD::SETOGE, MVT::f128, Expand);
1168 setCondCodeAction(ISD::SETOLE, MVT::f128, Expand);
1169 setCondCodeAction(ISD::SETONE, MVT::f128, Expand);
1170
1171 setOperationAction(ISD::FTRUNC, MVT::f128, Legal);
1172 setOperationAction(ISD::FRINT, MVT::f128, Legal);
1173 setOperationAction(ISD::FFLOOR, MVT::f128, Legal);
1174 setOperationAction(ISD::FCEIL, MVT::f128, Legal);
1175 setOperationAction(ISD::FNEARBYINT, MVT::f128, Legal);
1176 setOperationAction(ISD::FROUND, MVT::f128, Legal);
1177
1178 setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
1179 setOperationAction(ISD::FP_ROUND, MVT::f32, Legal);
1180 setOperationAction(ISD::BITCAST, MVT::i128, Custom);
1181
1182 // Handle constrained floating-point operations of fp128
1183 setOperationAction(ISD::STRICT_FADD, MVT::f128, Legal);
1184 setOperationAction(ISD::STRICT_FSUB, MVT::f128, Legal);
1185 setOperationAction(ISD::STRICT_FMUL, MVT::f128, Legal);
1186 setOperationAction(ISD::STRICT_FDIV, MVT::f128, Legal);
1187 setOperationAction(ISD::STRICT_FMA, MVT::f128, Legal);
1188 setOperationAction(ISD::STRICT_FSQRT, MVT::f128, Legal);
1189 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Legal);
1190 setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Legal);
1191 setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
1192 setOperationAction(ISD::STRICT_FRINT, MVT::f128, Legal);
1193 setOperationAction(ISD::STRICT_FNEARBYINT, MVT::f128, Legal);
1194 setOperationAction(ISD::STRICT_FFLOOR, MVT::f128, Legal);
1195 setOperationAction(ISD::STRICT_FCEIL, MVT::f128, Legal);
1196 setOperationAction(ISD::STRICT_FTRUNC, MVT::f128, Legal);
1197 setOperationAction(ISD::STRICT_FROUND, MVT::f128, Legal);
1198 setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
1199 setOperationAction(ISD::BSWAP, MVT::v8i16, Legal);
1200 setOperationAction(ISD::BSWAP, MVT::v4i32, Legal);
1201 setOperationAction(ISD::BSWAP, MVT::v2i64, Legal);
1202 setOperationAction(ISD::BSWAP, MVT::v1i128, Legal);
1203 } else if (Subtarget.hasVSX()) {
1204 setOperationAction(ISD::LOAD, MVT::f128, Promote);
1205 setOperationAction(ISD::STORE, MVT::f128, Promote);
1206
1207 AddPromotedToType(ISD::LOAD, MVT::f128, MVT::v4i32);
1208 AddPromotedToType(ISD::STORE, MVT::f128, MVT::v4i32);
1209
1210    // Set FADD/FSUB as libcall to keep the legalizer from expanding the
1211    // fp_to_uint and int_to_fp.
1212 setOperationAction(ISD::FADD, MVT::f128, LibCall);
1213 setOperationAction(ISD::FSUB, MVT::f128, LibCall);
1214
1215 setOperationAction(ISD::FMUL, MVT::f128, Expand);
1216 setOperationAction(ISD::FDIV, MVT::f128, Expand);
1217 setOperationAction(ISD::FNEG, MVT::f128, Expand);
1218 setOperationAction(ISD::FABS, MVT::f128, Expand);
1219 setOperationAction(ISD::FSQRT, MVT::f128, Expand);
1220 setOperationAction(ISD::FMA, MVT::f128, Expand);
1221 setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
1222
1223 // Expand the fp_extend if the target type is fp128.
1224 setOperationAction(ISD::FP_EXTEND, MVT::f128, Expand);
1225 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Expand);
1226
1227    // Custom-lower the fp_round if the source type is fp128.
1228 for (MVT VT : {MVT::f32, MVT::f64}) {
1229 setOperationAction(ISD::FP_ROUND, VT, Custom);
1230 setOperationAction(ISD::STRICT_FP_ROUND, VT, Custom);
1231 }
1232
1233 setOperationAction(ISD::SETCC, MVT::f128, Custom);
1234 setOperationAction(ISD::STRICT_FSETCC, MVT::f128, Custom);
1235 setOperationAction(ISD::STRICT_FSETCCS, MVT::f128, Custom);
1236 setOperationAction(ISD::BR_CC, MVT::f128, Expand);
1237
1238 // Lower following f128 select_cc pattern:
1239 // select_cc x, y, tv, fv, cc -> select_cc (setcc x, y, cc), 0, tv, fv, NE
1240 setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);
1241
1242 // We need to handle f128 SELECT_CC with integer result type.
1243 setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
1244 setOperationAction(ISD::SELECT_CC, MVT::i64, isPPC64 ? Custom : Expand);
1245 }
1246
1247 if (Subtarget.hasP9Altivec()) {
1248 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
1249 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
1250
1251 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Legal);
1252 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Legal);
1253 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Legal);
1254 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Legal);
1255 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Legal);
1256 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
1257 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
1258 }
1259
1260 if (Subtarget.isISA3_1())
1261 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
1262 }
1263
1264 if (Subtarget.pairedVectorMemops()) {
1265 addRegisterClass(MVT::v256i1, &PPC::VSRpRCRegClass);
1266 setOperationAction(ISD::LOAD, MVT::v256i1, Custom);
1267 setOperationAction(ISD::STORE, MVT::v256i1, Custom);
1268 }
1269 if (Subtarget.hasMMA()) {
1270 addRegisterClass(MVT::v512i1, &PPC::UACCRCRegClass);
1271 setOperationAction(ISD::LOAD, MVT::v512i1, Custom);
1272 setOperationAction(ISD::STORE, MVT::v512i1, Custom);
1273 setOperationAction(ISD::BUILD_VECTOR, MVT::v512i1, Custom);
1274 }
1275
1276 if (Subtarget.has64BitSupport())
1277 setOperationAction(ISD::PREFETCH, MVT::Other, Legal);
1278
1279 if (Subtarget.isISA3_1())
1280 setOperationAction(ISD::SRA, MVT::v1i128, Legal);
1281
1282 setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);
1283
1284 if (!isPPC64) {
1285 setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
1286 setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
1287 }
1288
1289 if (EnableQuadwordAtomics && Subtarget.hasQuadwordAtomics()) {
1290 setMaxAtomicSizeInBitsSupported(128);
1291 setOperationAction(ISD::ATOMIC_LOAD, MVT::i128, Custom);
1292 setOperationAction(ISD::ATOMIC_STORE, MVT::i128, Custom);
1293 setOperationAction(ISD::INTRINSIC_VOID, MVT::i128, Custom);
1294 }
1295
1296 setBooleanContents(ZeroOrOneBooleanContent);
1297
1298 if (Subtarget.hasAltivec()) {
1299 // Altivec instructions set fields to all zeros or all ones.
1300 setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
1301 }
1302
1303 if (!isPPC64) {
1304    // These libcalls are not available on 32-bit targets.
1305 setLibcallName(RTLIB::SHL_I128, nullptr);
1306 setLibcallName(RTLIB::SRL_I128, nullptr);
1307 setLibcallName(RTLIB::SRA_I128, nullptr);
1308 setLibcallName(RTLIB::MULO_I64, nullptr);
1309 }
1310
1311 if (!isPPC64)
1312 setMaxAtomicSizeInBitsSupported(32);
1313
1314 setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1);
1315
1316 // We have target-specific dag combine patterns for the following nodes:
1317 setTargetDAGCombine(ISD::ADD);
1318 setTargetDAGCombine(ISD::SHL);
1319 setTargetDAGCombine(ISD::SRA);
1320 setTargetDAGCombine(ISD::SRL);
1321 setTargetDAGCombine(ISD::MUL);
1322 setTargetDAGCombine(ISD::FMA);
1323 setTargetDAGCombine(ISD::SINT_TO_FP);
1324 setTargetDAGCombine(ISD::BUILD_VECTOR);
1325 if (Subtarget.hasFPCVT())
1326 setTargetDAGCombine(ISD::UINT_TO_FP);
1327 setTargetDAGCombine(ISD::LOAD);
1328 setTargetDAGCombine(ISD::STORE);
1329 setTargetDAGCombine(ISD::BR_CC);
1330 if (Subtarget.useCRBits())
1331 setTargetDAGCombine(ISD::BRCOND);
1332 setTargetDAGCombine(ISD::BSWAP);
1333 setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
1334 setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
1335 setTargetDAGCombine(ISD::INTRINSIC_VOID);
1336
1337 setTargetDAGCombine(ISD::SIGN_EXTEND);
1338 setTargetDAGCombine(ISD::ZERO_EXTEND);
1339 setTargetDAGCombine(ISD::ANY_EXTEND);
1340
1341 setTargetDAGCombine(ISD::TRUNCATE);
1342 setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
1343
1344
1345 if (Subtarget.useCRBits()) {
1346 setTargetDAGCombine(ISD::TRUNCATE);
1347 setTargetDAGCombine(ISD::SETCC);
1348 setTargetDAGCombine(ISD::SELECT_CC);
1349 }
1350
1351 if (Subtarget.hasP9Altivec()) {
1352 setTargetDAGCombine(ISD::ABS);
1353 setTargetDAGCombine(ISD::VSELECT);
1354 }
1355
1356 setLibcallName(RTLIB::LOG_F128, "logf128");
1357 setLibcallName(RTLIB::LOG2_F128, "log2f128");
1358 setLibcallName(RTLIB::LOG10_F128, "log10f128");
1359 setLibcallName(RTLIB::EXP_F128, "expf128");
1360 setLibcallName(RTLIB::EXP2_F128, "exp2f128");
1361 setLibcallName(RTLIB::SIN_F128, "sinf128");
1362 setLibcallName(RTLIB::COS_F128, "cosf128");
1363 setLibcallName(RTLIB::POW_F128, "powf128");
1364 setLibcallName(RTLIB::FMIN_F128, "fminf128");
1365 setLibcallName(RTLIB::FMAX_F128, "fmaxf128");
1366 setLibcallName(RTLIB::REM_F128, "fmodf128");
1367 setLibcallName(RTLIB::SQRT_F128, "sqrtf128");
1368 setLibcallName(RTLIB::CEIL_F128, "ceilf128");
1369 setLibcallName(RTLIB::FLOOR_F128, "floorf128");
1370 setLibcallName(RTLIB::TRUNC_F128, "truncf128");
1371 setLibcallName(RTLIB::ROUND_F128, "roundf128");
1372 setLibcallName(RTLIB::LROUND_F128, "lroundf128");
1373 setLibcallName(RTLIB::LLROUND_F128, "llroundf128");
1374 setLibcallName(RTLIB::RINT_F128, "rintf128");
1375 setLibcallName(RTLIB::LRINT_F128, "lrintf128");
1376 setLibcallName(RTLIB::LLRINT_F128, "llrintf128");
1377 setLibcallName(RTLIB::NEARBYINT_F128, "nearbyintf128");
1378 setLibcallName(RTLIB::FMA_F128, "fmaf128");
1379
1380 // With 32 condition bits, we don't need to sink (and duplicate) compares
1381 // aggressively in CodeGenPrep.
1382 if (Subtarget.useCRBits()) {
1383 setHasMultipleConditionRegisters();
1384 setJumpIsExpensive();
1385 }
1386
1387 setMinFunctionAlignment(Align(4));
1388
1389 switch (Subtarget.getCPUDirective()) {
1390 default: break;
1391 case PPC::DIR_970:
1392 case PPC::DIR_A2:
1393 case PPC::DIR_E500:
1394 case PPC::DIR_E500mc:
1395 case PPC::DIR_E5500:
1396 case PPC::DIR_PWR4:
1397 case PPC::DIR_PWR5:
1398 case PPC::DIR_PWR5X:
1399 case PPC::DIR_PWR6:
1400 case PPC::DIR_PWR6X:
1401 case PPC::DIR_PWR7:
1402 case PPC::DIR_PWR8:
1403 case PPC::DIR_PWR9:
1404 case PPC::DIR_PWR10:
1405 case PPC::DIR_PWR_FUTURE:
1406 setPrefLoopAlignment(Align(16));
1407 setPrefFunctionAlignment(Align(16));
1408 break;
1409 }
1410
1411 if (Subtarget.enableMachineScheduler())
1412 setSchedulingPreference(Sched::Source);
1413 else
1414 setSchedulingPreference(Sched::Hybrid);
1415
1416 computeRegisterProperties(STI.getRegisterInfo());
1417
1418 // The Freescale cores do better with aggressive inlining of memcpy and
1419  // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
1420 if (Subtarget.getCPUDirective() == PPC::DIR_E500mc ||
1421 Subtarget.getCPUDirective() == PPC::DIR_E5500) {
1422 MaxStoresPerMemset = 32;
1423 MaxStoresPerMemsetOptSize = 16;
1424 MaxStoresPerMemcpy = 32;
1425 MaxStoresPerMemcpyOptSize = 8;
1426 MaxStoresPerMemmove = 32;
1427 MaxStoresPerMemmoveOptSize = 8;
1428 } else if (Subtarget.getCPUDirective() == PPC::DIR_A2) {
1429 // The A2 also benefits from (very) aggressive inlining of memcpy and
1430    // friends. The overhead of the function call, even when warm, can be
1431 // over one hundred cycles.
1432 MaxStoresPerMemset = 128;
1433 MaxStoresPerMemcpy = 128;
1434 MaxStoresPerMemmove = 128;
1435 MaxLoadsPerMemcmp = 128;
1436 } else {
1437 MaxLoadsPerMemcmp = 8;
1438 MaxLoadsPerMemcmpOptSize = 4;
1439 }
1440
1441 IsStrictFPEnabled = true;
1442
1443 // Let the subtarget (CPU) decide if a predictable select is more expensive
1444 // than the corresponding branch. This information is used in CGP to decide
1445 // when to convert selects into branches.
1446 PredictableSelectIsExpensive = Subtarget.isPredictableSelectIsExpensive();
1447}
1448
1449// *********************************** NOTE ************************************
1450// For selecting load and store instructions, the addressing modes are defined
1451// as ComplexPatterns in PPCInstrInfo.td, which are then utilized in the TD
1452// patterns to match the load and store instructions.
1453//
1454// The TD definitions for the addressing modes correspond to their respective
1455// Select<AddrMode>Form() function in PPCISelDAGToDAG.cpp. These functions rely
1456// on SelectOptimalAddrMode(), which calls computeMOFlags() to compute the
1457// address mode flags of a particular node. Afterwards, the computed address
1458// flags are passed into getAddrModeForFlags() in order to retrieve the optimal
1459// addressing mode. SelectOptimalAddrMode() then sets the Base and Displacement
1460// accordingly, based on the preferred addressing mode.
1461//
1462// Within PPCISelLowering.h, there are two enums: MemOpFlags and AddrMode.
1463// MemOpFlags contains all the possible flags that can be used to compute the
1464// optimal addressing mode for load and store instructions.
1465// AddrMode contains all the possible load and store addressing modes available
1466// on Power (such as DForm, DSForm, DQForm, XForm, etc.)
1467//
1468// When adding new load and store instructions, it is possible that new address
1469// flags may need to be added into MemOpFlags, and a new addressing mode will
1470// need to be added to AddrMode. An entry for the new addressing mode (consisting
1471// of the minimal and main distinguishing address flags for the new load/store
1472// instructions) will need to be added into initializeAddrModeMap() below.
1473// Finally, when adding new addressing modes, getAddrModeForFlags() will need
1474// to be updated to account for selecting the optimal addressing mode.
1475// *****************************************************************************
1476/// Initialize the map that relates the different addressing modes of the load
1477/// and store instructions to a set of flags. This ensures the load/store
1478/// instruction is correctly matched during instruction selection.
1479void PPCTargetLowering::initializeAddrModeMap() {
1480 AddrModesMap[PPC::AM_DForm] = {
1481 // LWZ, STW
1482 PPC::MOF_ZExt | PPC::MOF_RPlusSImm16 | PPC::MOF_WordInt,
1483 PPC::MOF_ZExt | PPC::MOF_RPlusLo | PPC::MOF_WordInt,
1484 PPC::MOF_ZExt | PPC::MOF_NotAddNorCst | PPC::MOF_WordInt,
1485 PPC::MOF_ZExt | PPC::MOF_AddrIsSImm32 | PPC::MOF_WordInt,
1486 // LBZ, LHZ, STB, STH
1487 PPC::MOF_ZExt | PPC::MOF_RPlusSImm16 | PPC::MOF_SubWordInt,
1488 PPC::MOF_ZExt | PPC::MOF_RPlusLo | PPC::MOF_SubWordInt,
1489 PPC::MOF_ZExt | PPC::MOF_NotAddNorCst | PPC::MOF_SubWordInt,
1490 PPC::MOF_ZExt | PPC::MOF_AddrIsSImm32 | PPC::MOF_SubWordInt,
1491 // LHA
1492 PPC::MOF_SExt | PPC::MOF_RPlusSImm16 | PPC::MOF_SubWordInt,
1493 PPC::MOF_SExt | PPC::MOF_RPlusLo | PPC::MOF_SubWordInt,
1494 PPC::MOF_SExt | PPC::MOF_NotAddNorCst | PPC::MOF_SubWordInt,
1495 PPC::MOF_SExt | PPC::MOF_AddrIsSImm32 | PPC::MOF_SubWordInt,
1496 // LFS, LFD, STFS, STFD
1497 PPC::MOF_RPlusSImm16 | PPC::MOF_ScalarFloat | PPC::MOF_SubtargetBeforeP9,
1498 PPC::MOF_RPlusLo | PPC::MOF_ScalarFloat | PPC::MOF_SubtargetBeforeP9,
1499 PPC::MOF_NotAddNorCst | PPC::MOF_ScalarFloat | PPC::MOF_SubtargetBeforeP9,
1500 PPC::MOF_AddrIsSImm32 | PPC::MOF_ScalarFloat | PPC::MOF_SubtargetBeforeP9,
1501 };
1502 AddrModesMap[PPC::AM_DSForm] = {
1503 // LWA
1504 PPC::MOF_SExt | PPC::MOF_RPlusSImm16Mult4 | PPC::MOF_WordInt,
1505 PPC::MOF_SExt | PPC::MOF_NotAddNorCst | PPC::MOF_WordInt,
1506 PPC::MOF_SExt | PPC::MOF_AddrIsSImm32 | PPC::MOF_WordInt,
1507 // LD, STD
1508 PPC::MOF_RPlusSImm16Mult4 | PPC::MOF_DoubleWordInt,
1509 PPC::MOF_NotAddNorCst | PPC::MOF_DoubleWordInt,
1510 PPC::MOF_AddrIsSImm32 | PPC::MOF_DoubleWordInt,
1511 // DFLOADf32, DFLOADf64, DSTOREf32, DSTOREf64
1512 PPC::MOF_RPlusSImm16Mult4 | PPC::MOF_ScalarFloat | PPC::MOF_SubtargetP9,
1513 PPC::MOF_NotAddNorCst | PPC::MOF_ScalarFloat | PPC::MOF_SubtargetP9,
1514 PPC::MOF_AddrIsSImm32 | PPC::MOF_ScalarFloat | PPC::MOF_SubtargetP9,
1515 };
1516 AddrModesMap[PPC::AM_DQForm] = {
1517 // LXV, STXV
1518 PPC::MOF_RPlusSImm16Mult16 | PPC::MOF_Vector | PPC::MOF_SubtargetP9,
1519 PPC::MOF_NotAddNorCst | PPC::MOF_Vector | PPC::MOF_SubtargetP9,
1520 PPC::MOF_AddrIsSImm32 | PPC::MOF_Vector | PPC::MOF_SubtargetP9,
1521 PPC::MOF_RPlusSImm16Mult16 | PPC::MOF_Vector256 | PPC::MOF_SubtargetP10,
1522 PPC::MOF_NotAddNorCst | PPC::MOF_Vector256 | PPC::MOF_SubtargetP10,
1523 PPC::MOF_AddrIsSImm32 | PPC::MOF_Vector256 | PPC::MOF_SubtargetP10,
1524 };
1525 // TODO: Add mapping for quadword load/store.
1526}
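// Worked example (illustrative): a zero-extending i32 load whose address is
// a base register plus a signed 16-bit immediate computes the flag set
//   PPC::MOF_ZExt | PPC::MOF_RPlusSImm16 | PPC::MOF_WordInt,
// which matches the first AM_DForm entry above, so getAddrModeForFlags()
// resolves it to the D-Form (LWZ-style) addressing mode.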
1527
1528/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
1529/// the desired ByVal argument alignment.
1530static void getMaxByValAlign(Type *Ty, Align &MaxAlign, Align MaxMaxAlign) {
1531 if (MaxAlign == MaxMaxAlign)
1532 return;
1533 if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
1534 if (MaxMaxAlign >= 32 &&
1535 VTy->getPrimitiveSizeInBits().getFixedSize() >= 256)
1536 MaxAlign = Align(32);
1537 else if (VTy->getPrimitiveSizeInBits().getFixedSize() >= 128 &&
1538 MaxAlign < 16)
1539 MaxAlign = Align(16);
1540 } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
1541 Align EltAlign;
1542 getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
1543 if (EltAlign > MaxAlign)
1544 MaxAlign = EltAlign;
1545 } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
1546 for (auto *EltTy : STy->elements()) {
1547 Align EltAlign;
1548 getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
1549 if (EltAlign > MaxAlign)
1550 MaxAlign = EltAlign;
1551 if (MaxAlign == MaxMaxAlign)
1552 break;
1553 }
1554 }
1555}
1556
1557/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
1558/// function arguments in the caller parameter area.
1559unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty,
1560 const DataLayout &DL) const {
1561  // 16-byte and wider vectors are passed on a 16-byte boundary.
1562  // The rest is passed on an 8-byte (PPC64) or 4-byte (PPC32) boundary.
1563 Align Alignment = Subtarget.isPPC64() ? Align(8) : Align(4);
1564 if (Subtarget.hasAltivec())
1565 getMaxByValAlign(Ty, Alignment, Align(16));
1566 return Alignment.value();
1567}
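// Example (illustrative): with Altivec enabled, a by-value struct containing
// a 128-bit vector member hits getMaxByValAlign's 128-bit case and is
// aligned to 16 bytes; a struct of plain scalars keeps the default 8-byte
// (PPC64) or 4-byte (PPC32) alignment computed above.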
1568
1569bool PPCTargetLowering::useSoftFloat() const {
1570 return Subtarget.useSoftFloat();
1571}
1572
1573bool PPCTargetLowering::hasSPE() const {
1574 return Subtarget.hasSPE();
1575}
1576
1577bool PPCTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const {
1578 return VT.isScalarInteger();
1579}
1580
1581const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
1582 switch ((PPCISD::NodeType)Opcode) {
1583 case PPCISD::FIRST_NUMBER: break;
1584 case PPCISD::FSEL: return "PPCISD::FSEL";
1585 case PPCISD::XSMAXCDP: return "PPCISD::XSMAXCDP";
1586 case PPCISD::XSMINCDP: return "PPCISD::XSMINCDP";
1587 case PPCISD::FCFID: return "PPCISD::FCFID";
1588 case PPCISD::FCFIDU: return "PPCISD::FCFIDU";
1589 case PPCISD::FCFIDS: return "PPCISD::FCFIDS";
1590 case PPCISD::FCFIDUS: return "PPCISD::FCFIDUS";
1591 case PPCISD::FCTIDZ: return "PPCISD::FCTIDZ";
1592 case PPCISD::FCTIWZ: return "PPCISD::FCTIWZ";
1593 case PPCISD::FCTIDUZ: return "PPCISD::FCTIDUZ";
1594 case PPCISD::FCTIWUZ: return "PPCISD::FCTIWUZ";
1595 case PPCISD::FP_TO_UINT_IN_VSR:
1596    return "PPCISD::FP_TO_UINT_IN_VSR";
1597 case PPCISD::FP_TO_SINT_IN_VSR:
1598 return "PPCISD::FP_TO_SINT_IN_VSR";
1599 case PPCISD::FRE: return "PPCISD::FRE";
1600 case PPCISD::FRSQRTE: return "PPCISD::FRSQRTE";
1601 case PPCISD::FTSQRT:
1602 return "PPCISD::FTSQRT";
1603 case PPCISD::FSQRT:
1604 return "PPCISD::FSQRT";
1605 case PPCISD::STFIWX: return "PPCISD::STFIWX";
1606 case PPCISD::VPERM: return "PPCISD::VPERM";
1607 case PPCISD::XXSPLT: return "PPCISD::XXSPLT";
1608 case PPCISD::XXSPLTI_SP_TO_DP:
1609 return "PPCISD::XXSPLTI_SP_TO_DP";
1610 case PPCISD::XXSPLTI32DX:
1611 return "PPCISD::XXSPLTI32DX";
1612 case PPCISD::VECINSERT: return "PPCISD::VECINSERT";
1613 case PPCISD::XXPERMDI: return "PPCISD::XXPERMDI";
1614 case PPCISD::VECSHL: return "PPCISD::VECSHL";
1615 case PPCISD::CMPB: return "PPCISD::CMPB";
1616 case PPCISD::Hi: return "PPCISD::Hi";
1617 case PPCISD::Lo: return "PPCISD::Lo";
1618 case PPCISD::TOC_ENTRY: return "PPCISD::TOC_ENTRY";
1619 case PPCISD::ATOMIC_CMP_SWAP_8: return "PPCISD::ATOMIC_CMP_SWAP_8";
1620 case PPCISD::ATOMIC_CMP_SWAP_16: return "PPCISD::ATOMIC_CMP_SWAP_16";
1621 case PPCISD::DYNALLOC: return "PPCISD::DYNALLOC";
1622 case PPCISD::DYNAREAOFFSET: return "PPCISD::DYNAREAOFFSET";
1623 case PPCISD::PROBED_ALLOCA: return "PPCISD::PROBED_ALLOCA";
1624 case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg";
1625 case PPCISD::SRL: return "PPCISD::SRL";
1626 case PPCISD::SRA: return "PPCISD::SRA";
1627 case PPCISD::SHL: return "PPCISD::SHL";
1628 case PPCISD::SRA_ADDZE: return "PPCISD::SRA_ADDZE";
1629 case PPCISD::CALL: return "PPCISD::CALL";
1630 case PPCISD::CALL_NOP: return "PPCISD::CALL_NOP";
1631 case PPCISD::CALL_NOTOC: return "PPCISD::CALL_NOTOC";
1632 case PPCISD::MTCTR: return "PPCISD::MTCTR";
1633 case PPCISD::BCTRL: return "PPCISD::BCTRL";
1634 case PPCISD::BCTRL_LOAD_TOC: return "PPCISD::BCTRL_LOAD_TOC";
1635 case PPCISD::RET_FLAG: return "PPCISD::RET_FLAG";
1636 case PPCISD::READ_TIME_BASE: return "PPCISD::READ_TIME_BASE";
1637 case PPCISD::EH_SJLJ_SETJMP: return "PPCISD::EH_SJLJ_SETJMP";
1638 case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
1639 case PPCISD::MFOCRF: return "PPCISD::MFOCRF";
1640 case PPCISD::MFVSR: return "PPCISD::MFVSR";
1641 case PPCISD::MTVSRA: return "PPCISD::MTVSRA";
1642 case PPCISD::MTVSRZ: return "PPCISD::MTVSRZ";
1643 case PPCISD::SINT_VEC_TO_FP: return "PPCISD::SINT_VEC_TO_FP";
1644 case PPCISD::UINT_VEC_TO_FP: return "PPCISD::UINT_VEC_TO_FP";
1645 case PPCISD::SCALAR_TO_VECTOR_PERMUTED:
1646 return "PPCISD::SCALAR_TO_VECTOR_PERMUTED";
1647 case PPCISD::ANDI_rec_1_EQ_BIT:
1648 return "PPCISD::ANDI_rec_1_EQ_BIT";
1649 case PPCISD::ANDI_rec_1_GT_BIT:
1650 return "PPCISD::ANDI_rec_1_GT_BIT";
1651 case PPCISD::VCMP: return "PPCISD::VCMP";
1652 case PPCISD::VCMP_rec: return "PPCISD::VCMP_rec";
1653 case PPCISD::LBRX: return "PPCISD::LBRX";
1654 case PPCISD::STBRX: return "PPCISD::STBRX";
1655 case PPCISD::LFIWAX: return "PPCISD::LFIWAX";
1656 case PPCISD::LFIWZX: return "PPCISD::LFIWZX";
1657 case PPCISD::LXSIZX: return "PPCISD::LXSIZX";
1658 case PPCISD::STXSIX: return "PPCISD::STXSIX";
1659 case PPCISD::VEXTS: return "PPCISD::VEXTS";
1660 case PPCISD::LXVD2X: return "PPCISD::LXVD2X";
1661 case PPCISD::STXVD2X: return "PPCISD::STXVD2X";
1662 case PPCISD::LOAD_VEC_BE: return "PPCISD::LOAD_VEC_BE";
1663 case PPCISD::STORE_VEC_BE: return "PPCISD::STORE_VEC_BE";
1664 case PPCISD::ST_VSR_SCAL_INT:
1665 return "PPCISD::ST_VSR_SCAL_INT";
1666 case PPCISD::COND_BRANCH: return "PPCISD::COND_BRANCH";
1667 case PPCISD::BDNZ: return "PPCISD::BDNZ";
1668 case PPCISD::BDZ: return "PPCISD::BDZ";
1669 case PPCISD::MFFS: return "PPCISD::MFFS";
1670 case PPCISD::FADDRTZ: return "PPCISD::FADDRTZ";
1671 case PPCISD::TC_RETURN: return "PPCISD::TC_RETURN";
1672 case PPCISD::CR6SET: return "PPCISD::CR6SET";
1673 case PPCISD::CR6UNSET: return "PPCISD::CR6UNSET";
1674 case PPCISD::PPC32_GOT: return "PPCISD::PPC32_GOT";
1675 case PPCISD::PPC32_PICGOT: return "PPCISD::PPC32_PICGOT";
1676 case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
1677 case PPCISD::LD_GOT_TPREL_L: return "PPCISD::LD_GOT_TPREL_L";
1678 case PPCISD::ADD_TLS: return "PPCISD::ADD_TLS";
1679 case PPCISD::ADDIS_TLSGD_HA: return "PPCISD::ADDIS_TLSGD_HA";
1680 case PPCISD::ADDI_TLSGD_L: return "PPCISD::ADDI_TLSGD_L";
1681 case PPCISD::GET_TLS_ADDR: return "PPCISD::GET_TLS_ADDR";
1682 case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR";
1683 case PPCISD::TLSGD_AIX: return "PPCISD::TLSGD_AIX";
1684 case PPCISD::ADDIS_TLSLD_HA: return "PPCISD::ADDIS_TLSLD_HA";
1685 case PPCISD::ADDI_TLSLD_L: return "PPCISD::ADDI_TLSLD_L";
1686 case PPCISD::GET_TLSLD_ADDR: return "PPCISD::GET_TLSLD_ADDR";
1687 case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR";
1688 case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
1689 case PPCISD::ADDI_DTPREL_L: return "PPCISD::ADDI_DTPREL_L";
1690 case PPCISD::PADDI_DTPREL:
1691 return "PPCISD::PADDI_DTPREL";
1692 case PPCISD::VADD_SPLAT: return "PPCISD::VADD_SPLAT";
1693 case PPCISD::SC: return "PPCISD::SC";
1694 case PPCISD::CLRBHRB: return "PPCISD::CLRBHRB";
1695 case PPCISD::MFBHRBE: return "PPCISD::MFBHRBE";
1696 case PPCISD::RFEBB: return "PPCISD::RFEBB";
1697 case PPCISD::XXSWAPD: return "PPCISD::XXSWAPD";
1698 case PPCISD::SWAP_NO_CHAIN: return "PPCISD::SWAP_NO_CHAIN";
1699 case PPCISD::VABSD: return "PPCISD::VABSD";
1700 case PPCISD::BUILD_FP128: return "PPCISD::BUILD_FP128";
1701 case PPCISD::BUILD_SPE64: return "PPCISD::BUILD_SPE64";
1702 case PPCISD::EXTRACT_SPE: return "PPCISD::EXTRACT_SPE";
1703 case PPCISD::EXTSWSLI: return "PPCISD::EXTSWSLI";
1704 case PPCISD::LD_VSX_LH: return "PPCISD::LD_VSX_LH";
1705 case PPCISD::FP_EXTEND_HALF: return "PPCISD::FP_EXTEND_HALF";
1706 case PPCISD::MAT_PCREL_ADDR: return "PPCISD::MAT_PCREL_ADDR";
1707 case PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR:
1708 return "PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR";
1709 case PPCISD::TLS_LOCAL_EXEC_MAT_ADDR:
1710 return "PPCISD::TLS_LOCAL_EXEC_MAT_ADDR";
1711 case PPCISD::ACC_BUILD: return "PPCISD::ACC_BUILD";
1712 case PPCISD::PAIR_BUILD: return "PPCISD::PAIR_BUILD";
1713 case PPCISD::EXTRACT_VSX_REG: return "PPCISD::EXTRACT_VSX_REG";
1714 case PPCISD::XXMFACC: return "PPCISD::XXMFACC";
1715 case PPCISD::LD_SPLAT: return "PPCISD::LD_SPLAT";
1716 case PPCISD::FNMSUB: return "PPCISD::FNMSUB";
1717 case PPCISD::STRICT_FADDRTZ:
1718 return "PPCISD::STRICT_FADDRTZ";
1719 case PPCISD::STRICT_FCTIDZ:
1720 return "PPCISD::STRICT_FCTIDZ";
1721 case PPCISD::STRICT_FCTIWZ:
1722 return "PPCISD::STRICT_FCTIWZ";
1723 case PPCISD::STRICT_FCTIDUZ:
1724 return "PPCISD::STRICT_FCTIDUZ";
1725 case PPCISD::STRICT_FCTIWUZ:
1726 return "PPCISD::STRICT_FCTIWUZ";
1727 case PPCISD::STRICT_FCFID:
1728 return "PPCISD::STRICT_FCFID";
1729 case PPCISD::STRICT_FCFIDU:
1730 return "PPCISD::STRICT_FCFIDU";
1731 case PPCISD::STRICT_FCFIDS:
1732 return "PPCISD::STRICT_FCFIDS";
1733 case PPCISD::STRICT_FCFIDUS:
1734 return "PPCISD::STRICT_FCFIDUS";
1735 case PPCISD::LXVRZX: return "PPCISD::LXVRZX";
1736 }
1737 return nullptr;
1738}
1739
1740EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C,
1741 EVT VT) const {
1742 if (!VT.isVector())
1743 return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;
1744
1745 return VT.changeVectorElementTypeToInteger();
1746}
1747
1748bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const {
1749 assert(VT.isFloatingPoint() && "Non-floating-point FMA?")(static_cast<void> (0));
1750 return true;
1751}
1752
1753//===----------------------------------------------------------------------===//
1754// Node matching predicates, for use by the tblgen matching code.
1755//===----------------------------------------------------------------------===//
1756
1757/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
1758static bool isFloatingPointZero(SDValue Op) {
1759 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
1760 return CFP->getValueAPF().isZero();
1761 else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
1762 // Maybe this has already been legalized into the constant pool?
1763 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
1764 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
1765 return CFP->getValueAPF().isZero();
1766 }
1767 return false;
1768}
1769
1770/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return
1771/// true if Op is undef or if it matches the specified value.
1772static bool isConstantOrUndef(int Op, int Val) {
1773 return Op < 0 || Op == Val;
1774}
1775
1776/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
1777/// VPKUHUM instruction.
1778/// The ShuffleKind distinguishes between big-endian operations with
1779/// two different inputs (0), either-endian operations with two identical
1780/// inputs (1), and little-endian operations with two different inputs (2).
1781/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
1782bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1783 SelectionDAG &DAG) {
1784 bool IsLE = DAG.getDataLayout().isLittleEndian();
1785 if (ShuffleKind == 0) {
1786 if (IsLE)
1787 return false;
1788 for (unsigned i = 0; i != 16; ++i)
1789 if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
1790 return false;
1791 } else if (ShuffleKind == 2) {
1792 if (!IsLE)
1793 return false;
1794 for (unsigned i = 0; i != 16; ++i)
1795 if (!isConstantOrUndef(N->getMaskElt(i), i*2))
1796 return false;
1797 } else if (ShuffleKind == 1) {
1798 unsigned j = IsLE ? 0 : 1;
1799 for (unsigned i = 0; i != 8; ++i)
1800 if (!isConstantOrUndef(N->getMaskElt(i), i*2+j) ||
1801 !isConstantOrUndef(N->getMaskElt(i+8), i*2+j))
1802 return false;
1803 }
1804 return true;
1805}
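// Worked example (illustrative): in the big-endian two-input case
// (ShuffleKind 0), every mask element i must equal i*2+1, so the mask
//   <1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31>
// is accepted; it selects the low-order byte of each halfword of the
// concatenated inputs, which is exactly what vpkuhum produces.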
1806
1807/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
1808/// VPKUWUM instruction.
1809/// The ShuffleKind distinguishes between big-endian operations with
1810/// two different inputs (0), either-endian operations with two identical
1811/// inputs (1), and little-endian operations with two different inputs (2).
1812/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
1813bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1814 SelectionDAG &DAG) {
1815 bool IsLE = DAG.getDataLayout().isLittleEndian();
1816 if (ShuffleKind == 0) {
1817 if (IsLE)
1818 return false;
1819 for (unsigned i = 0; i != 16; i += 2)
1820 if (!isConstantOrUndef(N->getMaskElt(i ), i*2+2) ||
1821 !isConstantOrUndef(N->getMaskElt(i+1), i*2+3))
1822 return false;
1823 } else if (ShuffleKind == 2) {
1824 if (!IsLE)
1825 return false;
1826 for (unsigned i = 0; i != 16; i += 2)
1827 if (!isConstantOrUndef(N->getMaskElt(i ), i*2) ||
1828 !isConstantOrUndef(N->getMaskElt(i+1), i*2+1))
1829 return false;
1830 } else if (ShuffleKind == 1) {
1831 unsigned j = IsLE ? 0 : 2;
1832 for (unsigned i = 0; i != 8; i += 2)
1833 if (!isConstantOrUndef(N->getMaskElt(i ), i*2+j) ||
1834 !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) ||
1835 !isConstantOrUndef(N->getMaskElt(i+8), i*2+j) ||
1836 !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1))
1837 return false;
1838 }
1839 return true;
1840}
1841
1842/// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
1843/// VPKUDUM instruction, AND the VPKUDUM instruction exists for the
1844/// current subtarget.
1845///
1846/// The ShuffleKind distinguishes between big-endian operations with
1847/// two different inputs (0), either-endian operations with two identical
1848/// inputs (1), and little-endian operations with two different inputs (2).
1849/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
1850bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1851 SelectionDAG &DAG) {
1852 const PPCSubtarget& Subtarget =
1853 static_cast<const PPCSubtarget&>(DAG.getSubtarget());
1854 if (!Subtarget.hasP8Vector())
1855 return false;
1856
1857 bool IsLE = DAG.getDataLayout().isLittleEndian();
1858 if (ShuffleKind == 0) {
1859 if (IsLE)
1860 return false;
1861 for (unsigned i = 0; i != 16; i += 4)
1862 if (!isConstantOrUndef(N->getMaskElt(i ), i*2+4) ||
1863 !isConstantOrUndef(N->getMaskElt(i+1), i*2+5) ||
1864 !isConstantOrUndef(N->getMaskElt(i+2), i*2+6) ||
1865 !isConstantOrUndef(N->getMaskElt(i+3), i*2+7))
1866 return false;
1867 } else if (ShuffleKind == 2) {
1868 if (!IsLE)
1869 return false;
1870 for (unsigned i = 0; i != 16; i += 4)
1871 if (!isConstantOrUndef(N->getMaskElt(i ), i*2) ||
1872 !isConstantOrUndef(N->getMaskElt(i+1), i*2+1) ||
1873 !isConstantOrUndef(N->getMaskElt(i+2), i*2+2) ||
1874 !isConstantOrUndef(N->getMaskElt(i+3), i*2+3))
1875 return false;
1876 } else if (ShuffleKind == 1) {
1877 unsigned j = IsLE ? 0 : 4;
1878 for (unsigned i = 0; i != 8; i += 4)
1879 if (!isConstantOrUndef(N->getMaskElt(i ), i*2+j) ||
1880 !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) ||
1881 !isConstantOrUndef(N->getMaskElt(i+2), i*2+j+2) ||
1882 !isConstantOrUndef(N->getMaskElt(i+3), i*2+j+3) ||
1883 !isConstantOrUndef(N->getMaskElt(i+8), i*2+j) ||
1884 !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1) ||
1885 !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) ||
1886 !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3))
1887 return false;
1888 }
1889 return true;
1890}
1891
1892/// isVMerge - Common function, used to match vmrg* shuffles.
1893///
1894static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
1895 unsigned LHSStart, unsigned RHSStart) {
1896 if (N->getValueType(0) != MVT::v16i8)
1897 return false;
1898 assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&(static_cast<void> (0))
1899 "Unsupported merge size!")(static_cast<void> (0));
1900
1901 for (unsigned i = 0; i != 8/UnitSize; ++i) // Step over units
1902 for (unsigned j = 0; j != UnitSize; ++j) { // Step over bytes within unit
1903 if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
1904 LHSStart+j+i*UnitSize) ||
1905 !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
1906 RHSStart+j+i*UnitSize))
1907 return false;
1908 }
1909 return true;
1910}
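// Worked example (illustrative): isVMerge(N, /*UnitSize=*/4, /*LHSStart=*/8,
// /*RHSStart=*/24) -- the big-endian vmrglw case below -- accepts the mask
//   <8,9,10,11, 24,25,26,27, 12,13,14,15, 28,29,30,31>,
// i.e. words 2 and 3 of the first input interleaved with words 2 and 3 of
// the second input.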
1911
1912/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
1913/// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
1914/// The ShuffleKind distinguishes between big-endian merges with two
1915/// different inputs (0), either-endian merges with two identical inputs (1),
1916/// and little-endian merges with two different inputs (2). For the latter,
1917/// the input operands are swapped (see PPCInstrAltivec.td).
1918bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
1919 unsigned ShuffleKind, SelectionDAG &DAG) {
1920 if (DAG.getDataLayout().isLittleEndian()) {
1921 if (ShuffleKind == 1) // unary
1922 return isVMerge(N, UnitSize, 0, 0);
1923 else if (ShuffleKind == 2) // swapped
1924 return isVMerge(N, UnitSize, 0, 16);
1925 else
1926 return false;
1927 } else {
1928 if (ShuffleKind == 1) // unary
1929 return isVMerge(N, UnitSize, 8, 8);
1930 else if (ShuffleKind == 0) // normal
1931 return isVMerge(N, UnitSize, 8, 24);
1932 else
1933 return false;
1934 }
1935}
1936
1937/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
1938/// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
1939/// The ShuffleKind distinguishes between big-endian merges with two
1940/// different inputs (0), either-endian merges with two identical inputs (1),
1941/// and little-endian merges with two different inputs (2). For the latter,
1942/// the input operands are swapped (see PPCInstrAltivec.td).
1943bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
1944 unsigned ShuffleKind, SelectionDAG &DAG) {
1945 if (DAG.getDataLayout().isLittleEndian()) {
1946 if (ShuffleKind == 1) // unary
1947 return isVMerge(N, UnitSize, 8, 8);
1948 else if (ShuffleKind == 2) // swapped
1949 return isVMerge(N, UnitSize, 8, 24);
1950 else
1951 return false;
1952 } else {
1953 if (ShuffleKind == 1) // unary
1954 return isVMerge(N, UnitSize, 0, 0);
1955 else if (ShuffleKind == 0) // normal
1956 return isVMerge(N, UnitSize, 0, 16);
1957 else
1958 return false;
1959 }
1960}
1961
1962/**
1963 * Common function used to match vmrgew and vmrgow shuffles
1964 *
1965 * The indexOffset determines whether to look for even or odd words in
1966 * the shuffle mask. This is based on the endianness of the target
1967 * machine.
1968 * - Little Endian:
1969 * - Use offset of 0 to check for odd elements
1970 * - Use offset of 4 to check for even elements
1971 * - Big Endian:
1972 * - Use offset of 0 to check for even elements
1973 * - Use offset of 4 to check for odd elements
1974 * A detailed description of the vector element ordering for little endian and
1975 * big endian can be found at
1976 * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html
1977 * Targeting your applications - what little endian and big endian IBM XL C/C++
1978 * compiler differences mean to you
1979 *
1980 * The mask to the shuffle vector instruction specifies the indices of the
1981 * elements from the two input vectors to place in the result. The elements are
1982 * numbered in array-access order, starting with the first vector. These vectors
1983 * are always of type v16i8, thus each vector will contain 16 elements of 8
1984 * bits each. More info on the shuffle vector can be found in the
1985 * http://llvm.org/docs/LangRef.html#shufflevector-instruction
1986 * Language Reference.
1987 *
1988 * The RHSStartValue indicates whether the same input vectors are used (unary)
1989 * or two different input vectors are used, based on the following:
1990 * - If the instruction uses the same vector for both inputs, the range of the
1991 * indices will be 0 to 15. In this case, the RHSStart value passed should
1992 * be 0.
1993 * - If the instruction has two different vectors then the range of the
1994 * indices will be 0 to 31. In this case, the RHSStart value passed should
1995 * be 16 (indices 0-15 specify elements in the first vector while indices 16
1996 * to 31 specify elements in the second vector).
1997 *
1998 * \param[in] N The shuffle vector SD Node to analyze
1999 * \param[in] IndexOffset Specifies whether to look for even or odd elements
2000 * \param[in] RHSStartValue Specifies the starting index for the right-hand input
2001 * vector to the shuffle_vector instruction
2002 * \return true iff this shuffle vector represents an even or odd word merge
2003 */
2004static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
2005 unsigned RHSStartValue) {
2006 if (N->getValueType(0) != MVT::v16i8)
2007 return false;
2008
2009 for (unsigned i = 0; i < 2; ++i)
2010 for (unsigned j = 0; j < 4; ++j)
2011 if (!isConstantOrUndef(N->getMaskElt(i*4+j),
2012 i*RHSStartValue+j+IndexOffset) ||
2013 !isConstantOrUndef(N->getMaskElt(i*4+j+8),
2014 i*RHSStartValue+j+IndexOffset+8))
2015 return false;
2016 return true;
2017}
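// Worked example (illustrative): for a little-endian even-word merge with
// two different (swapped) inputs, isVMRGEOShuffleMask() below calls
// isVMerge(N, /*IndexOffset=*/4, /*RHSStartValue=*/16), which accepts
//   <4,5,6,7, 20,21,22,23, 12,13,14,15, 28,29,30,31>.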
2018
2019/**
2020 * Determine if the specified shuffle mask is suitable for the vmrgew or
2021 * vmrgow instructions.
2022 *
2023 * \param[in] N The shuffle vector SD Node to analyze
2024 * \param[in] CheckEven Check for an even merge (true) or an odd merge (false)
2025 * \param[in] ShuffleKind Identify the type of merge:
2026 * - 0 = big-endian merge with two different inputs;
2027 * - 1 = either-endian merge with two identical inputs;
2028 * - 2 = little-endian merge with two different inputs (inputs are swapped for
2029 * little-endian merges).
2030 * \param[in] DAG The current SelectionDAG
2031 * \return true iff this shuffle mask matches the requested even or odd merge
2032 */
2033bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
2034 unsigned ShuffleKind, SelectionDAG &DAG) {
2035 if (DAG.getDataLayout().isLittleEndian()) {
2036 unsigned indexOffset = CheckEven ? 4 : 0;
2037 if (ShuffleKind == 1) // Unary
2038 return isVMerge(N, indexOffset, 0);
2039 else if (ShuffleKind == 2) // swapped
2040 return isVMerge(N, indexOffset, 16);
2041 else
2042 return false;
2043 }
2044 else {
2045 unsigned indexOffset = CheckEven ? 0 : 4;
2046 if (ShuffleKind == 1) // Unary
2047 return isVMerge(N, indexOffset, 0);
2048 else if (ShuffleKind == 0) // Normal
2049 return isVMerge(N, indexOffset, 16);
2050 else
2051 return false;
2052 }
2053 return false;
2054}
2055
2056/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
2057/// amount, otherwise return -1.
2058/// The ShuffleKind distinguishes between big-endian operations with two
2059/// different inputs (0), either-endian operations with two identical inputs
2060/// (1), and little-endian operations with two different inputs (2). For the
2061/// latter, the input operands are swapped (see PPCInstrAltivec.td).
2062int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
2063 SelectionDAG &DAG) {
2064 if (N->getValueType(0) != MVT::v16i8)
2065 return -1;
2066
2067 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
2068
2069 // Find the first non-undef value in the shuffle mask.
2070 unsigned i;
2071 for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
2072 /*search*/;
2073
2074 if (i == 16) return -1; // all undef.
2075
2076 // Otherwise, check to see if the rest of the elements are consecutively
2077 // numbered from this value.
2078 unsigned ShiftAmt = SVOp->getMaskElt(i);
2079 if (ShiftAmt < i) return -1;
2080
2081 ShiftAmt -= i;
2082 bool isLE = DAG.getDataLayout().isLittleEndian();
2083
2084 if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
2085 // Check the rest of the elements to see if they are consecutive.
2086 for (++i; i != 16; ++i)
2087 if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
2088 return -1;
2089 } else if (ShuffleKind == 1) {
2090 // Check the rest of the elements to see if they are consecutive.
2091 for (++i; i != 16; ++i)
2092 if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
2093 return -1;
2094 } else
2095 return -1;
2096
2097 if (isLE)
2098 ShiftAmt = 16 - ShiftAmt;
2099
2100 return ShiftAmt;
2101}
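// Worked example (illustrative): on big endian with two different inputs
// (ShuffleKind 0), the mask <3,4,5,...,18> is accepted with ShiftAmt = 3,
// i.e. a vsldoi shifting the concatenated inputs left by 3 bytes. On little
// endian (ShuffleKind 2) the returned amount is flipped to 16 - 3 = 13.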
2102
2103/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
2104/// specifies a splat of a single element that is suitable for input to
2105/// one of the splat operations (VSPLTB/VSPLTH/VSPLTW/XXSPLTW/LXVDSX/etc.).
2106bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
2107 assert(N->getValueType(0) == MVT::v16i8 && isPowerOf2_32(EltSize) &&(static_cast<void> (0))
2108 EltSize <= 8 && "Can only handle 1,2,4,8 byte element sizes")(static_cast<void> (0));
2109
2110 // The consecutive indices need to specify an element, not part of two
2111 // different elements. So abandon ship early if this isn't the case.
2112 if (N->getMaskElt(0) % EltSize != 0)
2113 return false;
2114
2115 // This is a splat operation if each element of the permute is the same, and
2116 // if the value doesn't reference the second vector.
2117 unsigned ElementBase = N->getMaskElt(0);
2118
2119 // FIXME: Handle UNDEF elements too!
2120 if (ElementBase >= 16)
2121 return false;
2122
2123 // Check that the indices are consecutive, in the case of a multi-byte element
2124 // splatted with a v16i8 mask.
2125 for (unsigned i = 1; i != EltSize; ++i)
2126 if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
2127 return false;
2128
2129 for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
2130 if (N->getMaskElt(i) < 0) continue;
2131 for (unsigned j = 0; j != EltSize; ++j)
2132 if (N->getMaskElt(i+j) != N->getMaskElt(j))
2133 return false;
2134 }
2135 return true;
2136}
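// Worked example (illustrative): with EltSize = 4, the mask
//   <4,5,6,7, 4,5,6,7, 4,5,6,7, 4,5,6,7>
// passes: byte indices 4..7 start on an element boundary, are consecutive,
// and repeat in every word, so this is a splat of word element 1.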
2137
2138/// Check that the mask is shuffling N byte elements. Within each N byte
2139/// element of the mask, the indices could be either in increasing or
2140/// decreasing order as long as they are consecutive.
2141/// \param[in] N the shuffle vector SD Node to analyze
2142/// \param[in] Width the element width in bytes, could be 2/4/8/16 (HalfWord/
2143/// Word/DoubleWord/QuadWord).
2144/// \param[in] StepLen the stride between consecutive indices within each N
2145/// byte element: 1 if the indices are increasing, -1 if they are decreasing.
2146/// \return true iff the mask is shuffling N byte elements.
2147static bool isNByteElemShuffleMask(ShuffleVectorSDNode *N, unsigned Width,
2148 int StepLen) {
2149 assert((Width == 2 || Width == 4 || Width == 8 || Width == 16) &&(static_cast<void> (0))
2150 "Unexpected element width.")(static_cast<void> (0));
2151  assert((StepLen == 1 || StepLen == -1) && "Unexpected step length.")(static_cast<void> (0));
2152
2153 unsigned NumOfElem = 16 / Width;
2154 unsigned MaskVal[16]; // Width is never greater than 16
2155 for (unsigned i = 0; i < NumOfElem; ++i) {
2156 MaskVal[0] = N->getMaskElt(i * Width);
2157 if ((StepLen == 1) && (MaskVal[0] % Width)) {
2158 return false;
2159 } else if ((StepLen == -1) && ((MaskVal[0] + 1) % Width)) {
2160 return false;
2161 }
2162
2163 for (unsigned int j = 1; j < Width; ++j) {
2164 MaskVal[j] = N->getMaskElt(i * Width + j);
2165 if (MaskVal[j] != MaskVal[j-1] + StepLen) {
2166 return false;
2167 }
2168 }
2169 }
2170
2171 return true;
2172}
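// Worked example (illustrative): with Width = 4 and StepLen = -1, each word
// of the mask must hold consecutive decreasing indices ending on a word
// boundary, e.g. <3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12>; this is the
// byte-reversal pattern that isXXBRWShuffleMask() checks for below.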
2173
2174bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
2175 unsigned &InsertAtByte, bool &Swap, bool IsLE) {
2176 if (!isNByteElemShuffleMask(N, 4, 1))
2177 return false;
2178
2179 // Now we look at mask elements 0,4,8,12
2180 unsigned M0 = N->getMaskElt(0) / 4;
2181 unsigned M1 = N->getMaskElt(4) / 4;
2182 unsigned M2 = N->getMaskElt(8) / 4;
2183 unsigned M3 = N->getMaskElt(12) / 4;
2184 unsigned LittleEndianShifts[] = { 2, 1, 0, 3 };
2185 unsigned BigEndianShifts[] = { 3, 0, 1, 2 };
2186
2187 // Below, let H and L be arbitrary elements of the shuffle mask
2188 // where H is in the range [4,7] and L is in the range [0,3].
2189 // H, 1, 2, 3 or L, 5, 6, 7
2190 if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) ||
2191 (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) {
2192 ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3];
2193 InsertAtByte = IsLE ? 12 : 0;
2194 Swap = M0 < 4;
2195 return true;
2196 }
2197 // 0, H, 2, 3 or 4, L, 6, 7
2198 if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) ||
2199 (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) {
2200 ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3];
2201 InsertAtByte = IsLE ? 8 : 4;
2202 Swap = M1 < 4;
2203 return true;
2204 }
2205 // 0, 1, H, 3 or 4, 5, L, 7
2206 if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) ||
2207 (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) {
2208 ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3];
2209 InsertAtByte = IsLE ? 4 : 8;
2210 Swap = M2 < 4;
2211 return true;
2212 }
2213 // 0, 1, 2, H or 4, 5, 6, L
2214 if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) ||
2215 (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) {
2216 ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3];
2217 InsertAtByte = IsLE ? 0 : 12;
2218 Swap = M3 < 4;
2219 return true;
2220 }
2221
2222 // If both vector operands for the shuffle are the same vector, the mask will
2223 // contain only elements from the first one and the second one will be undef.
2224 if (N->getOperand(1).isUndef()) {
2225 ShiftElts = 0;
2226 Swap = true;
2227 unsigned XXINSERTWSrcElem = IsLE ? 2 : 1;
2228 if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) {
2229 InsertAtByte = IsLE ? 12 : 0;
2230 return true;
2231 }
2232 if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) {
2233 InsertAtByte = IsLE ? 8 : 4;
2234 return true;
2235 }
2236 if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) {
2237 InsertAtByte = IsLE ? 4 : 8;
2238 return true;
2239 }
2240 if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) {
2241 InsertAtByte = IsLE ? 0 : 12;
2242 return true;
2243 }
2244 }
2245
2246 return false;
2247}
2248
2249bool PPC::isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
2250 bool &Swap, bool IsLE) {
2251 assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8")(static_cast<void> (0));
2252 // Ensure each byte index of the word is consecutive.
2253 if (!isNByteElemShuffleMask(N, 4, 1))
2254 return false;
2255
2256 // Now we look at mask elements 0,4,8,12, which are the beginning of words.
2257 unsigned M0 = N->getMaskElt(0) / 4;
2258 unsigned M1 = N->getMaskElt(4) / 4;
2259 unsigned M2 = N->getMaskElt(8) / 4;
2260 unsigned M3 = N->getMaskElt(12) / 4;
2261
2262 // If both vector operands for the shuffle are the same vector, the mask will
2263 // contain only elements from the first one and the second one will be undef.
2264 if (N->getOperand(1).isUndef()) {
2265 assert(M0 < 4 && "Indexing into an undef vector?")(static_cast<void> (0));
2266 if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4)
2267 return false;
2268
2269 ShiftElts = IsLE ? (4 - M0) % 4 : M0;
2270 Swap = false;
2271 return true;
2272 }
2273
2274 // Ensure each word index of the ShuffleVector Mask is consecutive.
2275 if (M1 != (M0 + 1) % 8 || M2 != (M1 + 1) % 8 || M3 != (M2 + 1) % 8)
2276 return false;
2277
2278 if (IsLE) {
2279 if (M0 == 0 || M0 == 7 || M0 == 6 || M0 == 5) {
2280 // Input vectors don't need to be swapped if the leading element
2281 // of the result is one of the 3 left elements of the second vector
2282 // (or if there is no shift to be done at all).
2283 Swap = false;
2284 ShiftElts = (8 - M0) % 8;
2285 } else if (M0 == 4 || M0 == 3 || M0 == 2 || M0 == 1) {
2286 // Input vectors need to be swapped if the leading element
2287 // of the result is one of the 3 left elements of the first vector
2288 // (or if we're shifting by 4 - thereby simply swapping the vectors).
2289 Swap = true;
2290 ShiftElts = (4 - M0) % 4;
2291 }
2292
2293 return true;
2294 } else { // BE
2295 if (M0 == 0 || M0 == 1 || M0 == 2 || M0 == 3) {
2296 // Input vectors don't need to be swapped if the leading element
2297 // of the result is one of the 4 elements of the first vector.
2298 Swap = false;
2299 ShiftElts = M0;
2300 } else if (M0 == 4 || M0 == 5 || M0 == 6 || M0 == 7) {
2301 // Input vectors need to be swapped if the leading element
2302 // of the result is one of the 4 elements of the right vector.
2303 Swap = true;
2304 ShiftElts = M0 - 4;
2305 }
2306
2307 return true;
2308 }
2309}
2310
2311bool static isXXBRShuffleMaskHelper(ShuffleVectorSDNode *N, int Width) {
2312 assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8")(static_cast<void> (0));
2313
2314 if (!isNByteElemShuffleMask(N, Width, -1))
2315 return false;
2316
2317 for (int i = 0; i < 16; i += Width)
2318 if (N->getMaskElt(i) != i + Width - 1)
2319 return false;
2320
2321 return true;
2322}
2323
2324bool PPC::isXXBRHShuffleMask(ShuffleVectorSDNode *N) {
2325 return isXXBRShuffleMaskHelper(N, 2);
2326}
2327
2328bool PPC::isXXBRWShuffleMask(ShuffleVectorSDNode *N) {
2329 return isXXBRShuffleMaskHelper(N, 4);
2330}
2331
2332bool PPC::isXXBRDShuffleMask(ShuffleVectorSDNode *N) {
2333 return isXXBRShuffleMaskHelper(N, 8);
2334}
2335
2336bool PPC::isXXBRQShuffleMask(ShuffleVectorSDNode *N) {
2337 return isXXBRShuffleMaskHelper(N, 16);
2338}
2339
2340/// Can node \p N be lowered to an XXPERMDI instruction? If so, set \p Swap
2341/// if the inputs to the instruction should be swapped and set \p DM to the
2342/// value for the immediate.
2343/// Specifically, set \p Swap to true only if \p N can be lowered to XXPERMDI
2344/// AND element 0 of the result comes from the first input (LE) or second input
2345/// (BE). Set \p DM to the calculated result (0-3) only if \p N can be lowered.
2346/// \return true iff the given mask of shuffle node \p N is a XXPERMDI shuffle
2347/// mask.
2348bool PPC::isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &DM,
2349 bool &Swap, bool IsLE) {
2350 assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8")(static_cast<void> (0));
2351
2352 // Ensure each byte index of the double word is consecutive.
2353 if (!isNByteElemShuffleMask(N, 8, 1))
2354 return false;
2355
2356 unsigned M0 = N->getMaskElt(0) / 8;
2357 unsigned M1 = N->getMaskElt(8) / 8;
2358 assert(((M0 | M1) < 4) && "A mask element out of bounds?")(static_cast<void> (0));
2359
2360 // If both vector operands for the shuffle are the same vector, the mask will
2361 // contain only elements from the first one and the second one will be undef.
2362 if (N->getOperand(1).isUndef()) {
2363 if ((M0 | M1) < 2) {
2364 DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1);
2365 Swap = false;
2366 return true;
2367 } else
2368 return false;
2369 }
2370
2371 if (IsLE) {
2372 if (M0 > 1 && M1 < 2) {
2373 Swap = false;
2374 } else if (M0 < 2 && M1 > 1) {
2375 M0 = (M0 + 2) % 4;
2376 M1 = (M1 + 2) % 4;
2377 Swap = true;
2378 } else
2379 return false;
2380
2381    // Note: if control flow reaches here, Swap has already been set above.
2382 DM = (((~M1) & 1) << 1) + ((~M0) & 1);
2383 return true;
2384 } else { // BE
2385 if (M0 < 2 && M1 > 1) {
2386 Swap = false;
2387 } else if (M0 > 1 && M1 < 2) {
2388 M0 = (M0 + 2) % 4;
2389 M1 = (M1 + 2) % 4;
2390 Swap = true;
2391 } else
2392 return false;
2393
2394    // Note: if control flow reaches here, Swap has already been set above.
2395 DM = (M0 << 1) + (M1 & 1);
2396 return true;
2397 }
2398}
2399
2400
2401/// getSplatIdxForPPCMnemonics - Return the splat index as a value that is
2402/// appropriate for PPC mnemonics (which have a big endian bias - namely
2403/// elements are counted from the left of the vector register).
2404unsigned PPC::getSplatIdxForPPCMnemonics(SDNode *N, unsigned EltSize,
2405 SelectionDAG &DAG) {
2406 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
2407 assert(isSplatShuffleMask(SVOp, EltSize))(static_cast<void> (0));
2408 if (DAG.getDataLayout().isLittleEndian())
2409 return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
2410 else
2411 return SVOp->getMaskElt(0) / EltSize;
2412}
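// Worked example (illustrative): for a word splat whose mask starts at byte 4
// (EltSize = 4, MaskElt(0) = 4), big endian returns 4 / 4 = 1, while little
// endian returns (16 / 4) - 1 - (4 / 4) = 2, since the mnemonics count
// elements from the left of the vector register.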
2413
2414/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
2415/// by using a vspltis[bhw] instruction of the specified element size, return
2416/// the constant being splatted. The ByteSize field indicates the number of
2417/// bytes of each element [124] -> [bhw].
2418SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
2419 SDValue OpVal(nullptr, 0);
2420
2421 // If ByteSize of the splat is bigger than the element size of the
2422 // build_vector, then we have a case where we are checking for a splat where
2423 // multiple elements of the buildvector are folded together into a single
2424  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
2425 unsigned EltSize = 16/N->getNumOperands();
2426 if (EltSize < ByteSize) {
2427 unsigned Multiple = ByteSize/EltSize; // Number of BV entries per spltval.
2428 SDValue UniquedVals[4];
2429 assert(Multiple > 1 && Multiple <= 4 && "How can this happen?")(static_cast<void> (0));
2430
2431    // See if all of the elements in the buildvector agree across each chunk.
2432 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
2433 if (N->getOperand(i).isUndef()) continue;
2434 // If the element isn't a constant, bail fully out.
2435 if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();
2436
2437 if (!UniquedVals[i&(Multiple-1)].getNode())
2438 UniquedVals[i&(Multiple-1)] = N->getOperand(i);
2439 else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
2440 return SDValue(); // no match.
2441 }
2442
2443 // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
2444 // either constant or undef values that are identical for each chunk. See
2445 // if these chunks can form into a larger vspltis*.
2446
2447 // Check to see if all of the leading entries are either 0 or -1. If
2448 // neither, then this won't fit into the immediate field.
2449 bool LeadingZero = true;
2450 bool LeadingOnes = true;
2451 for (unsigned i = 0; i != Multiple-1; ++i) {
2452 if (!UniquedVals[i].getNode()) continue; // Must have been undefs.
2453
2454 LeadingZero &= isNullConstant(UniquedVals[i]);
2455 LeadingOnes &= isAllOnesConstant(UniquedVals[i]);
2456 }
2457 // Finally, check the least significant entry.
2458 if (LeadingZero) {
2459 if (!UniquedVals[Multiple-1].getNode())
2460 return DAG.getTargetConstant(0, SDLoc(N), MVT::i32); // 0,0,0,undef
2461 int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
2462 if (Val < 16) // 0,0,0,4 -> vspltisw(4)
2463 return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
2464 }
2465 if (LeadingOnes) {
2466 if (!UniquedVals[Multiple-1].getNode())
2467 return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef
2468 int Val =cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
2469 if (Val >= -16) // -1,-1,-1,-2 -> vspltisw(-2)
2470 return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
2471 }
2472
2473 return SDValue();
2474 }
2475
2476 // Check to see if this buildvec has a single non-undef value in its elements.
2477 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
2478 if (N->getOperand(i).isUndef()) continue;
2479 if (!OpVal.getNode())
2480 OpVal = N->getOperand(i);
2481 else if (OpVal != N->getOperand(i))
2482 return SDValue();
2483 }
2484
2485 if (!OpVal.getNode()) return SDValue(); // All UNDEF: use implicit def.
2486
2487 unsigned ValSizeInBytes = EltSize;
2488 uint64_t Value = 0;
2489 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
2490 Value = CN->getZExtValue();
2491 } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
2492 assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!")(static_cast<void> (0));
2493 Value = FloatToBits(CN->getValueAPF().convertToFloat());
2494 }
2495
2496 // If the splat value is larger than the element value, then we can never do
2497  // this splat. The only case where the replicated bits would fit into our
2498  // immediate field is zero, and we prefer to use vxor for it.
2499 if (ValSizeInBytes < ByteSize) return SDValue();
2500
2501 // If the element value is larger than the splat value, check if it consists
2502 // of a repeated bit pattern of size ByteSize.
2503 if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8))
2504 return SDValue();
2505
2506 // Properly sign extend the value.
2507 int MaskVal = SignExtend32(Value, ByteSize * 8);
2508
2509 // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
2510 if (MaskVal == 0) return SDValue();
2511
2512 // Finally, if this value fits in a 5 bit sext field, return it
2513 if (SignExtend32<5>(MaskVal) == MaskVal)
2514 return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32);
2515 return SDValue();
2516}
2517
2518//===----------------------------------------------------------------------===//
2519// Addressing Mode Selection
2520//===----------------------------------------------------------------------===//
2521
2522/// isIntS16Immediate - This method tests to see if the node is either a 32-bit
2523/// or 64-bit immediate, and if the value can be accurately represented as a
2524/// sign extension from a 16-bit value. If so, this returns true and the
2525/// immediate.
2526bool llvm::isIntS16Immediate(SDNode *N, int16_t &Imm) {
2527 if (!isa<ConstantSDNode>(N))
2528 return false;
2529
2530 Imm = (int16_t)cast<ConstantSDNode>(N)->getZExtValue();
2531 if (N->getValueType(0) == MVT::i32)
2532 return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
2533 else
2534 return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
2535}
2536bool llvm::isIntS16Immediate(SDValue Op, int16_t &Imm) {
2537 return isIntS16Immediate(Op.getNode(), Imm);
2538}
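// Editorial example: 32767 (0x7FFF) survives the int16_t round trip and is
// accepted, so it can be encoded directly in a D-form immediate such as addi;
// 32768 (0x8000) truncates to -32768, which no longer compares equal to the
// original zero-extended value, so the check correctly rejects it.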
2539
2540/// Used when computing address flags for selecting loads and stores.
2541/// If we have an OR, check if the LHS and RHS are provably disjoint.
2542/// An OR of two provably disjoint values is equivalent to an ADD.
2543/// Most PPC load/store instructions compute the effective address as a sum,
2544/// so doing this conversion is useful.
2545static bool provablyDisjointOr(SelectionDAG &DAG, const SDValue &N) {
2546 if (N.getOpcode() != ISD::OR)
2547 return false;
2548 KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2549 if (!LHSKnown.Zero.getBoolValue())
2550 return false;
2551 KnownBits RHSKnown = DAG.computeKnownBits(N.getOperand(1));
2552 return (~(LHSKnown.Zero | RHSKnown.Zero) == 0);
2553}
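// Editorial example: if the LHS is known 16-byte aligned, its low four bits
// are known zero, so (lhs | 8) sets no bit that the LHS might also set and is
// provably equal to (lhs + 8); the OR can then feed the same base-plus-offset
// address selection that an ADD would.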
2554
2555/// SelectAddressEVXRegReg - Given the specified address, check to see if it can
2556/// be represented as an indexed [r+r] operation.
2557bool PPCTargetLowering::SelectAddressEVXRegReg(SDValue N, SDValue &Base,
2558 SDValue &Index,
2559 SelectionDAG &DAG) const {
2560 for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
2561 UI != E; ++UI) {
2562 if (MemSDNode *Memop = dyn_cast<MemSDNode>(*UI)) {
2563 if (Memop->getMemoryVT() == MVT::f64) {
2564 Base = N.getOperand(0);
2565 Index = N.getOperand(1);
2566 return true;
2567 }
2568 }
2569 }
2570 return false;
2571}
2572
2573 /// isIntS34Immediate - This method tests whether the value of the given node
2574 /// can be accurately represented as a sign extension from a 34-bit value. If
2575 /// so, this returns true and the immediate.
2576bool llvm::isIntS34Immediate(SDNode *N, int64_t &Imm) {
2577 if (!isa<ConstantSDNode>(N))
2578 return false;
2579
2580 Imm = (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
2581 return isInt<34>(Imm);
2582}
2583bool llvm::isIntS34Immediate(SDValue Op, int64_t &Imm) {
2584 return isIntS34Immediate(Op.getNode(), Imm);
2585}
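// Editorial note: the signed 34-bit range is [-2^33, 2^33 - 1], matching the
// displacement field of the ISA 3.1 prefixed loads and stores; for example,
// an offset of 2^32 is rejected by isIntS16Immediate above but accepted here.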
2586
2587 /// SelectAddressRegReg - Given the specified address, check to see if it
2588/// can be represented as an indexed [r+r] operation. Returns false if it
2589/// can be more efficiently represented as [r+imm]. If \p EncodingAlignment is
2590/// non-zero and N can be represented by a base register plus a signed 16-bit
2591/// displacement, make a more precise judgement by checking (displacement % \p
2592/// EncodingAlignment).
2593bool PPCTargetLowering::SelectAddressRegReg(
2594 SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG,
2595 MaybeAlign EncodingAlignment) const {
2596 // If we have a PC Relative target flag don't select as [reg+reg]. It will be
2597 // a [pc+imm].
2598 if (SelectAddressPCRel(N, Base))
2599 return false;
2600
2601 int16_t Imm = 0;
2602 if (N.getOpcode() == ISD::ADD) {
2603 // Is this an SPE (f64) load/store, which can't handle a 16-bit offset?
2604 // SPE load/store instructions can only handle 8-bit offsets.
2605 if (hasSPE() && SelectAddressEVXRegReg(N, Base, Index, DAG))
2606 return true;
2607 if (isIntS16Immediate(N.getOperand(1), Imm) &&
2608 (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))
2609 return false; // r+i
2610 if (N.getOperand(1).getOpcode() == PPCISD::Lo)
2611 return false; // r+i
2612
2613 Base = N.getOperand(0);
2614 Index = N.getOperand(1);
2615 return true;
2616 } else if (N.getOpcode() == ISD::OR) {
2617 if (isIntS16Immediate(N.getOperand(1), Imm) &&
2618 (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))
2619 return false; // r+i; fold it into [r+imm] if we can.
2620
2621 // If this is an or of disjoint bitfields, we can codegen this as an add
2622 // (for better address arithmetic) if the LHS and RHS of the OR are provably
2623 // disjoint.
2624 KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2625
2626 if (LHSKnown.Zero.getBoolValue()) {
2627 KnownBits RHSKnown = DAG.computeKnownBits(N.getOperand(1));
2628 // If all of the bits are known zero on the LHS or RHS, the add won't
2629 // carry.
2630 if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) {
2631 Base = N.getOperand(0);
2632 Index = N.getOperand(1);
2633 return true;
2634 }
2635 }
2636 }
2637
2638 return false;
2639}
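// Editorial example: for (add %base, 100) this returns false, since 100 is a
// signed 16-bit immediate, steering selection toward a D-form like
// "lwz rD, 100(rBase)"; for (add %base, %idx) with a non-constant index it
// returns true and an X-form such as "lwzx rD, rBase, rIdx" is used instead.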
2640
2641// If we happen to be doing an i64 load or store into a stack slot that has
2642// less than a 4-byte alignment, then the frame-index elimination may need to
2643// use an indexed load or store instruction (because the offset may not be a
2644// multiple of 4). The extra register needed to hold the offset comes from the
2645// register scavenger, and it is possible that the scavenger will need to use
2646// an emergency spill slot. As a result, we need to make sure that a spill slot
2647// is allocated when doing an i64 load/store into a less-than-4-byte-aligned
2648// stack slot.
2649static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
2650 // FIXME: This does not handle the LWA case.
2651 if (VT != MVT::i64)
2652 return;
2653
2654 // NOTE: We'll exclude negative FIs here, which come from argument
2655 // lowering, because there are no known test cases triggering this problem
2656 // using packed structures (or similar). We can remove this exclusion if
2657 // we find such a test case. The reason why this is so test-case driven is
2658 // because this entire 'fixup' is only to prevent crashes (from the
2659 // register scavenger) on not-really-valid inputs. For example, if we have:
2660 // %a = alloca i1
2661 // %b = bitcast i1* %a to i64*
2662 // store i64 0, i64* %b
2663 // then the store should really be marked as 'align 1', but is not. If it
2664 // were marked as 'align 1' then the indexed form would have been
2665 // instruction-selected initially, and the problem this 'fixup' is preventing
2666 // won't happen regardless.
2667 if (FrameIdx < 0)
2668 return;
2669
2670 MachineFunction &MF = DAG.getMachineFunction();
2671 MachineFrameInfo &MFI = MF.getFrameInfo();
2672
2673 if (MFI.getObjectAlign(FrameIdx) >= Align(4))
2674 return;
2675
2676 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
2677 FuncInfo->setHasNonRISpills();
2678}
2679
2680/// Returns true if the address N can be represented by a base register plus
2681/// a signed 16-bit displacement [r+imm], and if it is not better
2682/// represented as reg+reg. If \p EncodingAlignment is non-zero, only accept
2683/// displacements that are multiples of that value.
2684bool PPCTargetLowering::SelectAddressRegImm(
2685 SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG,
2686 MaybeAlign EncodingAlignment) const {
2687 // FIXME dl should come from parent load or store, not from address
2688 SDLoc dl(N);
2689
2690 // If we have a PC Relative target flag don't select as [reg+imm]. It will be
2691 // a [pc+imm].
2692 if (SelectAddressPCRel(N, Base))
2693 return false;
2694
2695 // If this can be more profitably realized as r+r, fail.
2696 if (SelectAddressRegReg(N, Disp, Base, DAG, EncodingAlignment))
2697 return false;
2698
2699 if (N.getOpcode() == ISD::ADD) {
2700 int16_t imm = 0;
2701 if (isIntS16Immediate(N.getOperand(1), imm) &&
2702 (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {
2703 Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
2704 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
2705 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2706 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2707 } else {
2708 Base = N.getOperand(0);
2709 }
2710 return true; // [r+i]
2711 } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
2712 // Match LOAD (ADD (X, Lo(G))).
2713 assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue() &&
2714 "Cannot handle constant offsets yet!");
2715 Disp = N.getOperand(1).getOperand(0); // The global address.
2716 assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
2717 Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
2718 Disp.getOpcode() == ISD::TargetConstantPool ||
2719 Disp.getOpcode() == ISD::TargetJumpTable);
2720 Base = N.getOperand(0);
2721 return true; // [&g+r]
2722 }
2723 } else if (N.getOpcode() == ISD::OR) {
2724 int16_t imm = 0;
2725 if (isIntS16Immediate(N.getOperand(1), imm) &&
2726 (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {
2727 // If this is an or of disjoint bitfields, we can codegen this as an add
2728 // (for better address arithmetic) if the LHS and RHS of the OR are
2729 // provably disjoint.
2730 KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2731
2732 if ((LHSKnown.Zero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
2733 // If all of the bits are known zero on the LHS or RHS, the add won't
2734 // carry.
2735 if (FrameIndexSDNode *FI =
2736 dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
2737 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2738 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2739 } else {
2740 Base = N.getOperand(0);
2741 }
2742 Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
2743 return true;
2744 }
2745 }
2746 } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
2747 // Loading from a constant address.
2748
2749 // If this address fits entirely in a 16-bit sext immediate field, codegen
2750 // this as "d, 0"
2751 int16_t Imm;
2752 if (isIntS16Immediate(CN, Imm) &&
2753 (!EncodingAlignment || isAligned(*EncodingAlignment, Imm))) {
2754 Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0));
2755 Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
2756 CN->getValueType(0));
2757 return true;
2758 }
2759
2760 // Handle 32-bit sext immediates with LIS + addr mode.
2761 if ((CN->getValueType(0) == MVT::i32 ||
2762 (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
2763 (!EncodingAlignment ||
2764 isAligned(*EncodingAlignment, CN->getZExtValue()))) {
2765 int Addr = (int)CN->getZExtValue();
2766
2767 // Otherwise, break this down into an LIS + disp.
2768 Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32);
2769
2770 Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl,
2771 MVT::i32);
2772 unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
2773 Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
2774 return true;
2775 }
2776 }
2777
2778 Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout()));
2779 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
2780 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2781 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2782 } else
2783 Base = N;
2784 return true; // [r+0]
2785}
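// Editorial worked example for the constant-address path above: for the
// address 0x12348000, the low 16 bits sign-extend to -0x8000, so Base becomes
// LIS(0x1235) = 0x12350000 and Disp becomes -0x8000; 0x12350000 - 0x8000
// reconstructs 0x12348000 in [r+imm] form.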
2786
2787/// Similar to the 16-bit case but for instructions that take a 34-bit
2788/// displacement field (prefixed loads/stores).
2789bool PPCTargetLowering::SelectAddressRegImm34(SDValue N, SDValue &Disp,
2790 SDValue &Base,
2791 SelectionDAG &DAG) const {
2792 // Only on 64-bit targets.
2793 if (N.getValueType() != MVT::i64)
2794 return false;
2795
2796 SDLoc dl(N);
2797 int64_t Imm = 0;
2798
2799 if (N.getOpcode() == ISD::ADD) {
2800 if (!isIntS34Immediate(N.getOperand(1), Imm))
2801 return false;
2802 Disp = DAG.getTargetConstant(Imm, dl, N.getValueType());
2803 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0)))
2804 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2805 else
2806 Base = N.getOperand(0);
2807 return true;
2808 }
2809
2810 if (N.getOpcode() == ISD::OR) {
2811 if (!isIntS34Immediate(N.getOperand(1), Imm))
2812 return false;
2813 // If this is an or of disjoint bitfields, we can codegen this as an add
2814 // (for better address arithmetic) if the LHS and RHS of the OR are
2815 // provably disjoint.
2816 KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2817 if ((LHSKnown.Zero.getZExtValue() | ~(uint64_t)Imm) != ~0ULL)
2818 return false;
2819 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0)))
2820 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2821 else
2822 Base = N.getOperand(0);
2823 Disp = DAG.getTargetConstant(Imm, dl, N.getValueType());
2824 return true;
2825 }
2826
2827 if (isIntS34Immediate(N, Imm)) { // If the address is a 34-bit const.
2828 Disp = DAG.getTargetConstant(Imm, dl, N.getValueType());
2829 Base = DAG.getRegister(PPC::ZERO8, N.getValueType());
2830 return true;
2831 }
2832
2833 return false;
2834}
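// Editorial example: (add %base, 1000000) cannot use a 16-bit D-form, but
// 1000000 fits in 34 signed bits, so this matches and a prefixed access such
// as "plwz rD, 1000000(rBase)" can be selected (assuming a Power10 target
// where these instructions exist).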
2835
2836 /// SelectAddressRegRegOnly - Given the specified address, force it to be
2837/// represented as an indexed [r+r] operation.
2838bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
2839 SDValue &Index,
2840 SelectionDAG &DAG) const {
2841 // Check to see if we can easily represent this as an [r+r] address. This
2842 // will fail if it thinks that the address is more profitably represented as
2843 // reg+imm, e.g. where imm = 0.
2844 if (SelectAddressRegReg(N, Base, Index, DAG))
2845 return true;
2846
2847 // If the address is the result of an add, we will utilize the fact that the
2848 // address calculation includes an implicit add. However, we can reduce
2849 // register pressure if we do not materialize a constant just for use as the
2850 // index register. We only get rid of the add if it is not an add of a
2851 // value and a 16-bit signed constant where both operands have a single use.
2852 int16_t imm = 0;
2853 if (N.getOpcode() == ISD::ADD &&
2854 (!isIntS16Immediate(N.getOperand(1), imm) ||
2855 !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) {
2856 Base = N.getOperand(0);
2857 Index = N.getOperand(1);
2858 return true;
2859 }
2860
2861 // Otherwise, do it the hard way, using R0 as the base register.
2862 Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
2863 N.getValueType());
2864 Index = N;
2865 return true;
2866}
2867
2868template <typename Ty> static bool isValidPCRelNode(SDValue N) {
2869 Ty *PCRelCand = dyn_cast<Ty>(N);
2870 return PCRelCand && (PCRelCand->getTargetFlags() & PPCII::MO_PCREL_FLAG);
2871}
2872
2873/// Returns true if this address is a PC Relative address.
2874/// PC Relative addresses are marked with the flag PPCII::MO_PCREL_FLAG
2875/// or if the node opcode is PPCISD::MAT_PCREL_ADDR.
2876bool PPCTargetLowering::SelectAddressPCRel(SDValue N, SDValue &Base) const {
2877 // This is a materialize PC Relative node. Always select this as PC Relative.
2878 Base = N;
2879 if (N.getOpcode() == PPCISD::MAT_PCREL_ADDR)
2880 return true;
2881 if (isValidPCRelNode<ConstantPoolSDNode>(N) ||
2882 isValidPCRelNode<GlobalAddressSDNode>(N) ||
2883 isValidPCRelNode<JumpTableSDNode>(N) ||
2884 isValidPCRelNode<BlockAddressSDNode>(N))
2885 return true;
2886 return false;
2887}
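// Editorial example: a GlobalAddressSDNode carrying PPCII::MO_PCREL_FLAG
// matches here and is ultimately materialized PC-relatively, e.g. with
// "paddi rD, 0, sym@pcrel, 1", instead of going through a TOC-based load.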
2888
2889/// Returns true if we should use a direct load into vector instruction
2890/// (such as lxsd or lfd), instead of a load into gpr + direct move sequence.
2891static bool usePartialVectorLoads(SDNode *N, const PPCSubtarget& ST) {
2892
2893 // If there are any uses other than scalar_to_vector, then we should
2894 // keep it as a scalar load -> direct move pattern to prevent multiple
2895 // loads.
2896 LoadSDNode *LD = dyn_cast<LoadSDNode>(N);
2897 if (!LD)
2898 return false;
2899
2900 EVT MemVT = LD->getMemoryVT();
2901 if (!MemVT.isSimple())
2902 return false;
2903 switch (MemVT.getSimpleVT().SimpleTy) {
2904 case MVT::i64:
2905 break;
2906 case MVT::i32:
2907 if (!ST.hasP8Vector())
2908 return false;
2909 break;
2910 case MVT::i16:
2911 case MVT::i8:
2912 if (!ST.hasP9Vector())
2913 return false;
2914 break;
2915 default:
2916 return false;
2917 }
2918
2919 SDValue LoadedVal(N, 0);
2920 if (!LoadedVal.hasOneUse())
2921 return false;
2922
2923 for (SDNode::use_iterator UI = LD->use_begin(), UE = LD->use_end();
2924 UI != UE; ++UI)
2925 if (UI.getUse().get().getResNo() == 0 &&
2926 UI->getOpcode() != ISD::SCALAR_TO_VECTOR &&
2927 UI->getOpcode() != PPCISD::SCALAR_TO_VECTOR_PERMUTED)
2928 return false;
2929
2930 return true;
2931}
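// Editorial example: for a (scalar_to_vector (load i64)) whose only use is
// the vector insertion, returning true lets selection emit one lxsd directly
// into a VSX register, instead of an integer ld followed by a GPR-to-VSR
// direct move such as mtvsrd.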
2932
2933 /// getPreIndexedAddressParts - Returns true, and sets the base pointer,
2934 /// offset pointer, and addressing mode by reference, if the node's address
2935 /// can be legally represented as a pre-indexed load / store address.
2936bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
2937 SDValue &Offset,
2938 ISD::MemIndexedMode &AM,
2939 SelectionDAG &DAG) const {
2940 if (DisablePPCPreinc) return false;
2941
2942 bool isLoad = true;
2943 SDValue Ptr;
2944 EVT VT;
2945 unsigned Alignment;
2946 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
2947 Ptr = LD->getBasePtr();
2948 VT = LD->getMemoryVT();
2949 Alignment = LD->getAlignment();
2950 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
2951 Ptr = ST->getBasePtr();
2952 VT = ST->getMemoryVT();
2953 Alignment = ST->getAlignment();
2954 isLoad = false;
2955 } else
2956 return false;
2957
2958 // Do not generate pre-inc forms for specific loads that feed scalar_to_vector
2959 // instructions because we can fold these into a more efficient instruction
2960 // instead (such as LXSD).
2961 if (isLoad && usePartialVectorLoads(N, Subtarget)) {
2962 return false;
2963 }
2964
2965 // PowerPC doesn't have preinc load/store instructions for vectors
2966 if (VT.isVector())
2967 return false;
2968
2969 if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) {
2970 // Common code will reject creating a pre-inc form if the base pointer
2971 // is a frame index, or if N is a store and the base pointer is either
2972 // the same as or a predecessor of the value being stored. Check for
2973 // those situations here, and try with swapped Base/Offset instead.
2974 bool Swap = false;
2975
2976 if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))
2977 Swap = true;
2978 else if (!isLoad) {
2979 SDValue Val = cast<StoreSDNode>(N)->getValue();
2980 if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode()))
2981 Swap = true;
2982 }
2983
2984 if (Swap)
2985 std::swap(Base, Offset);
2986
2987 AM = ISD::PRE_INC;
2988 return true;
2989 }
2990
2991 // LDU/STU can only handle immediates that are a multiple of 4.
2992 if (VT != MVT::i64) {
2993 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, None))
2994 return false;
2995 } else {
2996 // LDU/STU need an address with at least 4-byte alignment.
2997 if (Alignment < 4)
2998 return false;
2999
3000 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, Align(4)))
3001 return false;
3002 }
3003
3004 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
3005 // PPC64 doesn't have lwau, but it does have lwaux. Reject preinc load of
3006 // sext i32 to i64 when addr mode is r+i.
3007 if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
3008 LD->getExtensionType() == ISD::SEXTLOAD &&
3009 isa<ConstantSDNode>(Offset))
3010 return false;
3011 }
3012
3013 AM = ISD::PRE_INC;
3014 return true;
3015}
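// Editorial example: a load "lwz rD, 4(rA)" whose address rA+4 is also needed
// afterwards can become the pre-increment form "lwzu rD, 4(rA)", which writes
// the effective address back into rA as a side effect and saves the addi.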
3016
3017//===----------------------------------------------------------------------===//
3018// LowerOperation implementation
3019//===----------------------------------------------------------------------===//
3020
3021 /// Compute the HiOpFlags and LoOpFlags target MO flags used when
3022 /// referencing labels, adding the PIC flag when in a PIC relocation model.
3023static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget,
3024 unsigned &HiOpFlags, unsigned &LoOpFlags,
3025 const GlobalValue *GV = nullptr) {
3026 HiOpFlags = PPCII::MO_HA;
3027 LoOpFlags = PPCII::MO_LO;
3028
3029 // Don't use the pic base if not in PIC relocation model.
3030 if (IsPIC) {
3031 HiOpFlags |= PPCII::MO_PIC_FLAG;
3032 LoOpFlags |= PPCII::MO_PIC_FLAG;
3033 }
3034}
3035
3036static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
3037 SelectionDAG &DAG) {
3038 SDLoc DL(HiPart);
3039 EVT PtrVT = HiPart.getValueType();
3040 SDValue Zero = DAG.getConstant(0, DL, PtrVT);
3041
3042 SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
3043 SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);
3044
3045 // With PIC, the first instruction is actually "GR+hi(&G)".
3046 if (isPIC)
3047 Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
3048 DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi);
3049
3050 // Generate non-pic code that has direct accesses to the constant pool.
3051 // The address of the global is just (hi(&g)+lo(&g)).
3052 return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
3053}
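// Editorial example: in the non-PIC case the Hi/Lo pair built above typically
// selects to "lis rD, sym@ha" followed by "addi rD, rD, sym@l", where @ha
// compensates for the sign of the low 16 bits so the halves sum back to the
// full address.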
3054
3055static void setUsesTOCBasePtr(MachineFunction &MF) {
3056 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3057 FuncInfo->setUsesTOCBasePtr();
3058}
3059
3060static void setUsesTOCBasePtr(SelectionDAG &DAG) {
3061 setUsesTOCBasePtr(DAG.getMachineFunction());
3062}
3063
3064SDValue PPCTargetLowering::getTOCEntry(SelectionDAG &DAG, const SDLoc &dl,
3065 SDValue GA) const {
3066 const bool Is64Bit = Subtarget.isPPC64();
3067 EVT VT = Is64Bit ? MVT::i64 : MVT::i32;
3068 SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT)
3069 : Subtarget.isAIXABI()
3070 ? DAG.getRegister(PPC::R2, VT)
3071 : DAG.getNode(PPCISD::GlobalBaseReg, dl, VT);
3072 SDValue Ops[] = { GA, Reg };
3073 return DAG.getMemIntrinsicNode(
3074 PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT,
3075 MachinePointerInfo::getGOT(DAG.getMachineFunction()), None,
3076 MachineMemOperand::MOLoad);
3077}
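// Editorial example: on 64-bit ELF the TOC_ENTRY node typically becomes a
// TOC-relative load such as "ld rD, sym@toc(r2)" (or an addis/ld pair under
// the medium code model), with the X2/R2 operand above supplying the TOC base.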
3078
3079SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
3080 SelectionDAG &DAG) const {
3081 EVT PtrVT = Op.getValueType();
3082 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
3083 const Constant *C = CP->getConstVal();
3084
3085 // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
3086 // The actual address of the GlobalValue is stored in the TOC.
3087 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
3088 if (Subtarget.isUsingPCRelativeCalls()) {
3089 SDLoc DL(CP);
3090 EVT Ty = getPointerTy(DAG.getDataLayout());
3091 SDValue ConstPool = DAG.getTargetConstantPool(
3092 C, Ty, CP->getAlign(), CP->getOffset(), PPCII::MO_PCREL_FLAG);
3093 return DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, ConstPool);
3094 }
3095 setUsesTOCBasePtr(DAG);
3096 SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0);
3097 return getTOCEntry(DAG, SDLoc(CP), GA);
3098 }
3099
3100 unsigned MOHiFlag, MOLoFlag;
3101 bool IsPIC = isPositionIndependent();
3102 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
3103
3104 if (IsPIC && Subtarget.isSVR4ABI()) {
3105 SDValue GA =
3106 DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), PPCII::MO_PIC_FLAG);
3107 return getTOCEntry(DAG, SDLoc(CP), GA);
3108 }
3109
3110 SDValue CPIHi =
3111 DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0, MOHiFlag);
3112 SDValue CPILo =
3113 DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0, MOLoFlag);
3114 return LowerLabelRef(CPIHi, CPILo, IsPIC, DAG);
3115}
3116
3117// For 64-bit PowerPC, prefer the more compact relative encodings.
3118// This trades 32 bits per jump table entry for one or two instructions
3119 // at the jump site.
3120unsigned PPCTargetLowering::getJumpTableEncoding() const {
3121 if (isJumpTableRelative())
3122 return MachineJumpTableInfo::EK_LabelDifference32;
3123
3124 return TargetLowering::getJumpTableEncoding();
3125}
3126
3127bool PPCTargetLowering::isJumpTableRelative() const {
3128 if (UseAbsoluteJumpTables)
3129 return false;
3130 if (Subtarget.isPPC64() || Subtarget.isAIXABI())
3131 return true;
3132 return TargetLowering::isJumpTableRelative();
3133}
3134
3135SDValue PPCTargetLowering::getPICJumpTableRelocBase(SDValue Table,
3136 SelectionDAG &DAG) const {
3137 if (!Subtarget.isPPC64() || Subtarget.isAIXABI())
3138 return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
3139
3140 switch (getTargetMachine().getCodeModel()) {
3141 case CodeModel::Small:
3142 case CodeModel::Medium:
3143 return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
3144 default:
3145 return DAG.getNode(PPCISD::GlobalBaseReg, SDLoc(),
3146 getPointerTy(DAG.getDataLayout()));
3147 }
3148}
3149
3150const MCExpr *
3151PPCTargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
3152 unsigned JTI,
3153 MCContext &Ctx) const {
3154 if (!Subtarget.isPPC64() || Subtarget.isAIXABI())
3155 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
3156
3157 switch (getTargetMachine().getCodeModel()) {
3158 case CodeModel::Small:
3159 case CodeModel::Medium:
3160 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
3161 default:
3162 return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
3163 }
3164}
3165
3166SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
3167 EVT PtrVT = Op.getValueType();
3168 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
3169
3170 // isUsingPCRelativeCalls() returns true when PCRelative is enabled
3171 if (Subtarget.isUsingPCRelativeCalls()) {
3172 SDLoc DL(JT);
3173 EVT Ty = getPointerTy(DAG.getDataLayout());
3174 SDValue GA =
3175 DAG.getTargetJumpTable(JT->getIndex(), Ty, PPCII::MO_PCREL_FLAG);
3176 SDValue MatAddr = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3177 return MatAddr;
3178 }
3179
3180 // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
3181 // The actual address of the GlobalValue is stored in the TOC.
3182 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
3183 setUsesTOCBasePtr(DAG);
3184 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
3185 return getTOCEntry(DAG, SDLoc(JT), GA);
3186 }
3187
3188 unsigned MOHiFlag, MOLoFlag;
3189 bool IsPIC = isPositionIndependent();
3190 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
3191
3192 if (IsPIC && Subtarget.isSVR4ABI()) {
3193 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
3194 PPCII::MO_PIC_FLAG);
3195 return getTOCEntry(DAG, SDLoc(GA), GA);
3196 }
3197
3198 SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag);
3199 SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag);
3200 return LowerLabelRef(JTIHi, JTILo, IsPIC, DAG);
3201}
3202
3203SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
3204 SelectionDAG &DAG) const {
3205 EVT PtrVT = Op.getValueType();
3206 BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op);
3207 const BlockAddress *BA = BASDN->getBlockAddress();
3208
3209 // isUsingPCRelativeCalls() returns true when PCRelative is enabled
3210 if (Subtarget.isUsingPCRelativeCalls()) {
3211 SDLoc DL(BASDN);
3212 EVT Ty = getPointerTy(DAG.getDataLayout());
3213 SDValue GA = DAG.getTargetBlockAddress(BA, Ty, BASDN->getOffset(),
3214 PPCII::MO_PCREL_FLAG);
3215 SDValue MatAddr = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3216 return MatAddr;
3217 }
3218
3219 // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
3220 // The actual BlockAddress is stored in the TOC.
3221 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
3222 setUsesTOCBasePtr(DAG);
3223 SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset());
3224 return getTOCEntry(DAG, SDLoc(BASDN), GA);
3225 }
3226
3227 // 32-bit position-independent ELF stores the BlockAddress in the .got.
3228 if (Subtarget.is32BitELFABI() && isPositionIndependent())
3229 return getTOCEntry(
3230 DAG, SDLoc(BASDN),
3231 DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset()));
3232
3233 unsigned MOHiFlag, MOLoFlag;
3234 bool IsPIC = isPositionIndependent();
3235 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
3236 SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag);
3237 SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag);
3238 return LowerLabelRef(TgtBAHi, TgtBALo, IsPIC, DAG);
3239}
3240
3241SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
3242 SelectionDAG &DAG) const {
3243 if (Subtarget.isAIXABI())
3244 return LowerGlobalTLSAddressAIX(Op, DAG);
3245
3246 return LowerGlobalTLSAddressLinux(Op, DAG);
3247}
3248
3249SDValue PPCTargetLowering::LowerGlobalTLSAddressAIX(SDValue Op,
3250 SelectionDAG &DAG) const {
3251 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
3252
3253 if (DAG.getTarget().useEmulatedTLS())
3254 report_fatal_error("Emulated TLS is not yet supported on AIX");
3255
3256 SDLoc dl(GA);
3257 const GlobalValue *GV = GA->getGlobal();
3258 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3259
3260 // The general-dynamic model is the only access model supported for now, so
3261 // all the GlobalTLSAddress nodes are lowered with this model.
3262 // We need to generate two TOC entries, one for the variable offset, one for
3263 // the region handle. The global address for the TOC entry of the region
3264 // handle is created with the MO_TLSGDM_FLAG flag and the global address
3265 // for the TOC entry of the variable offset is created with MO_TLSGD_FLAG.
3266 SDValue VariableOffsetTGA =
3267 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, PPCII::MO_TLSGD_FLAG);
3268 SDValue RegionHandleTGA =
3269 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, PPCII::MO_TLSGDM_FLAG);
3270 SDValue VariableOffset = getTOCEntry(DAG, dl, VariableOffsetTGA);
3271 SDValue RegionHandle = getTOCEntry(DAG, dl, RegionHandleTGA);
3272 return DAG.getNode(PPCISD::TLSGD_AIX, dl, PtrVT, VariableOffset,
3273 RegionHandle);
3274}
3275
3276SDValue PPCTargetLowering::LowerGlobalTLSAddressLinux(SDValue Op,
3277 SelectionDAG &DAG) const {
3278 // FIXME: TLS addresses currently use medium model code sequences,
3279 // which is the most useful form. Eventually support for small and
3280 // large models could be added if users need it, at the cost of
3281 // additional complexity.
3282 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
3283 if (DAG.getTarget().useEmulatedTLS())
3284 return LowerToTLSEmulatedModel(GA, DAG);
3285
3286 SDLoc dl(GA);
3287 const GlobalValue *GV = GA->getGlobal();
3288 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3289 bool is64bit = Subtarget.isPPC64();
3290 const Module *M = DAG.getMachineFunction().getFunction().getParent();
3291 PICLevel::Level picLevel = M->getPICLevel();
3292
3293 const TargetMachine &TM = getTargetMachine();
3294 TLSModel::Model Model = TM.getTLSModel(GV);
3295
3296 if (Model == TLSModel::LocalExec) {
3297 if (Subtarget.isUsingPCRelativeCalls()) {
3298 SDValue TLSReg = DAG.getRegister(PPC::X13, MVT::i64);
3299 SDValue TGA = DAG.getTargetGlobalAddress(
3300 GV, dl, PtrVT, 0, (PPCII::MO_PCREL_FLAG | PPCII::MO_TPREL_FLAG));
3301 SDValue MatAddr =
3302 DAG.getNode(PPCISD::TLS_LOCAL_EXEC_MAT_ADDR, dl, PtrVT, TGA);
3303 return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TLSReg, MatAddr);
3304 }
3305
3306 SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3307 PPCII::MO_TPREL_HA);
3308 SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3309 PPCII::MO_TPREL_LO);
3310 SDValue TLSReg = is64bit ? DAG.getRegister(PPC::X13, MVT::i64)
3311 : DAG.getRegister(PPC::R2, MVT::i32);
3312
3313 SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg);
3314 return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi);
3315 }
3316
3317 if (Model == TLSModel::InitialExec) {
3318 bool IsPCRel = Subtarget.isUsingPCRelativeCalls();
3319 SDValue TGA = DAG.getTargetGlobalAddress(
3320 GV, dl, PtrVT, 0, IsPCRel ? PPCII::MO_GOT_TPREL_PCREL_FLAG : 0);
3321 SDValue TGATLS = DAG.getTargetGlobalAddress(
3322 GV, dl, PtrVT, 0,
3323 IsPCRel ? (PPCII::MO_TLS | PPCII::MO_PCREL_FLAG) : PPCII::MO_TLS);
3324 SDValue TPOffset;
3325 if (IsPCRel) {
3326 SDValue MatPCRel = DAG.getNode(PPCISD::MAT_PCREL_ADDR, dl, PtrVT, TGA);
3327 TPOffset = DAG.getLoad(MVT::i64, dl, DAG.getEntryNode(), MatPCRel,
3328 MachinePointerInfo());
3329 } else {
3330 SDValue GOTPtr;
3331 if (is64bit) {
3332 setUsesTOCBasePtr(DAG);
3333 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
3334 GOTPtr =
3335 DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl, PtrVT, GOTReg, TGA);
3336 } else {
3337 if (!TM.isPositionIndependent())
3338 GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT);
3339 else if (picLevel == PICLevel::SmallPIC)
3340 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
3341 else
3342 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
3343 }
3344 TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl, PtrVT, TGA, GOTPtr);
3345 }
3346 return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS);
3347 }
3348
3349 if (Model == TLSModel::GeneralDynamic) {
3350 if (Subtarget.isUsingPCRelativeCalls()) {
3351 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3352 PPCII::MO_GOT_TLSGD_PCREL_FLAG);
3353 return DAG.getNode(PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR, dl, PtrVT, TGA);
3354 }
3355
3356 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
3357 SDValue GOTPtr;
3358 if (is64bit) {
3359 setUsesTOCBasePtr(DAG);
3360 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
3361 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT,
3362 GOTReg, TGA);
3363 } else {
3364 if (picLevel == PICLevel::SmallPIC)
3365 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
3366 else
3367 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
3368 }
3369 return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT,
3370 GOTPtr, TGA, TGA);
3371 }
3372
3373 if (Model == TLSModel::LocalDynamic) {
3374 if (Subtarget.isUsingPCRelativeCalls()) {
3375 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3376 PPCII::MO_GOT_TLSLD_PCREL_FLAG);
3377 SDValue MatPCRel =
3378 DAG.getNode(PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR, dl, PtrVT, TGA);
3379 return DAG.getNode(PPCISD::PADDI_DTPREL, dl, PtrVT, MatPCRel, TGA);
3380 }
3381
3382 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
3383 SDValue GOTPtr;
3384 if (is64bit) {
3385 setUsesTOCBasePtr(DAG);
3386 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
3387 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT,
3388 GOTReg, TGA);
3389 } else {
3390 if (picLevel == PICLevel::SmallPIC)
3391 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
3392 else
3393 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
3394 }
3395 SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl,
3396 PtrVT, GOTPtr, TGA, TGA);
3397 SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl,
3398 PtrVT, TLSAddr, TGA);
3399 return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA);
3400 }
3401
3402 llvm_unreachable("Unknown TLS model!");
3403}
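// Editorial example: for the local-exec model on 64-bit non-PC-relative
// targets, the nodes built above typically select to
// "addis rD, r13, v@tprel@ha" + "addi rD, rD, v@tprel@l", with r13 holding
// the thread pointer; the other models substitute GOT-based sequences as
// constructed in the corresponding branches.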
3404
3405SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
3406 SelectionDAG &DAG) const {
3407 EVT PtrVT = Op.getValueType();
3408 GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
3409 SDLoc DL(GSDN);
3410 const GlobalValue *GV = GSDN->getGlobal();
3411
3412 // 64-bit SVR4 ABI & AIX ABI code is always position-independent.
3413 // The actual address of the GlobalValue is stored in the TOC.
3414 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
3415 if (Subtarget.isUsingPCRelativeCalls()) {
3416 EVT Ty = getPointerTy(DAG.getDataLayout());
3417 if (isAccessedAsGotIndirect(Op)) {
3418 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, Ty, GSDN->getOffset(),
3419 PPCII::MO_PCREL_FLAG |
3420 PPCII::MO_GOT_FLAG);
3421 SDValue MatPCRel = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3422 SDValue Load = DAG.getLoad(MVT::i64, DL, DAG.getEntryNode(), MatPCRel,
3423 MachinePointerInfo());
3424 return Load;
3425 } else {
3426 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, Ty, GSDN->getOffset(),
3427 PPCII::MO_PCREL_FLAG);
3428 return DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3429 }
3430 }
3431 setUsesTOCBasePtr(DAG);
3432 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset());
3433 return getTOCEntry(DAG, DL, GA);
3434 }
3435
3436 unsigned MOHiFlag, MOLoFlag;
3437 bool IsPIC = isPositionIndependent();
3438 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag, GV);
3439
3440 if (IsPIC && Subtarget.isSVR4ABI()) {
3441 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT,
3442 GSDN->getOffset(),
3443 PPCII::MO_PIC_FLAG);
3444 return getTOCEntry(DAG, DL, GA);
3445 }
3446
3447 SDValue GAHi =
3448 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag);
3449 SDValue GALo =
3450 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag);
3451
3452 return LowerLabelRef(GAHi, GALo, IsPIC, DAG);
3453}
3454
3455SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
3456 bool IsStrict = Op->isStrictFPOpcode();
3457 ISD::CondCode CC =
3458 cast<CondCodeSDNode>(Op.getOperand(IsStrict ? 3 : 2))->get();
3459 SDValue LHS = Op.getOperand(IsStrict ? 1 : 0);
3460 SDValue RHS = Op.getOperand(IsStrict ? 2 : 1);
3461 SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
3462 EVT LHSVT = LHS.getValueType();
3463 SDLoc dl(Op);
3464
3465 // Soften the setcc with libcall if it is fp128.
3466 if (LHSVT == MVT::f128) {
3467 assert(!Subtarget.hasP9Vector() &&
3468 "SETCC for f128 is already legal under Power9!");
3469 softenSetCCOperands(DAG, LHSVT, LHS, RHS, CC, dl, LHS, RHS, Chain,
3470 Op->getOpcode() == ISD::STRICT_FSETCCS);
3471 if (RHS.getNode())
3472 LHS = DAG.getNode(ISD::SETCC, dl, Op.getValueType(), LHS, RHS,
3473 DAG.getCondCode(CC));
3474 if (IsStrict)
3475 return DAG.getMergeValues({LHS, Chain}, dl);
3476 return LHS;
3477 }
3478
3479 assert(!IsStrict && "Don't know how to handle STRICT_FSETCC!");
3480
3481 if (Op.getValueType() == MVT::v2i64) {
3482 // When the operands themselves are v2i64 values, we need to do something
3483 // special because VSX has no underlying comparison operations for these.
3484 if (LHS.getValueType() == MVT::v2i64) {
3485 // Equality can be handled by casting to the legal type for Altivec
3486 // comparisons, everything else needs to be expanded.
3487 if (CC == ISD::SETEQ || CC == ISD::SETNE) {
3488 return DAG.getNode(
3489 ISD::BITCAST, dl, MVT::v2i64,
3490 DAG.getSetCC(dl, MVT::v4i32,
3491 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, LHS),
3492 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, RHS), CC));
3493 }
3494
3495 return SDValue();
3496 }
3497
3498 // We handle most of these in the usual way.
3499 return Op;
3500 }
3501
3502 // If we're comparing for equality to zero, expose the fact that this is
3503 // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
3504 // fold the new nodes.
3505 if (SDValue V = lowerCmpEqZeroToCtlzSrl(Op, DAG))
3506 return V;
3507
3508 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
3509 // Leave comparisons against 0 and -1 alone for now, since they're usually
3510 // optimized. FIXME: revisit this when we can custom lower all setcc
3511 // optimizations.
3512 if (C->isAllOnesValue() || C->isNullValue())
3513 return SDValue();
3514 }
3515
3516 // If we have an integer seteq/setne, turn it into a compare against zero
3517 // by xor'ing the rhs with the lhs, which is faster than setting a
3518 // condition register, reading it back out, and masking the correct bit. The
3519 // normal approach here uses sub to do this instead of xor. Using xor exposes
3520 // the result to other bit-twiddling opportunities.
3521 if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
3522 EVT VT = Op.getValueType();
3523 SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, LHS, RHS);
3524 return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC);
3525 }
3526 return SDValue();
3527}
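// Editorial example: an i32 "seteq %a, %b" becomes a comparison of (%a ^ %b)
// against zero via the xor rewrite above, which lowerCmpEqZeroToCtlzSrl can
// then turn into "cntlzw rD, rX" + "srwi rD, rD, 5": cntlzw yields 32 exactly
// when rX is zero, and the shift extracts that as a 0/1 result.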
3528
3529SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
3530 SDNode *Node = Op.getNode();
3531 EVT VT = Node->getValueType(0);
3532 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3533 SDValue InChain = Node->getOperand(0);
3534 SDValue VAListPtr = Node->getOperand(1);
3535 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
3536 SDLoc dl(Node);
3537
3538 assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");
3539
3540 // gpr_index
3541 SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
3542 VAListPtr, MachinePointerInfo(SV), MVT::i8);
3543 InChain = GprIndex.getValue(1);
3544
3545 if (VT == MVT::i64) {
3546 // Check if GprIndex is odd; i64 arguments must start at an even index.
3547 SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex,
3548 DAG.getConstant(1, dl, MVT::i32));
3549 SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd,
3550 DAG.getConstant(0, dl, MVT::i32), ISD::SETNE);
3551 SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex,
3552 DAG.getConstant(1, dl, MVT::i32));
3553 // Align GprIndex to be even if it isn't
3554 GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne,
3555 GprIndex);
3556 }
3557
3558 // fpr index is 1 byte after gpr
3559 SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3560 DAG.getConstant(1, dl, MVT::i32));
3561
3562 // fpr
3563 SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
3564 FprPtr, MachinePointerInfo(SV), MVT::i8);
3565 InChain = FprIndex.getValue(1);
3566
3567 SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3568 DAG.getConstant(8, dl, MVT::i32));
3569
3570 SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3571 DAG.getConstant(4, dl, MVT::i32));
3572
3573 // areas
3574 SDValue OverflowArea =
3575 DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, MachinePointerInfo());
3576 InChain = OverflowArea.getValue(1);
3577
3578 SDValue RegSaveArea =
3579 DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, MachinePointerInfo());
3580 InChain = RegSaveArea.getValue(1);
3581
3582 // select overflow_area if index >= 8
3583 SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex,
3584 DAG.getConstant(8, dl, MVT::i32), ISD::SETLT);
3585
3586 // adjustment constant gpr_index * 4/8
3587 SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32,
3588 VT.isInteger() ? GprIndex : FprIndex,
3589 DAG.getConstant(VT.isInteger() ? 4 : 8, dl,
3590 MVT::i32));
3591
3592 // OurReg = RegSaveArea + RegConstant
3593 SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea,
3594 RegConstant);
3595
3596 // Floating types are 32 bytes into RegSaveArea
3597 if (VT.isFloatingPoint())
3598 OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
3599 DAG.getConstant(32, dl, MVT::i32));
3600
3601 // increase {f,g}pr_index by 1 (or 2 if VT is i64)
3602 SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3603 VT.isInteger() ? GprIndex : FprIndex,
3604 DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl,
3605 MVT::i32));
3606
3607 InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
3608 VT.isInteger() ? VAListPtr : FprPtr,
3609 MachinePointerInfo(SV), MVT::i8);
3610
3611 // determine if we should load from reg_save_area or overflow_area
3612 SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea);
3613
3614 // increase overflow_area by 4/8 if gpr/fpr index >= 8
3615 SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea,
3616 DAG.getConstant(VT.isInteger() ? 4 : 8,
3617 dl, MVT::i32));
3618
3619 OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea,
3620 OverflowAreaPlusN);
3621
3622 InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr,
3623 MachinePointerInfo(), MVT::i32);
3624
3625 return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo());
3626}
3627
3628SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
3629 assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");
3630
3631 // We have to copy the entire va_list struct:
3632 // 2*sizeof(char) + 2 bytes of padding + 2*sizeof(char*) = 12 bytes
3633 return DAG.getMemcpy(Op.getOperand(0), Op, Op.getOperand(1), Op.getOperand(2),
3634 DAG.getConstant(12, SDLoc(Op), MVT::i32), Align(8),
3635 false, true, false, MachinePointerInfo(),
3636 MachinePointerInfo());
3637}
3638
3639SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
3640 SelectionDAG &DAG) const {
3641 if (Subtarget.isAIXABI())
3642 report_fatal_error("ADJUST_TRAMPOLINE operation is not supported on AIX.");
3643
3644 return Op.getOperand(0);
3645}
3646
3647SDValue PPCTargetLowering::LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const {
3648 MachineFunction &MF = DAG.getMachineFunction();
3649 PPCFunctionInfo &MFI = *MF.getInfo<PPCFunctionInfo>();
3650
3651 assert((Op.getOpcode() == ISD::INLINEASM ||
3652 Op.getOpcode() == ISD::INLINEASM_BR) &&
3653 "Expecting Inline ASM node.");
3654
3655 // If an LR store is already known to be required then there is no point in
3656 // checking this ASM as well.
3657 if (MFI.isLRStoreRequired())
3658 return Op;
3659
3660 // Inline ASM nodes have an optional last operand that is an incoming Flag of
3661 // type MVT::Glue. We want to ignore this last operand if that is the case.
3662 unsigned NumOps = Op.getNumOperands();
3663 if (Op.getOperand(NumOps - 1).getValueType() == MVT::Glue)
3664 --NumOps;
3665
3666 // Check all operands that may contain the LR.
3667 for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
3668 unsigned Flags = cast<ConstantSDNode>(Op.getOperand(i))->getZExtValue();
3669 unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);
3670 ++i; // Skip the ID value.
3671
3672 switch (InlineAsm::getKind(Flags)) {
3673 default:
3674 llvm_unreachable("Bad flags!");
3675 case InlineAsm::Kind_RegUse:
3676 case InlineAsm::Kind_Imm:
3677 case InlineAsm::Kind_Mem:
3678 i += NumVals;
3679 break;
3680 case InlineAsm::Kind_Clobber:
3681 case InlineAsm::Kind_RegDef:
3682 case InlineAsm::Kind_RegDefEarlyClobber: {
3683 for (; NumVals; --NumVals, ++i) {
3684 Register Reg = cast<RegisterSDNode>(Op.getOperand(i))->getReg();
3685 if (Reg != PPC::LR && Reg != PPC::LR8)
3686 continue;
3687 MFI.setLRStoreRequired();
3688 return Op;
3689 }
3690 break;
3691 }
3692 }
3693 }
3694
3695 return Op;
3696}
3697
3698SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
3699 SelectionDAG &DAG) const {
3700 if (Subtarget.isAIXABI())
3701 report_fatal_error("INIT_TRAMPOLINE operation is not supported on AIX.");
3702
3703 SDValue Chain = Op.getOperand(0);
3704 SDValue Trmp = Op.getOperand(1); // trampoline
3705 SDValue FPtr = Op.getOperand(2); // nested function
3706 SDValue Nest = Op.getOperand(3); // 'nest' parameter value
3707 SDLoc dl(Op);
3708
3709 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3710 bool isPPC64 = (PtrVT == MVT::i64);
3711 Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
3712
3713 TargetLowering::ArgListTy Args;
3714 TargetLowering::ArgListEntry Entry;
3715
3716 Entry.Ty = IntPtrTy;
3717 Entry.Node = Trmp; Args.push_back(Entry);
3718
3719 // TrampSize == (isPPC64 ? 48 : 40);
3720 Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl,
3721 isPPC64 ? MVT::i64 : MVT::i32);
3722 Args.push_back(Entry);
3723
3724 Entry.Node = FPtr; Args.push_back(Entry);
3725 Entry.Node = Nest; Args.push_back(Entry);
3726
3727 // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg)
3728 TargetLowering::CallLoweringInfo CLI(DAG);
3729 CLI.setDebugLoc(dl).setChain(Chain).setLibCallee(
3730 CallingConv::C, Type::getVoidTy(*DAG.getContext()),
3731 DAG.getExternalSymbol("__trampoline_setup", PtrVT), std::move(Args));
3732
3733 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
3734 return CallResult.second;
3735}
3736
3737SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3738 MachineFunction &MF = DAG.getMachineFunction();
3739 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3740 EVT PtrVT = getPointerTy(MF.getDataLayout());
3741
3742 SDLoc dl(Op);
3743
3744 if (Subtarget.isPPC64() || Subtarget.isAIXABI()) {
3745 // vastart just stores the address of the VarArgsFrameIndex slot into the
3746 // memory location argument.
3747 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3748 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3749 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
3750 MachinePointerInfo(SV));
3751 }
3752
3753 // For the 32-bit SVR4 ABI we follow the layout of the va_list struct.
3754 // We suppose the given va_list is already allocated.
3755 //
3756 // typedef struct {
3757 // char gpr; /* index into the array of 8 GPRs
3758 // * stored in the register save area
3759 // * gpr=0 corresponds to r3,
3760 // * gpr=1 to r4, etc.
3761 // */
3762 // char fpr; /* index into the array of 8 FPRs
3763 // * stored in the register save area
3764 // * fpr=0 corresponds to f1,
3765 // * fpr=1 to f2, etc.
3766 // */
3767 // char *overflow_arg_area;
3768 // /* location on stack that holds
3769 // * the next overflow argument
3770 // */
3771 // char *reg_save_area;
3772 // /* where r3:r10 and f1:f8 (if saved)
3773 // * are stored
3774 // */
3775 // } va_list[1];
3776
3777 SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32);
3778 SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32);
3779 SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(),
3780 PtrVT);
3781 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
3782 PtrVT);
3783
3784 uint64_t FrameOffset = PtrVT.getSizeInBits()/8;
3785 SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT);
3786
3787 uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1;
3788 SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT);
3789
3790 uint64_t FPROffset = 1;
3791 SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT);
3792
3793 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3794
3795 // Store first byte : number of int regs
3796 SDValue firstStore =
3797 DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, Op.getOperand(1),
3798 MachinePointerInfo(SV), MVT::i8);
3799 uint64_t nextOffset = FPROffset;
3800 SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1),
3801 ConstFPROffset);
3802
3803 // Store second byte : number of float regs
3804 SDValue secondStore =
3805 DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr,
3806 MachinePointerInfo(SV, nextOffset), MVT::i8);
3807 nextOffset += StackOffset;
3808 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);
3809
3810 // Store second word : arguments given on stack
3811 SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr,
3812 MachinePointerInfo(SV, nextOffset));
3813 nextOffset += FrameOffset;
3814 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);
3815
3816 // Store third word : arguments given in registers
3817 return DAG.getStore(thirdStore, dl, FR, nextPtr,
3818 MachinePointerInfo(SV, nextOffset));
3819}
3820
3821/// FPR - The set of FP registers that should be allocated for arguments
3822/// on Darwin and AIX.
3823static const MCPhysReg FPR[] = {PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5,
3824 PPC::F6, PPC::F7, PPC::F8, PPC::F9, PPC::F10,
3825 PPC::F11, PPC::F12, PPC::F13};
3826
3827/// CalculateStackSlotSize - Calculates the size reserved for this argument on
3828/// the stack.
3829static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags,
3830 unsigned PtrByteSize) {
3831 unsigned ArgSize = ArgVT.getStoreSize();
3832 if (Flags.isByVal())
3833 ArgSize = Flags.getByValSize();
3834
3835 // Round up to multiples of the pointer size, except for array members,
3836 // which are always packed.
3837 if (!Flags.isInConsecutiveRegs())
3838 ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3839
3840 return ArgSize;
3841}
3842
3843/// CalculateStackSlotAlignment - Calculates the alignment of this argument
3844/// on the stack.
3845static Align CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT,
3846 ISD::ArgFlagsTy Flags,
3847 unsigned PtrByteSize) {
3848 Align Alignment(PtrByteSize);
3849
3850 // Altivec parameters are padded to a 16 byte boundary.
3851 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
3852 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
3853 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
3854 ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
3855 Alignment = Align(16);
3856
3857 // ByVal parameters are aligned as requested.
3858 if (Flags.isByVal()) {
3859 auto BVAlign = Flags.getNonZeroByValAlign();
3860 if (BVAlign > PtrByteSize) {
3861 if (BVAlign.value() % PtrByteSize != 0)
3862 llvm_unreachable(
3863 "ByVal alignment is not a multiple of the pointer size");
3864
3865 Alignment = BVAlign;
3866 }
3867 }
3868
3869 // Array members are always packed to their original alignment.
3870 if (Flags.isInConsecutiveRegs()) {
3871 // If the array member was split into multiple registers, the first
3872 // needs to be aligned to the size of the full type. (Except for
3873 // ppcf128, which is only aligned as its f64 components.)
3874 if (Flags.isSplit() && OrigVT != MVT::ppcf128)
3875 Alignment = Align(OrigVT.getStoreSize());
3876 else
3877 Alignment = Align(ArgVT.getStoreSize());
3878 }
3879
3880 return Alignment;
3881}
3882
3883/// CalculateStackSlotUsed - Return whether this argument will use its
3884/// stack slot (instead of being passed in registers). ArgOffset,
3885/// AvailableFPRs, and AvailableVRs must hold the current argument
3886/// position, and will be updated to account for this argument.
3887static bool CalculateStackSlotUsed(const PPCSubtarget &Subtarget, EVT ArgVT,
3888 EVT OrigVT, ISD::ArgFlagsTy Flags,
3889 unsigned PtrByteSize, unsigned LinkageSize,
3890 unsigned ParamAreaSize, unsigned &ArgOffset,
3891 unsigned &AvailableFPRs,
3892 unsigned &AvailableVRs) {
3893 bool UseMemory = false;
3894
3895 // Respect alignment of argument on the stack.
3896 Align Alignment =
3897 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
3898 ArgOffset = alignTo(ArgOffset, Alignment);
3899 // If there's no space left in the argument save area, we must
3900 // use memory (this check also catches zero-sized arguments).
3901 if (ArgOffset >= LinkageSize + ParamAreaSize)
3902 UseMemory = true;
3903
3904 // Allocate argument on the stack.
3905 ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
3906 if (Flags.isInConsecutiveRegsLast())
3907 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3908 // If we overran the argument save area, we must use memory
3909 // (this check catches arguments passed partially in memory)
3910 if (ArgOffset > LinkageSize + ParamAreaSize)
3911 UseMemory = true;
3912
3913 // However, if the argument is actually passed in an FPR or a VR,
3914 // we don't use memory after all.
3915 if (!Flags.isByVal()) {
3916 if (ArgVT == MVT::f32 || ArgVT == MVT::f64)
3917 if (AvailableFPRs > 0) {
3918 --AvailableFPRs;
3919 return false;
3920 }
3921 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
3922 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
3923 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
3924 ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
3925 if (AvailableVRs > 0) {
3926 --AvailableVRs;
3927 return false;
3928 }
3929 } else if (Subtarget.isPPC64() && Subtarget.isELFv2ABI() &&
3930 Flags.getByValSize() >= 8)
3931 // For 64-bit ELFv2, a by-value object whose size is at least 8 bytes
3932 // will be copied to the parameter save area. This is for compatibility
3933 // with other compilers that require byval parameters to be stored in the
3934 // caller's parameter save area.
3935 return true;
3936
3937 return UseMemory;
3938}
3939
3940/// EnsureStackAlignment - Round stack frame size up from NumBytes to
3941/// ensure minimum alignment required for target.
3942static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering,
3943 unsigned NumBytes) {
3944 return alignTo(NumBytes, Lowering->getStackAlign());
3945}
3946
3947SDValue PPCTargetLowering::LowerFormalArguments(
3948 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3949 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3950 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3951 if (Subtarget.isAIXABI())
3952 return LowerFormalArguments_AIX(Chain, CallConv, isVarArg, Ins, dl, DAG,
3953 InVals);
3954 if (Subtarget.is64BitELFABI())
3955 return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
3956 InVals);
3957 assert(Subtarget.is32BitELFABI());
3958 return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
3959 InVals);
3960}
3961
3962SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
3963 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3964 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3965 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3966
3967 // 32-bit SVR4 ABI Stack Frame Layout:
3968 // +-----------------------------------+
3969 // +--> | Back chain |
3970 // | +-----------------------------------+
3971 // | | Floating-point register save area |
3972 // | +-----------------------------------+
3973 // | | General register save area |
3974 // | +-----------------------------------+
3975 // | | CR save word |
3976 // | +-----------------------------------+
3977 // | | VRSAVE save word |
3978 // | +-----------------------------------+
3979 // | | Alignment padding |
3980 // | +-----------------------------------+
3981 // | | Vector register save area |
3982 // | +-----------------------------------+
3983 // | | Local variable space |
3984 // | +-----------------------------------+
3985 // | | Parameter list area |
3986 // | +-----------------------------------+
3987 // | | LR save word |
3988 // | +-----------------------------------+
3989 // SP--> +--- | Back chain |
3990 // +-----------------------------------+
3991 //
3992 // Specifications:
3993 // System V Application Binary Interface PowerPC Processor Supplement
3994 // AltiVec Technology Programming Interface Manual
3995
3996 MachineFunction &MF = DAG.getMachineFunction();
3997 MachineFrameInfo &MFI = MF.getFrameInfo();
3998 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3999
4000 EVT PtrVT = getPointerTy(MF.getDataLayout());
4001 // Potential tail calls could cause overwriting of argument stack slots.
4002 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
4003 (CallConv == CallingConv::Fast));
4004 const Align PtrAlign(4);
4005
4006 // Assign locations to all of the incoming arguments.
4007 SmallVector<CCValAssign, 16> ArgLocs;
4008 PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
4009 *DAG.getContext());
4010
4011 // Reserve space for the linkage area on the stack.
4012 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4013 CCInfo.AllocateStack(LinkageSize, PtrAlign);
4014 if (useSoftFloat())
4015 CCInfo.PreAnalyzeFormalArguments(Ins);
4016
4017 CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4);
4018 CCInfo.clearWasPPCF128();
4019
4020 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
4021 CCValAssign &VA = ArgLocs[i];
4022
4023 // Arguments stored in registers.
4024 if (VA.isRegLoc()) {
4025 const TargetRegisterClass *RC;
4026 EVT ValVT = VA.getValVT();
4027
4028 switch (ValVT.getSimpleVT().SimpleTy) {
4029 default:
4030        llvm_unreachable("ValVT not supported by formal arguments Lowering");
4031 case MVT::i1:
4032 case MVT::i32:
4033 RC = &PPC::GPRCRegClass;
4034 break;
4035 case MVT::f32:
4036 if (Subtarget.hasP8Vector())
4037 RC = &PPC::VSSRCRegClass;
4038 else if (Subtarget.hasSPE())
4039 RC = &PPC::GPRCRegClass;
4040 else
4041 RC = &PPC::F4RCRegClass;
4042 break;
4043 case MVT::f64:
4044 if (Subtarget.hasVSX())
4045 RC = &PPC::VSFRCRegClass;
4046 else if (Subtarget.hasSPE())
4047 // SPE passes doubles in GPR pairs.
4048 RC = &PPC::GPRCRegClass;
4049 else
4050 RC = &PPC::F8RCRegClass;
4051 break;
4052 case MVT::v16i8:
4053 case MVT::v8i16:
4054 case MVT::v4i32:
4055 RC = &PPC::VRRCRegClass;
4056 break;
4057 case MVT::v4f32:
4058 RC = &PPC::VRRCRegClass;
4059 break;
4060 case MVT::v2f64:
4061 case MVT::v2i64:
4062 RC = &PPC::VRRCRegClass;
4063 break;
4064 }
4065
4066 SDValue ArgValue;
4067 // Transform the arguments stored in physical registers into
4068 // virtual ones.
4069 if (VA.getLocVT() == MVT::f64 && Subtarget.hasSPE()) {
4070        assert(i + 1 < e && "No second half of double precision argument");
4071 unsigned RegLo = MF.addLiveIn(VA.getLocReg(), RC);
4072 unsigned RegHi = MF.addLiveIn(ArgLocs[++i].getLocReg(), RC);
4073 SDValue ArgValueLo = DAG.getCopyFromReg(Chain, dl, RegLo, MVT::i32);
4074 SDValue ArgValueHi = DAG.getCopyFromReg(Chain, dl, RegHi, MVT::i32);
4075 if (!Subtarget.isLittleEndian())
4076 std::swap (ArgValueLo, ArgValueHi);
4077 ArgValue = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, ArgValueLo,
4078 ArgValueHi);
4079 } else {
4080 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
4081 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg,
4082 ValVT == MVT::i1 ? MVT::i32 : ValVT);
4083 if (ValVT == MVT::i1)
4084 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue);
4085 }
4086
4087 InVals.push_back(ArgValue);
4088 } else {
4089 // Argument stored in memory.
4090      assert(VA.isMemLoc());
4091
4092      // Get the extended size of the argument type on the stack.
4093 unsigned ArgSize = VA.getLocVT().getStoreSize();
4094 // Get the actual size of the argument type
4095 unsigned ObjSize = VA.getValVT().getStoreSize();
4096 unsigned ArgOffset = VA.getLocMemOffset();
4097 // Stack objects in PPC32 are right justified.
4098 ArgOffset += ArgSize - ObjSize;
4099 int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, isImmutable);
4100
4101 // Create load nodes to retrieve arguments from the stack.
4102 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4103 InVals.push_back(
4104 DAG.getLoad(VA.getValVT(), dl, Chain, FIN, MachinePointerInfo()));
4105 }
4106 }
4107
4108 // Assign locations to all of the incoming aggregate by value arguments.
4109 // Aggregates passed by value are stored in the local variable space of the
4110 // caller's stack frame, right above the parameter list area.
4111 SmallVector<CCValAssign, 16> ByValArgLocs;
4112 CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
4113 ByValArgLocs, *DAG.getContext());
4114
4115 // Reserve stack space for the allocations in CCInfo.
4116 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrAlign);
4117
4118 CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal);
4119
4120 // Area that is at least reserved in the caller of this function.
4121 unsigned MinReservedArea = CCByValInfo.getNextStackOffset();
4122 MinReservedArea = std::max(MinReservedArea, LinkageSize);
4123
4124 // Set the size that is at least reserved in caller of this function. Tail
4125 // call optimized function's reserved stack space needs to be aligned so that
4126 // taking the difference between two stack areas will result in an aligned
4127 // stack.
4128 MinReservedArea =
4129 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
4130 FuncInfo->setMinReservedArea(MinReservedArea);
4131
4132 SmallVector<SDValue, 8> MemOps;
4133
4134 // If the function takes variable number of arguments, make a frame index for
4135 // the start of the first vararg value... for expansion of llvm.va_start.
4136 if (isVarArg) {
4137 static const MCPhysReg GPArgRegs[] = {
4138 PPC::R3, PPC::R4, PPC::R5, PPC::R6,
4139 PPC::R7, PPC::R8, PPC::R9, PPC::R10,
4140 };
4141 const unsigned NumGPArgRegs = array_lengthof(GPArgRegs);
4142
4143 static const MCPhysReg FPArgRegs[] = {
4144 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
4145 PPC::F8
4146 };
4147 unsigned NumFPArgRegs = array_lengthof(FPArgRegs);
4148
4149 if (useSoftFloat() || hasSPE())
4150 NumFPArgRegs = 0;
4151
4152 FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs));
4153 FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs));
4154
4155 // Make room for NumGPArgRegs and NumFPArgRegs.
4156 int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
4157 NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8;
4158
4159 FuncInfo->setVarArgsStackOffset(
4160 MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
4161 CCInfo.getNextStackOffset(), true));
4162
4163 FuncInfo->setVarArgsFrameIndex(
4164 MFI.CreateStackObject(Depth, Align(8), false));
4165 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
4166
4167 // The fixed integer arguments of a variadic function are stored to the
4168 // VarArgsFrameIndex on the stack so that they may be loaded by
4169 // dereferencing the result of va_next.
4170 for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
4171 // Get an existing live-in vreg, or add a new one.
4172 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
4173 if (!VReg)
4174 VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);
4175
4176 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4177 SDValue Store =
4178 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4179 MemOps.push_back(Store);
4180 // Increment the address by four for the next argument to store
4181 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
4182 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4183 }
4184
4185 // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6
4186 // is set.
4187 // The double arguments are stored to the VarArgsFrameIndex
4188 // on the stack.
4189 for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
4190 // Get an existing live-in vreg, or add a new one.
4191 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
4192 if (!VReg)
4193 VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);
4194
4195 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64);
4196 SDValue Store =
4197 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4198 MemOps.push_back(Store);
4199 // Increment the address by eight for the next argument to store
4200 SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl,
4201 PtrVT);
4202 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4203 }
4204 }
4205
4206 if (!MemOps.empty())
4207 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4208
4209 return Chain;
4210}
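A hedged source-level illustration of why the register spill above exists: a 32-bit SVR4 variadic callee must be able to reach its fixed GPR (and, with hard float, FPR) arguments through va_arg, so r3-r10 and f1-f8 are dumped to the VarArgsFrameIndex object on entry. The function below is hypothetical:

  #include <cstdarg>
  int sum(int n, ...) {
    va_list ap;
    va_start(ap, n); // walks the spill area materialized above
    int s = 0;
    for (int i = 0; i < n; ++i)
      s += va_arg(ap, int);
    va_end(ap);
    return s;
  }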
4211
4212// PPC64 passes i8, i16, and i32 values in i64 registers. Promote
4213// value to MVT::i64 and then truncate to the correct register size.
4214SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags,
4215 EVT ObjectVT, SelectionDAG &DAG,
4216 SDValue ArgVal,
4217 const SDLoc &dl) const {
4218 if (Flags.isSExt())
4219 ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal,
4220 DAG.getValueType(ObjectVT));
4221 else if (Flags.isZExt())
4222 ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal,
4223 DAG.getValueType(ObjectVT));
4224
4225 return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal);
4226}
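A hedged worked example of what the Assert nodes buy here (values illustrative): for an i32 argument carrying the signext attribute whose source value is -1, the 64-bit GPR arrives holding 0xFFFFFFFFFFFFFFFF; AssertSext records that guarantee, so the final TRUNCATE is free and later sign extensions of the result can be folded away.

  // source:   void f(int x) called with x = -1, 'signext' on the parameter
  // register: X3 = 0xFFFFFFFFFFFFFFFF on entry
  // DAG:      TRUNCATE(AssertSext(CopyFromReg(X3), i32)) -> i32 -1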
4227
4228SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
4229 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
4230 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
4231 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
4232 // TODO: add description of PPC stack frame format, or at least some docs.
4233 //
4234 bool isELFv2ABI = Subtarget.isELFv2ABI();
4235 bool isLittleEndian = Subtarget.isLittleEndian();
4236 MachineFunction &MF = DAG.getMachineFunction();
4237 MachineFrameInfo &MFI = MF.getFrameInfo();
4238 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
4239
4240  assert(!(CallConv == CallingConv::Fast && isVarArg) &&
4241         "fastcc not supported on varargs functions");
4242
4243 EVT PtrVT = getPointerTy(MF.getDataLayout());
4244 // Potential tail calls could cause overwriting of argument stack slots.
4245 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
4246 (CallConv == CallingConv::Fast));
4247 unsigned PtrByteSize = 8;
4248 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4249
4250 static const MCPhysReg GPR[] = {
4251 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4252 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4253 };
4254 static const MCPhysReg VR[] = {
4255 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4256 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4257 };
4258
4259 const unsigned Num_GPR_Regs = array_lengthof(GPR);
4260 const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
4261 const unsigned Num_VR_Regs = array_lengthof(VR);
4262
4263 // Do a first pass over the arguments to determine whether the ABI
4264 // guarantees that our caller has allocated the parameter save area
4265 // on its stack frame. In the ELFv1 ABI, this is always the case;
4266 // in the ELFv2 ABI, it is true if this is a vararg function or if
4267 // any parameter is located in a stack slot.
4268
4269 bool HasParameterArea = !isELFv2ABI || isVarArg;
4270 unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize;
4271 unsigned NumBytes = LinkageSize;
4272 unsigned AvailableFPRs = Num_FPR_Regs;
4273 unsigned AvailableVRs = Num_VR_Regs;
4274 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
4275 if (Ins[i].Flags.isNest())
4276 continue;
4277
4278 if (CalculateStackSlotUsed(Subtarget, Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags,
4279 PtrByteSize, LinkageSize, ParamAreaSize,
4280 NumBytes, AvailableFPRs, AvailableVRs))
4281 HasParameterArea = true;
4282 }
4283
4284 // Add DAG nodes to load the arguments or copy them out of registers. On
4285 // entry to a function on PPC, the arguments start after the linkage area,
4286 // although the first ones are often in registers.
4287
4288 unsigned ArgOffset = LinkageSize;
4289 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
4290 SmallVector<SDValue, 8> MemOps;
4291 Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
4292 unsigned CurArgIdx = 0;
4293 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
4294 SDValue ArgVal;
4295 bool needsLoad = false;
4296 EVT ObjectVT = Ins[ArgNo].VT;
4297 EVT OrigVT = Ins[ArgNo].ArgVT;
4298 unsigned ObjSize = ObjectVT.getStoreSize();
4299 unsigned ArgSize = ObjSize;
4300 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
4301 if (Ins[ArgNo].isOrigArg()) {
4302 std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
4303 CurArgIdx = Ins[ArgNo].getOrigArgIndex();
4304 }
4305    // We re-align the argument offset for each argument, except under the
4306    // fast calling convention, where we must make sure to do so only when
4307    // the argument will actually use a stack slot.
4308 unsigned CurArgOffset;
4309 Align Alignment;
4310 auto ComputeArgOffset = [&]() {
4311 /* Respect alignment of argument on the stack. */
4312 Alignment =
4313 CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize);
4314 ArgOffset = alignTo(ArgOffset, Alignment);
4315 CurArgOffset = ArgOffset;
4316 };
4317
4318 if (CallConv != CallingConv::Fast) {
4319 ComputeArgOffset();
4320
4321 /* Compute GPR index associated with argument offset. */
4322 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
4323 GPR_idx = std::min(GPR_idx, Num_GPR_Regs);
4324 }
4325
4326 // FIXME the codegen can be much improved in some cases.
4327 // We do not have to keep everything in memory.
4328 if (Flags.isByVal()) {
4329      assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
4330
4331 if (CallConv == CallingConv::Fast)
4332 ComputeArgOffset();
4333
4334      // ObjSize is the true size; ArgSize is it rounded up to a multiple of registers.
4335 ObjSize = Flags.getByValSize();
4336 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4337 // Empty aggregate parameters do not take up registers. Examples:
4338 // struct { } a;
4339 // union { } b;
4340 // int c[0];
4341 // etc. However, we have to provide a place-holder in InVals, so
4342 // pretend we have an 8-byte item at the current address for that
4343 // purpose.
4344 if (!ObjSize) {
4345 int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
4346 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4347 InVals.push_back(FIN);
4348 continue;
4349 }
4350
4351 // Create a stack object covering all stack doublewords occupied
4352 // by the argument. If the argument is (fully or partially) on
4353 // the stack, or if the argument is fully in registers but the
4354      // caller has allocated the parameter save area anyway, we can refer
4355 // directly to the caller's stack frame. Otherwise, create a
4356 // local copy in our own frame.
4357 int FI;
4358 if (HasParameterArea ||
4359 ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize)
4360 FI = MFI.CreateFixedObject(ArgSize, ArgOffset, false, true);
4361 else
4362 FI = MFI.CreateStackObject(ArgSize, Alignment, false);
4363 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4364
4365 // Handle aggregates smaller than 8 bytes.
4366 if (ObjSize < PtrByteSize) {
4367 // The value of the object is its address, which differs from the
4368 // address of the enclosing doubleword on big-endian systems.
4369 SDValue Arg = FIN;
4370 if (!isLittleEndian) {
4371 SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT);
4372 Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff);
4373 }
4374 InVals.push_back(Arg);
4375
4376 if (GPR_idx != Num_GPR_Regs) {
4377 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
4378 FuncInfo->addLiveInAttr(VReg, Flags);
4379 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4380 SDValue Store;
4381
4382 if (ObjSize==1 || ObjSize==2 || ObjSize==4) {
4383 EVT ObjType = (ObjSize == 1 ? MVT::i8 :
4384 (ObjSize == 2 ? MVT::i16 : MVT::i32));
4385 Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg,
4386 MachinePointerInfo(&*FuncArg), ObjType);
4387 } else {
4388 // For sizes that don't fit a truncating store (3, 5, 6, 7),
4389 // store the whole register as-is to the parameter save area
4390 // slot.
4391 Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
4392 MachinePointerInfo(&*FuncArg));
4393 }
4394
4395 MemOps.push_back(Store);
4396 }
4397 // Whether we copied from a register or not, advance the offset
4398 // into the parameter save area by a full doubleword.
4399 ArgOffset += PtrByteSize;
4400 continue;
4401 }
4402
4403 // The value of the object is its address, which is the address of
4404 // its first stack doubleword.
4405 InVals.push_back(FIN);
4406
4407 // Store whatever pieces of the object are in registers to memory.
4408 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
4409 if (GPR_idx == Num_GPR_Regs)
4410 break;
4411
4412 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4413 FuncInfo->addLiveInAttr(VReg, Flags);
4414 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4415 SDValue Addr = FIN;
4416 if (j) {
4417 SDValue Off = DAG.getConstant(j, dl, PtrVT);
4418 Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off);
4419 }
4420 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr,
4421 MachinePointerInfo(&*FuncArg, j));
4422 MemOps.push_back(Store);
4423 ++GPR_idx;
4424 }
4425 ArgOffset += ArgSize;
4426 continue;
4427 }
4428
4429 switch (ObjectVT.getSimpleVT().SimpleTy) {
4430    default: llvm_unreachable("Unhandled argument type!");
4431 case MVT::i1:
4432 case MVT::i32:
4433 case MVT::i64:
4434 if (Flags.isNest()) {
4435 // The 'nest' parameter, if any, is passed in R11.
4436 unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass);
4437 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4438
4439 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
4440 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
4441
4442 break;
4443 }
4444
4445 // These can be scalar arguments or elements of an integer array type
4446 // passed directly. Clang may use those instead of "byval" aggregate
4447 // types to avoid forcing arguments to memory unnecessarily.
4448 if (GPR_idx != Num_GPR_Regs) {
4449 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
4450 FuncInfo->addLiveInAttr(VReg, Flags);
4451 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4452
4453 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
4454 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
4455 // value to MVT::i64 and then truncate to the correct register size.
4456 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
4457 } else {
4458 if (CallConv == CallingConv::Fast)
4459 ComputeArgOffset();
4460
4461 needsLoad = true;
4462 ArgSize = PtrByteSize;
4463 }
4464 if (CallConv != CallingConv::Fast || needsLoad)
4465 ArgOffset += 8;
4466 break;
4467
4468 case MVT::f32:
4469 case MVT::f64:
4470 // These can be scalar arguments or elements of a float array type
4471      // passed directly. The latter are used to implement ELFv2 homogeneous
4472 // float aggregates.
4473 if (FPR_idx != Num_FPR_Regs) {
4474 unsigned VReg;
4475
4476 if (ObjectVT == MVT::f32)
4477 VReg = MF.addLiveIn(FPR[FPR_idx],
4478 Subtarget.hasP8Vector()
4479 ? &PPC::VSSRCRegClass
4480 : &PPC::F4RCRegClass);
4481 else
4482 VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX()
4483 ? &PPC::VSFRCRegClass
4484 : &PPC::F8RCRegClass);
4485
4486 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4487 ++FPR_idx;
4488 } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) {
4489 // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
4490 // once we support fp <-> gpr moves.
4491
4492 // This can only ever happen in the presence of f32 array types,
4493 // since otherwise we never run out of FPRs before running out
4494 // of GPRs.
4495 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
4496 FuncInfo->addLiveInAttr(VReg, Flags);
4497 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4498
4499 if (ObjectVT == MVT::f32) {
4500 if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0))
4501 ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal,
4502 DAG.getConstant(32, dl, MVT::i32));
4503 ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
4504 }
4505
4506 ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal);
4507 } else {
4508 if (CallConv == CallingConv::Fast)
4509 ComputeArgOffset();
4510
4511 needsLoad = true;
4512 }
4513
4514 // When passing an array of floats, the array occupies consecutive
4515 // space in the argument area; only round up to the next doubleword
4516 // at the end of the array. Otherwise, each float takes 8 bytes.
4517 if (CallConv != CallingConv::Fast || needsLoad) {
4518 ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize;
4519 ArgOffset += ArgSize;
4520 if (Flags.isInConsecutiveRegsLast())
4521 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4522 }
4523 break;
4524 case MVT::v4f32:
4525 case MVT::v4i32:
4526 case MVT::v8i16:
4527 case MVT::v16i8:
4528 case MVT::v2f64:
4529 case MVT::v2i64:
4530 case MVT::v1i128:
4531 case MVT::f128:
4532 // These can be scalar arguments or elements of a vector array type
4533      // passed directly. The latter are used to implement ELFv2 homogeneous
4534 // vector aggregates.
4535 if (VR_idx != Num_VR_Regs) {
4536 unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
4537 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4538 ++VR_idx;
4539 } else {
4540 if (CallConv == CallingConv::Fast)
4541 ComputeArgOffset();
4542 needsLoad = true;
4543 }
4544 if (CallConv != CallingConv::Fast || needsLoad)
4545 ArgOffset += 16;
4546 break;
4547 }
4548
4549 // We need to load the argument to a virtual register if we determined
4550 // above that we ran out of physical registers of the appropriate type.
4551 if (needsLoad) {
4552 if (ObjSize < ArgSize && !isLittleEndian)
4553 CurArgOffset += ArgSize - ObjSize;
4554 int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, isImmutable);
4555 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4556 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
4557 }
4558
4559 InVals.push_back(ArgVal);
4560 }
4561
4562 // Area that is at least reserved in the caller of this function.
4563 unsigned MinReservedArea;
4564 if (HasParameterArea)
4565 MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize);
4566 else
4567 MinReservedArea = LinkageSize;
4568
4569 // Set the size that is at least reserved in caller of this function. Tail
4570 // call optimized functions' reserved stack space needs to be aligned so that
4571 // taking the difference between two stack areas will result in an aligned
4572 // stack.
4573 MinReservedArea =
4574 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
4575 FuncInfo->setMinReservedArea(MinReservedArea);
4576
4577 // If the function takes variable number of arguments, make a frame index for
4578 // the start of the first vararg value... for expansion of llvm.va_start.
4579  // The ELFv2 ABI spec states:
4580 // C programs that are intended to be *portable* across different compilers
4581 // and architectures must use the header file <stdarg.h> to deal with variable
4582 // argument lists.
4583 if (isVarArg && MFI.hasVAStart()) {
4584 int Depth = ArgOffset;
4585
4586 FuncInfo->setVarArgsFrameIndex(
4587 MFI.CreateFixedObject(PtrByteSize, Depth, true));
4588 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
4589
4590 // If this function is vararg, store any remaining integer argument regs
4591 // to their spots on the stack so that they may be loaded by dereferencing
4592 // the result of va_next.
4593 for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
4594 GPR_idx < Num_GPR_Regs; ++GPR_idx) {
4595 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4596 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4597 SDValue Store =
4598 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4599 MemOps.push_back(Store);
4600      // Increment the address by PtrByteSize for the next argument to store
4601 SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
4602 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4603 }
4604 }
4605
4606 if (!MemOps.empty())
4607 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4608
4609 return Chain;
4610}
4611
4612/// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
4613/// adjusted to accommodate the arguments for the tailcall.
4614static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
4615 unsigned ParamSize) {
4616
4617 if (!isTailCall) return 0;
4618
4619 PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
4620 unsigned CallerMinReservedArea = FI->getMinReservedArea();
4621 int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
4622 // Remember only if the new adjustment is bigger.
4623 if (SPDiff < FI->getTailCallSPDelta())
4624 FI->setTailCallSPDelta(SPDiff);
4625
4626 return SPDiff;
4627}
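A hedged numeric example: if the caller's MinReservedArea is 112 bytes but the tail-called function needs ParamSize = 144, the stack must grow before the jump, and the most negative delta seen so far is remembered:

  // SPDiff = 112 - 144 = -32  -> frame grows by 32 bytes for the tail call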
4628
4629static bool isFunctionGlobalAddress(SDValue Callee);
4630
4631static bool callsShareTOCBase(const Function *Caller, SDValue Callee,
4632 const TargetMachine &TM) {
4633 // It does not make sense to call callsShareTOCBase() with a caller that
4634 // is PC Relative since PC Relative callers do not have a TOC.
4635#ifndef NDEBUG
4636 const PPCSubtarget *STICaller = &TM.getSubtarget<PPCSubtarget>(*Caller);
4637  assert(!STICaller->isUsingPCRelativeCalls() &&
4638         "PC Relative callers do not have a TOC and cannot share a TOC Base");
4639#endif
4640
4641 // Callee is either a GlobalAddress or an ExternalSymbol. ExternalSymbols
4642 // don't have enough information to determine if the caller and callee share
4643 // the same TOC base, so we have to pessimistically assume they don't for
4644 // correctness.
4645 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
4646 if (!G)
4647 return false;
4648
4649 const GlobalValue *GV = G->getGlobal();
4650
4651 // If the callee is preemptable, then the static linker will use a plt-stub
4652 // which saves the toc to the stack, and needs a nop after the call
4653 // instruction to convert to a toc-restore.
4654 if (!TM.shouldAssumeDSOLocal(*Caller->getParent(), GV))
4655 return false;
4656
4657 // Functions with PC Relative enabled may clobber the TOC in the same DSO.
4658 // We may need a TOC restore in the situation where the caller requires a
4659 // valid TOC but the callee is PC Relative and does not.
4660 const Function *F = dyn_cast<Function>(GV);
4661 const GlobalAlias *Alias = dyn_cast<GlobalAlias>(GV);
4662
4663 // If we have an Alias we can try to get the function from there.
4664 if (Alias) {
4665 const GlobalObject *GlobalObj = Alias->getBaseObject();
4666 F = dyn_cast<Function>(GlobalObj);
4667 }
4668
4669 // If we still have no valid function pointer we do not have enough
4670 // information to determine if the callee uses PC Relative calls so we must
4671 // assume that it does.
4672 if (!F)
4673 return false;
4674
4675 // If the callee uses PC Relative we cannot guarantee that the callee won't
4676 // clobber the TOC of the caller and so we must assume that the two
4677 // functions do not share a TOC base.
4678 const PPCSubtarget *STICallee = &TM.getSubtarget<PPCSubtarget>(*F);
4679 if (STICallee->isUsingPCRelativeCalls())
4680 return false;
4681
4682 // If the GV is not a strong definition then we need to assume it can be
4683 // replaced by another function at link time. The function that replaces
4684 // it may not share the same TOC as the caller since the callee may be
4685 // replaced by a PC Relative version of the same function.
4686 if (!GV->isStrongDefinitionForLinker())
4687 return false;
4688
4689  // The medium and large code models are expected to provide a TOC large
4690  // enough to satisfy all data addressing needs of a module with a
4691  // single TOC.
4692 if (CodeModel::Medium == TM.getCodeModel() ||
4693 CodeModel::Large == TM.getCodeModel())
4694 return true;
4695
4696 // Any explicitly-specified sections and section prefixes must also match.
4697 // Also, if we're using -ffunction-sections, then each function is always in
4698 // a different section (the same is true for COMDAT functions).
4699 if (TM.getFunctionSections() || GV->hasComdat() || Caller->hasComdat() ||
4700 GV->getSection() != Caller->getSection())
4701 return false;
4702 if (const auto *F = dyn_cast<Function>(GV)) {
4703 if (F->getSectionPrefix() != Caller->getSectionPrefix())
4704 return false;
4705 }
4706
4707 return true;
4708}
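A hedged source-level illustration of the final section check, assuming the small code model so the medium/large early exit above is not taken: with an explicit section on the callee, the sections differ and callsShareTOCBase() returns false, so the call site keeps a TOC-restore nop. The functions are hypothetical:

  __attribute__((section(".text.hot")))
  int callee() { return 1; }
  int caller() { return callee(); } // sections differ -> no shared TOC base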
4709
4710static bool
4711needStackSlotPassParameters(const PPCSubtarget &Subtarget,
4712 const SmallVectorImpl<ISD::OutputArg> &Outs) {
4713  assert(Subtarget.is64BitELFABI());
4714
4715 const unsigned PtrByteSize = 8;
4716 const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4717
4718 static const MCPhysReg GPR[] = {
4719 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4720 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4721 };
4722 static const MCPhysReg VR[] = {
4723 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4724 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4725 };
4726
4727 const unsigned NumGPRs = array_lengthof(GPR);
4728 const unsigned NumFPRs = 13;
4729 const unsigned NumVRs = array_lengthof(VR);
4730 const unsigned ParamAreaSize = NumGPRs * PtrByteSize;
4731
4732 unsigned NumBytes = LinkageSize;
4733 unsigned AvailableFPRs = NumFPRs;
4734 unsigned AvailableVRs = NumVRs;
4735
4736 for (const ISD::OutputArg& Param : Outs) {
4737 if (Param.Flags.isNest()) continue;
4738
4739 if (CalculateStackSlotUsed(Subtarget, Param.VT, Param.ArgVT, Param.Flags,
4740 PtrByteSize, LinkageSize, ParamAreaSize,
4741 NumBytes, AvailableFPRs, AvailableVRs))
4742 return true;
4743 }
4744 return false;
4745}
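A hedged example of a callee signature that makes this return true: with only eight parameter GPRs, a ninth integer argument lands in the parameter save area.

  // hypothetical: a1..a8 travel in r3..r10, a9 spills to the save area
  long callee(long a1, long a2, long a3, long a4,
              long a5, long a6, long a7, long a8, long a9);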
4746
4747static bool hasSameArgumentList(const Function *CallerFn, const CallBase &CB) {
4748 if (CB.arg_size() != CallerFn->arg_size())
4749 return false;
4750
4751 auto CalleeArgIter = CB.arg_begin();
4752 auto CalleeArgEnd = CB.arg_end();
4753 Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin();
4754
4755 for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
4756 const Value* CalleeArg = *CalleeArgIter;
4757 const Value* CallerArg = &(*CallerArgIter);
4758 if (CalleeArg == CallerArg)
4759 continue;
4760
4761 // e.g. @caller([4 x i64] %a, [4 x i64] %b) {
4762 // tail call @callee([4 x i64] undef, [4 x i64] %b)
4763 // }
4764 // 1st argument of callee is undef and has the same type as caller.
4765 if (CalleeArg->getType() == CallerArg->getType() &&
4766 isa<UndefValue>(CalleeArg))
4767 continue;
4768
4769 return false;
4770 }
4771
4772 return true;
4773}
4774
4775// Returns true if TCO is possible between the caller's and callee's
4776// calling conventions.
4777static bool
4778areCallingConvEligibleForTCO_64SVR4(CallingConv::ID CallerCC,
4779 CallingConv::ID CalleeCC) {
4780 // Tail calls are possible with fastcc and ccc.
4781 auto isTailCallableCC = [] (CallingConv::ID CC){
4782 return CC == CallingConv::C || CC == CallingConv::Fast;
4783 };
4784 if (!isTailCallableCC(CallerCC) || !isTailCallableCC(CalleeCC))
4785 return false;
4786
4787  // We can safely tail call both fastcc and ccc callees from a C calling
4788 // convention caller. If the caller is fastcc, we may have less stack space
4789 // than a non-fastcc caller with the same signature so disable tail-calls in
4790 // that case.
4791 return CallerCC == CallingConv::C || CallerCC == CalleeCC;
4792}
4793
4794bool PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
4795 SDValue Callee, CallingConv::ID CalleeCC, const CallBase *CB, bool isVarArg,
4796 const SmallVectorImpl<ISD::OutputArg> &Outs,
4797 const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
4798 bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt;
4799
4800 if (DisableSCO && !TailCallOpt) return false;
4801
4802 // Variadic argument functions are not supported.
4803 if (isVarArg) return false;
4804
4805 auto &Caller = DAG.getMachineFunction().getFunction();
4806 // Check that the calling conventions are compatible for tco.
4807 if (!areCallingConvEligibleForTCO_64SVR4(Caller.getCallingConv(), CalleeCC))
4808 return false;
4809
4810  // A caller containing any byval parameter is not supported.
4811 if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); }))
4812 return false;
4813
4814  // Likewise, a callee containing any byval parameter is not supported.
4815  // Note: This is a quick workaround, because in some cases, e.g. when the
4816 // caller's stack size > callee's stack size, we are still able to apply
4817 // sibling call optimization. For example, gcc is able to do SCO for caller1
4818 // in the following example, but not for caller2.
4819 // struct test {
4820 // long int a;
4821 // char ary[56];
4822 // } gTest;
4823 // __attribute__((noinline)) int callee(struct test v, struct test *b) {
4824 // b->a = v.a;
4825 // return 0;
4826 // }
4827 // void caller1(struct test a, struct test c, struct test *b) {
4828 // callee(gTest, b); }
4829 // void caller2(struct test *b) { callee(gTest, b); }
4830 if (any_of(Outs, [](const ISD::OutputArg& OA) { return OA.Flags.isByVal(); }))
4831 return false;
4832
4833 // If callee and caller use different calling conventions, we cannot pass
4834 // parameters on stack since offsets for the parameter area may be different.
4835 if (Caller.getCallingConv() != CalleeCC &&
4836 needStackSlotPassParameters(Subtarget, Outs))
4837 return false;
4838
4839 // All variants of 64-bit ELF ABIs without PC-Relative addressing require that
4840 // the caller and callee share the same TOC for TCO/SCO. If the caller and
4841 // callee potentially have different TOC bases then we cannot tail call since
4842 // we need to restore the TOC pointer after the call.
4843 // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977
4844 // We cannot guarantee this for indirect calls or calls to external functions.
4845 // When PC-Relative addressing is used, the concept of the TOC is no longer
4846 // applicable so this check is not required.
4847 // Check first for indirect calls.
4848 if (!Subtarget.isUsingPCRelativeCalls() &&
4849 !isFunctionGlobalAddress(Callee) && !isa<ExternalSymbolSDNode>(Callee))
4850 return false;
4851
4852 // Check if we share the TOC base.
4853 if (!Subtarget.isUsingPCRelativeCalls() &&
4854 !callsShareTOCBase(&Caller, Callee, getTargetMachine()))
4855 return false;
4856
4857 // TCO allows altering callee ABI, so we don't have to check further.
4858 if (CalleeCC == CallingConv::Fast && TailCallOpt)
4859 return true;
4860
4861 if (DisableSCO) return false;
4862
4863  // If the callee uses the same argument list as the caller, we can apply
4864  // SCO. Otherwise, we need to check whether the callee needs stack slots
4865  // for passing arguments.
4866  // PC Relative tail calls may not have a CallBase; if there is none, we
4867  // cannot verify that the argument lists match, so we conservatively
4868  // assume that they do not.
4869 if (CB && !hasSameArgumentList(&Caller, *CB) &&
4870 needStackSlotPassParameters(Subtarget, Outs))
4871 return false;
4872 else if (!CB && needStackSlotPassParameters(Subtarget, Outs))
4873 return false;
4874
4875 return true;
4876}
4877
4878/// IsEligibleForTailCallOptimization - Check whether the call is eligible
4879/// for tail call optimization. Targets which want to do tail call
4880/// optimization should implement this function.
4881bool
4882PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
4883 CallingConv::ID CalleeCC,
4884 bool isVarArg,
4885 const SmallVectorImpl<ISD::InputArg> &Ins,
4886 SelectionDAG& DAG) const {
4887 if (!getTargetMachine().Options.GuaranteedTailCallOpt)
4888 return false;
4889
4890 // Variable argument functions are not supported.
4891 if (isVarArg)
4892 return false;
4893
4894 MachineFunction &MF = DAG.getMachineFunction();
4895 CallingConv::ID CallerCC = MF.getFunction().getCallingConv();
4896 if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
4897    // Functions containing byval parameters are not supported.
4898 for (unsigned i = 0; i != Ins.size(); i++) {
4899 ISD::ArgFlagsTy Flags = Ins[i].Flags;
4900 if (Flags.isByVal()) return false;
4901 }
4902
4903 // Non-PIC/GOT tail calls are supported.
4904 if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
4905 return true;
4906
4907 // At the moment we can only do local tail calls (in same module, hidden
4908 // or protected) if we are generating PIC.
4909 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
4910 return G->getGlobal()->hasHiddenVisibility()
4911 || G->getGlobal()->hasProtectedVisibility();
4912 }
4913
4914 return false;
4915}
4916
4917/// isBLACompatibleAddress - Return the immediate to use if the specified
4918/// 32-bit value is representable in the immediate field of a BxA instruction.
4919static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
4920 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
4921 if (!C) return nullptr;
4922
4923 int Addr = C->getZExtValue();
4924 if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero.
4925 SignExtend32<26>(Addr) != Addr)
4926 return nullptr; // Top 6 bits have to be sext of immediate.
4927
4928 return DAG
4929 .getConstant(
4930 (int)C->getZExtValue() >> 2, SDLoc(Op),
4931 DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()))
4932 .getNode();
4933}
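Hedged worked values for the two checks above:

  // 0x01FFFFFC: low two bits clear and SignExtend32<26> is the identity,
  //             so it is accepted and encoded as 0x01FFFFFC >> 2
  // 0x01FFFFFE: (Addr & 3) != 0 -> nullptr
  // 0x02000000: bit 25 set, SignExtend32<26> gives 0xFE000000 -> nullptr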
4934
4935namespace {
4936
4937struct TailCallArgumentInfo {
4938 SDValue Arg;
4939 SDValue FrameIdxOp;
4940 int FrameIdx = 0;
4941
4942 TailCallArgumentInfo() = default;
4943};
4944
4945} // end anonymous namespace
4946
4947/// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
4948static void StoreTailCallArgumentsToStackSlot(
4949 SelectionDAG &DAG, SDValue Chain,
4950 const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs,
4951 SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) {
4952 for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
4953 SDValue Arg = TailCallArgs[i].Arg;
4954 SDValue FIN = TailCallArgs[i].FrameIdxOp;
4955 int FI = TailCallArgs[i].FrameIdx;
4956 // Store relative to framepointer.
4957 MemOpChains.push_back(DAG.getStore(
4958 Chain, dl, Arg, FIN,
4959 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
4960 }
4961}
4962
4963/// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to
4964/// the appropriate stack slot for the tail call optimized function call.
4965static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain,
4966 SDValue OldRetAddr, SDValue OldFP,
4967 int SPDiff, const SDLoc &dl) {
4968 if (SPDiff) {
4969 // Calculate the new stack slot for the return address.
4970 MachineFunction &MF = DAG.getMachineFunction();
4971 const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
4972 const PPCFrameLowering *FL = Subtarget.getFrameLowering();
4973 bool isPPC64 = Subtarget.isPPC64();
4974 int SlotSize = isPPC64 ? 8 : 4;
4975 int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset();
4976 int NewRetAddr = MF.getFrameInfo().CreateFixedObject(SlotSize,
4977 NewRetAddrLoc, true);
4978 EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
4979 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
4980 Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
4981 MachinePointerInfo::getFixedStack(MF, NewRetAddr));
4982 }
4983 return Chain;
4984}
4985
4986/// CalculateTailCallArgDest - Remember the argument for later processing and
4987/// calculate its position.
4988static void
4989CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
4990 SDValue Arg, int SPDiff, unsigned ArgOffset,
4991 SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) {
4992 int Offset = ArgOffset + SPDiff;
4993 uint32_t OpSize = (Arg.getValueSizeInBits() + 7) / 8;
4994 int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
4995 EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
4996 SDValue FIN = DAG.getFrameIndex(FI, VT);
4997 TailCallArgumentInfo Info;
4998 Info.Arg = Arg;
4999 Info.FrameIdxOp = FIN;
5000 Info.FrameIdx = FI;
5001 TailCallArguments.push_back(Info);
5002}
5003
5004/// EmitTailCallLoadFPAndRetAddr - Emit loads from the frame pointer and
5005/// return address stack slots. Returns the chain as result and the loaded
5006/// frame pointers in LROpOut/FPOpOut. Used when tail calling.
5007SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(
5008 SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut,
5009 SDValue &FPOpOut, const SDLoc &dl) const {
5010 if (SPDiff) {
5011 // Load the LR and FP stack slot for later adjusting.
5012 EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
5013 LROpOut = getReturnAddrFrameIndex(DAG);
5014 LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo());
5015 Chain = SDValue(LROpOut.getNode(), 1);
5016 }
5017 return Chain;
5018}
5019
5020/// CreateCopyOfByValArgument - Make a copy of an aggregate at the address
5021/// specified by "Src" to address "Dst" of size "Size". Alignment information is
5022/// specified by the specific parameter attribute. The copy will be passed as
5023/// a byval function parameter.
5024/// Sometimes what we are copying is the end of a larger object, the part that
5025/// does not fit in registers.
5026static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
5027 SDValue Chain, ISD::ArgFlagsTy Flags,
5028 SelectionDAG &DAG, const SDLoc &dl) {
5029 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
5030 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode,
5031 Flags.getNonZeroByValAlign(), false, false, false,
5032 MachinePointerInfo(), MachinePointerInfo());
5033}
5034
5035/// LowerMemOpCallTo - Store the argument to the stack or remember it in case of
5036/// tail calls.
5037static void LowerMemOpCallTo(
5038 SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg,
5039 SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64,
5040 bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains,
5041 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) {
5042 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
5043 if (!isTailCall) {
5044 if (isVector) {
5045 SDValue StackPtr;
5046 if (isPPC64)
5047 StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
5048 else
5049 StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
5050 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
5051 DAG.getConstant(ArgOffset, dl, PtrVT));
5052 }
5053 MemOpChains.push_back(
5054 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
5055 // Calculate and remember argument location.
5056 } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset,
5057 TailCallArguments);
5058}
5059
5060static void
5061PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
5062 const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp,
5063 SDValue FPOp,
5064 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) {
5065 // Emit a sequence of copyto/copyfrom virtual registers for arguments that
5066 // might overwrite each other in case of tail call optimization.
5067 SmallVector<SDValue, 8> MemOpChains2;
5068 // Do not flag preceding copytoreg stuff together with the following stuff.
5069 InFlag = SDValue();
5070 StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments,
5071 MemOpChains2, dl);
5072 if (!MemOpChains2.empty())
5073 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
5074
5075 // Store the return address to the appropriate stack slot.
5076 Chain = EmitTailCallStoreFPAndRetAddr(DAG, Chain, LROp, FPOp, SPDiff, dl);
5077
5078 // Emit callseq_end just before tailcall node.
5079 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
5080 DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
5081 InFlag = Chain.getValue(1);
5082}
5083
5084// Is this global address that of a function that can be called by name? (as
5085// opposed to something that must hold a descriptor for an indirect call).
5086static bool isFunctionGlobalAddress(SDValue Callee) {
5087 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
5088 if (Callee.getOpcode() == ISD::GlobalTLSAddress ||
5089 Callee.getOpcode() == ISD::TargetGlobalTLSAddress)
5090 return false;
5091
5092 return G->getGlobal()->getValueType()->isFunctionTy();
5093 }
5094
5095 return false;
5096}
5097
5098SDValue PPCTargetLowering::LowerCallResult(
5099 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
5100 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5101 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
5102 SmallVector<CCValAssign, 16> RVLocs;
5103 CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
5104 *DAG.getContext());
5105
5106 CCRetInfo.AnalyzeCallResult(
5107 Ins, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
5108 ? RetCC_PPC_Cold
5109 : RetCC_PPC);
5110
5111 // Copy all of the result registers out of their specified physreg.
5112 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
5113 CCValAssign &VA = RVLocs[i];
5114    assert(VA.isRegLoc() && "Can only return in registers!");
5115
5116 SDValue Val;
5117
5118 if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
5119 SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
5120 InFlag);
5121 Chain = Lo.getValue(1);
5122 InFlag = Lo.getValue(2);
5123 VA = RVLocs[++i]; // skip ahead to next loc
5124 SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
5125 InFlag);
5126 Chain = Hi.getValue(1);
5127 InFlag = Hi.getValue(2);
5128 if (!Subtarget.isLittleEndian())
5129 std::swap (Lo, Hi);
5130 Val = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, Lo, Hi);
5131 } else {
5132 Val = DAG.getCopyFromReg(Chain, dl,
5133 VA.getLocReg(), VA.getLocVT(), InFlag);
5134 Chain = Val.getValue(1);
5135 InFlag = Val.getValue(2);
5136 }
5137
5138 switch (VA.getLocInfo()) {
5139    default: llvm_unreachable("Unknown loc info!");
5140 case CCValAssign::Full: break;
5141 case CCValAssign::AExt:
5142 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5143 break;
5144 case CCValAssign::ZExt:
5145 Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val,
5146 DAG.getValueType(VA.getValVT()));
5147 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5148 break;
5149 case CCValAssign::SExt:
5150 Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val,
5151 DAG.getValueType(VA.getValVT()));
5152 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5153 break;
5154 }
5155
5156 InVals.push_back(Val);
5157 }
5158
5159 return Chain;
5160}
5161
5162static bool isIndirectCall(const SDValue &Callee, SelectionDAG &DAG,
5163 const PPCSubtarget &Subtarget, bool isPatchPoint) {
5164 // PatchPoint calls are not indirect.
5165 if (isPatchPoint)
5166 return false;
5167
5168 if (isFunctionGlobalAddress(Callee) || isa<ExternalSymbolSDNode>(Callee))
5169 return false;
5170
5171  // Darwin and 32-bit ELF can use a BLA. The descriptor-based ABIs cannot,
5172  // because the immediate function pointer points to a descriptor instead of
5173 // a function entry point. The ELFv2 ABI cannot use a BLA because the function
5174 // pointer immediate points to the global entry point, while the BLA would
5175 // need to jump to the local entry point (see rL211174).
5176 if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI() &&
5177 isBLACompatibleAddress(Callee, DAG))
5178 return false;
5179
5180 return true;
5181}
5182
5183// AIX and 64-bit ELF ABIs w/o PCRel require a TOC save/restore around calls.
5184static inline bool isTOCSaveRestoreRequired(const PPCSubtarget &Subtarget) {
5185 return Subtarget.isAIXABI() ||
5186 (Subtarget.is64BitELFABI() && !Subtarget.isUsingPCRelativeCalls());
5187}
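A hedged summary of the predicate:

  // AIX ABI                       -> true
  // 64-bit ELF, TOC-based calls   -> true
  // 64-bit ELF, PC-relative calls -> false
  // 32-bit ELF                    -> false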
5188
5189static unsigned getCallOpcode(PPCTargetLowering::CallFlags CFlags,
5190 const Function &Caller,
5191 const SDValue &Callee,
5192 const PPCSubtarget &Subtarget,
5193 const TargetMachine &TM) {
5194 if (CFlags.IsTailCall)
5195 return PPCISD::TC_RETURN;
5196
5197 // This is a call through a function pointer.
5198 if (CFlags.IsIndirect) {
5199    // AIX and the 64-bit ELF ABIs need to maintain the TOC pointer across
5200 // indirect calls. The save of the caller's TOC pointer to the stack will be
5201 // inserted into the DAG as part of call lowering. The restore of the TOC
5202 // pointer is modeled by using a pseudo instruction for the call opcode that
5203    // represents the two-instruction sequence of an indirect branch and link,
5204    // immediately followed by a load of the TOC pointer from the stack save
5205 // slot into gpr2. For 64-bit ELFv2 ABI with PCRel, do not restore the TOC
5206 // as it is not saved or used.
5207 return isTOCSaveRestoreRequired(Subtarget) ? PPCISD::BCTRL_LOAD_TOC
5208 : PPCISD::BCTRL;
5209 }
5210
5211 if (Subtarget.isUsingPCRelativeCalls()) {
5212    assert(Subtarget.is64BitELFABI() && "PC Relative is only on ELF ABI.");
5213 return PPCISD::CALL_NOTOC;
5214 }
5215
5216  // The ABIs that maintain a TOC pointer across calls need a nop
5217  // immediately following the call instruction if the caller and callee may
5218  // have different TOC bases. At link time, if the linker determines that
5219  // the calls may not share a TOC base, the call is redirected to a
5220  // trampoline inserted by the linker. The trampoline will (among other
5221  // things) save the caller's TOC pointer at an ABI-designated offset in the
5222  // linkage area, and the linker will rewrite the nop to be a load of the
5223  // TOC pointer from the linkage area into gpr2.
5224 if (Subtarget.isAIXABI() || Subtarget.is64BitELFABI())
5225 return callsShareTOCBase(&Caller, Callee, TM) ? PPCISD::CALL
5226 : PPCISD::CALL_NOP;
5227
5228 return PPCISD::CALL;
5229}
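A hedged sketch of what the CALL_NOP case becomes at the machine level; the TOC save slot offset is ABI-dependent (24 bytes into the linkage area under ELFv2):

  //   bl callee   ; PPCISD::CALL_NOP
  //   nop         ; linker may rewrite this to: ld r2, 24(r1)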
5230
5231static SDValue transformCallee(const SDValue &Callee, SelectionDAG &DAG,
5232 const SDLoc &dl, const PPCSubtarget &Subtarget) {
5233 if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI())
5234 if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG))
5235 return SDValue(Dest, 0);
5236
5237 // Returns true if the callee is local, and false otherwise.
5238 auto isLocalCallee = [&]() {
5239 const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
5240 const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
5241 const GlobalValue *GV = G ? G->getGlobal() : nullptr;
5242
5243 return DAG.getTarget().shouldAssumeDSOLocal(*Mod, GV) &&
5244 !dyn_cast_or_null<GlobalIFunc>(GV);
5245 };
5246
5247 // The PLT is only used in 32-bit ELF PIC mode. Attempting to use the PLT in
5248 // a static relocation model causes some versions of GNU LD (2.17.50, at
5249 // least) to force BSS-PLT, instead of secure-PLT, even if all objects are
5250 // built with secure-PLT.
5251 bool UsePlt =
5252 Subtarget.is32BitELFABI() && !isLocalCallee() &&
5253 Subtarget.getTargetMachine().getRelocationModel() == Reloc::PIC_;
5254
5255 const auto getAIXFuncEntryPointSymbolSDNode = [&](const GlobalValue *GV) {
5256 const TargetMachine &TM = Subtarget.getTargetMachine();
5257 const TargetLoweringObjectFile *TLOF = TM.getObjFileLowering();
5258 MCSymbolXCOFF *S =
5259 cast<MCSymbolXCOFF>(TLOF->getFunctionEntryPointSymbol(GV, TM));
5260
5261 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
5262 return DAG.getMCSymbol(S, PtrVT);
5263 };
5264
5265 if (isFunctionGlobalAddress(Callee)) {
5266 const GlobalValue *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
5267
5268 if (Subtarget.isAIXABI()) {
5269      assert(!isa<GlobalIFunc>(GV) && "IFunc is not supported on AIX.");
5270 return getAIXFuncEntryPointSymbolSDNode(GV);
5271 }
5272 return DAG.getTargetGlobalAddress(GV, dl, Callee.getValueType(), 0,
5273 UsePlt ? PPCII::MO_PLT : 0);
5274 }
5275
5276 if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
5277 const char *SymName = S->getSymbol();
5278 if (Subtarget.isAIXABI()) {
5279 // If there exists a user-declared function whose name is the same as the
5280 // ExternalSymbol's, then we pick up the user-declared version.
5281 const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
5282 if (const Function *F =
5283 dyn_cast_or_null<Function>(Mod->getNamedValue(SymName)))
5284 return getAIXFuncEntryPointSymbolSDNode(F);
5285
5286 // On AIX, direct function calls reference the symbol for the function's
5287 // entry point, which is named by prepending a "." before the function's
5288 // C-linkage name. A Qualname is returned here because an external
5289 // function entry point is a csect with XTY_ER property.
5290 const auto getExternalFunctionEntryPointSymbol = [&](StringRef SymName) {
5291 auto &Context = DAG.getMachineFunction().getMMI().getContext();
5292 MCSectionXCOFF *Sec = Context.getXCOFFSection(
5293 (Twine(".") + Twine(SymName)).str(), SectionKind::getMetadata(),
5294 XCOFF::CsectProperties(XCOFF::XMC_PR, XCOFF::XTY_ER));
5295 return Sec->getQualNameSymbol();
5296 };
5297
5298 SymName = getExternalFunctionEntryPointSymbol(SymName)->getName().data();
5299 }
5300 return DAG.getTargetExternalSymbol(SymName, Callee.getValueType(),
5301 UsePlt ? PPCII::MO_PLT : 0);
5302 }
5303
5304 // No transformation needed.
5305  assert(Callee.getNode() && "What no callee?");
5306 return Callee;
5307}
5308
5309static SDValue getOutputChainFromCallSeq(SDValue CallSeqStart) {
5310  assert(CallSeqStart.getOpcode() == ISD::CALLSEQ_START &&
5311         "Expected a CALLSEQ_STARTSDNode.");
5312
5313 // The last operand is the chain, except when the node has glue. If the node
5314 // has glue, then the last operand is the glue, and the chain is the second
5315 // last operand.
5316 SDValue LastValue = CallSeqStart.getValue(CallSeqStart->getNumValues() - 1);
5317 if (LastValue.getValueType() != MVT::Glue)
5318 return LastValue;
5319
5320 return CallSeqStart.getValue(CallSeqStart->getNumValues() - 2);
5321}
5322
5323// Creates the node that moves a function's address into the count register
5324// to prepare for an indirect call instruction.
5325static void prepareIndirectCall(SelectionDAG &DAG, SDValue &Callee,
5326 SDValue &Glue, SDValue &Chain,
5327 const SDLoc &dl) {
5328 SDValue MTCTROps[] = {Chain, Callee, Glue};
5329 EVT ReturnTypes[] = {MVT::Other, MVT::Glue};
5330 Chain = DAG.getNode(PPCISD::MTCTR, dl, makeArrayRef(ReturnTypes, 2),
5331 makeArrayRef(MTCTROps, Glue.getNode() ? 3 : 2));
5332 // The glue is the second value produced.
5333 Glue = Chain.getValue(1);
5334}
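A hedged sketch of the machine-level shape this prepares; the branch itself is emitted later from the call opcode chosen in getCallOpcode():

  //   mtctr rN    ; PPCISD::MTCTR, rN holds the function address
  //   bctrl       ; indirect branch and link through the count register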
5335
5336static void prepareDescriptorIndirectCall(SelectionDAG &DAG, SDValue &Callee,
5337 SDValue &Glue, SDValue &Chain,
5338 SDValue CallSeqStart,
5339 const CallBase *CB, const SDLoc &dl,
5340 bool hasNest,
5341 const PPCSubtarget &Subtarget) {
5342 // Function pointers in the 64-bit SVR4 ABI do not point to the function
5343 // entry point, but to the function descriptor (the function entry point
5344 // address is part of the function descriptor though).
5345 // The function descriptor is a three doubleword structure with the
5346 // following fields: function entry point, TOC base address and
5347 // environment pointer.
5348 // Thus for a call through a function pointer, the following actions need
5349 // to be performed:
5350 // 1. Save the TOC of the caller in the TOC save area of its stack
5351 // frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()).
5352 // 2. Load the address of the function entry point from the function
5353 // descriptor.
5354 // 3. Load the TOC of the callee from the function descriptor into r2.
5355 // 4. Load the environment pointer from the function descriptor into
5356 // r11.
5357 // 5. Branch to the function entry point address.
5358 // 6. On return of the callee, the TOC of the caller needs to be
5359 // restored (this is done in FinishCall()).
5360 //
5361 // The loads are scheduled at the beginning of the call sequence, and the
5362 // register copies are flagged together to ensure that no other
5363 // operations can be scheduled in between. E.g. without flagging the
5364 // copies together, a TOC access in the caller could be scheduled between
5365 // the assignment of the callee TOC and the branch to the callee, which leads
5366 // to incorrect code.
5367
5368 // Start by loading the function address from the descriptor.
5369 SDValue LDChain = getOutputChainFromCallSeq(CallSeqStart);
5370 auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors()
5371 ? (MachineMemOperand::MODereferenceable |
5372 MachineMemOperand::MOInvariant)
5373 : MachineMemOperand::MONone;
5374
5375 MachinePointerInfo MPI(CB ? CB->getCalledOperand() : nullptr);
5376
5377 // Registers used in building the DAG.
5378 const MCRegister EnvPtrReg = Subtarget.getEnvironmentPointerRegister();
5379 const MCRegister TOCReg = Subtarget.getTOCPointerRegister();
5380
5381 // Offsets of descriptor members.
5382 const unsigned TOCAnchorOffset = Subtarget.descriptorTOCAnchorOffset();
5383 const unsigned EnvPtrOffset = Subtarget.descriptorEnvironmentPointerOffset();
5384
5385 const MVT RegVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
5386 const unsigned Alignment = Subtarget.isPPC64() ? 8 : 4;
5387
5388  // One load for the function's entry point address.
5389 SDValue LoadFuncPtr = DAG.getLoad(RegVT, dl, LDChain, Callee, MPI,
5390 Alignment, MMOFlags);
5391
5392 // One for loading the TOC anchor for the module that contains the called
5393 // function.
5394 SDValue TOCOff = DAG.getIntPtrConstant(TOCAnchorOffset, dl);
5395 SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, Callee, TOCOff);
5396 SDValue TOCPtr =
5397 DAG.getLoad(RegVT, dl, LDChain, AddTOC,
5398 MPI.getWithOffset(TOCAnchorOffset), Alignment, MMOFlags);
5399
5400 // One for loading the environment pointer.
5401 SDValue PtrOff = DAG.getIntPtrConstant(EnvPtrOffset, dl);
5402 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, RegVT, Callee, PtrOff);
5403 SDValue LoadEnvPtr =
5404 DAG.getLoad(RegVT, dl, LDChain, AddPtr,
5405 MPI.getWithOffset(EnvPtrOffset), Alignment, MMOFlags);
5406
5407
5408 // Then copy the newly loaded TOC anchor to the TOC pointer.
5409 SDValue TOCVal = DAG.getCopyToReg(Chain, dl, TOCReg, TOCPtr, Glue);
5410 Chain = TOCVal.getValue(0);
5411 Glue = TOCVal.getValue(1);
5412
5413 // If the function call has an explicit 'nest' parameter, it takes the
5414 // place of the environment pointer.
5415  assert((!hasNest || !Subtarget.isAIXABI()) &&
5416         "Nest parameter is not supported on AIX.");
5417 if (!hasNest) {
5418 SDValue EnvVal = DAG.getCopyToReg(Chain, dl, EnvPtrReg, LoadEnvPtr, Glue);
5419 Chain = EnvVal.getValue(0);
5420 Glue = EnvVal.getValue(1);
5421 }
5422
5423 // The rest of the indirect call sequence is the same as the non-descriptor
5424 // DAG.
5425 prepareIndirectCall(DAG, LoadFuncPtr, Glue, Chain, dl);
5426}
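For reference, the descriptor dereferenced above can be pictured as the struct below. This is a sketch assuming the conventional 64-bit ELFv1 offsets of 0, 8 and 16; the real offsets come from the Subtarget hooks used in the code:

    struct FunctionDescriptor {
      uint64_t EntryPoint; // offset 0: loaded and moved into the count register
      uint64_t TOCBase;    // descriptorTOCAnchorOffset(): copied into r2
      uint64_t EnvPointer; // descriptorEnvironmentPointerOffset(): copied into r11
    };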
5427
5428static void
5429buildCallOperands(SmallVectorImpl<SDValue> &Ops,
5430 PPCTargetLowering::CallFlags CFlags, const SDLoc &dl,
5431 SelectionDAG &DAG,
5432 SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
5433 SDValue Glue, SDValue Chain, SDValue &Callee, int SPDiff,
5434 const PPCSubtarget &Subtarget) {
5435 const bool IsPPC64 = Subtarget.isPPC64();
5436 // MVT for a general purpose register.
5437 const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
5438
5439 // First operand is always the chain.
5440 Ops.push_back(Chain);
5441
5442 // If it's a direct call pass the callee as the second operand.
5443 if (!CFlags.IsIndirect)
5444 Ops.push_back(Callee);
5445 else {
5446    assert(!CFlags.IsPatchPoint && "Patch point calls are not indirect.");
5447
5448 // For the TOC based ABIs, we have saved the TOC pointer to the linkage area
5449 // on the stack (this would have been done in `LowerCall_64SVR4` or
5450 // `LowerCall_AIX`). The call instruction is a pseudo instruction that
5451 // represents both the indirect branch and a load that restores the TOC
5452 // pointer from the linkage area. The operand for the TOC restore is an add
5453 // of the TOC save offset to the stack pointer. This must be the second
5454 // operand: after the chain input but before any other variadic arguments.
5455 // For 64-bit ELFv2 ABI with PCRel, do not restore the TOC as it is not
5456 // saved or used.
5457 if (isTOCSaveRestoreRequired(Subtarget)) {
5458 const MCRegister StackPtrReg = Subtarget.getStackPointerRegister();
5459
5460 SDValue StackPtr = DAG.getRegister(StackPtrReg, RegVT);
5461 unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
5462 SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
5463 SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, StackPtr, TOCOff);
5464 Ops.push_back(AddTOC);
5465 }
5466
5467 // Add the register used for the environment pointer.
5468 if (Subtarget.usesFunctionDescriptors() && !CFlags.HasNest)
5469 Ops.push_back(DAG.getRegister(Subtarget.getEnvironmentPointerRegister(),
5470 RegVT));
5471
5472
5473 // Add CTR register as callee so a bctr can be emitted later.
5474 if (CFlags.IsTailCall)
5475 Ops.push_back(DAG.getRegister(IsPPC64 ? PPC::CTR8 : PPC::CTR, RegVT));
5476 }
5477
5478 // If this is a tail call add stack pointer delta.
5479 if (CFlags.IsTailCall)
5480 Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32));
5481
5482 // Add argument registers to the end of the list so that they are known live
5483 // into the call.
5484 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
5485 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
5486 RegsToPass[i].second.getValueType()));
5487
5488 // We cannot add R2/X2 as an operand here for PATCHPOINT, because there is
5489 // no way to mark dependencies as implicit here.
5490 // We will add the R2/X2 dependency in EmitInstrWithCustomInserter.
5491 if ((Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) &&
5492 !CFlags.IsPatchPoint && !Subtarget.isUsingPCRelativeCalls())
5493 Ops.push_back(DAG.getRegister(Subtarget.getTOCPointerRegister(), RegVT));
5494
5495 // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls
5496 if (CFlags.IsVarArg && Subtarget.is32BitELFABI())
5497 Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32));
5498
5499 // Add a register mask operand representing the call-preserved registers.
5500 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
5501 const uint32_t *Mask =
5502 TRI->getCallPreservedMask(DAG.getMachineFunction(), CFlags.CallConv);
5503  assert(Mask && "Missing call preserved mask for calling convention");
5504 Ops.push_back(DAG.getRegisterMask(Mask));
5505
5506 // If the glue is valid, it is the last operand.
5507 if (Glue.getNode())
5508 Ops.push_back(Glue);
5509}
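Putting the pieces together, the operand list for a typical indirect, non-tail call on a descriptor-based TOC ABI would look roughly like this (an illustrative ordering derived from the code above, not literal output):

    // Ops = { Chain,
    //         AddTOC,           // SP + TOC save offset, drives the TOC restore
    //         R11,              // environment pointer (descriptor ABIs, no nest)
    //         <argument regs>,  // RegsToPass, in order
    //         X2,               // TOC base (skipped for PATCHPOINT / PC-relative)
    //         RegisterMask,     // call-preserved registers
    //         Glue }            // only if valid, always last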
5510
5511SDValue PPCTargetLowering::FinishCall(
5512 CallFlags CFlags, const SDLoc &dl, SelectionDAG &DAG,
5513 SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue Glue,
5514 SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff,
5515 unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins,
5516 SmallVectorImpl<SDValue> &InVals, const CallBase *CB) const {
5517
5518 if ((Subtarget.is64BitELFABI() && !Subtarget.isUsingPCRelativeCalls()) ||
5519 Subtarget.isAIXABI())
5520 setUsesTOCBasePtr(DAG);
5521
5522 unsigned CallOpc =
5523 getCallOpcode(CFlags, DAG.getMachineFunction().getFunction(), Callee,
5524 Subtarget, DAG.getTarget());
5525
5526 if (!CFlags.IsIndirect)
5527 Callee = transformCallee(Callee, DAG, dl, Subtarget);
5528 else if (Subtarget.usesFunctionDescriptors())
5529 prepareDescriptorIndirectCall(DAG, Callee, Glue, Chain, CallSeqStart, CB,
5530 dl, CFlags.HasNest, Subtarget);
5531 else
5532 prepareIndirectCall(DAG, Callee, Glue, Chain, dl);
5533
5534 // Build the operand list for the call instruction.
5535 SmallVector<SDValue, 8> Ops;
5536 buildCallOperands(Ops, CFlags, dl, DAG, RegsToPass, Glue, Chain, Callee,
5537 SPDiff, Subtarget);
5538
5539 // Emit tail call.
5540 if (CFlags.IsTailCall) {
5541    // Indirect tail calls when using PC Relative calls do not have the same
5542    // constraints.
5543    assert(((Callee.getOpcode() == ISD::Register &&
5544             cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
5545            Callee.getOpcode() == ISD::TargetExternalSymbol ||
5546            Callee.getOpcode() == ISD::TargetGlobalAddress ||
5547            isa<ConstantSDNode>(Callee) ||
5548            (CFlags.IsIndirect && Subtarget.isUsingPCRelativeCalls())) &&
5549           "Expecting a global address, external symbol, absolute value, "
5550           "register or an indirect tail call when PC Relative calls are "
5551           "used.");
5552 // PC Relative calls also use TC_RETURN as the way to mark tail calls.
5553    assert(CallOpc == PPCISD::TC_RETURN &&
5554           "Unexpected call opcode for a tail call.");
5555 DAG.getMachineFunction().getFrameInfo().setHasTailCall();
5556 return DAG.getNode(CallOpc, dl, MVT::Other, Ops);
5557 }
5558
5559 std::array<EVT, 2> ReturnTypes = {{MVT::Other, MVT::Glue}};
5560 Chain = DAG.getNode(CallOpc, dl, ReturnTypes, Ops);
5561 DAG.addNoMergeSiteInfo(Chain.getNode(), CFlags.NoMerge);
5562 Glue = Chain.getValue(1);
5563
5564 // When performing tail call optimization the callee pops its arguments off
5565 // the stack. Account for this here so these bytes can be pushed back on in
5566 // PPCFrameLowering::eliminateCallFramePseudoInstr.
5567 int BytesCalleePops = (CFlags.CallConv == CallingConv::Fast &&
5568 getTargetMachine().Options.GuaranteedTailCallOpt)
5569 ? NumBytes
5570 : 0;
5571
5572 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
5573 DAG.getIntPtrConstant(BytesCalleePops, dl, true),
5574 Glue, dl);
5575 Glue = Chain.getValue(1);
5576
5577 return LowerCallResult(Chain, Glue, CFlags.CallConv, CFlags.IsVarArg, Ins, dl,
5578 DAG, InVals);
5579}
5580
5581SDValue
5582PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
5583 SmallVectorImpl<SDValue> &InVals) const {
5584 SelectionDAG &DAG = CLI.DAG;
5585 SDLoc &dl = CLI.DL;
5586 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
5587 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
5588 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
5589 SDValue Chain = CLI.Chain;
5590 SDValue Callee = CLI.Callee;
5591 bool &isTailCall = CLI.IsTailCall;
5592 CallingConv::ID CallConv = CLI.CallConv;
5593 bool isVarArg = CLI.IsVarArg;
5594 bool isPatchPoint = CLI.IsPatchPoint;
5595 const CallBase *CB = CLI.CB;
5596
5597 if (isTailCall) {
5598 if (Subtarget.useLongCalls() && !(CB && CB->isMustTailCall()))
5599 isTailCall = false;
5600 else if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
5601 isTailCall = IsEligibleForTailCallOptimization_64SVR4(
5602 Callee, CallConv, CB, isVarArg, Outs, Ins, DAG);
5603 else
5604 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
5605 Ins, DAG);
5606 if (isTailCall) {
5607 ++NumTailCalls;
5608 if (!getTargetMachine().Options.GuaranteedTailCallOpt)
5609 ++NumSiblingCalls;
5610
5611 // PC Relative calls no longer guarantee that the callee is a Global
5612 // Address Node. The callee could be an indirect tail call in which
5613 // case the SDValue for the callee could be a load (to load the address
5614 // of a function pointer) or it may be a register copy (to move the
5615 // address of the callee from a function parameter into a virtual
5616      // register). It may also be an ExternalSymbolSDNode (e.g. memcpy).
5617      assert((Subtarget.isUsingPCRelativeCalls() ||
5618              isa<GlobalAddressSDNode>(Callee)) &&
5619             "Callee should be an llvm::Function object.");
5620
5621      LLVM_DEBUG(dbgs() << "TCO caller: " << DAG.getMachineFunction().getName()
5622                        << "\nTCO callee: ");
5623      LLVM_DEBUG(Callee.dump());
5624 }
5625 }
5626
5627 if (!isTailCall && CB && CB->isMustTailCall())
5628 report_fatal_error("failed to perform tail call elimination on a call "
5629 "site marked musttail");
5630
5631  // When long calls (i.e. indirect calls) are always used, calls are always
5632  // made via a function pointer. If we have a function name, first translate
5633  // it into a pointer.
5634 if (Subtarget.useLongCalls() && isa<GlobalAddressSDNode>(Callee) &&
5635 !isTailCall)
5636 Callee = LowerGlobalAddress(Callee, DAG);
5637
5638 CallFlags CFlags(
5639 CallConv, isTailCall, isVarArg, isPatchPoint,
5640 isIndirectCall(Callee, DAG, Subtarget, isPatchPoint),
5641 // hasNest
5642 Subtarget.is64BitELFABI() &&
5643 any_of(Outs, [](ISD::OutputArg Arg) { return Arg.Flags.isNest(); }),
5644 CLI.NoMerge);
5645
5646 if (Subtarget.isAIXABI())
5647 return LowerCall_AIX(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5648 InVals, CB);
5649
5650  assert(Subtarget.isSVR4ABI());
5651 if (Subtarget.isPPC64())
5652 return LowerCall_64SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5653 InVals, CB);
5654 return LowerCall_32SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5655 InVals, CB);
5656}
5657
5658SDValue PPCTargetLowering::LowerCall_32SVR4(
5659 SDValue Chain, SDValue Callee, CallFlags CFlags,
5660 const SmallVectorImpl<ISD::OutputArg> &Outs,
5661 const SmallVectorImpl<SDValue> &OutVals,
5662 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5663 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5664 const CallBase *CB) const {
5665 // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
5666 // of the 32-bit SVR4 ABI stack frame layout.
5667
5668 const CallingConv::ID CallConv = CFlags.CallConv;
5669 const bool IsVarArg = CFlags.IsVarArg;
5670 const bool IsTailCall = CFlags.IsTailCall;
5671
5672  assert((CallConv == CallingConv::C ||
5673          CallConv == CallingConv::Cold ||
5674          CallConv == CallingConv::Fast) && "Unknown calling convention!");
5675
5676 const Align PtrAlign(4);
5677
5678 MachineFunction &MF = DAG.getMachineFunction();
5679
5680  // Mark this function as potentially containing a function that contains a
5681  // tail call. As a consequence the frame pointer will be used for dynamic
5682  // allocation and for restoring the caller's stack pointer in this function's
5683  // epilog. This is done because, by tail calling, the called function might
5684  // overwrite the value in this function's (MF) stack pointer stack slot 0(SP).
5685 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5686 CallConv == CallingConv::Fast)
5687 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5688
5689 // Count how many bytes are to be pushed on the stack, including the linkage
5690 // area, parameter list area and the part of the local variable space which
5691 // contains copies of aggregates which are passed by value.
5692
5693 // Assign locations to all of the outgoing arguments.
5694 SmallVector<CCValAssign, 16> ArgLocs;
5695 PPCCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
5696
5697 // Reserve space for the linkage area on the stack.
5698 CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
5699 PtrAlign);
5700 if (useSoftFloat())
5701 CCInfo.PreAnalyzeCallOperands(Outs);
5702
5703 if (IsVarArg) {
5704 // Handle fixed and variable vector arguments differently.
5705 // Fixed vector arguments go into registers as long as registers are
5706 // available. Variable vector arguments always go into memory.
5707 unsigned NumArgs = Outs.size();
5708
5709 for (unsigned i = 0; i != NumArgs; ++i) {
5710 MVT ArgVT = Outs[i].VT;
5711 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
5712 bool Result;
5713
5714 if (Outs[i].IsFixed) {
5715 Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
5716 CCInfo);
5717 } else {
5718 Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
5719 ArgFlags, CCInfo);
5720 }
5721
5722 if (Result) {
5723#ifndef NDEBUG
5724 errs() << "Call operand #" << i << " has unhandled type "
5725 << EVT(ArgVT).getEVTString() << "\n";
5726#endif
5727      llvm_unreachable(nullptr);
5728 }
5729 }
5730 } else {
5731 // All arguments are treated the same.
5732 CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
5733 }
5734 CCInfo.clearWasPPCF128();
5735
5736 // Assign locations to all of the outgoing aggregate by value arguments.
5737 SmallVector<CCValAssign, 16> ByValArgLocs;
5738 CCState CCByValInfo(CallConv, IsVarArg, MF, ByValArgLocs, *DAG.getContext());
5739
5740 // Reserve stack space for the allocations in CCInfo.
5741 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrAlign);
5742
5743 CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);
5744
5745 // Size of the linkage area, parameter list area and the part of the local
5746 // space variable where copies of aggregates which are passed by value are
5747 // stored.
5748 unsigned NumBytes = CCByValInfo.getNextStackOffset();
5749
5750 // Calculate by how many bytes the stack has to be adjusted in case of tail
5751 // call optimization.
5752 int SPDiff = CalculateTailCallSPDiff(DAG, IsTailCall, NumBytes);
5753
5754 // Adjust the stack pointer for the new arguments...
5755 // These operations are automatically eliminated by the prolog/epilog pass
5756 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
5757 SDValue CallSeqStart = Chain;
5758
5759  // Load the return address and frame pointer so they can be moved somewhere
5760  // else later.
5761 SDValue LROp, FPOp;
5762 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
5763
5764 // Set up a copy of the stack pointer for use loading and storing any
5765 // arguments that may not fit in the registers available for argument
5766 // passing.
5767 SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
5768
5769 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
5770 SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
5771 SmallVector<SDValue, 8> MemOpChains;
5772
5773 bool seenFloatArg = false;
5774 // Walk the register/memloc assignments, inserting copies/loads.
5775 // i - Tracks the index into the list of registers allocated for the call
5776 // RealArgIdx - Tracks the index into the list of actual function arguments
5777 // j - Tracks the index into the list of byval arguments
5778 for (unsigned i = 0, RealArgIdx = 0, j = 0, e = ArgLocs.size();
5779 i != e;
5780 ++i, ++RealArgIdx) {
5781 CCValAssign &VA = ArgLocs[i];
5782 SDValue Arg = OutVals[RealArgIdx];
5783 ISD::ArgFlagsTy Flags = Outs[RealArgIdx].Flags;
5784
5785 if (Flags.isByVal()) {
5786 // Argument is an aggregate which is passed by value, thus we need to
5787 // create a copy of it in the local variable space of the current stack
5788 // frame (which is the stack frame of the caller) and pass the address of
5789 // this copy to the callee.
5790      assert((j < ByValArgLocs.size()) && "Index out of bounds!");
5791 CCValAssign &ByValVA = ByValArgLocs[j++];
5792      assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");
5793
5794      // Memory reserved in the local variable space of the caller's stack frame.
5795 unsigned LocMemOffset = ByValVA.getLocMemOffset();
5796
5797 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
5798 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
5799 StackPtr, PtrOff);
5800
5801 // Create a copy of the argument in the local area of the current
5802 // stack frame.
5803 SDValue MemcpyCall =
5804 CreateCopyOfByValArgument(Arg, PtrOff,
5805 CallSeqStart.getNode()->getOperand(0),
5806 Flags, DAG, dl);
5807
5808 // This must go outside the CALLSEQ_START..END.
5809 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, NumBytes, 0,
5810 SDLoc(MemcpyCall));
5811 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
5812 NewCallSeqStart.getNode());
5813 Chain = CallSeqStart = NewCallSeqStart;
5814
5815 // Pass the address of the aggregate copy on the stack either in a
5816 // physical register or in the parameter list area of the current stack
5817 // frame to the callee.
5818 Arg = PtrOff;
5819 }
5820
5821 // When useCRBits() is true, there can be i1 arguments.
5822 // It is because getRegisterType(MVT::i1) => MVT::i1,
5823 // and for other integer types getRegisterType() => MVT::i32.
5824 // Extend i1 and ensure callee will get i32.
5825 if (Arg.getValueType() == MVT::i1)
5826 Arg = DAG.getNode(Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
5827 dl, MVT::i32, Arg);
5828
5829 if (VA.isRegLoc()) {
5830 seenFloatArg |= VA.getLocVT().isFloatingPoint();
5831 // Put argument in a physical register.
5832 if (Subtarget.hasSPE() && Arg.getValueType() == MVT::f64) {
5833 bool IsLE = Subtarget.isLittleEndian();
5834 SDValue SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
5835 DAG.getIntPtrConstant(IsLE ? 0 : 1, dl));
5836 RegsToPass.push_back(std::make_pair(VA.getLocReg(), SVal.getValue(0)));
5837 SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
5838 DAG.getIntPtrConstant(IsLE ? 1 : 0, dl));
5839 RegsToPass.push_back(std::make_pair(ArgLocs[++i].getLocReg(),
5840 SVal.getValue(0)));
5841 } else
5842 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
5843 } else {
5844 // Put argument in the parameter list area of the current stack frame.
5845      assert(VA.isMemLoc());
5846 unsigned LocMemOffset = VA.getLocMemOffset();
5847
5848 if (!IsTailCall) {
5849 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
5850 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
5851 StackPtr, PtrOff);
5852
5853 MemOpChains.push_back(
5854 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
5855 } else {
5856 // Calculate and remember argument location.
5857 CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset,
5858 TailCallArguments);
5859 }
5860 }
5861 }
5862
5863 if (!MemOpChains.empty())
5864 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
5865
5866 // Build a sequence of copy-to-reg nodes chained together with token chain
5867 // and flag operands which copy the outgoing args into the appropriate regs.
5868 SDValue InFlag;
5869 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
5870 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
5871 RegsToPass[i].second, InFlag);
5872 InFlag = Chain.getValue(1);
5873 }
5874
5875 // Set CR bit 6 to true if this is a vararg call with floating args passed in
5876 // registers.
5877 if (IsVarArg) {
5878 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
5879 SDValue Ops[] = { Chain, InFlag };
5880
5881 Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET,
5882 dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1));
5883
5884 InFlag = Chain.getValue(1);
5885 }
5886
5887 if (IsTailCall)
5888 PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
5889 TailCallArguments);
5890
5891 return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
5892 Callee, SPDiff, NumBytes, Ins, InVals, CB);
5893}
5894
5895// Copy an argument into memory, being careful to do this outside the
5896// call sequence for the call to which the argument belongs.
5897SDValue PPCTargetLowering::createMemcpyOutsideCallSeq(
5898 SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags,
5899 SelectionDAG &DAG, const SDLoc &dl) const {
5900 SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
5901 CallSeqStart.getNode()->getOperand(0),
5902 Flags, DAG, dl);
5903 // The MEMCPY must go outside the CALLSEQ_START..END.
5904 int64_t FrameSize = CallSeqStart.getConstantOperandVal(1);
5905 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, FrameSize, 0,
5906 SDLoc(MemcpyCall));
5907 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
5908 NewCallSeqStart.getNode());
5909 return NewCallSeqStart;
5910}
5911
5912SDValue PPCTargetLowering::LowerCall_64SVR4(
5913 SDValue Chain, SDValue Callee, CallFlags CFlags,
5914 const SmallVectorImpl<ISD::OutputArg> &Outs,
5915 const SmallVectorImpl<SDValue> &OutVals,
5916 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5917 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5918 const CallBase *CB) const {
5919 bool isELFv2ABI = Subtarget.isELFv2ABI();
5920 bool isLittleEndian = Subtarget.isLittleEndian();
5921 unsigned NumOps = Outs.size();
5922 bool IsSibCall = false;
5923 bool IsFastCall = CFlags.CallConv == CallingConv::Fast;
5924
5925 EVT PtrVT = getPointerTy(DAG.getDataLayout());
5926 unsigned PtrByteSize = 8;
5927
5928 MachineFunction &MF = DAG.getMachineFunction();
5929
5930 if (CFlags.IsTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt)
5931 IsSibCall = true;
5932
5933  // Mark this function as potentially containing a function that contains a
5934  // tail call. As a consequence the frame pointer will be used for dynamic
5935  // allocation and for restoring the caller's stack pointer in this function's
5936  // epilog. This is done because, by tail calling, the called function might
5937  // overwrite the value in this function's (MF) stack pointer stack slot 0(SP).
5938 if (getTargetMachine().Options.GuaranteedTailCallOpt && IsFastCall)
5939 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5940
5941  assert(!(IsFastCall && CFlags.IsVarArg) &&
5942         "fastcc not supported on varargs functions");
5943
5944 // Count how many bytes are to be pushed on the stack, including the linkage
5945 // area, and parameter passing area. On ELFv1, the linkage area is 48 bytes
5946 // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage
5947 // area is 32 bytes reserved space for [SP][CR][LR][TOC].
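Spelled out by doubleword (a sketch of the conventional layouts; the offsets are assumptions based on the ABI documents, not values read from this file):

    // ELFv1: 0(SP) back chain, 8 CR save, 16 LR save, 24/32 reserved, 40 TOC save
    // ELFv2: 0(SP) back chain, 8 CR save, 16 LR save, 24 TOC save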
5948 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
5949 unsigned NumBytes = LinkageSize;
5950 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
5951
5952 static const MCPhysReg GPR[] = {
5953 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
5954 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
5955 };
5956 static const MCPhysReg VR[] = {
5957 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
5958 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
5959 };
5960
5961 const unsigned NumGPRs = array_lengthof(GPR);
5962 const unsigned NumFPRs = useSoftFloat() ? 0 : 13;
5963 const unsigned NumVRs = array_lengthof(VR);
5964
5965 // On ELFv2, we can avoid allocating the parameter area if all the arguments
5966 // can be passed to the callee in registers.
5967 // For the fast calling convention, there is another check below.
5968 // Note: We should keep consistent with LowerFormalArguments_64SVR4()
5969 bool HasParameterArea = !isELFv2ABI || CFlags.IsVarArg || IsFastCall;
5970 if (!HasParameterArea) {
5971 unsigned ParamAreaSize = NumGPRs * PtrByteSize;
5972 unsigned AvailableFPRs = NumFPRs;
5973 unsigned AvailableVRs = NumVRs;
5974 unsigned NumBytesTmp = NumBytes;
5975 for (unsigned i = 0; i != NumOps; ++i) {
5976 if (Outs[i].Flags.isNest()) continue;
5977 if (CalculateStackSlotUsed(Subtarget, Outs[i].VT, Outs[i].ArgVT,
5978 Outs[i].Flags, PtrByteSize, LinkageSize,
5979 ParamAreaSize, NumBytesTmp, AvailableFPRs,
5980 AvailableVRs))
5981 HasParameterArea = true;
5982 }
5983 }
5984
5985 // When using the fast calling convention, we don't provide backing for
5986 // arguments that will be in registers.
5987 unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0;
5988
5989 // Avoid allocating parameter area for fastcc functions if all the arguments
5990 // can be passed in the registers.
5991 if (IsFastCall)
5992 HasParameterArea = false;
5993
5994 // Add up all the space actually used.
5995 for (unsigned i = 0; i != NumOps; ++i) {
5996 ISD::ArgFlagsTy Flags = Outs[i].Flags;
5997 EVT ArgVT = Outs[i].VT;
5998 EVT OrigVT = Outs[i].ArgVT;
5999
6000 if (Flags.isNest())
6001 continue;
6002
6003 if (IsFastCall) {
6004 if (Flags.isByVal()) {
6005 NumGPRsUsed += (Flags.getByValSize()+7)/8;
6006 if (NumGPRsUsed > NumGPRs)
6007 HasParameterArea = true;
6008 } else {
6009 switch (ArgVT.getSimpleVT().SimpleTy) {
6010        default: llvm_unreachable("Unexpected ValueType for argument!");
6011 case MVT::i1:
6012 case MVT::i32:
6013 case MVT::i64:
6014 if (++NumGPRsUsed <= NumGPRs)
6015 continue;
6016 break;
6017 case MVT::v4i32:
6018 case MVT::v8i16:
6019 case MVT::v16i8:
6020 case MVT::v2f64:
6021 case MVT::v2i64:
6022 case MVT::v1i128:
6023 case MVT::f128:
6024 if (++NumVRsUsed <= NumVRs)
6025 continue;
6026 break;
6027 case MVT::v4f32:
6028 if (++NumVRsUsed <= NumVRs)
6029 continue;
6030 break;
6031 case MVT::f32:
6032 case MVT::f64:
6033 if (++NumFPRsUsed <= NumFPRs)
6034 continue;
6035 break;
6036 }
6037 HasParameterArea = true;
6038 }
6039 }
6040
6041 /* Respect alignment of argument on the stack. */
6042    auto Alignment =
6043        CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
6044    NumBytes = alignTo(NumBytes, Alignment);
6045
6046 NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
6047 if (Flags.isInConsecutiveRegsLast())
6048 NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
6049 }
6050
6051 unsigned NumBytesActuallyUsed = NumBytes;
6052
6053  // In the old ELFv1 ABI, the prolog code of the callee may store up to 8
6054  // GPR argument registers to the stack, allowing va_start to index over
6055  // them in memory if it is varargs.
6056 // Because we cannot tell if this is needed on the caller side, we have to
6057 // conservatively assume that it is needed. As such, make sure we have at
6058 // least enough stack space for the caller to store the 8 GPRs.
6059 // In the ELFv2 ABI, we allocate the parameter area iff a callee
6060 // really requires memory operands, e.g. a vararg function.
6061 if (HasParameterArea)
6062 NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
6063 else
6064 NumBytes = LinkageSize;
6065
6066 // Tail call needs the stack to be aligned.
6067 if (getTargetMachine().Options.GuaranteedTailCallOpt && IsFastCall)
6068 NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
6069
6070 int SPDiff = 0;
6071
6072 // Calculate by how many bytes the stack has to be adjusted in case of tail
6073 // call optimization.
6074 if (!IsSibCall)
6075 SPDiff = CalculateTailCallSPDiff(DAG, CFlags.IsTailCall, NumBytes);
6076
6077 // To protect arguments on the stack from being clobbered in a tail call,
6078 // force all the loads to happen before doing any other lowering.
6079 if (CFlags.IsTailCall)
6080 Chain = DAG.getStackArgumentTokenFactor(Chain);
6081
6082 // Adjust the stack pointer for the new arguments...
6083 // These operations are automatically eliminated by the prolog/epilog pass
6084 if (!IsSibCall)
6085 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
6086 SDValue CallSeqStart = Chain;
6087
6088  // Load the return address and frame pointer so they can be moved somewhere
6089  // else later.
6090 SDValue LROp, FPOp;
6091 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
6092
6093 // Set up a copy of the stack pointer for use loading and storing any
6094 // arguments that may not fit in the registers available for argument
6095 // passing.
6096 SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
6097
6098 // Figure out which arguments are going to go in registers, and which in
6099 // memory. Also, if this is a vararg function, floating point operations
6100 // must be stored to our stack, and loaded into integer regs as well, if
6101 // any integer regs are available for argument passing.
6102 unsigned ArgOffset = LinkageSize;
6103
6104 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
6105 SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
6106
6107 SmallVector<SDValue, 8> MemOpChains;
6108 for (unsigned i = 0; i != NumOps; ++i) {
6109 SDValue Arg = OutVals[i];
6110 ISD::ArgFlagsTy Flags = Outs[i].Flags;
6111 EVT ArgVT = Outs[i].VT;
6112 EVT OrigVT = Outs[i].ArgVT;
6113
6114 // PtrOff will be used to store the current argument to the stack if a
6115 // register cannot be found for it.
6116 SDValue PtrOff;
6117
6118 // We re-align the argument offset for each argument, except when using the
6119 // fast calling convention, when we need to make sure we do that only when
6120 // we'll actually use a stack slot.
6121 auto ComputePtrOff = [&]() {
6122 /* Respect alignment of argument on the stack. */
6123 auto Alignment =
6124 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
6125 ArgOffset = alignTo(ArgOffset, Alignment);
6126
6127 PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
6128
6129 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6130 };
6131
6132 if (!IsFastCall) {
6133 ComputePtrOff();
6134
6135 /* Compute GPR index associated with argument offset. */
6136 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
6137 GPR_idx = std::min(GPR_idx, NumGPRs);
6138 }
6139
6140 // Promote integers to 64-bit values.
6141 if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) {
6142 // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
6143 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
6144 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
6145 }
6146
6147 // FIXME memcpy is used way more than necessary. Correctness first.
6148 // Note: "by value" is code for passing a structure by value, not
6149 // basic types.
6150 if (Flags.isByVal()) {
6151 // Note: Size includes alignment padding, so
6152 // struct x { short a; char b; }
6153 // will have Size = 4. With #pragma pack(1), it will have Size = 3.
6154 // These are the proper values we need for right-justifying the
6155 // aggregate in a parameter register.
6156 unsigned Size = Flags.getByValSize();
6157
6158 // An empty aggregate parameter takes up no storage and no
6159 // registers.
6160 if (Size == 0)
6161 continue;
6162
6163 if (IsFastCall)
6164 ComputePtrOff();
6165
6166 // All aggregates smaller than 8 bytes must be passed right-justified.
6167 if (Size==1 || Size==2 || Size==4) {
6168 EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32);
6169 if (GPR_idx != NumGPRs) {
6170 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
6171 MachinePointerInfo(), VT);
6172 MemOpChains.push_back(Load.getValue(1));
6173 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6174
6175 ArgOffset += PtrByteSize;
6176 continue;
6177 }
6178 }
6179
6180 if (GPR_idx == NumGPRs && Size < 8) {
6181 SDValue AddPtr = PtrOff;
6182 if (!isLittleEndian) {
6183 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
6184 PtrOff.getValueType());
6185 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
6186 }
6187 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
6188 CallSeqStart,
6189 Flags, DAG, dl);
6190 ArgOffset += PtrByteSize;
6191 continue;
6192 }
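For example (hypothetical sizes): a 3-byte aggregate with no free GPRs on a big-endian target has its copy placed so that it ends on the doubleword boundary:

    // Size = 3, PtrByteSize = 8  ->  AddPtr = PtrOff + (8 - 3) = PtrOff + 5
    // (on little-endian the copy stays at PtrOff; no adjustment is needed)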
6193 // Copy entire object into memory. There are cases where gcc-generated
6194 // code assumes it is there, even if it could be put entirely into
6195 // registers. (This is not what the doc says.)
6196
6197 // FIXME: The above statement is likely due to a misunderstanding of the
6198 // documents. All arguments must be copied into the parameter area BY
6199 // THE CALLEE in the event that the callee takes the address of any
6200 // formal argument. That has not yet been implemented. However, it is
6201 // reasonable to use the stack area as a staging area for the register
6202 // load.
6203
6204 // Skip this for small aggregates, as we will use the same slot for a
6205 // right-justified copy, below.
6206 if (Size >= 8)
6207 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
6208 CallSeqStart,
6209 Flags, DAG, dl);
6210
6211 // When a register is available, pass a small aggregate right-justified.
6212 if (Size < 8 && GPR_idx != NumGPRs) {
6213 // The easiest way to get this right-justified in a register
6214 // is to copy the structure into the rightmost portion of a
6215 // local variable slot, then load the whole slot into the
6216 // register.
6217 // FIXME: The memcpy seems to produce pretty awful code for
6218 // small aggregates, particularly for packed ones.
6219 // FIXME: It would be preferable to use the slot in the
6220 // parameter save area instead of a new local variable.
6221 SDValue AddPtr = PtrOff;
6222 if (!isLittleEndian) {
6223 SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType());
6224 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
6225 }
6226 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
6227 CallSeqStart,
6228 Flags, DAG, dl);
6229
6230 // Load the slot into the register.
6231 SDValue Load =
6232 DAG.getLoad(PtrVT, dl, Chain, PtrOff, MachinePointerInfo());
6233 MemOpChains.push_back(Load.getValue(1));
6234 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6235
6236 // Done with this argument.
6237 ArgOffset += PtrByteSize;
6238 continue;
6239 }
6240
6241 // For aggregates larger than PtrByteSize, copy the pieces of the
6242 // object that fit into registers from the parameter save area.
6243 for (unsigned j=0; j<Size; j+=PtrByteSize) {
6244 SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
6245 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
6246 if (GPR_idx != NumGPRs) {
6247 SDValue Load =
6248 DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
6249 MemOpChains.push_back(Load.getValue(1));
6250 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6251 ArgOffset += PtrByteSize;
6252 } else {
6253 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
6254 break;
6255 }
6256 }
6257 continue;
6258 }
6259
6260 switch (Arg.getSimpleValueType().SimpleTy) {
6261    default: llvm_unreachable("Unexpected ValueType for argument!");
6262 case MVT::i1:
6263 case MVT::i32:
6264 case MVT::i64:
6265 if (Flags.isNest()) {
6266 // The 'nest' parameter, if any, is passed in R11.
6267 RegsToPass.push_back(std::make_pair(PPC::X11, Arg));
6268 break;
6269 }
6270
6271 // These can be scalar arguments or elements of an integer array type
6272 // passed directly. Clang may use those instead of "byval" aggregate
6273 // types to avoid forcing arguments to memory unnecessarily.
6274 if (GPR_idx != NumGPRs) {
6275 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
6276 } else {
6277 if (IsFastCall)
6278 ComputePtrOff();
6279
6280        assert(HasParameterArea &&
6281               "Parameter area must exist to pass an argument in memory.");
6282 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6283 true, CFlags.IsTailCall, false, MemOpChains,
6284 TailCallArguments, dl);
6285 if (IsFastCall)
6286 ArgOffset += PtrByteSize;
6287 }
6288 if (!IsFastCall)
6289 ArgOffset += PtrByteSize;
6290 break;
6291 case MVT::f32:
6292 case MVT::f64: {
6293 // These can be scalar arguments or elements of a float array type
6294 // passed directly. The latter are used to implement ELFv2 homogenous
6295 // float aggregates.
6296
6297 // Named arguments go into FPRs first, and once they overflow, the
6298 // remaining arguments go into GPRs and then the parameter save area.
6299 // Unnamed arguments for vararg functions always go to GPRs and
6300 // then the parameter save area. For now, put all arguments to vararg
6301 // routines always in both locations (FPR *and* GPR or stack slot).
6302 bool NeedGPROrStack = CFlags.IsVarArg || FPR_idx == NumFPRs;
6303 bool NeededLoad = false;
6304
6305 // First load the argument into the next available FPR.
6306 if (FPR_idx != NumFPRs)
6307 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
6308
6309 // Next, load the argument into GPR or stack slot if needed.
6310 if (!NeedGPROrStack)
6311 ;
6312 else if (GPR_idx != NumGPRs && !IsFastCall) {
6313 // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
6314 // once we support fp <-> gpr moves.
6315
6316 // In the non-vararg case, this can only ever happen in the
6317 // presence of f32 array types, since otherwise we never run
6318 // out of FPRs before running out of GPRs.
6319 SDValue ArgVal;
6320
6321 // Double values are always passed in a single GPR.
6322 if (Arg.getValueType() != MVT::f32) {
6323 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
6324
6325 // Non-array float values are extended and passed in a GPR.
6326 } else if (!Flags.isInConsecutiveRegs()) {
6327 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6328 ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
6329
6330 // If we have an array of floats, we collect every odd element
6331 // together with its predecessor into one GPR.
6332 } else if (ArgOffset % PtrByteSize != 0) {
6333 SDValue Lo, Hi;
6334 Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]);
6335 Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6336 if (!isLittleEndian)
6337 std::swap(Lo, Hi);
6338 ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
6339
6340 // The final element, if even, goes into the first half of a GPR.
6341 } else if (Flags.isInConsecutiveRegsLast()) {
6342 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6343 ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
6344 if (!isLittleEndian)
6345 ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal,
6346 DAG.getConstant(32, dl, MVT::i32));
6347
6348 // Non-final even elements are skipped; they will be handled
6349        // together with the subsequent argument on the next go-around.
6350 } else
6351 ArgVal = SDValue();
6352
6353 if (ArgVal.getNode())
6354 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
6355 } else {
6356 if (IsFastCall)
6357 ComputePtrOff();
6358
6359 // Single-precision floating-point values are mapped to the
6360 // second (rightmost) word of the stack doubleword.
6361 if (Arg.getValueType() == MVT::f32 &&
6362 !isLittleEndian && !Flags.isInConsecutiveRegs()) {
6363 SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
6364 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
6365 }
6366
6367        assert(HasParameterArea &&
6368               "Parameter area must exist to pass an argument in memory.");
6369 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6370 true, CFlags.IsTailCall, false, MemOpChains,
6371 TailCallArguments, dl);
6372
6373 NeededLoad = true;
6374 }
6375 // When passing an array of floats, the array occupies consecutive
6376 // space in the argument area; only round up to the next doubleword
6377 // at the end of the array. Otherwise, each float takes 8 bytes.
6378 if (!IsFastCall || NeededLoad) {
6379 ArgOffset += (Arg.getValueType() == MVT::f32 &&
6380 Flags.isInConsecutiveRegs()) ? 4 : 8;
6381 if (Flags.isInConsecutiveRegsLast())
6382 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
6383 }
6384 break;
6385 }
6386 case MVT::v4f32:
6387 case MVT::v4i32:
6388 case MVT::v8i16:
6389 case MVT::v16i8:
6390 case MVT::v2f64:
6391 case MVT::v2i64:
6392 case MVT::v1i128:
6393 case MVT::f128:
6394 // These can be scalar arguments or elements of a vector array type
6395 // passed directly. The latter are used to implement ELFv2 homogenous
6396 // vector aggregates.
6397
6398 // For a varargs call, named arguments go into VRs or on the stack as
6399 // usual; unnamed arguments always go to the stack or the corresponding
6400 // GPRs when within range. For now, we always put the value in both
6401 // locations (or even all three).
6402 if (CFlags.IsVarArg) {
6403        assert(HasParameterArea &&
6404               "Parameter area must exist if we have a varargs call.");
6405 // We could elide this store in the case where the object fits
6406 // entirely in R registers. Maybe later.
6407 SDValue Store =
6408 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6409 MemOpChains.push_back(Store);
6410 if (VR_idx != NumVRs) {
6411 SDValue Load =
6412 DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
6413 MemOpChains.push_back(Load.getValue(1));
6414 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
6415 }
6416 ArgOffset += 16;
6417 for (unsigned i=0; i<16; i+=PtrByteSize) {
6418 if (GPR_idx == NumGPRs)
6419 break;
6420 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6421 DAG.getConstant(i, dl, PtrVT));
6422 SDValue Load =
6423 DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6424 MemOpChains.push_back(Load.getValue(1));
6425 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6426 }
6427 break;
6428 }
6429
6430 // Non-varargs Altivec params go into VRs or on the stack.
6431 if (VR_idx != NumVRs) {
6432 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
6433 } else {
6434 if (IsFastCall)
6435 ComputePtrOff();
6436
6437        assert(HasParameterArea &&
6438               "Parameter area must exist to pass an argument in memory.");
6439 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6440 true, CFlags.IsTailCall, true, MemOpChains,
6441 TailCallArguments, dl);
6442 if (IsFastCall)
6443 ArgOffset += 16;
6444 }
6445
6446 if (!IsFastCall)
6447 ArgOffset += 16;
6448 break;
6449 }
6450 }
6451
6452  assert((!HasParameterArea || NumBytesActuallyUsed == ArgOffset) &&
6453         "mismatch in size of parameter area");
6454 (void)NumBytesActuallyUsed;
6455
6456 if (!MemOpChains.empty())
6457 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
6458
6459 // Check if this is an indirect call (MTCTR/BCTRL).
6460 // See prepareDescriptorIndirectCall and buildCallOperands for more
6461 // information about calls through function pointers in the 64-bit SVR4 ABI.
6462 if (CFlags.IsIndirect) {
6463 // For 64-bit ELFv2 ABI with PCRel, do not save the TOC of the
6464 // caller in the TOC save area.
6465 if (isTOCSaveRestoreRequired(Subtarget)) {
6466      assert(!CFlags.IsTailCall && "Indirect tail calls not supported");
6467 // Load r2 into a virtual register and store it to the TOC save area.
6468 setUsesTOCBasePtr(DAG);
6469 SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
6470 // TOC save area offset.
6471 unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
6472 SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
6473 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6474 Chain = DAG.getStore(Val.getValue(1), dl, Val, AddPtr,
6475 MachinePointerInfo::getStack(
6476 DAG.getMachineFunction(), TOCSaveOffset));
6477 }
6478 // In the ELFv2 ABI, R12 must contain the address of an indirect callee.
6479 // This does not mean the MTCTR instruction must use R12; it's easier
6480 // to model this as an extra parameter, so do that.
6481 if (isELFv2ABI && !CFlags.IsPatchPoint)
6482 RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
6483 }
6484
6485 // Build a sequence of copy-to-reg nodes chained together with token chain
6486 // and flag operands which copy the outgoing args into the appropriate regs.
6487 SDValue InFlag;
6488 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
6489 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
6490 RegsToPass[i].second, InFlag);
6491 InFlag = Chain.getValue(1);
6492 }
6493
6494 if (CFlags.IsTailCall && !IsSibCall)
6495 PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
6496 TailCallArguments);
6497
6498 return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
6499 Callee, SPDiff, NumBytes, Ins, InVals, CB);
6500}
6501
6502// Returns true when the shadow of a general purpose argument register
6503// in the parameter save area is aligned to at least 'RequiredAlign'.
6504static bool isGPRShadowAligned(MCPhysReg Reg, Align RequiredAlign) {
6505  assert(RequiredAlign.value() <= 16 &&
6506         "Required alignment greater than stack alignment.");
6507 switch (Reg) {
6508 default:
6509 report_fatal_error("called on invalid register.");
6510 case PPC::R5:
6511 case PPC::R9:
6512 case PPC::X3:
6513 case PPC::X5:
6514 case PPC::X7:
6515 case PPC::X9:
6516    // These registers are 16 byte aligned, which is the strictest alignment
6517 // we can support.
6518 return true;
6519 case PPC::R3:
6520 case PPC::R7:
6521 case PPC::X4:
6522 case PPC::X6:
6523 case PPC::X8:
6524 case PPC::X10:
6525 // The shadow of these registers in the PSA is 8 byte aligned.
6526 return RequiredAlign <= 8;
6527 case PPC::R4:
6528 case PPC::R6:
6529 case PPC::R8:
6530 case PPC::R10:
6531 return RequiredAlign <= 4;
6532 }
6533}
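The mapping above follows from each GPR shadowing one pointer-sized slot of the parameter save area, with the stack pointer 16-byte aligned; the linkage-area sizes of 48 bytes (64-bit) and 24 bytes (32-bit) are assumptions about the AIX ABI rather than values read from this function:

    // 64-bit: X3 -> SP+48 (16-aligned), X4 -> SP+56 (8-aligned), X5 -> SP+64, ...
    // 32-bit: R3 -> SP+24 (8-aligned),  R4 -> SP+28 (4-aligned), R5 -> SP+32, ...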
6534
6535static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
6536 CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
6537 CCState &S) {
6538 AIXCCState &State = static_cast<AIXCCState &>(S);
6539 const PPCSubtarget &Subtarget = static_cast<const PPCSubtarget &>(
6540 State.getMachineFunction().getSubtarget());
6541 const bool IsPPC64 = Subtarget.isPPC64();
6542 const Align PtrAlign = IsPPC64 ? Align(8) : Align(4);
6543 const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
6544
6545 if (ValVT == MVT::f128)
6546 report_fatal_error("f128 is unimplemented on AIX.");
6547
6548 if (ArgFlags.isNest())
6549 report_fatal_error("Nest arguments are unimplemented.");
6550
6551 static const MCPhysReg GPR_32[] = {// 32-bit registers.
6552 PPC::R3, PPC::R4, PPC::R5, PPC::R6,
6553 PPC::R7, PPC::R8, PPC::R9, PPC::R10};
6554 static const MCPhysReg GPR_64[] = {// 64-bit registers.
6555 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
6556 PPC::X7, PPC::X8, PPC::X9, PPC::X10};
6557
6558 static const MCPhysReg VR[] = {// Vector registers.
6559 PPC::V2, PPC::V3, PPC::V4, PPC::V5,
6560 PPC::V6, PPC::V7, PPC::V8, PPC::V9,
6561 PPC::V10, PPC::V11, PPC::V12, PPC::V13};
6562
6563 if (ArgFlags.isByVal()) {
6564 if (ArgFlags.getNonZeroByValAlign() > PtrAlign)
6565 report_fatal_error("Pass-by-value arguments with alignment greater than "
6566 "register width are not supported.");
6567
6568 const unsigned ByValSize = ArgFlags.getByValSize();
6569
6570 // An empty aggregate parameter takes up no storage and no registers,
6571    // but needs a MemLoc for a stack slot on the formal-arguments side.
6572 if (ByValSize == 0) {
6573 State.addLoc(CCValAssign::getMem(ValNo, MVT::INVALID_SIMPLE_VALUE_TYPE,
6574 State.getNextStackOffset(), RegVT,
6575 LocInfo));
6576 return false;
6577 }
6578
6579 const unsigned StackSize = alignTo(ByValSize, PtrAlign);
6580 unsigned Offset = State.AllocateStack(StackSize, PtrAlign);
6581 for (const unsigned E = Offset + StackSize; Offset < E;
6582 Offset += PtrAlign.value()) {
6583 if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32))
6584 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
6585 else {
6586 State.addLoc(CCValAssign::getMem(ValNo, MVT::INVALID_SIMPLE_VALUE_TYPE,
6587 Offset, MVT::INVALID_SIMPLE_VALUE_TYPE,
6588 LocInfo));
6589 break;
6590 }
6591 }
6592 return false;
6593 }
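A worked example of the by-value loop above (hypothetical argument): a 12-byte aggregate on 64-bit AIX with all GPRs still free:

    // StackSize = alignTo(12, 8) = 16, Offset = AllocateStack(16, Align(8))
    // pass 1: GPR available -> RegLoc for bytes 0..7
    // pass 2: GPR available -> RegLoc for bytes 8..15
    // (had the GPRs run out mid-way, a single MemLoc at the current Offset
    //  would cover the remainder and the loop would break)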
6594
6595  // Arguments always reserve space in the parameter save area.
6596 switch (ValVT.SimpleTy) {
6597 default:
6598 report_fatal_error("Unhandled value type for argument.");
6599 case MVT::i64:
6600 // i64 arguments should have been split to i32 for PPC32.
6601    assert(IsPPC64 && "PPC32 should have split i64 values.");
6602    LLVM_FALLTHROUGH;
6603 case MVT::i1:
6604 case MVT::i32: {
6605 const unsigned Offset = State.AllocateStack(PtrAlign.value(), PtrAlign);
6606 // AIX integer arguments are always passed in register width.
6607 if (ValVT.getFixedSizeInBits() < RegVT.getFixedSizeInBits())
6608 LocInfo = ArgFlags.isSExt() ? CCValAssign::LocInfo::SExt
6609 : CCValAssign::LocInfo::ZExt;
6610 if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32))
6611 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
6612 else
6613 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, RegVT, LocInfo));
6614
6615 return false;
6616 }
6617 case MVT::f32:
6618 case MVT::f64: {
6619    // The parameter save area (PSA) is reserved even if the float passes in an FPR.
6620 const unsigned StoreSize = LocVT.getStoreSize();
6621 // Floats are always 4-byte aligned in the PSA on AIX.
6622 // This includes f64 in 64-bit mode for ABI compatibility.
6623 const unsigned Offset =
6624 State.AllocateStack(IsPPC64 ? 8 : StoreSize, Align(4));
6625 unsigned FReg = State.AllocateReg(FPR);
6626 if (FReg)
6627 State.addLoc(CCValAssign::getReg(ValNo, ValVT, FReg, LocVT, LocInfo));
6628
6629 // Reserve and initialize GPRs or initialize the PSA as required.
6630 for (unsigned I = 0; I < StoreSize; I += PtrAlign.value()) {
6631 if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32)) {
6632        assert(FReg && "An FPR should be available when a GPR is reserved.");
6633 if (State.isVarArg()) {
6634 // Successfully reserved GPRs are only initialized for vararg calls.
6635 // Custom handling is required for:
6636 // f64 in PPC32 needs to be split into 2 GPRs.
6637 // f32 in PPC64 needs to occupy only lower 32 bits of 64-bit GPR.
6638 State.addLoc(
6639 CCValAssign::getCustomReg(ValNo, ValVT, Reg, RegVT, LocInfo));
6640 }
6641 } else {
6642 // If there are insufficient GPRs, the PSA needs to be initialized.
6643        // Initialization occurs even if an FPR was initialized, for
6644        // compatibility with the AIX XL compiler. The full memory for the
6645 // argument will be initialized even if a prior word is saved in GPR.
6646 // A custom memLoc is used when the argument also passes in FPR so
6647 // that the callee handling can skip over it easily.
6648 State.addLoc(
6649 FReg ? CCValAssign::getCustomMem(ValNo, ValVT, Offset, LocVT,
6650 LocInfo)
6651 : CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
6652 break;
6653 }
6654 }
6655
6656 return false;
6657 }
6658 case MVT::v4f32:
6659 case MVT::v4i32:
6660 case MVT::v8i16:
6661 case MVT::v16i8:
6662 case MVT::v2i64:
6663 case MVT::v2f64:
6664 case MVT::v1i128: {
6665 const unsigned VecSize = 16;
6666 const Align VecAlign(VecSize);
6667
6668 if (!State.isVarArg()) {
6669 // If there are vector registers remaining we don't consume any stack
6670 // space.
6671 if (unsigned VReg = State.AllocateReg(VR)) {
6672 State.addLoc(CCValAssign::getReg(ValNo, ValVT, VReg, LocVT, LocInfo));
6673 return false;
6674 }
6675 // Vectors passed on the stack do not shadow GPRs or FPRs even though they
6676 // might be allocated in the portion of the PSA that is shadowed by the
6677 // GPRs.
6678 const unsigned Offset = State.AllocateStack(VecSize, VecAlign);
6679 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
6680 return false;
6681 }
6682
6683 const unsigned PtrSize = IsPPC64 ? 8 : 4;
6684 ArrayRef<MCPhysReg> GPRs = IsPPC64 ? GPR_64 : GPR_32;
6685
6686 unsigned NextRegIndex = State.getFirstUnallocated(GPRs);
6687 // Burn any underaligned registers and their shadowed stack space until
6688 // we reach the required alignment.
6689 while (NextRegIndex != GPRs.size() &&
6690 !isGPRShadowAligned(GPRs[NextRegIndex], VecAlign)) {
6691 // Shadow allocate register and its stack shadow.
6692 unsigned Reg = State.AllocateReg(GPRs);
6693 State.AllocateStack(PtrSize, PtrAlign);
6694      assert(Reg && "Allocating register unexpectedly failed.");
6695 (void)Reg;
6696 NextRegIndex = State.getFirstUnallocated(GPRs);
6697 }
6698
6699 // Vectors that are passed as fixed arguments are handled differently.
6700 // They are passed in VRs if any are available (unlike arguments passed
6701    // through ellipses) and shadow GPRs (unlike arguments to non-vararg
6702    // functions).
6703 if (State.isFixed(ValNo)) {
6704 if (unsigned VReg = State.AllocateReg(VR)) {
6705 State.addLoc(CCValAssign::getReg(ValNo, ValVT, VReg, LocVT, LocInfo));
6706 // Shadow allocate GPRs and stack space even though we pass in a VR.
6707 for (unsigned I = 0; I != VecSize; I += PtrSize)
6708 State.AllocateReg(GPRs);
6709 State.AllocateStack(VecSize, VecAlign);
6710 return false;
6711 }
6712 // No vector registers remain so pass on the stack.
6713 const unsigned Offset = State.AllocateStack(VecSize, VecAlign);
6714 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
6715 return false;
6716 }
6717
6718 // If all GPRS are consumed then we pass the argument fully on the stack.
6719 if (NextRegIndex == GPRs.size()) {
6720 const unsigned Offset = State.AllocateStack(VecSize, VecAlign);
6721 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
6722 return false;
6723 }
6724
6725 // Corner case for 32-bit codegen. We have 2 registers to pass the first
6726 // half of the argument, and then need to pass the remaining half on the
6727 // stack.
6728 if (GPRs[NextRegIndex] == PPC::R9) {
6729 const unsigned Offset = State.AllocateStack(VecSize, VecAlign);
6730 State.addLoc(
6731 CCValAssign::getCustomMem(ValNo, ValVT, Offset, LocVT, LocInfo));
6732
6733 const unsigned FirstReg = State.AllocateReg(PPC::R9);
6734 const unsigned SecondReg = State.AllocateReg(PPC::R10);
6735 assert(FirstReg && SecondReg &&(static_cast<void> (0))
6736 "Allocating R9 or R10 unexpectedly failed.")(static_cast<void> (0));
6737 State.addLoc(
6738 CCValAssign::getCustomReg(ValNo, ValVT, FirstReg, RegVT, LocInfo));
6739 State.addLoc(
6740 CCValAssign::getCustomReg(ValNo, ValVT, SecondReg, RegVT, LocInfo));
6741 return false;
6742 }
6743
6744 // We have enough GPRs to fully pass the vector argument, and we have
6745 // already consumed any underaligned registers. Start with the custom
6746 // MemLoc and then the custom RegLocs.
6747 const unsigned Offset = State.AllocateStack(VecSize, VecAlign);
6748 State.addLoc(
6749 CCValAssign::getCustomMem(ValNo, ValVT, Offset, LocVT, LocInfo));
6750 for (unsigned I = 0; I != VecSize; I += PtrSize) {
6751 const unsigned Reg = State.AllocateReg(GPRs);
6752       assert(Reg && "Failed to allocate register for vararg vector argument")(static_cast<void> (0));
6753 State.addLoc(
6754 CCValAssign::getCustomReg(ValNo, ValVT, Reg, RegVT, LocInfo));
6755 }
6756 return false;
6757 }
6758 }
6759 return true;
6760}
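// Worked example (illustrative, not part of the original source): a 16-byte
// vector passed variadically as the first argument on 64-bit AIX yields one
// custom MemLoc at offset 48 (immediately after the linkage area) followed by
// two custom RegLocs in X3 and X4, since VecSize == 16 and PtrSize == 8.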
6761
6762// So far, this function is only used by LowerFormalArguments_AIX()
6763static const TargetRegisterClass *getRegClassForSVT(MVT::SimpleValueType SVT,
6764 bool IsPPC64,
6765 bool HasP8Vector,
6766 bool HasVSX) {
6767 assert((IsPPC64 || SVT != MVT::i64) &&(static_cast<void> (0))
6768 "i64 should have been split for 32-bit codegen.")(static_cast<void> (0));
6769
6770 switch (SVT) {
6771 default:
6772 report_fatal_error("Unexpected value type for formal argument");
6773 case MVT::i1:
6774 case MVT::i32:
6775 case MVT::i64:
6776 return IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
6777 case MVT::f32:
6778 return HasP8Vector ? &PPC::VSSRCRegClass : &PPC::F4RCRegClass;
6779 case MVT::f64:
6780 return HasVSX ? &PPC::VSFRCRegClass : &PPC::F8RCRegClass;
6781 case MVT::v4f32:
6782 case MVT::v4i32:
6783 case MVT::v8i16:
6784 case MVT::v16i8:
6785 case MVT::v2i64:
6786 case MVT::v2f64:
6787 case MVT::v1i128:
6788 return &PPC::VRRCRegClass;
6789 }
6790}
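// Hypothetical usage sketch (not in the original source); the names come from
// the switch above:
//   const TargetRegisterClass *RC =
//       getRegClassForSVT(MVT::f64, /*IsPPC64=*/true,
//                         Subtarget.hasP8Vector(), Subtarget.hasVSX());
//   // RC is &PPC::VSFRCRegClass when HasVSX is true, &PPC::F8RCRegClass
//   // otherwise.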
6791
6792static SDValue truncateScalarIntegerArg(ISD::ArgFlagsTy Flags, EVT ValVT,
6793 SelectionDAG &DAG, SDValue ArgValue,
6794 MVT LocVT, const SDLoc &dl) {
6795 assert(ValVT.isScalarInteger() && LocVT.isScalarInteger())(static_cast<void> (0));
6796 assert(ValVT.getFixedSizeInBits() < LocVT.getFixedSizeInBits())(static_cast<void> (0));
6797
6798 if (Flags.isSExt())
6799 ArgValue = DAG.getNode(ISD::AssertSext, dl, LocVT, ArgValue,
6800 DAG.getValueType(ValVT));
6801 else if (Flags.isZExt())
6802 ArgValue = DAG.getNode(ISD::AssertZext, dl, LocVT, ArgValue,
6803 DAG.getValueType(ValVT));
6804
6805 return DAG.getNode(ISD::TRUNCATE, dl, ValVT, ArgValue);
6806}
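// Example (illustrative, not in the original source): an i8 argument arriving
// in a 64-bit GPR has LocVT == MVT::i64; if the IR marked it signext, the
// value is wrapped in AssertSext before the TRUNCATE back to i8, so later
// combines can rely on the state of the upper bits.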
6807
6808static unsigned mapArgRegToOffsetAIX(unsigned Reg, const PPCFrameLowering *FL) {
6809 const unsigned LASize = FL->getLinkageSize();
6810
6811 if (PPC::GPRCRegClass.contains(Reg)) {
6812 assert(Reg >= PPC::R3 && Reg <= PPC::R10 &&(static_cast<void> (0))
6813 "Reg must be a valid argument register!")(static_cast<void> (0));
6814 return LASize + 4 * (Reg - PPC::R3);
6815 }
6816
6817 if (PPC::G8RCRegClass.contains(Reg)) {
6818 assert(Reg >= PPC::X3 && Reg <= PPC::X10 &&(static_cast<void> (0))
6819 "Reg must be a valid argument register!")(static_cast<void> (0));
6820 return LASize + 8 * (Reg - PPC::X3);
6821 }
6822
6823 llvm_unreachable("Only general purpose registers expected.")__builtin_unreachable();
6824}
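// Worked example (not in the original source): on 64-bit AIX the linkage area
// is 48 bytes, so X5 maps to 48 + 8 * (X5 - X3) == 64, the third doubleword
// of the parameter save area.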
6825
6826// AIX ABI Stack Frame Layout:
6827//
6828// Low Memory +--------------------------------------------+
6829// SP +---> | Back chain | ---+
6830// | +--------------------------------------------+ |
6831// | | Saved Condition Register | |
6832// | +--------------------------------------------+ |
6833// | | Saved Linkage Register | |
6834// | +--------------------------------------------+ | Linkage Area
6835// | | Reserved for compilers | |
6836// | +--------------------------------------------+ |
6837// | | Reserved for binders | |
6838// | +--------------------------------------------+ |
6839// | | Saved TOC pointer | ---+
6840// | +--------------------------------------------+
6841// | | Parameter save area |
6842// | +--------------------------------------------+
6843// | | Alloca space |
6844// | +--------------------------------------------+
6845// | | Local variable space |
6846// | +--------------------------------------------+
6847// | | Float/int conversion temporary |
6848// | +--------------------------------------------+
6849// | | Save area for AltiVec registers |
6850// | +--------------------------------------------+
6851// | | AltiVec alignment padding |
6852// | +--------------------------------------------+
6853// | | Save area for VRSAVE register |
6854// | +--------------------------------------------+
6855// | | Save area for General Purpose registers |
6856// | +--------------------------------------------+
6857// | | Save area for Floating Point registers |
6858// | +--------------------------------------------+
6859// +---- | Back chain |
6860// High Memory +--------------------------------------------+
6861//
6862// Specifications:
6863// AIX 7.2 Assembler Language Reference
6864// Subroutine linkage convention
6865
6866SDValue PPCTargetLowering::LowerFormalArguments_AIX(
6867 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
6868 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
6869 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
6870
6871 assert((CallConv == CallingConv::C || CallConv == CallingConv::Cold ||(static_cast<void> (0))
6872 CallConv == CallingConv::Fast) &&(static_cast<void> (0))
6873 "Unexpected calling convention!")(static_cast<void> (0));
6874
6875 if (getTargetMachine().Options.GuaranteedTailCallOpt)
6876 report_fatal_error("Tail call support is unimplemented on AIX.");
6877
6878 if (useSoftFloat())
6879 report_fatal_error("Soft float support is unimplemented on AIX.");
6880
6881 const PPCSubtarget &Subtarget =
6882 static_cast<const PPCSubtarget &>(DAG.getSubtarget());
6883
6884 const bool IsPPC64 = Subtarget.isPPC64();
6885 const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
6886
6887 // Assign locations to all of the incoming arguments.
6888 SmallVector<CCValAssign, 16> ArgLocs;
6889 MachineFunction &MF = DAG.getMachineFunction();
6890 MachineFrameInfo &MFI = MF.getFrameInfo();
6891 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
6892 AIXCCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
6893
6894 const EVT PtrVT = getPointerTy(MF.getDataLayout());
6895 // Reserve space for the linkage area on the stack.
6896 const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
6897 CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize));
6898 CCInfo.AnalyzeFormalArguments(Ins, CC_AIX);
6899
6900 SmallVector<SDValue, 8> MemOps;
6901
6902 for (size_t I = 0, End = ArgLocs.size(); I != End; /* No increment here */) {
6903 CCValAssign &VA = ArgLocs[I++];
6904 MVT LocVT = VA.getLocVT();
6905 MVT ValVT = VA.getValVT();
6906 ISD::ArgFlagsTy Flags = Ins[VA.getValNo()].Flags;
6907 // For compatibility with the AIX XL compiler, the float args in the
6908     // parameter save area are initialized even if the argument is available
6909     // in a register. The caller is required to initialize both the register
6910     // and memory; however, the callee can choose to expect it in either.
6911 // The memloc is dismissed here because the argument is retrieved from
6912 // the register.
6913 if (VA.isMemLoc() && VA.needsCustom() && ValVT.isFloatingPoint())
6914 continue;
6915
6916 auto HandleMemLoc = [&]() {
6917 const unsigned LocSize = LocVT.getStoreSize();
6918 const unsigned ValSize = ValVT.getStoreSize();
6919 assert((ValSize <= LocSize) &&(static_cast<void> (0))
6920 "Object size is larger than size of MemLoc")(static_cast<void> (0));
6921 int CurArgOffset = VA.getLocMemOffset();
6922 // Objects are right-justified because AIX is big-endian.
6923 if (LocSize > ValSize)
6924 CurArgOffset += LocSize - ValSize;
6925 // Potential tail calls could cause overwriting of argument stack slots.
6926 const bool IsImmutable =
6927 !(getTargetMachine().Options.GuaranteedTailCallOpt &&
6928 (CallConv == CallingConv::Fast));
6929 int FI = MFI.CreateFixedObject(ValSize, CurArgOffset, IsImmutable);
6930 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
6931 SDValue ArgValue =
6932 DAG.getLoad(ValVT, dl, Chain, FIN, MachinePointerInfo());
6933 InVals.push_back(ArgValue);
6934 };
6935
6936 // Vector arguments to VaArg functions are passed both on the stack, and
6937 // in any available GPRs. Load the value from the stack and add the GPRs
6938 // as live ins.
6939 if (VA.isMemLoc() && VA.needsCustom()) {
6940 assert(ValVT.isVector() && "Unexpected Custom MemLoc type.")(static_cast<void> (0));
6941 assert(isVarArg && "Only use custom memloc for vararg.")(static_cast<void> (0));
6942       // Remember the ValNo of the custom MemLoc so we can compare it to the
6943       // ValNo of the matching custom RegLocs.
6944 const unsigned OriginalValNo = VA.getValNo();
6945 (void)OriginalValNo;
6946
6947 auto HandleCustomVecRegLoc = [&]() {
6948 assert(I != End && ArgLocs[I].isRegLoc() && ArgLocs[I].needsCustom() &&(static_cast<void> (0))
6949 "Missing custom RegLoc.")(static_cast<void> (0));
6950 VA = ArgLocs[I++];
6951 assert(VA.getValVT().isVector() &&(static_cast<void> (0))
6952 "Unexpected Val type for custom RegLoc.")(static_cast<void> (0));
6953 assert(VA.getValNo() == OriginalValNo &&(static_cast<void> (0))
6954 "ValNo mismatch between custom MemLoc and RegLoc.")(static_cast<void> (0));
6955 MVT::SimpleValueType SVT = VA.getLocVT().SimpleTy;
6956 MF.addLiveIn(VA.getLocReg(),
6957 getRegClassForSVT(SVT, IsPPC64, Subtarget.hasP8Vector(),
6958 Subtarget.hasVSX()));
6959 };
6960
6961 HandleMemLoc();
6962       // In 64-bit there will be exactly 2 custom RegLocs that follow, and in
6963       // 32-bit there will be 2 custom RegLocs if we are passing in R9 and
6964       // R10.
6965 HandleCustomVecRegLoc();
6966 HandleCustomVecRegLoc();
6967
6968 // If we are targeting 32-bit, there might be 2 extra custom RegLocs if
6969 // we passed the vector in R5, R6, R7 and R8.
6970 if (I != End && ArgLocs[I].isRegLoc() && ArgLocs[I].needsCustom()) {
6971 assert(!IsPPC64 &&(static_cast<void> (0))
6972 "Only 2 custom RegLocs expected for 64-bit codegen.")(static_cast<void> (0));
6973 HandleCustomVecRegLoc();
6974 HandleCustomVecRegLoc();
6975 }
6976
6977 continue;
6978 }
6979
6980 if (VA.isRegLoc()) {
6981 if (VA.getValVT().isScalarInteger())
6982 FuncInfo->appendParameterType(PPCFunctionInfo::FixedType);
6983 else if (VA.getValVT().isFloatingPoint() && !VA.getValVT().isVector()) {
6984 switch (VA.getValVT().SimpleTy) {
6985 default:
6986 report_fatal_error("Unhandled value type for argument.");
6987 case MVT::f32:
6988 FuncInfo->appendParameterType(PPCFunctionInfo::ShortFloatingPoint);
6989 break;
6990 case MVT::f64:
6991 FuncInfo->appendParameterType(PPCFunctionInfo::LongFloatingPoint);
6992 break;
6993 }
6994 } else if (VA.getValVT().isVector()) {
6995 switch (VA.getValVT().SimpleTy) {
6996 default:
6997 report_fatal_error("Unhandled value type for argument.");
6998 case MVT::v16i8:
6999 FuncInfo->appendParameterType(PPCFunctionInfo::VectorChar);
7000 break;
7001 case MVT::v8i16:
7002 FuncInfo->appendParameterType(PPCFunctionInfo::VectorShort);
7003 break;
7004 case MVT::v4i32:
7005 case MVT::v2i64:
7006 case MVT::v1i128:
7007 FuncInfo->appendParameterType(PPCFunctionInfo::VectorInt);
7008 break;
7009 case MVT::v4f32:
7010 case MVT::v2f64:
7011 FuncInfo->appendParameterType(PPCFunctionInfo::VectorFloat);
7012 break;
7013 }
7014 }
7015 }
7016
7017 if (Flags.isByVal() && VA.isMemLoc()) {
7018 const unsigned Size =
7019 alignTo(Flags.getByValSize() ? Flags.getByValSize() : PtrByteSize,
7020 PtrByteSize);
7021 const int FI = MF.getFrameInfo().CreateFixedObject(
7022 Size, VA.getLocMemOffset(), /* IsImmutable */ false,
7023 /* IsAliased */ true);
7024 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
7025 InVals.push_back(FIN);
7026
7027 continue;
7028 }
7029
7030 if (Flags.isByVal()) {
7031 assert(VA.isRegLoc() && "MemLocs should already be handled.")(static_cast<void> (0));
7032
7033 const MCPhysReg ArgReg = VA.getLocReg();
7034 const PPCFrameLowering *FL = Subtarget.getFrameLowering();
7035
7036 if (Flags.getNonZeroByValAlign() > PtrByteSize)
7037 report_fatal_error("Over aligned byvals not supported yet.");
7038
7039 const unsigned StackSize = alignTo(Flags.getByValSize(), PtrByteSize);
7040 const int FI = MF.getFrameInfo().CreateFixedObject(
7041 StackSize, mapArgRegToOffsetAIX(ArgReg, FL), /* IsImmutable */ false,
7042 /* IsAliased */ true);
7043 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
7044 InVals.push_back(FIN);
7045
7046 // Add live ins for all the RegLocs for the same ByVal.
7047 const TargetRegisterClass *RegClass =
7048 IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
7049
7050 auto HandleRegLoc = [&, RegClass, LocVT](const MCPhysReg PhysReg,
7051 unsigned Offset) {
7052 const unsigned VReg = MF.addLiveIn(PhysReg, RegClass);
7053         // Since the caller's side has left-justified the aggregate in the
7054         // register, we can simply store the entire register into the stack
7055         // slot.
7056 SDValue CopyFrom = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
7057         // The store to the fixed-stack object is needed because accessing a
7058         // field of the ByVal will use a GEP and load. Ideally we would optimize
7059         // to extract the value from the register directly, and elide the
7060         // stores when the argument's address is not taken, but that remains
7061         // future work.
7062 SDValue Store = DAG.getStore(
7063 CopyFrom.getValue(1), dl, CopyFrom,
7064 DAG.getObjectPtrOffset(dl, FIN, TypeSize::Fixed(Offset)),
7065 MachinePointerInfo::getFixedStack(MF, FI, Offset));
7066
7067 MemOps.push_back(Store);
7068 };
7069
7070 unsigned Offset = 0;
7071 HandleRegLoc(VA.getLocReg(), Offset);
7072 Offset += PtrByteSize;
7073 for (; Offset != StackSize && ArgLocs[I].isRegLoc();
7074 Offset += PtrByteSize) {
7075 assert(ArgLocs[I].getValNo() == VA.getValNo() &&(static_cast<void> (0))
7076 "RegLocs should be for ByVal argument.")(static_cast<void> (0));
7077
7078 const CCValAssign RL = ArgLocs[I++];
7079 HandleRegLoc(RL.getLocReg(), Offset);
7080 FuncInfo->appendParameterType(PPCFunctionInfo::FixedType);
7081 }
7082
7083 if (Offset != StackSize) {
7084 assert(ArgLocs[I].getValNo() == VA.getValNo() &&(static_cast<void> (0))
7085 "Expected MemLoc for remaining bytes.")(static_cast<void> (0));
7086 assert(ArgLocs[I].isMemLoc() && "Expected MemLoc for remaining bytes.")(static_cast<void> (0));
7087         // Consume the MemLoc. The InVal has already been emitted, so nothing
7088         // more needs to be done.
7089 ++I;
7090 }
7091
7092 continue;
7093 }
7094
7095 if (VA.isRegLoc() && !VA.needsCustom()) {
7096 MVT::SimpleValueType SVT = ValVT.SimpleTy;
7097 Register VReg =
7098 MF.addLiveIn(VA.getLocReg(),
7099 getRegClassForSVT(SVT, IsPPC64, Subtarget.hasP8Vector(),
7100 Subtarget.hasVSX()));
7101 SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
7102 if (ValVT.isScalarInteger() &&
7103 (ValVT.getFixedSizeInBits() < LocVT.getFixedSizeInBits())) {
7104 ArgValue =
7105 truncateScalarIntegerArg(Flags, ValVT, DAG, ArgValue, LocVT, dl);
7106 }
7107 InVals.push_back(ArgValue);
7108 continue;
7109 }
7110 if (VA.isMemLoc()) {
7111 HandleMemLoc();
7112 continue;
7113 }
7114 }
7115
7116 // On AIX a minimum of 8 words is saved to the parameter save area.
7117 const unsigned MinParameterSaveArea = 8 * PtrByteSize;
7118 // Area that is at least reserved in the caller of this function.
7119 unsigned CallerReservedArea =
7120 std::max(CCInfo.getNextStackOffset(), LinkageSize + MinParameterSaveArea);
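  // Illustrative (not in the original source): on PPC64 this reserves at
  // least 48 + 8 * 8 == 112 bytes, even for a function that takes no
  // stack-passed arguments.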
7121
7122 // Set the size that is at least reserved in caller of this function. Tail
7123 // call optimized function's reserved stack space needs to be aligned so
7124 // that taking the difference between two stack areas will result in an
7125 // aligned stack.
7126 CallerReservedArea =
7127 EnsureStackAlignment(Subtarget.getFrameLowering(), CallerReservedArea);
7128 FuncInfo->setMinReservedArea(CallerReservedArea);
7129
7130 if (isVarArg) {
7131 FuncInfo->setVarArgsFrameIndex(
7132 MFI.CreateFixedObject(PtrByteSize, CCInfo.getNextStackOffset(), true));
7133 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
7134
7135 static const MCPhysReg GPR_32[] = {PPC::R3, PPC::R4, PPC::R5, PPC::R6,
7136 PPC::R7, PPC::R8, PPC::R9, PPC::R10};
7137
7138 static const MCPhysReg GPR_64[] = {PPC::X3, PPC::X4, PPC::X5, PPC::X6,
7139 PPC::X7, PPC::X8, PPC::X9, PPC::X10};
7140 const unsigned NumGPArgRegs = array_lengthof(IsPPC64 ? GPR_64 : GPR_32);
7141
7142 // The fixed integer arguments of a variadic function are stored to the
7143 // VarArgsFrameIndex on the stack so that they may be loaded by
7144 // dereferencing the result of va_next.
7145 for (unsigned GPRIndex =
7146 (CCInfo.getNextStackOffset() - LinkageSize) / PtrByteSize;
7147 GPRIndex < NumGPArgRegs; ++GPRIndex) {
7148
7149 const unsigned VReg =
7150 IsPPC64 ? MF.addLiveIn(GPR_64[GPRIndex], &PPC::G8RCRegClass)
7151 : MF.addLiveIn(GPR_32[GPRIndex], &PPC::GPRCRegClass);
7152
7153 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
7154 SDValue Store =
7155 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
7156 MemOps.push_back(Store);
7157 // Increment the address for the next argument to store.
7158 SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
7159 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
7160 }
7161 }
7162
7163 if (!MemOps.empty())
7164 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
7165
7166 return Chain;
7167}
7168
7169SDValue PPCTargetLowering::LowerCall_AIX(
7170 SDValue Chain, SDValue Callee, CallFlags CFlags,
7171 const SmallVectorImpl<ISD::OutputArg> &Outs,
7172 const SmallVectorImpl<SDValue> &OutVals,
7173 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
7174 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
7175 const CallBase *CB) const {
7176 // See PPCTargetLowering::LowerFormalArguments_AIX() for a description of the
7177 // AIX ABI stack frame layout.
7178
7179 assert((CFlags.CallConv == CallingConv::C ||(static_cast<void> (0))
7180 CFlags.CallConv == CallingConv::Cold ||(static_cast<void> (0))
7181 CFlags.CallConv == CallingConv::Fast) &&(static_cast<void> (0))
7182 "Unexpected calling convention!")(static_cast<void> (0));
7183
7184 if (CFlags.IsPatchPoint)
7185 report_fatal_error("This call type is unimplemented on AIX.");
7186
7187 const PPCSubtarget& Subtarget =
7188 static_cast<const PPCSubtarget&>(DAG.getSubtarget());
7189
7190 MachineFunction &MF = DAG.getMachineFunction();
7191 SmallVector<CCValAssign, 16> ArgLocs;
7192 AIXCCState CCInfo(CFlags.CallConv, CFlags.IsVarArg, MF, ArgLocs,
7193 *DAG.getContext());
7194
7195 // Reserve space for the linkage save area (LSA) on the stack.
7196 // In both PPC32 and PPC64 there are 6 reserved slots in the LSA:
7197 // [SP][CR][LR][2 x reserved][TOC].
7198 // The LSA is 24 bytes (6x4) in PPC32 and 48 bytes (6x8) in PPC64.
7199 const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
7200 const bool IsPPC64 = Subtarget.isPPC64();
7201 const EVT PtrVT = getPointerTy(DAG.getDataLayout());
7202 const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
7203 CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize));
7204 CCInfo.AnalyzeCallOperands(Outs, CC_AIX);
7205
7206 // The prolog code of the callee may store up to 8 GPR argument registers to
7207 // the stack, allowing va_start to index over them in memory if the callee
7208 // is variadic.
7209 // Because we cannot tell if this is needed on the caller side, we have to
7210 // conservatively assume that it is needed. As such, make sure we have at
7211 // least enough stack space for the caller to store the 8 GPRs.
7212 const unsigned MinParameterSaveAreaSize = 8 * PtrByteSize;
7213 const unsigned NumBytes = std::max(LinkageSize + MinParameterSaveAreaSize,
7214 CCInfo.getNextStackOffset());
7215
7216 // Adjust the stack pointer for the new arguments...
7217 // These operations are automatically eliminated by the prolog/epilog pass.
7218 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
7219 SDValue CallSeqStart = Chain;
7220
7221 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
7222 SmallVector<SDValue, 8> MemOpChains;
7223
7224 // Set up a copy of the stack pointer for loading and storing any
7225 // arguments that may not fit in the registers available for argument
7226 // passing.
7227 const SDValue StackPtr = IsPPC64 ? DAG.getRegister(PPC::X1, MVT::i64)
7228 : DAG.getRegister(PPC::R1, MVT::i32);
7229
7230 for (unsigned I = 0, E = ArgLocs.size(); I != E;) {
7231 const unsigned ValNo = ArgLocs[I].getValNo();
7232 SDValue Arg = OutVals[ValNo];
7233 ISD::ArgFlagsTy Flags = Outs[ValNo].Flags;
7234
7235 if (Flags.isByVal()) {
7236 const unsigned ByValSize = Flags.getByValSize();
7237
7238 // Nothing to do for zero-sized ByVals on the caller side.
7239 if (!ByValSize) {
7240 ++I;
7241 continue;
7242 }
7243
7244 auto GetLoad = [&](EVT VT, unsigned LoadOffset) {
7245 return DAG.getExtLoad(
7246 ISD::ZEXTLOAD, dl, PtrVT, Chain,
7247 (LoadOffset != 0)
7248 ? DAG.getObjectPtrOffset(dl, Arg, TypeSize::Fixed(LoadOffset))
7249 : Arg,
7250 MachinePointerInfo(), VT);
7251 };
7252
7253 unsigned LoadOffset = 0;
7254
7255       // Initialize the registers that are fully occupied by the by-val argument.
7256 while (LoadOffset + PtrByteSize <= ByValSize && ArgLocs[I].isRegLoc()) {
7257 SDValue Load = GetLoad(PtrVT, LoadOffset);
7258 MemOpChains.push_back(Load.getValue(1));
7259 LoadOffset += PtrByteSize;
7260 const CCValAssign &ByValVA = ArgLocs[I++];
7261 assert(ByValVA.getValNo() == ValNo &&(static_cast<void> (0))
7262 "Unexpected location for pass-by-value argument.")(static_cast<void> (0));
7263 RegsToPass.push_back(std::make_pair(ByValVA.getLocReg(), Load));
7264 }
7265
7266 if (LoadOffset == ByValSize)
7267 continue;
7268
7269 // There must be one more loc to handle the remainder.
7270 assert(ArgLocs[I].getValNo() == ValNo &&(static_cast<void> (0))
7271 "Expected additional location for by-value argument.")(static_cast<void> (0));
7272
7273 if (ArgLocs[I].isMemLoc()) {
7274 assert(LoadOffset < ByValSize && "Unexpected memloc for by-val arg.")(static_cast<void> (0));
7275 const CCValAssign &ByValVA = ArgLocs[I++];
7276 ISD::ArgFlagsTy MemcpyFlags = Flags;
7277         // Only memcpy the bytes that don't pass in registers.
7278 MemcpyFlags.setByValSize(ByValSize - LoadOffset);
7279 Chain = CallSeqStart = createMemcpyOutsideCallSeq(
7280 (LoadOffset != 0)
7281 ? DAG.getObjectPtrOffset(dl, Arg, TypeSize::Fixed(LoadOffset))
7282 : Arg,
7283 DAG.getObjectPtrOffset(dl, StackPtr,
7284 TypeSize::Fixed(ByValVA.getLocMemOffset())),
7285 CallSeqStart, MemcpyFlags, DAG, dl);
7286 continue;
7287 }
7288
7289 // Initialize the final register residue.
7290 // Any residue that occupies the final by-val arg register must be
7291 // left-justified on AIX. Loads must be a power-of-2 size and cannot be
7292       // larger than the ByValSize. For example, a 7-byte by-val arg requires
7293       // 4-, 2- and 1-byte loads.
7294 const unsigned ResidueBytes = ByValSize % PtrByteSize;
7295 assert(ResidueBytes != 0 && LoadOffset + PtrByteSize > ByValSize &&(static_cast<void> (0))
7296 "Unexpected register residue for by-value argument.")(static_cast<void> (0));
7297 SDValue ResidueVal;
7298 for (unsigned Bytes = 0; Bytes != ResidueBytes;) {
7299 const unsigned N = PowerOf2Floor(ResidueBytes - Bytes);
7300 const MVT VT =
7301 N == 1 ? MVT::i8
7302 : ((N == 2) ? MVT::i16 : (N == 4 ? MVT::i32 : MVT::i64));
7303 SDValue Load = GetLoad(VT, LoadOffset);
7304 MemOpChains.push_back(Load.getValue(1));
7305 LoadOffset += N;
7306 Bytes += N;
7307
7308         // By-val arguments are passed left-justified in the register.
7309 // Every load here needs to be shifted, otherwise a full register load
7310 // should have been used.
7311 assert(PtrVT.getSimpleVT().getSizeInBits() > (Bytes * 8) &&(static_cast<void> (0))
7312 "Unexpected load emitted during handling of pass-by-value "(static_cast<void> (0))
7313 "argument.")(static_cast<void> (0));
7314 unsigned NumSHLBits = PtrVT.getSimpleVT().getSizeInBits() - (Bytes * 8);
7315 EVT ShiftAmountTy =
7316 getShiftAmountTy(Load->getValueType(0), DAG.getDataLayout());
7317 SDValue SHLAmt = DAG.getConstant(NumSHLBits, dl, ShiftAmountTy);
7318 SDValue ShiftedLoad =
7319 DAG.getNode(ISD::SHL, dl, Load.getValueType(), Load, SHLAmt);
7320 ResidueVal = ResidueVal ? DAG.getNode(ISD::OR, dl, PtrVT, ResidueVal,
7321 ShiftedLoad)
7322 : ShiftedLoad;
7323 }
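      // Illustrative (not in the original source): for a 7-byte by-val on
      // PPC64 the loop above emits i32, i16 and i8 loads shifted left by 32,
      // 16 and 8 bits respectively, OR'ed together so the residue ends up
      // left-justified in the register.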
7324
7325 const CCValAssign &ByValVA = ArgLocs[I++];
7326 RegsToPass.push_back(std::make_pair(ByValVA.getLocReg(), ResidueVal));
7327 continue;
7328 }
7329
7330 CCValAssign &VA = ArgLocs[I++];
7331 const MVT LocVT = VA.getLocVT();
7332 const MVT ValVT = VA.getValVT();
7333
7334 switch (VA.getLocInfo()) {
7335 default:
7336 report_fatal_error("Unexpected argument extension type.");
7337 case CCValAssign::Full:
7338 break;
7339 case CCValAssign::ZExt:
7340 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
7341 break;
7342 case CCValAssign::SExt:
7343 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
7344 break;
7345 }
7346
7347 if (VA.isRegLoc() && !VA.needsCustom()) {
7348 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
7349 continue;
7350 }
7351
7352 // Vector arguments passed to VarArg functions need custom handling when
7353 // they are passed (at least partially) in GPRs.
7354 if (VA.isMemLoc() && VA.needsCustom() && ValVT.isVector()) {
7355 assert(CFlags.IsVarArg && "Custom MemLocs only used for Vector args.")(static_cast<void> (0));
7356 // Store value to its stack slot.
7357 SDValue PtrOff =
7358 DAG.getConstant(VA.getLocMemOffset(), dl, StackPtr.getValueType());
7359 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
7360 SDValue Store =
7361 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
7362 MemOpChains.push_back(Store);
7363 const unsigned OriginalValNo = VA.getValNo();
7364 // Then load the GPRs from the stack
7365 unsigned LoadOffset = 0;
7366 auto HandleCustomVecRegLoc = [&]() {
7367 assert(I != E && "Unexpected end of CCvalAssigns.")(static_cast<void> (0));
7368 assert(ArgLocs[I].isRegLoc() && ArgLocs[I].needsCustom() &&(static_cast<void> (0))
7369 "Expected custom RegLoc.")(static_cast<void> (0));
7370 CCValAssign RegVA = ArgLocs[I++];
7371 assert(RegVA.getValNo() == OriginalValNo &&(static_cast<void> (0))
7372 "Custom MemLoc ValNo and custom RegLoc ValNo must match.")(static_cast<void> (0));
7373 SDValue Add = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
7374 DAG.getConstant(LoadOffset, dl, PtrVT));
7375 SDValue Load = DAG.getLoad(PtrVT, dl, Store, Add, MachinePointerInfo());
7376 MemOpChains.push_back(Load.getValue(1));
7377 RegsToPass.push_back(std::make_pair(RegVA.getLocReg(), Load));
7378 LoadOffset += PtrByteSize;
7379 };
7380
7381       // In 64-bit there will be exactly 2 custom RegLocs that follow, and in
7382       // 32-bit there will be 2 custom RegLocs if we are passing in R9 and
7383       // R10.
7384 HandleCustomVecRegLoc();
7385 HandleCustomVecRegLoc();
7386
7387 if (I != E && ArgLocs[I].isRegLoc() && ArgLocs[I].needsCustom() &&
7388 ArgLocs[I].getValNo() == OriginalValNo) {
7389 assert(!IsPPC64 &&(static_cast<void> (0))
7390 "Only 2 custom RegLocs expected for 64-bit codegen.")(static_cast<void> (0));
7391 HandleCustomVecRegLoc();
7392 HandleCustomVecRegLoc();
7393 }
7394
7395 continue;
7396 }
7397
7398 if (VA.isMemLoc()) {
7399 SDValue PtrOff =
7400 DAG.getConstant(VA.getLocMemOffset(), dl, StackPtr.getValueType());
7401 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
7402 MemOpChains.push_back(
7403 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
7404
7405 continue;
7406 }
7407
7408 if (!ValVT.isFloatingPoint())
7409 report_fatal_error(
7410 "Unexpected register handling for calling convention.");
7411
7412 // Custom handling is used for GPR initializations for vararg float
7413 // arguments.
7414 assert(VA.isRegLoc() && VA.needsCustom() && CFlags.IsVarArg &&(static_cast<void> (0))
7415 LocVT.isInteger() &&(static_cast<void> (0))
7416 "Custom register handling only expected for VarArg.")(static_cast<void> (0));
7417
7418 SDValue ArgAsInt =
7419 DAG.getBitcast(MVT::getIntegerVT(ValVT.getSizeInBits()), Arg);
7420
7421 if (Arg.getValueType().getStoreSize() == LocVT.getStoreSize())
7422 // f32 in 32-bit GPR
7423 // f64 in 64-bit GPR
7424 RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgAsInt));
7425 else if (Arg.getValueType().getFixedSizeInBits() <
7426 LocVT.getFixedSizeInBits())
7427 // f32 in 64-bit GPR.
7428 RegsToPass.push_back(std::make_pair(
7429 VA.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, LocVT)));
7430 else {
7431 // f64 in two 32-bit GPRs
7432 // The 2 GPRs are marked custom and expected to be adjacent in ArgLocs.
7433 assert(Arg.getValueType() == MVT::f64 && CFlags.IsVarArg && !IsPPC64 &&(static_cast<void> (0))
7434 "Unexpected custom register for argument!")(static_cast<void> (0));
7435 CCValAssign &GPR1 = VA;
7436 SDValue MSWAsI64 = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgAsInt,
7437 DAG.getConstant(32, dl, MVT::i8));
7438 RegsToPass.push_back(std::make_pair(
7439 GPR1.getLocReg(), DAG.getZExtOrTrunc(MSWAsI64, dl, MVT::i32)));
7440
7441 if (I != E) {
7442 // If only 1 GPR was available, there will only be one custom GPR and
7443 // the argument will also pass in memory.
7444 CCValAssign &PeekArg = ArgLocs[I];
7445         if (PeekArg.isRegLoc() && PeekArg.getValNo() == VA.getValNo()) {
7446 assert(PeekArg.needsCustom() && "A second custom GPR is expected.")(static_cast<void> (0));
7447 CCValAssign &GPR2 = ArgLocs[I++];
7448 RegsToPass.push_back(std::make_pair(
7449 GPR2.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, MVT::i32)));
7450 }
7451 }
7452 }
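    // Illustrative (not in the original source): a vararg double on PPC32
    // therefore places the high word (bits 63..32) in the first custom GPR
    // and the truncated low word in the second, matching the big-endian
    // memory image of the f64.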
7453 }
7454
7455 if (!MemOpChains.empty())
7456 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
7457
7458 // For indirect calls, we need to save the TOC base to the stack for
7459 // restoration after the call.
7460 if (CFlags.IsIndirect) {
7461 assert(!CFlags.IsTailCall && "Indirect tail-calls not supported.")(static_cast<void> (0));
7462 const MCRegister TOCBaseReg = Subtarget.getTOCPointerRegister();
7463 const MCRegister StackPtrReg = Subtarget.getStackPointerRegister();
7464 const MVT PtrVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
7465 const unsigned TOCSaveOffset =
7466 Subtarget.getFrameLowering()->getTOCSaveOffset();
7467
7468 setUsesTOCBasePtr(DAG);
7469 SDValue Val = DAG.getCopyFromReg(Chain, dl, TOCBaseReg, PtrVT);
7470 SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
7471 SDValue StackPtr = DAG.getRegister(StackPtrReg, PtrVT);
7472 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
7473 Chain = DAG.getStore(
7474 Val.getValue(1), dl, Val, AddPtr,
7475 MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset));
7476 }
7477
7478 // Build a sequence of copy-to-reg nodes chained together with token chain
7479 // and flag operands which copy the outgoing args into the appropriate regs.
7480 SDValue InFlag;
7481 for (auto Reg : RegsToPass) {
7482 Chain = DAG.getCopyToReg(Chain, dl, Reg.first, Reg.second, InFlag);
7483 InFlag = Chain.getValue(1);
7484 }
7485
7486 const int SPDiff = 0;
7487 return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
7488 Callee, SPDiff, NumBytes, Ins, InVals, CB);
7489}
7490
7491bool
7492PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
7493 MachineFunction &MF, bool isVarArg,
7494 const SmallVectorImpl<ISD::OutputArg> &Outs,
7495 LLVMContext &Context) const {
7496 SmallVector<CCValAssign, 16> RVLocs;
7497 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
7498 return CCInfo.CheckReturn(
7499 Outs, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
7500 ? RetCC_PPC_Cold
7501 : RetCC_PPC);
7502}
7503
7504SDValue
7505PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
7506 bool isVarArg,
7507 const SmallVectorImpl<ISD::OutputArg> &Outs,
7508 const SmallVectorImpl<SDValue> &OutVals,
7509 const SDLoc &dl, SelectionDAG &DAG) const {
7510 SmallVector<CCValAssign, 16> RVLocs;
7511 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
7512 *DAG.getContext());
7513 CCInfo.AnalyzeReturn(Outs,
7514 (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
7515 ? RetCC_PPC_Cold
7516 : RetCC_PPC);
7517
7518 SDValue Flag;
7519 SmallVector<SDValue, 4> RetOps(1, Chain);
7520
7521 // Copy the result values into the output registers.
7522 for (unsigned i = 0, RealResIdx = 0; i != RVLocs.size(); ++i, ++RealResIdx) {
7523 CCValAssign &VA = RVLocs[i];
7524 assert(VA.isRegLoc() && "Can only return in registers!")(static_cast<void> (0));
7525
7526 SDValue Arg = OutVals[RealResIdx];
7527
7528 switch (VA.getLocInfo()) {
7529 default: llvm_unreachable("Unknown loc info!")__builtin_unreachable();
7530 case CCValAssign::Full: break;
7531 case CCValAssign::AExt:
7532 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
7533 break;
7534 case CCValAssign::ZExt:
7535 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
7536 break;
7537 case CCValAssign::SExt:
7538 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
7539 break;
7540 }
7541 if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
7542 bool isLittleEndian = Subtarget.isLittleEndian();
7543 // Legalize ret f64 -> ret 2 x i32.
7544 SDValue SVal =
7545 DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
7546 DAG.getIntPtrConstant(isLittleEndian ? 0 : 1, dl));
7547 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
7548 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
7549 SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
7550 DAG.getIntPtrConstant(isLittleEndian ? 1 : 0, dl));
7551 Flag = Chain.getValue(1);
7552 VA = RVLocs[++i]; // skip ahead to next loc
7553 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
7554 } else
7555 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
7556 Flag = Chain.getValue(1);
7557 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
7558 }
7559
7560 RetOps[0] = Chain; // Update chain.
7561
7562 // Add the flag if we have it.
7563 if (Flag.getNode())
7564 RetOps.push_back(Flag);
7565
7566 return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps);
7567}
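// Illustrative note (not in the original source): with SPE an f64 return
// value is split by EXTRACT_SPE into two i32 halves that occupy two
// consecutive return registers, with the half order chosen by endianness.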
7568
7569SDValue
7570PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op,
7571 SelectionDAG &DAG) const {
7572 SDLoc dl(Op);
7573
7574 // Get the correct type for integers.
7575 EVT IntVT = Op.getValueType();
7576
7577 // Get the inputs.
7578 SDValue Chain = Op.getOperand(0);
7579 SDValue FPSIdx = getFramePointerFrameIndex(DAG);
7580 // Build a DYNAREAOFFSET node.
7581 SDValue Ops[2] = {Chain, FPSIdx};
7582 SDVTList VTs = DAG.getVTList(IntVT);
7583 return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops);
7584}
7585
7586SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op,
7587 SelectionDAG &DAG) const {
7588 // When we pop the dynamic allocation we need to restore the SP link.
7589 SDLoc dl(Op);
7590
7591 // Get the correct type for pointers.
7592 EVT PtrVT = getPointerTy(DAG.getDataLayout());
7593
7594 // Construct the stack pointer operand.
7595 bool isPPC64 = Subtarget.isPPC64();
7596 unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
7597 SDValue StackPtr = DAG.getRegister(SP, PtrVT);
7598
7599 // Get the operands for the STACKRESTORE.
7600 SDValue Chain = Op.getOperand(0);
7601 SDValue SaveSP = Op.getOperand(1);
7602
7603 // Load the old link SP.
7604 SDValue LoadLinkSP =
7605 DAG.getLoad(PtrVT, dl, Chain, StackPtr, MachinePointerInfo());
7606
7607 // Restore the stack pointer.
7608 Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
7609
7610 // Store the old link SP.
7611 return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo());
7612}
7613
7614SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const {
7615 MachineFunction &MF = DAG.getMachineFunction();
7616 bool isPPC64 = Subtarget.isPPC64();
7617 EVT PtrVT = getPointerTy(MF.getDataLayout());
7618
7619   // Get the current return address save index. This index is used
7620   // primarily when lowering accesses to the return address (RETURNADDR).
7621 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
7622 int RASI = FI->getReturnAddrSaveIndex();
7623
7624   // If the return address save index hasn't been defined yet.
7625 if (!RASI) {
7626     // Find out the fixed offset of the return address save area.
7627 int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
7628     // Allocate the frame index for the return address save area.
7629 RASI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, LROffset, false);
7630 // Save the result.
7631 FI->setReturnAddrSaveIndex(RASI);
7632 }
7633 return DAG.getFrameIndex(RASI, PtrVT);
7634}
7635
7636SDValue
7637PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
7638 MachineFunction &MF = DAG.getMachineFunction();
7639 bool isPPC64 = Subtarget.isPPC64();
7640 EVT PtrVT = getPointerTy(MF.getDataLayout());
7641
7642 // Get current frame pointer save index. The users of this index will be
7643 // primarily DYNALLOC instructions.
7644 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
7645 int FPSI = FI->getFramePointerSaveIndex();
7646
7647 // If the frame pointer save index hasn't been defined yet.
7648 if (!FPSI) {
7650     // Find out the fixed offset of the frame pointer save area.
7650 int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
7651 // Allocate the frame index for frame pointer save area.
7652 FPSI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
7653 // Save the result.
7654 FI->setFramePointerSaveIndex(FPSI);
7655 }
7656 return DAG.getFrameIndex(FPSI, PtrVT);
7657}
7658
7659SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
7660 SelectionDAG &DAG) const {
7661 MachineFunction &MF = DAG.getMachineFunction();
7662 // Get the inputs.
7663 SDValue Chain = Op.getOperand(0);
7664 SDValue Size = Op.getOperand(1);
7665 SDLoc dl(Op);
7666
7667 // Get the correct type for pointers.
7668 EVT PtrVT = getPointerTy(DAG.getDataLayout());
7669 // Negate the size.
7670 SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
7671 DAG.getConstant(0, dl, PtrVT), Size);
7672 // Construct a node for the frame pointer save index.
7673 SDValue FPSIdx = getFramePointerFrameIndex(DAG);
7674 SDValue Ops[3] = { Chain, NegSize, FPSIdx };
7675 SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
7676 if (hasInlineStackProbe(MF))
7677 return DAG.getNode(PPCISD::PROBED_ALLOCA, dl, VTs, Ops);
7678 return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops);
7679}
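// Note (illustrative, not in the original source): the size is negated above
// so the DYNALLOC/PROBED_ALLOCA expansion can grow the stack downward by
// adding the (negative) amount to the stack pointer.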
7680
7681SDValue PPCTargetLowering::LowerEH_DWARF_CFA(SDValue Op,
7682 SelectionDAG &DAG) const {
7683 MachineFunction &MF = DAG.getMachineFunction();
7684
7685 bool isPPC64 = Subtarget.isPPC64();
7686 EVT PtrVT = getPointerTy(DAG.getDataLayout());
7687
7688 int FI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, 0, false);
7689 return DAG.getFrameIndex(FI, PtrVT);
7690}
7691
7692SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
7693 SelectionDAG &DAG) const {
7694 SDLoc DL(Op);
7695 return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL,
7696 DAG.getVTList(MVT::i32, MVT::Other),
7697 Op.getOperand(0), Op.getOperand(1));
7698}
7699
7700SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
7701 SelectionDAG &DAG) const {
7702 SDLoc DL(Op);
7703 return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
7704 Op.getOperand(0), Op.getOperand(1));
7705}
7706
7707SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
7708 if (Op.getValueType().isVector())
7709 return LowerVectorLoad(Op, DAG);
7710
7711 assert(Op.getValueType() == MVT::i1 &&(static_cast<void> (0))
7712 "Custom lowering only for i1 loads")(static_cast<void> (0));
7713
7714   // First, load 8 bits into a pointer-sized integer, then truncate to 1 bit.
7715
7716 SDLoc dl(Op);
7717 LoadSDNode *LD = cast<LoadSDNode>(Op);
7718
7719 SDValue Chain = LD->getChain();
7720 SDValue BasePtr = LD->getBasePtr();
7721 MachineMemOperand *MMO = LD->getMemOperand();
7722
7723 SDValue NewLD =
7724 DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain,
7725 BasePtr, MVT::i8, MMO);
7726 SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD);
7727
7728 SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) };
7729 return DAG.getMergeValues(Ops, dl);
7730}
7731
7732SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
7733 if (Op.getOperand(1).getValueType().isVector())
7734 return LowerVectorStore(Op, DAG);
7735
7736 assert(Op.getOperand(1).getValueType() == MVT::i1 &&(static_cast<void> (0))
7737 "Custom lowering only for i1 stores")(static_cast<void> (0));
7738
7739   // First, zero-extend to a pointer-sized integer, then use a truncating store to 8 bits.
7740
7741 SDLoc dl(Op);
7742 StoreSDNode *ST = cast<StoreSDNode>(Op);
7743
7744 SDValue Chain = ST->getChain();
7745 SDValue BasePtr = ST->getBasePtr();
7746 SDValue Value = ST->getValue();
7747 MachineMemOperand *MMO = ST->getMemOperand();
7748
7749 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()),
7750 Value);
7751 return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO);
7752}
7753
7754// FIXME: Remove this once the ANDI glue bug is fixed:
7755SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
7756 assert(Op.getValueType() == MVT::i1 &&(static_cast<void> (0))
7757 "Custom lowering only for i1 results")(static_cast<void> (0));
7758
7759 SDLoc DL(Op);
7760 return DAG.getNode(PPCISD::ANDI_rec_1_GT_BIT, DL, MVT::i1, Op.getOperand(0));
7761}
7762
7763SDValue PPCTargetLowering::LowerTRUNCATEVector(SDValue Op,
7764 SelectionDAG &DAG) const {
7765
7766 // Implements a vector truncate that fits in a vector register as a shuffle.
7767 // We want to legalize vector truncates down to where the source fits in
7768 // a vector register (and target is therefore smaller than vector register
7769 // size). At that point legalization will try to custom lower the sub-legal
7770 // result and get here - where we can contain the truncate as a single target
7771 // operation.
7772
7773 // For example a trunc <2 x i16> to <2 x i8> could be visualized as follows:
7774 // <MSB1|LSB1, MSB2|LSB2> to <LSB1, LSB2>
7775 //
7776 // We will implement it for big-endian ordering as this (where x denotes
7777 // undefined):
7778 // < MSB1|LSB1, MSB2|LSB2, uu, uu, uu, uu, uu, uu> to
7779 // < LSB1, LSB2, u, u, u, u, u, u, u, u, u, u, u, u, u, u>
7780 //
7781 // The same operation in little-endian ordering will be:
7782 // <uu, uu, uu, uu, uu, uu, LSB2|MSB2, LSB1|MSB1> to
7783 // <u, u, u, u, u, u, u, u, u, u, u, u, u, u, LSB2, LSB1>
7784
7785 EVT TrgVT = Op.getValueType();
7786 assert(TrgVT.isVector() && "Vector type expected.")(static_cast<void> (0));
7787 unsigned TrgNumElts = TrgVT.getVectorNumElements();
7788 EVT EltVT = TrgVT.getVectorElementType();
7789 if (!isOperationCustom(Op.getOpcode(), TrgVT) ||
7790 TrgVT.getSizeInBits() > 128 || !isPowerOf2_32(TrgNumElts) ||
7791 !isPowerOf2_32(EltVT.getSizeInBits()))
7792 return SDValue();
7793
7794 SDValue N1 = Op.getOperand(0);
7795 EVT SrcVT = N1.getValueType();
7796 unsigned SrcSize = SrcVT.getSizeInBits();
7797 if (SrcSize > 256 ||
7798 !isPowerOf2_32(SrcVT.getVectorNumElements()) ||
7799 !isPowerOf2_32(SrcVT.getVectorElementType().getSizeInBits()))
7800 return SDValue();
7801 if (SrcSize == 256 && SrcVT.getVectorNumElements() < 2)
7802 return SDValue();
7803
7804 unsigned WideNumElts = 128 / EltVT.getSizeInBits();
7805 EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);
7806
7807 SDLoc DL(Op);
7808 SDValue Op1, Op2;
7809 if (SrcSize == 256) {
7810 EVT VecIdxTy = getVectorIdxTy(DAG.getDataLayout());
7811 EVT SplitVT =
7812 N1.getValueType().getHalfNumVectorElementsVT(*DAG.getContext());
7813 unsigned SplitNumElts = SplitVT.getVectorNumElements();
7814 Op1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, N1,
7815 DAG.getConstant(0, DL, VecIdxTy));
7816 Op2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, N1,
7817 DAG.getConstant(SplitNumElts, DL, VecIdxTy));
7818 }
7819 else {
7820 Op1 = SrcSize == 128 ? N1 : widenVec(DAG, N1, DL);
7821 Op2 = DAG.getUNDEF(WideVT);
7822 }
7823
7824 // First list the elements we want to keep.
7825 unsigned SizeMult = SrcSize / TrgVT.getSizeInBits();
7826 SmallVector<int, 16> ShuffV;
7827 if (Subtarget.isLittleEndian())
7828 for (unsigned i = 0; i < TrgNumElts; ++i)
7829 ShuffV.push_back(i * SizeMult);
7830 else
7831 for (unsigned i = 1; i <= TrgNumElts; ++i)
7832 ShuffV.push_back(i * SizeMult - 1);
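  // Illustrative (not in the original source): truncating v2i16 -> v2i8 gives
  // SizeMult == 2, so on little-endian the mask keeps byte elements {0, 2} of
  // the bitcast source, while big-endian keeps {1, 3}, the low-order byte of
  // each halfword.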
7833
7834 // Populate the remaining elements with undefs.
7835 for (unsigned i = TrgNumElts; i < WideNumElts; ++i)
7836 // ShuffV.push_back(i + WideNumElts);
7837 ShuffV.push_back(WideNumElts + 1);
7838
7839 Op1 = DAG.getNode(ISD::BITCAST, DL, WideVT, Op1);
7840 Op2 = DAG.getNode(ISD::BITCAST, DL, WideVT, Op2);
7841 return DAG.getVectorShuffle(WideVT, DL, Op1, Op2, ShuffV);
7842}
7843
7844 /// LowerSELECT_CC - Lower floating-point select_cc's into the fsel
7845 /// instruction when possible.
7846SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
7847 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
7848 EVT ResVT = Op.getValueType();
7849 EVT CmpVT = Op.getOperand(0).getValueType();
7850 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
7851 SDValue TV = Op.getOperand(2), FV = Op.getOperand(3);
7852 SDLoc dl(Op);
7853
7854   // Without power9-vector, we don't have a native instruction for f128
7855   // comparison; the setcc is turned into a libcall via this transformation:
7856   // select_cc lhs, rhs, tv, fv, cc -> select_cc (setcc cc, x, y), 0, tv, fv, NE
7857 if (!Subtarget.hasP9Vector() && CmpVT == MVT::f128) {
7858 SDValue Z = DAG.getSetCC(
7859 dl, getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), CmpVT),
7860 LHS, RHS, CC);
7861 SDValue Zero = DAG.getConstant(0, dl, Z.getValueType());
7862 return DAG.getSelectCC(dl, Z, Zero, TV, FV, ISD::SETNE);
7863 }
7864
7865   // Not FP, or using SPE? Not an fsel.
7866 if (!CmpVT.isFloatingPoint() || !TV.getValueType().isFloatingPoint() ||
7867 Subtarget.hasSPE())
7868 return Op;
7869
7870 SDNodeFlags Flags = Op.getNode()->getFlags();
7871
7872 // We have xsmaxcdp/xsmincdp which are OK to emit even in the
7873 // presence of infinities.
7874 if (Subtarget.hasP9Vector() && LHS == TV && RHS == FV) {
7875 switch (CC) {
7876 default:
7877 break;
7878 case ISD::SETOGT:
7879 case ISD::SETGT:
7880 return DAG.getNode(PPCISD::XSMAXCDP, dl, Op.getValueType(), LHS, RHS);
7881 case ISD::SETOLT:
7882 case ISD::SETLT:
7883 return DAG.getNode(PPCISD::XSMINCDP, dl, Op.getValueType(), LHS, RHS);
7884 }
7885 }
7886
7887   // We might be able to do better than this under some circumstances, but in
7888   // general, fsel-based lowering of select is a finite-math-only optimization.
7889   // For more information, see section F.3 of the 2.06 ISA specification.
7890   // With ISA 3.0, the xsmaxcdp/xsmincdp cases above avoid this restriction.
7891 if ((!DAG.getTarget().Options.NoInfsFPMath && !Flags.hasNoInfs()) ||
7892 (!DAG.getTarget().Options.NoNaNsFPMath && !Flags.hasNoNaNs()))
7893 return Op;
7894
7895 // If the RHS of the comparison is a 0.0, we don't need to do the
7896 // subtraction at all.
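  // Reminder (illustrative, not in the original source): fsel r, a, x, y
  // selects x when a >= 0.0 and y otherwise, so every case below is phrased
  // as a single >= 0.0 test, swapping TV/FV or negating the comparison value
  // as needed.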
7897 SDValue Sel1;
7898 if (isFloatingPointZero(RHS))
7899 switch (CC) {
7900 default: break; // SETUO etc aren't handled by fsel.
7901 case ISD::SETNE:
7902 std::swap(TV, FV);
7903 LLVM_FALLTHROUGH[[gnu::fallthrough]];
7904 case ISD::SETEQ:
7905 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
7906 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
7907 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
7908 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits
7909 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
7910 return DAG.getNode(PPCISD::FSEL, dl, ResVT,
7911 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV);
7912 case ISD::SETULT:
7913 case ISD::SETLT:
7914 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt
7915 LLVM_FALLTHROUGH[[gnu::fallthrough]];
7916 case ISD::SETOGE:
7917 case ISD::SETGE:
7918 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
7919 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
7920 return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
7921 case ISD::SETUGT:
7922 case ISD::SETGT:
7923 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt
7924 LLVM_FALLTHROUGH[[gnu::fallthrough]];
7925 case ISD::SETOLE:
7926 case ISD::SETLE:
7927 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
7928 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
7929 return DAG.getNode(PPCISD::FSEL, dl, ResVT,
7930 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
7931 }
7932
7933 SDValue Cmp;
7934 switch (CC) {
7935 default: break; // SETUO etc aren't handled by fsel.
7936 case ISD::SETNE:
7937 std::swap(TV, FV);
7938 LLVM_FALLTHROUGH[[gnu::fallthrough]];
7939 case ISD::SETEQ:
7940 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
7941 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
7942 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7943 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
7944 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits
7945 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
7946 return DAG.getNode(PPCISD::FSEL, dl, ResVT,
7947 DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV);
7948 case ISD::SETULT:
7949 case ISD::SETLT:
7950 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
7951 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
7952 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7953 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
7954 case ISD::SETOGE:
7955 case ISD::SETGE:
7956 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
7957 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
7958 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7959 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
7960 case ISD::SETUGT:
7961 case ISD::SETGT:
7962 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
7963 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
7964 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7965 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
7966 case ISD::SETOLE:
7967 case ISD::SETLE:
7968 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
7969 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
7970 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7971 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
7972 }
7973 return Op;
7974}
7975
7976static unsigned getPPCStrictOpcode(unsigned Opc) {
7977 switch (Opc) {
7978 default:
7979 llvm_unreachable("No strict version of this opcode!")__builtin_unreachable();
7980 case PPCISD::FCTIDZ:
7981 return PPCISD::STRICT_FCTIDZ;
7982 case PPCISD::FCTIWZ:
7983 return PPCISD::STRICT_FCTIWZ;
7984 case PPCISD::FCTIDUZ:
7985 return PPCISD::STRICT_FCTIDUZ;
7986 case PPCISD::FCTIWUZ:
7987 return PPCISD::STRICT_FCTIWUZ;
7988 case PPCISD::FCFID:
7989 return PPCISD::STRICT_FCFID;
7990 case PPCISD::FCFIDU:
7991 return PPCISD::STRICT_FCFIDU;
7992 case PPCISD::FCFIDS:
7993 return PPCISD::STRICT_FCFIDS;
7994 case PPCISD::FCFIDUS:
7995 return PPCISD::STRICT_FCFIDUS;
7996 }
7997}
7998
7999static SDValue convertFPToInt(SDValue Op, SelectionDAG &DAG,
8000 const PPCSubtarget &Subtarget) {
8001 SDLoc dl(Op);
8002 bool IsStrict = Op->isStrictFPOpcode();
8003 bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
8004 Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
8005
8006 // TODO: Any other flags to propagate?
8007 SDNodeFlags Flags;
8008 Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
8009
8010 // For strict nodes, source is the second operand.
8011 SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
8012 SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
8013 assert(Src.getValueType().isFloatingPoint())(static_cast<void> (0));
8014 if (Src.getValueType() == MVT::f32) {
8015 if (IsStrict) {
8016 Src =
8017 DAG.getNode(ISD::STRICT_FP_EXTEND, dl,
8018 DAG.getVTList(MVT::f64, MVT::Other), {Chain, Src}, Flags);
8019 Chain = Src.getValue(1);
8020 } else
8021 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
8022 }
8023 SDValue Conv;
8024 unsigned Opc = ISD::DELETED_NODE;
8025 switch (Op.getSimpleValueType().SimpleTy) {
8026 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!")__builtin_unreachable();
8027 case MVT::i32:
8028 Opc = IsSigned ? PPCISD::FCTIWZ
8029 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ);
8030 break;
8031 case MVT::i64:
8032 assert((IsSigned || Subtarget.hasFPCVT()) &&(static_cast<void> (0))
8033 "i64 FP_TO_UINT is supported only with FPCVT")(static_cast<void> (0));
8034 Opc = IsSigned ? PPCISD::FCTIDZ : PPCISD::FCTIDUZ;
8035 }
8036 if (IsStrict) {
8037 Opc = getPPCStrictOpcode(Opc);
8038 Conv = DAG.getNode(Opc, dl, DAG.getVTList(MVT::f64, MVT::Other),
8039 {Chain, Src}, Flags);
8040 } else {
8041 Conv = DAG.getNode(Opc, dl, MVT::f64, Src);
8042 }
8043 return Conv;
8044}
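// Example (illustrative, not in the original source): a non-strict fptosi
// f32 -> i32 first extends the source to f64 and then emits FCTIWZ; the
// result is an f64-typed node that callers either store to a stack slot or
// move out with a direct move.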
8045
8046void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
8047 SelectionDAG &DAG,
8048 const SDLoc &dl) const {
8049 SDValue Tmp = convertFPToInt(Op, DAG, Subtarget);
8050 bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
8051 Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
8052 bool IsStrict = Op->isStrictFPOpcode();
8053
8054 // Convert the FP value to an int value through memory.
8055 bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() &&
8056 (IsSigned || Subtarget.hasFPCVT());
8057 SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64);
8058 int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex();
8059 MachinePointerInfo MPI =
8060 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI);
8061
8062 // Emit a store to the stack slot.
8063 SDValue Chain = IsStrict ? Tmp.getValue(1) : DAG.getEntryNode();
8064 Align Alignment(DAG.getEVTAlign(Tmp.getValueType()));
8065 if (i32Stack) {
8066 MachineFunction &MF = DAG.getMachineFunction();
8067 Alignment = Align(4);
8068 MachineMemOperand *MMO =
8069 MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Alignment);
8070 SDValue Ops[] = { Chain, Tmp, FIPtr };
8071 Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl,
8072 DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO);
8073 } else
8074 Chain = DAG.getStore(Chain, dl, Tmp, FIPtr, MPI, Alignment);
8075
8076   // Result is a load from the stack slot. If loading 4 bytes, make sure to
8077   // add in a 4-byte bias on big-endian targets.
8078 if (Op.getValueType() == MVT::i32 && !i32Stack) {
8079 FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr,
8080 DAG.getConstant(4, dl, FIPtr.getValueType()));
8081 MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4);
8082 }
8083
8084 RLI.Chain = Chain;
8085 RLI.Ptr = FIPtr;
8086 RLI.MPI = MPI;
8087 RLI.Alignment = Alignment;
8088}
8089
8090/// Custom lowers floating point to integer conversions to use
8091/// the direct move instructions available in ISA 2.07 to avoid the
8092/// need for load/store combinations.
8093SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op,
8094 SelectionDAG &DAG,
8095 const SDLoc &dl) const {
8096 SDValue Conv = convertFPToInt(Op, DAG, Subtarget);
8097 SDValue Mov = DAG.getNode(PPCISD::MFVSR, dl, Op.getValueType(), Conv);
8098 if (Op->isStrictFPOpcode())
8099 return DAG.getMergeValues({Mov, Conv.getValue(1)}, dl);
8100 else
8101 return Mov;
8102}
8103
8104SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
8105 const SDLoc &dl) const {
8106 bool IsStrict = Op->isStrictFPOpcode();
8107 bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
8108 Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
8109 SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
8110 EVT SrcVT = Src.getValueType();
8111 EVT DstVT = Op.getValueType();
8112
8113 // FP to INT conversions are legal for f128.
8114 if (SrcVT == MVT::f128)
8115 return Subtarget.hasP9Vector() ? Op : SDValue();
8116
8117 // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
8118 // PPC (the libcall is not available).
8119 if (SrcVT == MVT::ppcf128) {
8120 if (DstVT == MVT::i32) {
8121 // TODO: Conservatively pass only nofpexcept flag here. Need to check and
8122 // set other fast-math flags to FP operations in both strict and
8123 // non-strict cases. (FP_TO_SINT, FSUB)
8124 SDNodeFlags Flags;
8125 Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
8126
8127 if (IsSigned) {
8128 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::f64, Src,
8129 DAG.getIntPtrConstant(0, dl));
8130 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::f64, Src,
8131 DAG.getIntPtrConstant(1, dl));
8132
8133 // Add the two halves of the long double in round-to-zero mode, and use
8134 // a smaller FP_TO_SINT.
8135 if (IsStrict) {
8136 SDValue Res = DAG.getNode(PPCISD::STRICT_FADDRTZ, dl,
8137 DAG.getVTList(MVT::f64, MVT::Other),
8138 {Op.getOperand(0), Lo, Hi}, Flags);
8139 return DAG.getNode(ISD::STRICT_FP_TO_SINT, dl,
8140 DAG.getVTList(MVT::i32, MVT::Other),
8141 {Res.getValue(1), Res}, Flags);
8142 } else {
8143 SDValue Res = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi);
8144 return DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Res);
8145 }
8146 } else {
8147 const uint64_t TwoE31[] = {0x41e0000000000000LL, 0};
8148 APFloat APF = APFloat(APFloat::PPCDoubleDouble(), APInt(128, TwoE31));
8149 SDValue Cst = DAG.getConstantFP(APF, dl, SrcVT);
8150 SDValue SignMask = DAG.getConstant(0x80000000, dl, DstVT);
8151 if (IsStrict) {
8152 // Sel = Src < 0x80000000
8153 // FltOfs = select Sel, 0.0, 0x80000000
8154 // IntOfs = select Sel, 0, 0x80000000
8155 // Result = fp_to_sint(Src - FltOfs) ^ IntOfs
8156 SDValue Chain = Op.getOperand(0);
8157 EVT SetCCVT =
8158 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), SrcVT);
8159 EVT DstSetCCVT =
8160 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), DstVT);
8161 SDValue Sel = DAG.getSetCC(dl, SetCCVT, Src, Cst, ISD::SETLT,
8162 Chain, true);
8163 Chain = Sel.getValue(1);
8164
8165 SDValue FltOfs = DAG.getSelect(
8166 dl, SrcVT, Sel, DAG.getConstantFP(0.0, dl, SrcVT), Cst);
8167 Sel = DAG.getBoolExtOrTrunc(Sel, dl, DstSetCCVT, DstVT);
8168
8169 SDValue Val = DAG.getNode(ISD::STRICT_FSUB, dl,
8170 DAG.getVTList(SrcVT, MVT::Other),
8171 {Chain, Src, FltOfs}, Flags);
8172 Chain = Val.getValue(1);
8173 SDValue SInt = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl,
8174 DAG.getVTList(DstVT, MVT::Other),
8175 {Chain, Val}, Flags);
8176 Chain = SInt.getValue(1);
8177 SDValue IntOfs = DAG.getSelect(
8178 dl, DstVT, Sel, DAG.getConstant(0, dl, DstVT), SignMask);
8179 SDValue Result = DAG.getNode(ISD::XOR, dl, DstVT, SInt, IntOfs);
8180 return DAG.getMergeValues({Result, Chain}, dl);
8181 } else {
8182 // X>=2^31 ? (int)(X-2^31)+0x80000000 : (int)X
8183 // FIXME: generated code sucks.
8184 SDValue True = DAG.getNode(ISD::FSUB, dl, MVT::ppcf128, Src, Cst);
8185 True = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, True);
8186 True = DAG.getNode(ISD::ADD, dl, MVT::i32, True, SignMask);
8187 SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Src);
8188 return DAG.getSelectCC(dl, Src, Cst, True, False, ISD::SETGE);
8189 }
8190 }
8191 }
8192
8193 return SDValue();
8194 }
8195
8196 if (Subtarget.hasDirectMove() && Subtarget.isPPC64())
8197 return LowerFP_TO_INTDirectMove(Op, DAG, dl);
8198
8199 ReuseLoadInfo RLI;
8200 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
8201
8202 return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI,
8203 RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
8204}
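// A scalar sketch of the unsigned ppcf128->i32 expansion above (illustration
// only; fpToUint32ViaSint is a hypothetical name, and double stands in for
// ppcf128). Values below 2^31 convert directly; larger values have 2^31
// subtracted before the signed conversion and the sign bit XORed back in.
#include <cstdint>
static uint32_t fpToUint32ViaSint(double Src) {
  const double TwoE31 = 2147483648.0;   // Cst: 0x41e0000000000000
  bool Sel = Src < TwoE31;              // Sel = Src < 0x80000000
  double FltOfs = Sel ? 0.0 : TwoE31;   // FltOfs = select Sel, 0.0, 2^31
  uint32_t IntOfs = Sel ? 0 : 0x80000000u;
  return (uint32_t)(int32_t)(Src - FltOfs) ^ IntOfs;
}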
8205
8206// We're trying to insert a regular store, S, and then a load, L. If the
8207// incoming value, O, is a load, we might just be able to have our load use the
8208// address used by O. However, we don't know if anything else will store to
8209// that address before we can load from it. To prevent this situation, we need
8210// to insert our load, L, into the chain as a peer of O. To do this, we give L
8211// the same chain operand as O, we create a token factor from the chain results
8212// of O and L, and we replace all uses of O's chain result with that token
8213// factor (see spliceIntoChain below for this last part).
8214bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT,
8215 ReuseLoadInfo &RLI,
8216 SelectionDAG &DAG,
8217 ISD::LoadExtType ET) const {
8218 // Conservatively skip reusing for constrained FP nodes.
8219 if (Op->isStrictFPOpcode())
8220 return false;
8221
8222 SDLoc dl(Op);
8223 bool ValidFPToUint = Op.getOpcode() == ISD::FP_TO_UINT &&
8224 (Subtarget.hasFPCVT() || Op.getValueType() == MVT::i32);
8225 if (ET == ISD::NON_EXTLOAD &&
8226 (ValidFPToUint || Op.getOpcode() == ISD::FP_TO_SINT) &&
8227 isOperationLegalOrCustom(Op.getOpcode(),
8228 Op.getOperand(0).getValueType())) {
8229
8230 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
8231 return true;
8232 }
8233
8234 LoadSDNode *LD = dyn_cast<LoadSDNode>(Op);
8235 if (!LD || LD->getExtensionType() != ET || LD->isVolatile() ||
8236 LD->isNonTemporal())
8237 return false;
8238 if (LD->getMemoryVT() != MemVT)
8239 return false;
8240
8241 // If the result of the load is an illegal type, then we can't build a
8242 // valid chain for reuse since the legalised loads and token factor node that
8243 // ties the legalised loads together uses a different output chain than the
8244 // illegal load.
8245 if (!isTypeLegal(LD->getValueType(0)))
8246 return false;
8247
8248 RLI.Ptr = LD->getBasePtr();
8249 if (LD->isIndexed() && !LD->getOffset().isUndef()) {
8250 assert(LD->getAddressingMode() == ISD::PRE_INC &&
8251 "Non-pre-inc AM on PPC?");
8252 RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr,
8253 LD->getOffset());
8254 }
8255
8256 RLI.Chain = LD->getChain();
8257 RLI.MPI = LD->getPointerInfo();
8258 RLI.IsDereferenceable = LD->isDereferenceable();
8259 RLI.IsInvariant = LD->isInvariant();
8260 RLI.Alignment = LD->getAlign();
8261 RLI.AAInfo = LD->getAAInfo();
8262 RLI.Ranges = LD->getRanges();
8263
8264 RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1);
8265 return true;
8266}
8267
8268// Given the head of the old chain, ResChain, insert a token factor containing
8269// it and NewResChain, and make users of ResChain now be users of that token
8270// factor.
8271// TODO: Remove and use DAG::makeEquivalentMemoryOrdering() instead.
8272void PPCTargetLowering::spliceIntoChain(SDValue ResChain,
8273 SDValue NewResChain,
8274 SelectionDAG &DAG) const {
8275 if (!ResChain)
8276 return;
8277
8278 SDLoc dl(NewResChain);
8279
8280 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
8281 NewResChain, DAG.getUNDEF(MVT::Other));
8282 assert(TF.getNode() != NewResChain.getNode() &&
8283 "A new TF really is required here");
8284
8285 DAG.ReplaceAllUsesOfValueWith(ResChain, TF);
8286 DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain);
8287}
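// Illustration of the splice (not part of the file): before, every user U of
// O's chain result is ordered only after O; afterwards those users hang off
// TF = TokenFactor(O.chain, L.chain), so the new load L is ordered as a peer
// of O and no intervening store can slip in between O and L.
//
//   before:   O --chain--> U1, U2, ...            L (unordered w.r.t. stores)
//   after:    O --chain--> TF <--chain-- L,   TF --chain--> U1, U2, ...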
8288
8289 /// Analyze the profitability of a direct move:
8290 /// prefer a float load over an int load plus a direct move
8291 /// when the int load has no integer uses.
8292bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const {
8293 SDNode *Origin = Op.getOperand(0).getNode();
8294 if (Origin->getOpcode() != ISD::LOAD)
8295 return true;
8296
8297 // If there is no LXSIBZX/LXSIHZX, like Power8,
8298 // prefer direct move if the memory size is 1 or 2 bytes.
8299 MachineMemOperand *MMO = cast<LoadSDNode>(Origin)->getMemOperand();
8300 if (!Subtarget.hasP9Vector() && MMO->getSize() <= 2)
8301 return true;
8302
8303 for (SDNode::use_iterator UI = Origin->use_begin(),
8304 UE = Origin->use_end();
8305 UI != UE; ++UI) {
8306
8307 // Only look at the users of the loaded value.
8308 if (UI.getUse().get().getResNo() != 0)
8309 continue;
8310
8311 if (UI->getOpcode() != ISD::SINT_TO_FP &&
8312 UI->getOpcode() != ISD::UINT_TO_FP &&
8313 UI->getOpcode() != ISD::STRICT_SINT_TO_FP &&
8314 UI->getOpcode() != ISD::STRICT_UINT_TO_FP)
8315 return true;
8316 }
8317
8318 return false;
8319}
8320
8321static SDValue convertIntToFP(SDValue Op, SDValue Src, SelectionDAG &DAG,
8322 const PPCSubtarget &Subtarget,
8323 SDValue Chain = SDValue()) {
8324 bool IsSigned = Op.getOpcode() == ISD::SINT_TO_FP ||
8325 Op.getOpcode() == ISD::STRICT_SINT_TO_FP;
8326 SDLoc dl(Op);
8327
8328 // TODO: Any other flags to propagate?
8329 SDNodeFlags Flags;
8330 Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
8331
8332 // If we have FCFIDS, then use it when converting to single-precision.
8333 // Otherwise, convert to double-precision and then round.
8334 bool IsSingle = Op.getValueType() == MVT::f32 && Subtarget.hasFPCVT();
8335 unsigned ConvOpc = IsSingle ? (IsSigned ? PPCISD::FCFIDS : PPCISD::FCFIDUS)
8336 : (IsSigned ? PPCISD::FCFID : PPCISD::FCFIDU);
8337 EVT ConvTy = IsSingle ? MVT::f32 : MVT::f64;
8338 if (Op->isStrictFPOpcode()) {
8339 if (!Chain)
8340 Chain = Op.getOperand(0);
8341 return DAG.getNode(getPPCStrictOpcode(ConvOpc), dl,
8342 DAG.getVTList(ConvTy, MVT::Other), {Chain, Src}, Flags);
8343 } else
8344 return DAG.getNode(ConvOpc, dl, ConvTy, Src);
8345}
8346
8347/// Custom lowers integer to floating point conversions to use
8348/// the direct move instructions available in ISA 2.07 to avoid the
8349/// need for load/store combinations.
8350SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op,
8351 SelectionDAG &DAG,
8352 const SDLoc &dl) const {
8353 assert((Op.getValueType() == MVT::f32 ||
8354 Op.getValueType() == MVT::f64) &&
8355 "Invalid floating point type as target of conversion");
8356 assert(Subtarget.hasFPCVT() &&
8357 "Int to FP conversions with direct moves require FPCVT");
8358 SDValue Src = Op.getOperand(Op->isStrictFPOpcode() ? 1 : 0);
8359 bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32;
8360 bool Signed = Op.getOpcode() == ISD::SINT_TO_FP ||
8361 Op.getOpcode() == ISD::STRICT_SINT_TO_FP;
8362 unsigned MovOpc = (WordInt && !Signed) ? PPCISD::MTVSRZ : PPCISD::MTVSRA;
8363 SDValue Mov = DAG.getNode(MovOpc, dl, MVT::f64, Src);
8364 return convertIntToFP(Op, Mov, DAG, Subtarget);
8365}
8366
8367static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl) {
8368
8369 EVT VecVT = Vec.getValueType();
8370 assert(VecVT.isVector() && "Expected a vector type.");
8371 assert(VecVT.getSizeInBits() < 128 && "Vector is already full width.");
8372
8373 EVT EltVT = VecVT.getVectorElementType();
8374 unsigned WideNumElts = 128 / EltVT.getSizeInBits();
8375 EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);
8376
8377 unsigned NumConcat = WideNumElts / VecVT.getVectorNumElements();
8378 SmallVector<SDValue, 16> Ops(NumConcat);
8379 Ops[0] = Vec;
8380 SDValue UndefVec = DAG.getUNDEF(VecVT);
8381 for (unsigned i = 1; i < NumConcat; ++i)
8382 Ops[i] = UndefVec;
8383
8384 return DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, Ops);
8385}
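// Usage sketch (illustration): widening a v2i32 value <a,b> yields
// CONCAT_VECTORS(v4i32, <a,b>, undef) == <a, b, u, u>, i.e. the source
// occupies the low-numbered elements and the rest of the 128-bit register
// is undef.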
8386
8387SDValue PPCTargetLowering::LowerINT_TO_FPVector(SDValue Op, SelectionDAG &DAG,
8388 const SDLoc &dl) const {
8389 bool IsStrict = Op->isStrictFPOpcode();
8390 unsigned Opc = Op.getOpcode();
8391 SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
8392 assert((Opc == ISD::UINT_TO_FP || Opc == ISD::SINT_TO_FP ||
8393 Opc == ISD::STRICT_UINT_TO_FP || Opc == ISD::STRICT_SINT_TO_FP) &&
8394 "Unexpected conversion type");
8395 assert((Op.getValueType() == MVT::v2f64 || Op.getValueType() == MVT::v4f32) &&
8396 "Supports conversions to v2f64/v4f32 only.");
8397
8398 // TODO: Any other flags to propagate?
8399 SDNodeFlags Flags;
8400 Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
8401
8402 bool SignedConv = Opc == ISD::SINT_TO_FP || Opc == ISD::STRICT_SINT_TO_FP;
8403 bool FourEltRes = Op.getValueType() == MVT::v4f32;
8404
8405 SDValue Wide = widenVec(DAG, Src, dl);
8406 EVT WideVT = Wide.getValueType();
8407 unsigned WideNumElts = WideVT.getVectorNumElements();
8408 MVT IntermediateVT = FourEltRes ? MVT::v4i32 : MVT::v2i64;
8409
8410 SmallVector<int, 16> ShuffV;
8411 for (unsigned i = 0; i < WideNumElts; ++i)
8412 ShuffV.push_back(i + WideNumElts);
8413
8414 int Stride = FourEltRes ? WideNumElts / 4 : WideNumElts / 2;
8415 int SaveElts = FourEltRes ? 4 : 2;
8416 if (Subtarget.isLittleEndian())
8417 for (int i = 0; i < SaveElts; i++)
8418 ShuffV[i * Stride] = i;
8419 else
8420 for (int i = 1; i <= SaveElts; i++)
8421 ShuffV[i * Stride - 1] = i - 1;
8422
8423 SDValue ShuffleSrc2 =
8424 SignedConv ? DAG.getUNDEF(WideVT) : DAG.getConstant(0, dl, WideVT);
8425 SDValue Arrange = DAG.getVectorShuffle(WideVT, dl, Wide, ShuffleSrc2, ShuffV);
8426
8427 SDValue Extend;
8428 if (SignedConv) {
8429 Arrange = DAG.getBitcast(IntermediateVT, Arrange);
8430 EVT ExtVT = Src.getValueType();
8431 if (Subtarget.hasP9Altivec())
8432 ExtVT = EVT::getVectorVT(*DAG.getContext(), WideVT.getVectorElementType(),
8433 IntermediateVT.getVectorNumElements());
8434
8435 Extend = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, IntermediateVT, Arrange,
8436 DAG.getValueType(ExtVT));
8437 } else
8438 Extend = DAG.getNode(ISD::BITCAST, dl, IntermediateVT, Arrange);
8439
8440 if (IsStrict)
8441 return DAG.getNode(Opc, dl, DAG.getVTList(Op.getValueType(), MVT::Other),
8442 {Op.getOperand(0), Extend}, Flags);
8443
8444 return DAG.getNode(Opc, dl, Op.getValueType(), Extend);
8445}
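// Worked example (illustration): unsigned v4i8 -> v4f32 on little-endian.
// Wide = <s0,s1,s2,s3,u,...,u> (v16i8), WideNumElts = 16, Stride = 4, and
// ShuffleSrc2 is the zero vector. ShuffV starts as <16,17,...,31> (all taken
// from the zero vector) and the loop sets ShuffV[0,4,8,12] = 0,1,2,3, so
// Arrange is <s0,0,0,0, s1,0,0,0, s2,0,0,0, s3,0,0,0>. Bitcast to v4i32 this
// is the zero-extension of each source byte, ready for the vector UINT_TO_FP.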
8446
8447SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
8448 SelectionDAG &DAG) const {
8449 SDLoc dl(Op);
8450 bool IsSigned = Op.getOpcode() == ISD::SINT_TO_FP ||
8451 Op.getOpcode() == ISD::STRICT_SINT_TO_FP;
8452 bool IsStrict = Op->isStrictFPOpcode();
8453 SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
8454 SDValue Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
8455
8456 // TODO: Any other flags to propagate?
8457 SDNodeFlags Flags;
8458 Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
8459
8460 EVT InVT = Src.getValueType();
8461 EVT OutVT = Op.getValueType();
8462 if (OutVT.isVector() && OutVT.isFloatingPoint() &&
8463 isOperationCustom(Op.getOpcode(), InVT))
8464 return LowerINT_TO_FPVector(Op, DAG, dl);
8465
8466 // Conversions to f128 are legal.
8467 if (Op.getValueType() == MVT::f128)
8468 return Subtarget.hasP9Vector() ? Op : SDValue();
8469
8470 // Don't handle ppc_fp128 here; let it be lowered to a libcall.
8471 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
8472 return SDValue();
8473
8474 if (Src.getValueType() == MVT::i1) {
8475 SDValue Sel = DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Src,
8476 DAG.getConstantFP(1.0, dl, Op.getValueType()),
8477 DAG.getConstantFP(0.0, dl, Op.getValueType()));
8478 if (IsStrict)
8479 return DAG.getMergeValues({Sel, Chain}, dl);
8480 else
8481 return Sel;
8482 }
8483
8484 // If we have direct moves, we can do all the conversion and skip the
8485 // store/load; however, without FPCVT we can't do most conversions.
8486 if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) &&
8487 Subtarget.isPPC64() && Subtarget.hasFPCVT())
8488 return LowerINT_TO_FPDirectMove(Op, DAG, dl);
8489
8490 assert((IsSigned || Subtarget.hasFPCVT()) &&
8491 "UINT_TO_FP is supported only with FPCVT");
8492
8493 if (Src.getValueType() == MVT::i64) {
8494 SDValue SINT = Src;
8495 // When converting to single-precision, we actually need to convert
8496 // to double-precision first and then round to single-precision.
8497 // To avoid double-rounding effects during that operation, we have
8498 // to prepare the input operand. Bits that might be truncated when
8499 // converting to double-precision are replaced by a bit that won't
8500 // be lost at this stage, but is below the single-precision rounding
8501 // position.
8502 //
8503 // However, if -enable-unsafe-fp-math is in effect, accept double
8504 // rounding to avoid the extra overhead.
8505 if (Op.getValueType() == MVT::f32 &&
8506 !Subtarget.hasFPCVT() &&
8507 !DAG.getTarget().Options.UnsafeFPMath) {
8508
8509 // Twiddle input to make sure the low 11 bits are zero. (If this
8510 // is the case, we are guaranteed the value will fit into the 53 bit
8511 // mantissa of an IEEE double-precision value without rounding.)
8512 // If any of those low 11 bits were not zero originally, make sure
8513 // bit 12 (value 2048) is set instead, so that the final rounding
8514 // to single-precision gets the correct result.
8515 SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64,
8516 SINT, DAG.getConstant(2047, dl, MVT::i64));
8517 Round = DAG.getNode(ISD::ADD, dl, MVT::i64,
8518 Round, DAG.getConstant(2047, dl, MVT::i64));
8519 Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT);
8520 Round = DAG.getNode(ISD::AND, dl, MVT::i64,
8521 Round, DAG.getConstant(-2048, dl, MVT::i64));
8522
8523 // However, we cannot use that value unconditionally: if the magnitude
8524 // of the input value is small, the bit-twiddling we did above might
8525 // end up visibly changing the output. Fortunately, in that case, we
8526 // don't need to twiddle bits since the original input will convert
8527 // exactly to double-precision floating-point already. Therefore,
8528 // construct a conditional to use the original value if the top 11
8529 // bits are all sign-bit copies, and use the rounded value computed
8530 // above otherwise.
8531 SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64,
8532 SINT, DAG.getConstant(53, dl, MVT::i32));
8533 Cond = DAG.getNode(ISD::ADD, dl, MVT::i64,
8534 Cond, DAG.getConstant(1, dl, MVT::i64));
8535 Cond = DAG.getSetCC(
8536 dl,
8537 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i64),
8538 Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT);
8539
8540 SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT);
8541 }
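// A scalar model of the twiddle above (illustration only; twiddleForSingle
// is a hypothetical name, and >> on a negative value is assumed to be an
// arithmetic shift, matching the ISD::SRA used in the DAG). The low 11 bits
// are cleared, and if any of them were set, bit 11 is set instead, so the
// value fits the 53-bit double mantissa without a hidden second rounding.
#include <cstdint>
static int64_t twiddleForSingle(int64_t SInt) {
  uint64_t Round = (uint64_t)SInt & 2047;
  Round += 2047;                 // carries into bit 11 iff any low bit set
  Round |= (uint64_t)SInt;
  Round &= ~2047ull;             // clear the bits double would round away
  // Keep the original value when the top 11 bits are all sign-bit copies
  // (small magnitudes convert to double exactly); otherwise use Round.
  uint64_t Cond = (uint64_t)(SInt >> 53) + 1;
  return Cond > 1 ? (int64_t)Round : SInt;
}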
8542
8543 ReuseLoadInfo RLI;
8544 SDValue Bits;
8545
8546 MachineFunction &MF = DAG.getMachineFunction();
8547 if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) {
8548 Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI,
8549 RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
8550 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8551 } else if (Subtarget.hasLFIWAX() &&
8552 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) {
8553 MachineMemOperand *MMO =
8554 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8555 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8556 SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8557 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl,
8558 DAG.getVTList(MVT::f64, MVT::Other),
8559 Ops, MVT::i32, MMO);
8560 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8561 } else if (Subtarget.hasFPCVT() &&
8562 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) {
8563 MachineMemOperand *MMO =
8564 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8565 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8566 SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8567 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl,
8568 DAG.getVTList(MVT::f64, MVT::Other),
8569 Ops, MVT::i32, MMO);
8570 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8571 } else if (((Subtarget.hasLFIWAX() &&
8572 SINT.getOpcode() == ISD::SIGN_EXTEND) ||
8573 (Subtarget.hasFPCVT() &&
8574 SINT.getOpcode() == ISD::ZERO_EXTEND)) &&
8575 SINT.getOperand(0).getValueType() == MVT::i32) {
8576 MachineFrameInfo &MFI = MF.getFrameInfo();
8577 EVT PtrVT = getPointerTy(DAG.getDataLayout());
8578
8579 int FrameIdx = MFI.CreateStackObject(4, Align(4), false);
8580 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8581
8582 SDValue Store = DAG.getStore(Chain, dl, SINT.getOperand(0), FIdx,
8583 MachinePointerInfo::getFixedStack(
8584 DAG.getMachineFunction(), FrameIdx));
8585 Chain = Store;
8586
8587 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
8588 "Expected an i32 store");
8589
8590 RLI.Ptr = FIdx;
8591 RLI.Chain = Chain;
8592 RLI.MPI =
8593 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
8594 RLI.Alignment = Align(4);
8595
8596 MachineMemOperand *MMO =
8597 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8598 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8599 SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8600 Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ?
8601 PPCISD::LFIWZX : PPCISD::LFIWAX,
8602 dl, DAG.getVTList(MVT::f64, MVT::Other),
8603 Ops, MVT::i32, MMO);
8604 Chain = Bits.getValue(1);
8605 } else
8606 Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT);
8607
8608 SDValue FP = convertIntToFP(Op, Bits, DAG, Subtarget, Chain);
8609 if (IsStrict)
8610 Chain = FP.getValue(1);
8611
8612 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
8613 if (IsStrict)
8614 FP = DAG.getNode(ISD::STRICT_FP_ROUND, dl,
8615 DAG.getVTList(MVT::f32, MVT::Other),
8616 {Chain, FP, DAG.getIntPtrConstant(0, dl)}, Flags);
8617 else
8618 FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
8619 DAG.getIntPtrConstant(0, dl));
8620 }
8621 return FP;
8622 }
8623
8624 assert(Src.getValueType() == MVT::i32 &&
8625 "Unhandled INT_TO_FP type in custom expander!");
8626 // Since we only generate this in 64-bit mode, we can take advantage of
8627 // 64-bit registers. In particular, sign extend the input value into the
8628 // 64-bit register with extsw, store the WHOLE 64-bit value into the stack
8629 // slot, then lfd it and fcfid it.
8630 MachineFunction &MF = DAG.getMachineFunction();
8631 MachineFrameInfo &MFI = MF.getFrameInfo();
8632 EVT PtrVT = getPointerTy(MF.getDataLayout());
8633
8634 SDValue Ld;
8635 if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) {
8636 ReuseLoadInfo RLI;
8637 bool ReusingLoad;
8638 if (!(ReusingLoad = canReuseLoadAddress(Src, MVT::i32, RLI, DAG))) {
8639 int FrameIdx = MFI.CreateStackObject(4, Align(4), false);
8640 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8641
8642 SDValue Store = DAG.getStore(Chain, dl, Src, FIdx,
8643 MachinePointerInfo::getFixedStack(
8644 DAG.getMachineFunction(), FrameIdx));
8645 Chain = Store;
8646
8647 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
8648 "Expected an i32 store");
8649
8650 RLI.Ptr = FIdx;
8651 RLI.Chain = Chain;
8652 RLI.MPI =
8653 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
8654 RLI.Alignment = Align(4);
8655 }
8656
8657 MachineMemOperand *MMO =
8658 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8659 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8660 SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8661 Ld = DAG.getMemIntrinsicNode(IsSigned ? PPCISD::LFIWAX : PPCISD::LFIWZX, dl,
8662 DAG.getVTList(MVT::f64, MVT::Other), Ops,
8663 MVT::i32, MMO);
8664 Chain = Ld.getValue(1);
8665 if (ReusingLoad)
8666 spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG);
8667 } else {
8668 assert(Subtarget.isPPC64() &&
8669 "i32->FP without LFIWAX supported only on PPC64");
8670
8671 int FrameIdx = MFI.CreateStackObject(8, Align(8), false);
8672 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8673
8674 SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64, Src);
8675
8676 // STD the extended value into the stack slot.
8677 SDValue Store = DAG.getStore(
8678 Chain, dl, Ext64, FIdx,
8679 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
8680 Chain = Store;
8681
8682 // Load the value as a double.
8683 Ld = DAG.getLoad(
8684 MVT::f64, dl, Chain, FIdx,
8685 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
8686 Chain = Ld.getValue(1);
8687 }
8688
8689 // FCFID it and return it.
8690 SDValue FP = convertIntToFP(Op, Ld, DAG, Subtarget, Chain);
8691 if (IsStrict)
8692 Chain = FP.getValue(1);
8693 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
8694 if (IsStrict)
8695 FP = DAG.getNode(ISD::STRICT_FP_ROUND, dl,
8696 DAG.getVTList(MVT::f32, MVT::Other),
8697 {Chain, FP, DAG.getIntPtrConstant(0, dl)}, Flags);
8698 else
8699 FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
8700 DAG.getIntPtrConstant(0, dl));
8701 }
8702 return FP;
8703}
8704
8705SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
8706 SelectionDAG &DAG) const {
8707 SDLoc dl(Op);
8708 /*
8709 The rounding mode is in bits 30:31 of FPSCR, and has the following
8710 settings:
8711 00 Round to nearest
8712 01 Round to 0
8713 10 Round to +inf
8714 11 Round to -inf
8715
8716 FLT_ROUNDS, on the other hand, expects the following:
8717 -1 Undefined
8718 0 Round to 0
8719 1 Round to nearest
8720 2 Round to +inf
8721 3 Round to -inf
8722
8723 To perform the conversion, we do:
8724 ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
8725 */
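// Quick check of the formula above (illustration; fltRoundsFromRN is a
// hypothetical helper). For RN = 00,01,10,11 it yields 1,0,2,3, matching
// the FLT_ROUNDS values listed in the table.
static int fltRoundsFromRN(unsigned RN) {
  return (RN & 0x3) ^ ((~RN & 0x3) >> 1);
}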
8726
8727 MachineFunction &MF = DAG.getMachineFunction();
8728 EVT VT = Op.getValueType();
8729 EVT PtrVT = getPointerTy(MF.getDataLayout());
8730
8731 // Save FP Control Word to register
8732 SDValue Chain = Op.getOperand(0);
8733 SDValue MFFS = DAG.getNode(PPCISD::MFFS, dl, {MVT::f64, MVT::Other}, Chain);
8734 Chain = MFFS.getValue(1);
8735
8736 SDValue CWD;
8737 if (isTypeLegal(MVT::i64)) {
8738 CWD = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
8739 DAG.getNode(ISD::BITCAST, dl, MVT::i64, MFFS));
8740 } else {
8741 // Save FP register to stack slot
8742 int SSFI = MF.getFrameInfo().CreateStackObject(8, Align(8), false);
8743 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
8744 Chain = DAG.getStore(Chain, dl, MFFS, StackSlot, MachinePointerInfo());
8745
8746 // Load FP Control Word from low 32 bits of stack slot.
8747 assert(hasBigEndianPartOrdering(MVT::i64, MF.getDataLayout()) &&
8748 "Stack slot adjustment is valid only on big endian subtargets!");
8749 SDValue Four = DAG.getConstant(4, dl, PtrVT);
8750 SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
8751 CWD = DAG.getLoad(MVT::i32, dl, Chain, Addr, MachinePointerInfo());
8752 Chain = CWD.getValue(1);
8753 }
8754
8755 // Transform as necessary
8756 SDValue CWD1 =
8757 DAG.getNode(ISD::AND, dl, MVT::i32,
8758 CWD, DAG.getConstant(3, dl, MVT::i32));
8759 SDValue CWD2 =
8760 DAG.getNode(ISD::SRL, dl, MVT::i32,
8761 DAG.getNode(ISD::AND, dl, MVT::i32,
8762 DAG.getNode(ISD::XOR, dl, MVT::i32,
8763 CWD, DAG.getConstant(3, dl, MVT::i32)),
8764 DAG.getConstant(3, dl, MVT::i32)),
8765 DAG.getConstant(1, dl, MVT::i32));
8766
8767 SDValue RetVal =
8768 DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);
8769
8770 RetVal =
8771 DAG.getNode((VT.getSizeInBits() < 16 ? ISD::TRUNCATE : ISD::ZERO_EXTEND),
8772 dl, VT, RetVal);
8773
8774 return DAG.getMergeValues({RetVal, Chain}, dl);
8775}
8776
8777SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
8778 EVT VT = Op.getValueType();
8779 unsigned BitWidth = VT.getSizeInBits();
8780 SDLoc dl(Op);
8781 assert(Op.getNumOperands() == 3 &&
8782 VT == Op.getOperand(1).getValueType() &&
8783 "Unexpected SHL!");
8784
8785 // Expand into a bunch of logical ops. Note that these ops
8786 // depend on the PPC behavior for oversized shift amounts.
8787 SDValue Lo = Op.getOperand(0);
8788 SDValue Hi = Op.getOperand(1);
8789 SDValue Amt = Op.getOperand(2);
8790 EVT AmtVT = Amt.getValueType();
8791
8792 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
8793 DAG.getConstant(BitWidth, dl, AmtVT), Amt);
8794 SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt);
8795 SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1);
8796 SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3);
8797 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
8798 DAG.getConstant(-BitWidth, dl, AmtVT));
8799 SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5);
8800 SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
8801 SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt);
8802 SDValue OutOps[] = { OutLo, OutHi };
8803 return DAG.getMergeValues(OutOps, dl);
8804}
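// A scalar model of the expansion above (illustration only; the helper names
// are hypothetical). It relies on the PPC slw/srw behavior that a 6-bit
// shift amount of 32..63 produces zero, which makes the three terms disjoint.
#include <cstdint>
static uint32_t ppcShl(uint32_t X, unsigned Amt) { // slw semantics
  Amt &= 63;
  return Amt < 32 ? X << Amt : 0;
}
static uint32_t ppcSrl(uint32_t X, unsigned Amt) { // srw semantics
  Amt &= 63;
  return Amt < 32 ? X >> Amt : 0;
}
static void shlParts(uint32_t Lo, uint32_t Hi, unsigned Amt, // Amt in [0,63]
                     uint32_t &OutLo, uint32_t &OutHi) {
  OutHi = ppcShl(Hi, Amt)        // Tmp2: high part shifted
        | ppcSrl(Lo, 32 - Amt)   // Tmp3: Lo bits crossing up (0 if Amt == 0)
        | ppcShl(Lo, Amt - 32);  // Tmp6: Lo landing in Hi when Amt >= 32
  OutLo = ppcShl(Lo, Amt);
}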
8805
8806SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const {
8807 EVT VT = Op.getValueType();
8808 SDLoc dl(Op);
8809 unsigned BitWidth = VT.getSizeInBits();
8810 assert(Op.getNumOperands() == 3 &&
8811 VT == Op.getOperand(1).getValueType() &&
8812 "Unexpected SRL!");
8813
8814 // Expand into a bunch of logical ops. Note that these ops
8815 // depend on the PPC behavior for oversized shift amounts.
8816 SDValue Lo = Op.getOperand(0);
8817 SDValue Hi = Op.getOperand(1);
8818 SDValue Amt = Op.getOperand(2);
8819 EVT AmtVT = Amt.getValueType();
8820
8821 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
8822 DAG.getConstant(BitWidth, dl, AmtVT), Amt);
8823 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
8824 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
8825 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
8826 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
8827 DAG.getConstant(-BitWidth, dl, AmtVT));
8828 SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5);
8829 SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
8830 SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt);
8831 SDValue OutOps[] = { OutLo, OutHi };
8832 return DAG.getMergeValues(OutOps, dl);
8833}
8834
8835SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const {
8836 SDLoc dl(Op);
8837 EVT VT = Op.getValueType();
8838 unsigned BitWidth = VT.getSizeInBits();
8839 assert(Op.getNumOperands() == 3 &&
8840 VT == Op.getOperand(1).getValueType() &&
8841 "Unexpected SRA!");
8842
8843 // Expand into a bunch of logical ops, followed by a select_cc.
8844 SDValue Lo = Op.getOperand(0);
8845 SDValue Hi = Op.getOperand(1);
8846 SDValue Amt = Op.getOperand(2);
8847 EVT AmtVT = Amt.getValueType();
8848
8849 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
8850 DAG.getConstant(BitWidth, dl, AmtVT), Amt);
8851 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
8852 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
8853 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
8854 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
8855 DAG.getConstant(-BitWidth, dl, AmtVT));
8856 SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5);
8857 SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt);
8858 SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT),
8859 Tmp4, Tmp6, ISD::SETLE);
8860 SDValue OutOps[] = { OutLo, OutHi };
8861 return DAG.getMergeValues(OutOps, dl);
8862}
8863
8864SDValue PPCTargetLowering::LowerFunnelShift(SDValue Op,
8865 SelectionDAG &DAG) const {
8866 SDLoc dl(Op);
8867 EVT VT = Op.getValueType();
8868 unsigned BitWidth = VT.getSizeInBits();
8869
8870 bool IsFSHL = Op.getOpcode() == ISD::FSHL;
8871 SDValue X = Op.getOperand(0);
8872 SDValue Y = Op.getOperand(1);
8873 SDValue Z = Op.getOperand(2);
8874 EVT AmtVT = Z.getValueType();
8875
8876 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
8877 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
8878 // This is simpler than TargetLowering::expandFunnelShift because we can rely
8879 // on PowerPC shift by BW being well defined.
8880 Z = DAG.getNode(ISD::AND, dl, AmtVT, Z,
8881 DAG.getConstant(BitWidth - 1, dl, AmtVT));
8882 SDValue SubZ =
8883 DAG.getNode(ISD::SUB, dl, AmtVT, DAG.getConstant(BitWidth, dl, AmtVT), Z);
8884 X = DAG.getNode(PPCISD::SHL, dl, VT, X, IsFSHL ? Z : SubZ);
8885 Y = DAG.getNode(PPCISD::SRL, dl, VT, Y, IsFSHL ? SubZ : Z);
8886 return DAG.getNode(ISD::OR, dl, VT, X, Y);
8887}
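// A scalar check of the fshl formula above (illustration), reusing the
// hypothetical ppcShl/ppcSrl helpers sketched after LowerSHL_PARTS; the
// Y >> (BW - (Z % BW)) term is well defined here because a shift by BW
// yields zero rather than being undefined behavior.
static uint32_t fshl32(uint32_t X, uint32_t Y, unsigned Z) {
  Z &= 31;                                 // Z % BW
  return ppcShl(X, Z) | ppcSrl(Y, 32 - Z); // 32 - Z may be 32 when Z == 0
}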
8888
8889//===----------------------------------------------------------------------===//
8890// Vector related lowering.
8891//
8892
8893/// getCanonicalConstSplat - Build a canonical splat immediate of Val with an
8894/// element size of SplatSize. Cast the result to VT.
8895static SDValue getCanonicalConstSplat(uint64_t Val, unsigned SplatSize, EVT VT,
8896 SelectionDAG &DAG, const SDLoc &dl) {
8897 static const MVT VTys[] = { // canonical VT to use for each size.
8898 MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
8899 };
8900
8901 EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1];
8902
8903 // For a splat with all ones, turn it into vspltisb 0xFF to canonicalize.
8904 if (Val == ((1LLU << (SplatSize * 8)) - 1)) {
8905 SplatSize = 1;
8906 Val = 0xFF;
8907 }
8908
8909 EVT CanonicalVT = VTys[SplatSize-1];
8910
8911 // Build a canonical splat for this value.
8912 return DAG.getBitcast(ReqVT, DAG.getConstant(Val, dl, CanonicalVT));
8913}
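// Example (illustration): a 2-byte splat of 0xFFFF takes the all-ones path
// and is rebuilt as a 1-byte splat of 0xFF, so every all-ones vector
// canonicalizes to the same vspltisb -1 regardless of the element size.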
8914
8915/// BuildIntrinsicOp - Return a unary operator intrinsic node with the
8916/// specified intrinsic ID.
8917static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG,
8918 const SDLoc &dl, EVT DestVT = MVT::Other) {
8919 if (DestVT == MVT::Other) DestVT = Op.getValueType();
8920 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
8921 DAG.getConstant(IID, dl, MVT::i32), Op);
8922}
8923
8924/// BuildIntrinsicOp - Return a binary operator intrinsic node with the
8925/// specified intrinsic ID.
8926static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS,
8927 SelectionDAG &DAG, const SDLoc &dl,
8928 EVT DestVT = MVT::Other) {
8929 if (DestVT == MVT::Other) DestVT = LHS.getValueType();
8930 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
8931 DAG.getConstant(IID, dl, MVT::i32), LHS, RHS);
8932}
8933
8934/// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
8935/// specified intrinsic ID.
8936static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
8937 SDValue Op2, SelectionDAG &DAG, const SDLoc &dl,
8938 EVT DestVT = MVT::Other) {
8939 if (DestVT == MVT::Other) DestVT = Op0.getValueType();
8940 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
8941 DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2);
8942}
8943
8944/// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
8945/// amount. The result has the specified value type.
8946static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT,
8947 SelectionDAG &DAG, const SDLoc &dl) {
8948 // Force LHS/RHS to be the right type.
8949 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS);
8950 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS);
8951
8952 int Ops[16];
8953 for (unsigned i = 0; i != 16; ++i)
8954 Ops[i] = i + Amt;
8955 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops);
8956 return DAG.getNode(ISD::BITCAST, dl, VT, T);
8957}
8958
8959/// Do we have an efficient pattern in a .td file for this node?
8960///
8961/// \param V - pointer to the BuildVectorSDNode being matched
8962/// \param HasDirectMove - does this subtarget have VSR <-> GPR direct moves?
8963///
8964/// There are some patterns where it is beneficial to keep a BUILD_VECTOR
8965/// node as a BUILD_VECTOR node rather than expanding it. The patterns where
8966/// the opposite is true (expansion is beneficial) are:
8967/// - The node builds a vector out of integers that are not 32 or 64-bits
8968/// - The node builds a vector out of constants
8969/// - The node is a "load-and-splat"
8970/// In all other cases, we will choose to keep the BUILD_VECTOR.
8971static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V,
8972 bool HasDirectMove,
8973 bool HasP8Vector) {
8974 EVT VecVT = V->getValueType(0);
8975 bool RightType = VecVT == MVT::v2f64 ||
8976 (HasP8Vector && VecVT == MVT::v4f32) ||
8977 (HasDirectMove && (VecVT == MVT::v2i64 || VecVT == MVT::v4i32));
8978 if (!RightType)
8979 return false;
8980
8981 bool IsSplat = true;
8982 bool IsLoad = false;
8983 SDValue Op0 = V->getOperand(0);
8984
8985 // This function is called in a block that confirms the node is not a constant
8986 // splat. So a constant BUILD_VECTOR here means the vector is built out of
8987 // different constants.
8988 if (V->isConstant())
8989 return false;
8990 for (int i = 0, e = V->getNumOperands(); i < e; ++i) {
8991 if (V->getOperand(i).isUndef())
8992 return false;
8993 // We want to expand nodes that represent load-and-splat even if the
8994 // loaded value is a floating point truncation or conversion to int.
8995 if (V->getOperand(i).getOpcode() == ISD::LOAD ||
8996 (V->getOperand(i).getOpcode() == ISD::FP_ROUND &&
8997 V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
8998 (V->getOperand(i).getOpcode() == ISD::FP_TO_SINT &&
8999 V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
9000 (V->getOperand(i).getOpcode() == ISD::FP_TO_UINT &&
9001 V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD))
9002 IsLoad = true;
9003 // If the operands are different or the input is not a load and has more
9004 // uses than just this BV node, then it isn't a splat.
9005 if (V->getOperand(i) != Op0 ||
9006 (!IsLoad && !V->isOnlyUserOf(V->getOperand(i).getNode())))
9007 IsSplat = false;
9008 }
9009 return !(IsSplat && IsLoad);
9010}
9011
9012// Lower BITCAST(f128, (build_pair i64, i64)) to BUILD_FP128.
9013SDValue PPCTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const {
9014
9015 SDLoc dl(Op);
9016 SDValue Op0 = Op->getOperand(0);
9017
9018 if ((Op.getValueType() != MVT::f128) ||
9019 (Op0.getOpcode() != ISD::BUILD_PAIR) ||
9020 (Op0.getOperand(0).getValueType() != MVT::i64) ||
9021 (Op0.getOperand(1).getValueType() != MVT::i64))
9022 return SDValue();
9023
9024 return DAG.getNode(PPCISD::BUILD_FP128, dl, MVT::f128, Op0.getOperand(0),
9025 Op0.getOperand(1));
9026}
9027
9028static const SDValue *getNormalLoadInput(const SDValue &Op, bool &IsPermuted) {
9029 const SDValue *InputLoad = &Op;
9030 if (InputLoad->getOpcode() == ISD::BITCAST)
9031 InputLoad = &InputLoad->getOperand(0);
9032 if (InputLoad->getOpcode() == ISD::SCALAR_TO_VECTOR ||
9033 InputLoad->getOpcode() == PPCISD::SCALAR_TO_VECTOR_PERMUTED) {
9034 IsPermuted = InputLoad->getOpcode() == PPCISD::SCALAR_TO_VECTOR_PERMUTED;
9035 InputLoad = &InputLoad->getOperand(0);
9036 }
9037 if (InputLoad->getOpcode() != ISD::LOAD)
9038 return nullptr;
9039 LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
9040 return ISD::isNormalLoad(LD) ? InputLoad : nullptr;
9041}
9042
9043 // Convert the argument APFloat to a single-precision APFloat if there is no
9044 // loss of information during the conversion, and the resulting number is not
9045 // a denormal. Return true if successful.
9046bool llvm::convertToNonDenormSingle(APFloat &ArgAPFloat) {
9047 APFloat APFloatToConvert = ArgAPFloat;
9048 bool LosesInfo = true;
9049 APFloatToConvert.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven,
9050 &LosesInfo);
9051 bool Success = (!LosesInfo && !APFloatToConvert.isDenormal());
9052 if (Success)
9053 ArgAPFloat = APFloatToConvert;
9054 return Success;
9055}
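// Examples (illustration): 2.5 converts to IEEE single exactly and stays
// normal, so the argument is shrunk; 0.1 sets LosesInfo and is left alone;
// a tiny value whose single-precision form is denormal is also rejected.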
9056
9057 // Bitcast the argument APInt to a double and convert it to a single-precision
9058 // APFloat, then bitcast the APFloat back to an APInt and assign it to the
9059 // original argument if there is no loss of information during the conversion
9060 // from double to single precision and the resulting number is not a denormal.
9061 // Return true if successful.
9062bool llvm::convertToNonDenormSingle(APInt &ArgAPInt) {
9063 double DpValue = ArgAPInt.bitsToDouble();
9064 APFloat APFloatDp(DpValue);
9065 bool Success = convertToNonDenormSingle(APFloatDp);
9066 if (Success)
9067 ArgAPInt = APFloatDp.bitcastToAPInt();
9068 return Success;
9069}
9070
9071 // Nondestructive check for convertToNonDenormSingle.
9072bool llvm::checkConvertToNonDenormSingle(APFloat &ArgAPFloat) {
9073 // Only convert if it loses info, since XXSPLTIDP should
9074 // handle the other case.
9075 APFloat APFloatToConvert = ArgAPFloat;
9076 bool LosesInfo = true;
9077 APFloatToConvert.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven,
9078 &LosesInfo);
9079
9080 return (!LosesInfo && !APFloatToConvert.isDenormal());
9081}
9082
9083// If this is a case we can't handle, return null and let the default
9084// expansion code take care of it. If we CAN select this case, and if it
9085// selects to a single instruction, return Op. Otherwise, if we can codegen
9086// this case more efficiently than a constant pool load, lower it to the
9087// sequence of ops that should be used.
9088SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
9089 SelectionDAG &DAG) const {
9090 SDLoc dl(Op);
9091 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
9092 assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");
9093
9094 // Check if this is a splat of a constant value.
9095 APInt APSplatBits, APSplatUndef;
9096 unsigned SplatBitSize;
9097 bool HasAnyUndefs;
9098 bool BVNIsConstantSplat =
9099 BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
9100 HasAnyUndefs, 0, !Subtarget.isLittleEndian());
9101
9102 // If it is a splat of a double, check if we can shrink it to a 32 bit
9103 // non-denormal float which when converted back to double gives us the same
9104 // double. This is to exploit the XXSPLTIDP instruction.
9105 // If we lose precision, we use XXSPLTI32DX.
9106 if (BVNIsConstantSplat && (SplatBitSize == 64) &&
9107 Subtarget.hasPrefixInstrs()) {
9108 // Check the type first to short-circuit so we don't modify APSplatBits if
9109 // this block isn't executed.
9110 if ((Op->getValueType(0) == MVT::v2f64) &&
9111 convertToNonDenormSingle(APSplatBits)) {
9112 SDValue SplatNode = DAG.getNode(
9113 PPCISD::XXSPLTI_SP_TO_DP, dl, MVT::v2f64,
9114 DAG.getTargetConstant(APSplatBits.getZExtValue(), dl, MVT::i32));
9115 return DAG.getBitcast(Op.getValueType(), SplatNode);
9116 } else {
9117 // We may lose precision, so we have to use XXSPLTI32DX.
9118
9119 uint32_t Hi =
9120 (uint32_t)((APSplatBits.getZExtValue() & 0xFFFFFFFF00000000LL) >> 32);
9121 uint32_t Lo =
9122 (uint32_t)(APSplatBits.getZExtValue() & 0xFFFFFFFF);
9123 SDValue SplatNode = DAG.getUNDEF(MVT::v2i64);
9124
9125 if (!Hi || !Lo)
9126 // If either load is 0, then we should generate XXLXOR to set to 0.
9127 SplatNode = DAG.getTargetConstant(0, dl, MVT::v2i64);
9128
9129 if (Hi)
9130 SplatNode = DAG.getNode(
9131 PPCISD::XXSPLTI32DX, dl, MVT::v2i64, SplatNode,
9132 DAG.getTargetConstant(0, dl, MVT::i32),
9133 DAG.getTargetConstant(Hi, dl, MVT::i32));
9134
9135 if (Lo)
9136 SplatNode =
9137 DAG.getNode(PPCISD::XXSPLTI32DX, dl, MVT::v2i64, SplatNode,
9138 DAG.getTargetConstant(1, dl, MVT::i32),
9139 DAG.getTargetConstant(Lo, dl, MVT::i32));
9140
9141 return DAG.getBitcast(Op.getValueType(), SplatNode);
9142 }
9143 }
9144
9145 if (!BVNIsConstantSplat || SplatBitSize > 32) {
9146
9147 bool IsPermutedLoad = false;
9148 const SDValue *InputLoad =
9149 getNormalLoadInput(Op.getOperand(0), IsPermutedLoad);
9150 // Handle load-and-splat patterns as we have instructions that will do this
9151 // in one go.
9152 if (InputLoad && DAG.isSplatValue(Op, true)) {
9153 LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
9154
9155 // We have handling for 4 and 8 byte elements.
9156 unsigned ElementSize = LD->getMemoryVT().getScalarSizeInBits();
9157
9158 // Checking for a single use of this load, we have to check for vector
9159 // width (128 bits) / ElementSize uses (since each operand of the
9160 // BUILD_VECTOR is a separate use of the value).
9161 unsigned NumUsesOfInputLD = 128 / ElementSize;
9162 for (SDValue BVInOp : Op->ops())
9163 if (BVInOp.isUndef())
9164 NumUsesOfInputLD--;
9165 assert(NumUsesOfInputLD > 0 && "No uses of input LD of a build_vector?");
9166 if (InputLoad->getNode()->hasNUsesOfValue(NumUsesOfInputLD, 0) &&
9167 ((Subtarget.hasVSX() && ElementSize == 64) ||
9168 (Subtarget.hasP9Vector() && ElementSize == 32))) {
9169 SDValue Ops[] = {
9170 LD->getChain(), // Chain
9171 LD->getBasePtr(), // Ptr
9172 DAG.getValueType(Op.getValueType()) // VT
9173 };
9174 SDValue LdSplt = DAG.getMemIntrinsicNode(
9175 PPCISD::LD_SPLAT, dl, DAG.getVTList(Op.getValueType(), MVT::Other),
9176 Ops, LD->getMemoryVT(), LD->getMemOperand());
9177 // Replace all uses of the output chain of the original load with the
9178 // output chain of the new load.
9179 DAG.ReplaceAllUsesOfValueWith(InputLoad->getValue(1),
9180 LdSplt.getValue(1));
9181 return LdSplt;
9182 }
9183 }
9184
9185 // In 64-bit mode, BUILD_VECTOR nodes that are not constant splats of up to
9186 // 32-bits can be lowered to VSX instructions under certain conditions.
9187 // Without VSX, there is no pattern more efficient than expanding the node.
9188 if (Subtarget.hasVSX() && Subtarget.isPPC64() &&
9189 haveEfficientBuildVectorPattern(BVN, Subtarget.hasDirectMove(),
9190 Subtarget.hasP8Vector()))
9191 return Op;
9192 return SDValue();
9193 }
9194
9195 uint64_t SplatBits = APSplatBits.getZExtValue();
9196 uint64_t SplatUndef = APSplatUndef.getZExtValue();
9197 unsigned SplatSize = SplatBitSize / 8;
9198
9199 // First, handle single instruction cases.
9200
9201 // All zeros?
9202 if (SplatBits == 0) {
9203 // Canonicalize all zero vectors to be v4i32.
9204 if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
9205 SDValue Z = DAG.getConstant(0, dl, MVT::v4i32);
9206 Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
9207 }
9208 return Op;
9209 }
9210
9211 // We have XXSPLTIW for constant splats four bytes wide.
9212 // Given vector length is a multiple of 4, 2-byte splats can be replaced
9213 // with 4-byte splats. We replicate the SplatBits in case of 2-byte splat to
9214 // make a 4-byte splat element. For example: 2-byte splat of 0xABAB can be
9215 // turned into a 4-byte splat of 0xABABABAB.
9216 if (Subtarget.hasPrefixInstrs() && SplatSize == 2)
9217 return getCanonicalConstSplat(SplatBits | (SplatBits << 16), SplatSize * 2,
9218 Op.getValueType(), DAG, dl);
9219
9220 if (Subtarget.hasPrefixInstrs() && SplatSize == 4)
9221 return getCanonicalConstSplat(SplatBits, SplatSize, Op.getValueType(), DAG,
9222 dl);
9223
9224 // We have XXSPLTIB for constant splats one byte wide.
9225 if (Subtarget.hasP9Vector() && SplatSize == 1)
9226 return getCanonicalConstSplat(SplatBits, SplatSize, Op.getValueType(), DAG,
9227 dl);
9228
9229 // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
9230 int32_t SextVal = (int32_t(SplatBits << (32-SplatBitSize)) >>
9231 (32-SplatBitSize));
9232 if (SextVal >= -16 && SextVal <= 15)
9233 return getCanonicalConstSplat(SextVal, SplatSize, Op.getValueType(), DAG,
9234 dl);
9235
9236 // Two instruction sequences.
9237
9238 // If this value is in the range [-32,30] and is even, use:
9239 // VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2)
9240 // If this value is in the range [17,31] and is odd, use:
9241 // VSPLTI[bhw](val-16) - VSPLTI[bhw](-16)
9242 // If this value is in the range [-31,-17] and is odd, use:
9243 // VSPLTI[bhw](val+16) + VSPLTI[bhw](-16)
9244 // Note the last two are three-instruction sequences.
9245 if (SextVal >= -32 && SextVal <= 31) {
9246 // To avoid having these optimizations undone by constant folding,
9247 // we convert to a pseudo that will be expanded later into one of
9248 // the above forms.
9249 SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32);
9250 EVT VT = (SplatSize == 1 ? MVT::v16i8 :
9251 (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32));
9252 SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32);
9253 SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize);
9254 if (VT == Op.getValueType())
9255 return RetVal;
9256 else
9257 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal);
9258 }
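// Example (illustration): a v4i32 splat of 30 is later expanded from the
// VADD_SPLAT pseudo into vspltisw 15 + vadduwm (15 + 15), and a splat of 27
// into vspltisw 11 followed by subtracting vspltisw -16 (11 - (-16) = 27),
// per the ranges listed above.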
9259
9260 // If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is
9261 // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000). This is important
9262 // for fneg/fabs.
9263 if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
9264 // Make -1 and vspltisw -1:
9265 SDValue OnesV = getCanonicalConstSplat(-1, 4, MVT::v4i32, DAG, dl);
9266
9267 // Make the VSLW intrinsic, computing 0x8000_0000.
9268 SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
9269 OnesV, DAG, dl);
9270
9271 // xor by OnesV to invert it.
9272 Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
9273 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9274 }
9275
9276 // Check to see if this is a wide variety of vsplti*, binop self cases.
9277 static const signed char SplatCsts[] = {
9278 -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
9279 -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
9280 };
9281
9282 for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
9283 // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
9284 // cases which are ambiguous (e.g. formation of 0x8000_0000).
9285 int i = SplatCsts[idx];
9286
9287 // Figure out what shift amount will be used by altivec if shifted by i in
9288 // this splat size.
9289 unsigned TypeShiftAmt = i & (SplatBitSize-1);
9290
9291 // vsplti + shl self.
9292 if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
9293 SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
9294 static const unsigned IIDs[] = { // Intrinsic to use for each size.
9295 Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
9296 Intrinsic::ppc_altivec_vslw
9297 };
9298 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9299 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9300 }
9301
9302 // vsplti + srl self.
9303 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
9304 SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
9305 static const unsigned IIDs[] = { // Intrinsic to use for each size.
9306 Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
9307 Intrinsic::ppc_altivec_vsrw
9308 };
9309 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9310 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9311 }
9312
9313 // vsplti + rol self.
9314 if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
9315 ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
9316 SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
9317 static const unsigned IIDs[] = { // Intrinsic to use for each size.
9318 Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
9319 Intrinsic::ppc_altivec_vrlw
9320 };
9321 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9322 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9323 }
9324
9325 // t = vsplti c, result = vsldoi t, t, 1
9326 if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
9327 SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
9328 unsigned Amt = Subtarget.isLittleEndian() ? 15 : 1;
9329 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
9330 }
9331 // t = vsplti c, result = vsldoi t, t, 2
9332 if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
9333 SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
9334 unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2;
9335 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
9336 }
9337 // t = vsplti c, result = vsldoi t, t, 3
9338 if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
9339 SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
9340 unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3;
9341 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
9342 }
9343 }
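// Example (illustration): a v16i8 splat of 0x02 is matched by the
// "vsplti + shl self" case above with i = 1, since (1 << (1 & 7)) == 2:
// it becomes t = vspltisb 1; vslb t, t, where each byte is shifted left by
// its own value 1.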
9344
9345 return SDValue();
9346}
9347
9348/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
9349/// the specified operations to build the shuffle.
9350static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
9351 SDValue RHS, SelectionDAG &DAG,
9352 const SDLoc &dl) {
9353 unsigned OpNum = (PFEntry >> 26) & 0x0F;
9354 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
9355 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1);
9356
9357 enum {
9358 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
9359 OP_VMRGHW,
9360 OP_VMRGLW,
9361 OP_VSPLTISW0,
9362 OP_VSPLTISW1,
9363 OP_VSPLTISW2,
9364 OP_VSPLTISW3,
9365 OP_VSLDOI4,
9366 OP_VSLDOI8,
9367 OP_VSLDOI12
9368 };
9369
9370 if (OpNum == OP_COPY) {
9371 if (LHSID == (1*9+2)*9+3) return LHS;
9372 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
9373 return RHS;
9374 }
9375
9376 SDValue OpLHS, OpRHS;
9377 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
9378 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
9379
9380 int ShufIdxs[16];
9381 switch (OpNum) {
9382 default: llvm_unreachable("Unknown i32 permute!");
9383 case OP_VMRGHW:
9384 ShufIdxs[ 0] = 0; ShufIdxs[ 1] = 1; ShufIdxs[ 2] = 2; ShufIdxs[ 3] = 3;
9385 ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
9386 ShufIdxs[ 8] = 4; ShufIdxs[ 9] = 5; ShufIdxs[10] = 6; ShufIdxs[11] = 7;
9387 ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
9388 break;
9389 case OP_VMRGLW:
9390 ShufIdxs[ 0] = 8; ShufIdxs[ 1] = 9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
9391 ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
9392 ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
9393 ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
9394 break;
9395 case OP_VSPLTISW0:
9396 for (unsigned i = 0; i != 16; ++i)
9397 ShufIdxs[i] = (i&3)+0;
9398 break;
9399 case OP_VSPLTISW1:
9400 for (unsigned i = 0; i != 16; ++i)
9401 ShufIdxs[i] = (i&3)+4;
9402 break;
9403 case OP_VSPLTISW2:
9404 for (unsigned i = 0; i != 16; ++i)
9405 ShufIdxs[i] = (i&3)+8;
9406 break;
9407 case OP_VSPLTISW3:
9408 for (unsigned i = 0; i != 16; ++i)
9409 ShufIdxs[i] = (i&3)+12;
9410 break;
9411 case OP_VSLDOI4:
9412 return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
9413 case OP_VSLDOI8:
9414 return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
9415 case OP_VSLDOI12:
9416 return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
9417 }
9418 EVT VT = OpLHS.getValueType();
9419 OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
9420 OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
9421 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
9422 return DAG.getNode(ISD::BITCAST, dl, VT, T);
9423}
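// PFEntry layout (illustration of the decoding above): bits [26,29] hold the
// operator, bits [13,25] the LHS sub-entry and bits [0,12] the RHS one. An
// OP_COPY identifier packs a 4-element mask as base-9 digits, so <0,1,2,3>
// is ((0*9+1)*9+2)*9+3 == 102 (returns LHS) and <4,5,6,7> is
// ((4*9+5)*9+6)*9+7 (returns RHS).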
9424
9425/// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be handled
9426/// by the VINSERTB instruction introduced in ISA 3.0, else just return default
9427/// SDValue.
9428SDValue PPCTargetLowering::lowerToVINSERTB(ShuffleVectorSDNode *N,
9429 SelectionDAG &DAG) const {
9430 const unsigned BytesInVector = 16;
9431 bool IsLE = Subtarget.isLittleEndian();
9432 SDLoc dl(N);
9433 SDValue V1 = N->getOperand(0);
9434 SDValue V2 = N->getOperand(1);
9435 unsigned ShiftElts = 0, InsertAtByte = 0;
9436 bool Swap = false;
9437
9438 // Shifts required to get the byte we want at element 7.
9439 unsigned LittleEndianShifts[] = {8, 7, 6, 5, 4, 3, 2, 1,
9440 0, 15, 14, 13, 12, 11, 10, 9};
9441 unsigned BigEndianShifts[] = {9, 10, 11, 12, 13, 14, 15, 0,
9442 1, 2, 3, 4, 5, 6, 7, 8};
9443
9444 ArrayRef<int> Mask = N->getMask();
9445 int OriginalOrder[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
9446
9447 // For each mask element, find out if we're just inserting something
9448 // from V2 into V1 or vice versa.
9449 // Possible permutations inserting an element from V2 into V1:
9450 // X, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
9451 // 0, X, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
9452 // ...
9453 // 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, X
9454 // Inserting from V1 into V2 will be similar, except mask range will be
9455 // [16,31].
9456
9457 bool FoundCandidate = false;
9458 // If both vector operands for the shuffle are the same vector, the mask
9459 // will contain only elements from the first one and the second one will be
9460 // undef.
9461 unsigned VINSERTBSrcElem = IsLE ? 8 : 7;
9462 // Go through the mask of bytes to find an element that's being moved
9463 // from one vector to the other.
9464 for (unsigned i = 0; i < BytesInVector; ++i) {
9465 unsigned CurrentElement = Mask[i];
9466 // If 2nd operand is undefined, we should only look for element 7 in the
9467 // Mask.
9468 if (V2.isUndef() && CurrentElement != VINSERTBSrcElem)
9469 continue;
9470
9471 bool OtherElementsInOrder = true;
9472 // Examine the other elements in the Mask to see if they're in original
9473 // order.
9474 for (unsigned j = 0; j < BytesInVector; ++j) {
9475 if (j == i)
9476 continue;
9477 // If CurrentElement is from V1 [0,15], we expect the rest of the Mask to
9478 // be from V2 [16,31] and vice versa, unless the 2nd operand is undefined,
9479 // in which case we assume we're always picking from the 1st operand.
9480 int MaskOffset =
9481 (!V2.isUndef() && CurrentElement < BytesInVector) ? BytesInVector : 0;
9482 if (Mask[j] != OriginalOrder[j] + MaskOffset) {
9483 OtherElementsInOrder = false;
9484 break;
9485 }
9486 }
9487 // If other elements are in original order, we record the number of shifts
9488 // we need to get the element we want into element 7. Also record which byte
9489 // in the vector we should insert into.
9490 if (OtherElementsInOrder) {
9491 // If 2nd operand is undefined, we assume no shifts and no swapping.
9492 if (V2.isUndef()) {
9493 ShiftElts = 0;
9494 Swap = false;
9495 } else {
9496 // Only need the last 4 bits for shifts because operands will be swapped if CurrentElement is >= 2^4.
9497 ShiftElts = IsLE ? LittleEndianShifts[CurrentElement & 0xF]
9498 : BigEndianShifts[CurrentElement & 0xF];
9499 Swap = CurrentElement < BytesInVector;
9500 }
9501 InsertAtByte = IsLE ? BytesInVector - (i + 1) : i;
9502 FoundCandidate = true;
9503 break;
9504 }
9505 }
9506
9507 if (!FoundCandidate)
9508 return SDValue();
9509
9510 // Candidate found, construct the proper SDAG sequence with VINSERTB,
9511 // optionally with VECSHL if shift is required.
9512 if (Swap)
9513 std::swap(V1, V2);
9514 if (V2.isUndef())
9515 V2 = V1;
9516 if (ShiftElts) {
9517 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
9518 DAG.getConstant(ShiftElts, dl, MVT::i32));
9519 return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, Shl,
9520 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9521 }
9522 return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, V2,
9523 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9524}
9525
9526 /// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be handled
9527 /// by the VINSERTH instruction introduced in ISA 3.0, else just return the
9528 /// default SDValue.
9529SDValue PPCTargetLowering::lowerToVINSERTH(ShuffleVectorSDNode *N,
9530 SelectionDAG &DAG) const {
9531 const unsigned NumHalfWords = 8;
9532 const unsigned BytesInVector = NumHalfWords * 2;
9533 // Check that the shuffle is on half-words.
9534 if (!isNByteElemShuffleMask(N, 2, 1))
9535 return SDValue();
9536
9537 bool IsLE = Subtarget.isLittleEndian();
9538 SDLoc dl(N);
9539 SDValue V1 = N->getOperand(0);
9540 SDValue V2 = N->getOperand(1);
9541 unsigned ShiftElts = 0, InsertAtByte = 0;
9542 bool Swap = false;
9543
9544 // Shifts required to get the half-word we want at element 3.
9545 unsigned LittleEndianShifts[] = {4, 3, 2, 1, 0, 7, 6, 5};
9546 unsigned BigEndianShifts[] = {5, 6, 7, 0, 1, 2, 3, 4};
9547
9548 uint32_t Mask = 0;
9549 uint32_t OriginalOrderLow = 0x1234567;
9550 uint32_t OriginalOrderHigh = 0x89ABCDEF;
9551 // Now we look at mask elements 0,2,4,6,8,10,12,14. Pack the mask into a
9552 // 32-bit space, needing only a 4-bit nibble per element.
9553 for (unsigned i = 0; i < NumHalfWords; ++i) {
9554 unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
9555 Mask |= ((uint32_t)(N->getMaskElt(i * 2) / 2) << MaskShift);
9556 }
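  // For illustration: with the packing above, an identity mask drawn entirely
  // from V1 becomes Mask == 0x01234567 and one drawn entirely from V2 becomes
  // Mask == 0x89ABCDEF, matching OriginalOrderLow and OriginalOrderHigh above.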
9557
9558 // For each mask element, find out if we're just inserting something
9559 // from V2 into V1 or vice versa. Possible permutations inserting an element
9560 // from V2 into V1:
9561 // X, 1, 2, 3, 4, 5, 6, 7
9562 // 0, X, 2, 3, 4, 5, 6, 7
9563 // 0, 1, X, 3, 4, 5, 6, 7
9564 // 0, 1, 2, X, 4, 5, 6, 7
9565 // 0, 1, 2, 3, X, 5, 6, 7
9566 // 0, 1, 2, 3, 4, X, 6, 7
9567 // 0, 1, 2, 3, 4, 5, X, 7
9568 // 0, 1, 2, 3, 4, 5, 6, X
9569 // Inserting from V1 into V2 will be similar, except mask range will be [8,15].
9570
9571 bool FoundCandidate = false;
9572 // Go through the mask of half-words to find an element that's being moved
9573 // from one vector to the other.
9574 for (unsigned i = 0; i < NumHalfWords; ++i) {
9575 unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
9576 uint32_t MaskOneElt = (Mask >> MaskShift) & 0xF;
9577 uint32_t MaskOtherElts = ~(0xF << MaskShift);
9578 uint32_t TargetOrder = 0x0;
9579
9580 // If both vector operands for the shuffle are the same vector, the mask
9581 // will contain only elements from the first one and the second one will be
9582 // undef.
9583 if (V2.isUndef()) {
9584 ShiftElts = 0;
9585 unsigned VINSERTHSrcElem = IsLE ? 4 : 3;
9586 TargetOrder = OriginalOrderLow;
9587 Swap = false;
9588 // Skip if this is not the correct element or the mask of the other
9589 // elements doesn't match our expected order.
9590 if (MaskOneElt == VINSERTHSrcElem &&
9591 (Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
9592 InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
9593 FoundCandidate = true;
9594 break;
9595 }
9596 } else { // If both operands are defined.
9597 // Target order is [8,15] if the current mask is between [0,7].
9598 TargetOrder =
9599 (MaskOneElt < NumHalfWords) ? OriginalOrderHigh : OriginalOrderLow;
9600 // Skip if the mask of the other elements doesn't match our expected order.
9601 if ((Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
9602 // We only need the last 3 bits for the number of shifts.
9603 ShiftElts = IsLE ? LittleEndianShifts[MaskOneElt & 0x7]
9604 : BigEndianShifts[MaskOneElt & 0x7];
9605 InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
9606 Swap = MaskOneElt < NumHalfWords;
9607 FoundCandidate = true;
9608 break;
9609 }
9610 }
9611 }
9612
9613 if (!FoundCandidate)
9614 return SDValue();
9615
9616 // Candidate found, construct the proper SDAG sequence with VINSERTH,
9617 // optionally with VECSHL if shift is required.
9618 if (Swap)
9619 std::swap(V1, V2);
9620 if (V2.isUndef())
9621 V2 = V1;
9622 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
9623 if (ShiftElts) {
9624 // Double ShiftElts because we're left shifting on v16i8 type.
9625 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
9626 DAG.getConstant(2 * ShiftElts, dl, MVT::i32));
9627 SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, Shl);
9628 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
9629 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9630 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9631 }
9632 SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
9633 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
9634 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9635 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9636}
9637
9638/// lowerToXXSPLTI32DX - Return the SDValue if this VECTOR_SHUFFLE can be
9639/// handled by the XXSPLTI32DX instruction introduced in ISA 3.1, otherwise
9640/// return the default SDValue.
9641SDValue PPCTargetLowering::lowerToXXSPLTI32DX(ShuffleVectorSDNode *SVN,
9642 SelectionDAG &DAG) const {
9643 // The LHS and RHS may be bitcasts to v16i8 as we canonicalize shuffles
9644 // to v16i8. Peek through the bitcasts to get the actual operands.
9645 SDValue LHS = peekThroughBitcasts(SVN->getOperand(0));
9646 SDValue RHS = peekThroughBitcasts(SVN->getOperand(1));
9647
9648 auto ShuffleMask = SVN->getMask();
9649 SDValue VecShuffle(SVN, 0);
9650 SDLoc DL(SVN);
9651
9652 // Check that we have a four byte shuffle.
9653 if (!isNByteElemShuffleMask(SVN, 4, 1))
9654 return SDValue();
9655
9656 // Canonicalize the RHS being a BUILD_VECTOR when lowering to xxsplti32dx.
9657 if (RHS->getOpcode() != ISD::BUILD_VECTOR) {
9658 std::swap(LHS, RHS);
9659 VecShuffle = DAG.getCommutedVectorShuffle(*SVN);
9660 ShuffleMask = cast<ShuffleVectorSDNode>(VecShuffle)->getMask();
9661 }
9662
9663 // Ensure that the RHS is a vector of constants.
9664 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(RHS.getNode());
9665 if (!BVN)
9666 return SDValue();
9667
9668 // Check if RHS is a splat of 4-bytes (or smaller).
9669 APInt APSplatValue, APSplatUndef;
9670 unsigned SplatBitSize;
9671 bool HasAnyUndefs;
9672 if (!BVN->isConstantSplat(APSplatValue, APSplatUndef, SplatBitSize,
9673 HasAnyUndefs, 0, !Subtarget.isLittleEndian()) ||
9674 SplatBitSize > 32)
9675 return SDValue();
9676
9677 // Check that the shuffle mask matches the semantics of XXSPLTI32DX.
9678 // The instruction splats a constant C into two words of the source vector
9679 // producing { C, Unchanged, C, Unchanged } or { Unchanged, C, Unchanged, C }.
9680 // Thus we check that the shuffle mask is the equivalent of
9681 // <0, [4-7], 2, [4-7]> or <[4-7], 1, [4-7], 3> respectively.
9682 // Note: the check above of isNByteElemShuffleMask() ensures that the bytes
9683 // within each word are consecutive, so we only need to check the first byte.
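  // For example (after the canonicalization above, RHS holds the constant),
  // the mask <0,1,2,3, 16,17,18,19, 8,9,10,11, 20,21,22,23> keeps words 0 and
  // 2 of LHS and takes words 1 and 3 from the splatted constant, so it is
  // accepted by the first check below.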
9684 SDValue Index;
9685 bool IsLE = Subtarget.isLittleEndian();
9686 if ((ShuffleMask[0] == 0 && ShuffleMask[8] == 8) &&
9687 (ShuffleMask[4] % 4 == 0 && ShuffleMask[12] % 4 == 0 &&
9688 ShuffleMask[4] > 15 && ShuffleMask[12] > 15))
9689 Index = DAG.getTargetConstant(IsLE ? 0 : 1, DL, MVT::i32);
9690 else if ((ShuffleMask[4] == 4 && ShuffleMask[12] == 12) &&
9691 (ShuffleMask[0] % 4 == 0 && ShuffleMask[8] % 4 == 0 &&
9692 ShuffleMask[0] > 15 && ShuffleMask[8] > 15))
9693 Index = DAG.getTargetConstant(IsLE ? 1 : 0, DL, MVT::i32);
9694 else
9695 return SDValue();
9696
9697 // If the splat is narrower than 32-bits, we need to get the 32-bit value
9698 // for XXSPLTI32DX.
9699 unsigned SplatVal = APSplatValue.getZExtValue();
9700 for (; SplatBitSize < 32; SplatBitSize <<= 1)
9701 SplatVal |= (SplatVal << SplatBitSize);
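  // For example, an 8-bit splat value of 0xAB widens as
  // 0xAB -> 0xABAB -> 0xABABABAB over two iterations of the loop above.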
9702
9703 SDValue SplatNode = DAG.getNode(
9704 PPCISD::XXSPLTI32DX, DL, MVT::v2i64, DAG.getBitcast(MVT::v2i64, LHS),
9705 Index, DAG.getTargetConstant(SplatVal, DL, MVT::i32));
9706 return DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, SplatNode);
9707}
9708
9709/// LowerROTL - Custom lowering for ROTL(v1i128) to vector_shuffle(v16i8).
9710/// We lower ROTL(v1i128) to vector_shuffle(v16i8) only if shift amount is
9711 /// a multiple of 8. Otherwise convert it to a scalar rotation (i128),
9712 /// i.e. (or (shl x, C1), (srl x, 128-C1)).
9713SDValue PPCTargetLowering::LowerROTL(SDValue Op, SelectionDAG &DAG) const {
9714 assert(Op.getOpcode() == ISD::ROTL && "Should only be called for ISD::ROTL");
9715 assert(Op.getValueType() == MVT::v1i128 &&
9716 "Only set v1i128 as custom, other type shouldn't reach here!");
9717 SDLoc dl(Op);
9718 SDValue N0 = peekThroughBitcasts(Op.getOperand(0));
9719 SDValue N1 = peekThroughBitcasts(Op.getOperand(1));
9720 unsigned SHLAmt = N1.getConstantOperandVal(0);
9721 if (SHLAmt % 8 == 0) {
9722 SmallVector<int, 16> Mask(16, 0);
9723 std::iota(Mask.begin(), Mask.end(), 0);
9724 std::rotate(Mask.begin(), Mask.begin() + SHLAmt / 8, Mask.end());
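    // For example, SHLAmt == 8 rotates the identity mask <0,1,...,15> to
    // <1,2,...,15,0>, shifting every byte over by one lane.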
9725 if (SDValue Shuffle =
9726 DAG.getVectorShuffle(MVT::v16i8, dl, DAG.getBitcast(MVT::v16i8, N0),
9727 DAG.getUNDEF(MVT::v16i8), Mask))
9728 return DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, Shuffle);
9729 }
9730 SDValue ArgVal = DAG.getBitcast(MVT::i128, N0);
9731 SDValue SHLOp = DAG.getNode(ISD::SHL, dl, MVT::i128, ArgVal,
9732 DAG.getConstant(SHLAmt, dl, MVT::i32));
9733 SDValue SRLOp = DAG.getNode(ISD::SRL, dl, MVT::i128, ArgVal,
9734 DAG.getConstant(128 - SHLAmt, dl, MVT::i32));
9735 SDValue OROp = DAG.getNode(ISD::OR, dl, MVT::i128, SHLOp, SRLOp);
9736 return DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, OROp);
9737}
9738
9739/// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this
9740/// is a shuffle we can handle in a single instruction, return it. Otherwise,
9741/// return the code it can be lowered into. Worst case, it can always be
9742/// lowered into a vperm.
9743SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
9744 SelectionDAG &DAG) const {
9745 SDLoc dl(Op);
9746 SDValue V1 = Op.getOperand(0);
9747 SDValue V2 = Op.getOperand(1);
9748 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9749
9750 // Any nodes that were combined in the target-independent combiner prior
9751 // to vector legalization will not be sent to the target combine. Try to
9752 // combine it here.
9753 if (SDValue NewShuffle = combineVectorShuffle(SVOp, DAG)) {
9754 if (!isa<ShuffleVectorSDNode>(NewShuffle))
9755 return NewShuffle;
9756 Op = NewShuffle;
9757 SVOp = cast<ShuffleVectorSDNode>(Op);
9758 V1 = Op.getOperand(0);
9759 V2 = Op.getOperand(1);
9760 }
9761 EVT VT = Op.getValueType();
9762 bool isLittleEndian = Subtarget.isLittleEndian();
9763
9764 unsigned ShiftElts, InsertAtByte;
9765 bool Swap = false;
9766
9767 // If this is a load-and-splat, we can do that with a single instruction
9768 // in some cases. However if the load has multiple uses, we don't want to
9769 // combine it because that will just produce multiple loads.
9770 bool IsPermutedLoad = false;
9771 const SDValue *InputLoad = getNormalLoadInput(V1, IsPermutedLoad);
9772 if (InputLoad && Subtarget.hasVSX() && V2.isUndef() &&
9773 (PPC::isSplatShuffleMask(SVOp, 4) || PPC::isSplatShuffleMask(SVOp, 8)) &&
9774 InputLoad->hasOneUse()) {
9775 bool IsFourByte = PPC::isSplatShuffleMask(SVOp, 4);
9776 int SplatIdx =
9777 PPC::getSplatIdxForPPCMnemonics(SVOp, IsFourByte ? 4 : 8, DAG);
9778
9779 // The splat index for permuted loads will be in the left half of the vector
9780 // which is strictly wider than the loaded value by 8 bytes. So we need to
9781 // adjust the splat index to point to the correct address in memory.
9782 if (IsPermutedLoad) {
9783 assert((isLittleEndian || IsFourByte) &&
9784 "Unexpected size for permuted load on big endian target");
9785 SplatIdx += IsFourByte ? 2 : 1;
9786 assert((SplatIdx < (IsFourByte ? 4 : 2)) &&
9787 "Splat of a value outside of the loaded memory");
9788 }
9789
9790 LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
9791 // For 4-byte load-and-splat, we need Power9.
9792 if ((IsFourByte && Subtarget.hasP9Vector()) || !IsFourByte) {
9793 uint64_t Offset = 0;
9794 if (IsFourByte)
9795 Offset = isLittleEndian ? (3 - SplatIdx) * 4 : SplatIdx * 4;
9796 else
9797 Offset = isLittleEndian ? (1 - SplatIdx) * 8 : SplatIdx * 8;
9798
9799 // If the width of the load is the same as the width of the splat,
9800 // loading with an offset would load the wrong memory.
9801 if (LD->getValueType(0).getSizeInBits() == (IsFourByte ? 32 : 64))
9802 Offset = 0;
9803
9804 SDValue BasePtr = LD->getBasePtr();
9805 if (Offset != 0)
9806 BasePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
9807 BasePtr, DAG.getIntPtrConstant(Offset, dl));
9808 SDValue Ops[] = {
9809 LD->getChain(), // Chain
9810 BasePtr, // BasePtr
9811 DAG.getValueType(Op.getValueType()) // VT
9812 };
9813 SDVTList VTL =
9814 DAG.getVTList(IsFourByte ? MVT::v4i32 : MVT::v2i64, MVT::Other);
9815 SDValue LdSplt =
9816 DAG.getMemIntrinsicNode(PPCISD::LD_SPLAT, dl, VTL,
9817 Ops, LD->getMemoryVT(), LD->getMemOperand());
9818 DAG.ReplaceAllUsesOfValueWith(InputLoad->getValue(1), LdSplt.getValue(1));
9819 if (LdSplt.getValueType() != SVOp->getValueType(0))
9820 LdSplt = DAG.getBitcast(SVOp->getValueType(0), LdSplt);
9821 return LdSplt;
9822 }
9823 }
9824 if (Subtarget.hasP9Vector() &&
9825 PPC::isXXINSERTWMask(SVOp, ShiftElts, InsertAtByte, Swap,
9826 isLittleEndian)) {
9827 if (Swap)
9828 std::swap(V1, V2);
9829 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9830 SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2);
9831 if (ShiftElts) {
9832 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv2, Conv2,
9833 DAG.getConstant(ShiftElts, dl, MVT::i32));
9834 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Shl,
9835 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9836 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9837 }
9838 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Conv2,
9839 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9840 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9841 }
9842
9843 if (Subtarget.hasPrefixInstrs()) {
9844 SDValue SplatInsertNode;
9845 if ((SplatInsertNode = lowerToXXSPLTI32DX(SVOp, DAG)))
9846 return SplatInsertNode;
9847 }
9848
9849 if (Subtarget.hasP9Altivec()) {
9850 SDValue NewISDNode;
9851 if ((NewISDNode = lowerToVINSERTH(SVOp, DAG)))
9852 return NewISDNode;
9853
9854 if ((NewISDNode = lowerToVINSERTB(SVOp, DAG)))
9855 return NewISDNode;
9856 }
9857
9858 if (Subtarget.hasVSX() &&
9859 PPC::isXXSLDWIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
9860 if (Swap)
9861 std::swap(V1, V2);
9862 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9863 SDValue Conv2 =
9864 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2.isUndef() ? V1 : V2);
9865
9866 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv1, Conv2,
9867 DAG.getConstant(ShiftElts, dl, MVT::i32));
9868 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Shl);
9869 }
9870
9871 if (Subtarget.hasVSX() &&
9872 PPC::isXXPERMDIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
9873 if (Swap)
9874 std::swap(V1, V2);
9875 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
9876 SDValue Conv2 =
9877 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2.isUndef() ? V1 : V2);
9878
9879 SDValue PermDI = DAG.getNode(PPCISD::XXPERMDI, dl, MVT::v2i64, Conv1, Conv2,
9880 DAG.getConstant(ShiftElts, dl, MVT::i32));
9881 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, PermDI);
9882 }
9883
9884 if (Subtarget.hasP9Vector()) {
9885 if (PPC::isXXBRHShuffleMask(SVOp)) {
9886 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
9887 SDValue ReveHWord = DAG.getNode(ISD::BSWAP, dl, MVT::v8i16, Conv);
9888 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveHWord);
9889 } else if (PPC::isXXBRWShuffleMask(SVOp)) {
9890 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9891 SDValue ReveWord = DAG.getNode(ISD::BSWAP, dl, MVT::v4i32, Conv);
9892 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveWord);
9893 } else if (PPC::isXXBRDShuffleMask(SVOp)) {
9894 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
9895 SDValue ReveDWord = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Conv);
9896 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveDWord);
9897 } else if (PPC::isXXBRQShuffleMask(SVOp)) {
9898 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, V1);
9899 SDValue ReveQWord = DAG.getNode(ISD::BSWAP, dl, MVT::v1i128, Conv);
9900 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveQWord);
9901 }
9902 }
9903
9904 if (Subtarget.hasVSX()) {
9905 if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) {
9906 int SplatIdx = PPC::getSplatIdxForPPCMnemonics(SVOp, 4, DAG);
9907
9908 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9909 SDValue Splat = DAG.getNode(PPCISD::XXSPLT, dl, MVT::v4i32, Conv,
9910 DAG.getConstant(SplatIdx, dl, MVT::i32));
9911 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Splat);
9912 }
9913
9914 // Left shifts of 8 bytes are actually swaps. Convert accordingly.
9915 if (V2.isUndef() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) {
9916 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
9917 SDValue Swap = DAG.getNode(PPCISD::SWAP_NO_CHAIN, dl, MVT::v2f64, Conv);
9918 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Swap);
9919 }
9920 }
9921
9922 // Cases that are handled by instructions that take permute immediates
9923 // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
9924 // selected by the instruction selector.
9925 if (V2.isUndef()) {
9926 if (PPC::isSplatShuffleMask(SVOp, 1) ||
9927 PPC::isSplatShuffleMask(SVOp, 2) ||
9928 PPC::isSplatShuffleMask(SVOp, 4) ||
9929 PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) ||
9930 PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) ||
9931 PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 ||
9932 PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) ||
9933 PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) ||
9934 PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) ||
9935 PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) ||
9936 PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) ||
9937 PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) ||
9938 (Subtarget.hasP8Altivec() && (
9939 PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) ||
9940 PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) ||
9941 PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)))) {
9942 return Op;
9943 }
9944 }
9945
9946 // Altivec has a variety of "shuffle immediates" that take two vector inputs
9947 // and produce a fixed permutation. If any of these match, do not lower to
9948 // VPERM.
9949 unsigned int ShuffleKind = isLittleEndian ? 2 : 0;
9950 if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) ||
9951 PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) ||
9952 PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 ||
9953 PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
9954 PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
9955 PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
9956 PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
9957 PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
9958 PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
9959 (Subtarget.hasP8Altivec() && (
9960 PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) ||
9961 PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) ||
9962 PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG))))
9963 return Op;
9964
9965 // Check to see if this is a shuffle of 4-byte values. If so, we can use our
9966 // perfect shuffle table to emit an optimal matching sequence.
9967 ArrayRef<int> PermMask = SVOp->getMask();
9968
9969 unsigned PFIndexes[4];
9970 bool isFourElementShuffle = true;
9971 for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
9972 unsigned EltNo = 8; // Start out undef.
9973 for (unsigned j = 0; j != 4; ++j) { // Intra-element byte.
9974 if (PermMask[i*4+j] < 0)
9975 continue; // Undef, ignore it.
9976
9977 unsigned ByteSource = PermMask[i*4+j];
9978 if ((ByteSource & 3) != j) {
9979 isFourElementShuffle = false;
9980 break;
9981 }
9982
9983 if (EltNo == 8) {
9984 EltNo = ByteSource/4;
9985 } else if (EltNo != ByteSource/4) {
9986 isFourElementShuffle = false;
9987 break;
9988 }
9989 }
9990 PFIndexes[i] = EltNo;
9991 }
9992
9993 // If this shuffle can be expressed as a shuffle of 4-byte elements, use the
9994 // perfect shuffle vector to determine if it is cost effective to do this as
9995 // discrete instructions, or whether we should use a vperm.
9996 // For now, we skip this for little endian until such time as we have a
9997 // little-endian perfect shuffle table.
9998 if (isFourElementShuffle && !isLittleEndian) {
9999 // Compute the index in the perfect shuffle table.
10000 unsigned PFTableIndex =
10001 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
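    // The table index is a base-9 encoding of the four element sources (0-7
    // for a source element, 8 for undef); e.g. PFIndexes = {0,1,2,3} yields
    // 0*729 + 1*81 + 2*9 + 3 = 102.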
10002
10003 unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
10004 unsigned Cost = (PFEntry >> 30);
10005
10006 // Determining when to avoid vperm is tricky. Many things affect the cost
10007 // of vperm, particularly how many times the perm mask needs to be computed.
10008 // For example, if the perm mask can be hoisted out of a loop or is already
10009 // used (perhaps because there are multiple permutes with the same shuffle
10010 // mask?) the vperm has a cost of 1. OTOH, hoisting the permute mask out of
10011 // the loop requires an extra register.
10012 //
10013 // As a compromise, we only emit discrete instructions if the shuffle can be
10014 // generated in 3 or fewer operations. When we have loop information
10015 // available, if this block is within a loop, we should avoid using vperm
10016 // for 3-operation perms and use a constant pool load instead.
10017 if (Cost < 3)
10018 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
10019 }
10020
10021 // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
10022 // vector that will get spilled to the constant pool.
10023 if (V2.isUndef()) V2 = V1;
10024
10025 // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
10026 // that it is in input element units, not in bytes. Convert now.
10027
10028 // For little endian, the order of the input vectors is reversed, and
10029 // the permutation mask is complemented with respect to 31. This is
10030 // necessary to produce proper semantics with the big-endian-biased vperm
10031 // instruction.
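  // For example, with 4-byte elements, mask element SrcElt == 1 contributes
  // bytes 4,5,6,7 to the control vector on big endian but bytes 27,26,25,24
  // (31 minus each) on little endian.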
10032 EVT EltVT = V1.getValueType().getVectorElementType();
10033 unsigned BytesPerElement = EltVT.getSizeInBits()/8;
10034
10035 SmallVector<SDValue, 16> ResultMask;
10036 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
10037 unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];
10038
10039 for (unsigned j = 0; j != BytesPerElement; ++j)
10040 if (isLittleEndian)
10041 ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j),
10042 dl, MVT::i32));
10043 else
10044 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl,
10045 MVT::i32));
10046 }
10047
10048 ShufflesHandledWithVPERM++;
10049 SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask);
10050 LLVM_DEBUG(dbgs() << "Emitting a VPERM for the following shuffle:\n");
10051 LLVM_DEBUG(SVOp->dump());
10052 LLVM_DEBUG(dbgs() << "With the following permute control vector:\n");
10053 LLVM_DEBUG(VPermMask.dump());
10054
10055 if (isLittleEndian)
10056 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
10057 V2, V1, VPermMask);
10058 else
10059 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
10060 V1, V2, VPermMask);
10061}
10062
10063/// getVectorCompareInfo - Given an intrinsic, return false if it is not a
10064/// vector comparison. If it is, return true and fill in Opc/isDot with
10065/// information about the intrinsic.
10066static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc,
10067 bool &isDot, const PPCSubtarget &Subtarget) {
10068 unsigned IntrinsicID =
10069 cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue();
10070 CompareOpc = -1;
10071 isDot = false;
10072 switch (IntrinsicID) {
10073 default:
10074 return false;
10075 // Comparison predicates.
10076 case Intrinsic::ppc_altivec_vcmpbfp_p:
10077 CompareOpc = 966;
10078 isDot = true;
10079 break;
10080 case Intrinsic::ppc_altivec_vcmpeqfp_p:
10081 CompareOpc = 198;
10082 isDot = true;
10083 break;
10084 case Intrinsic::ppc_altivec_vcmpequb_p:
10085 CompareOpc = 6;
10086 isDot = true;
10087 break;
10088 case Intrinsic::ppc_altivec_vcmpequh_p:
10089 CompareOpc = 70;
10090 isDot = true;
10091 break;
10092 case Intrinsic::ppc_altivec_vcmpequw_p:
10093 CompareOpc = 134;
10094 isDot = true;
10095 break;
10096 case Intrinsic::ppc_altivec_vcmpequd_p:
10097 if (Subtarget.hasVSX() || Subtarget.hasP8Altivec()) {
10098 CompareOpc = 199;
10099 isDot = true;
10100 } else
10101 return false;
10102 break;
10103 case Intrinsic::ppc_altivec_vcmpneb_p:
10104 case Intrinsic::ppc_altivec_vcmpneh_p:
10105 case Intrinsic::ppc_altivec_vcmpnew_p:
10106 case Intrinsic::ppc_altivec_vcmpnezb_p:
10107 case Intrinsic::ppc_altivec_vcmpnezh_p:
10108 case Intrinsic::ppc_altivec_vcmpnezw_p:
10109 if (Subtarget.hasP9Altivec()) {
10110 switch (IntrinsicID) {
10111 default:
10112 llvm_unreachable("Unknown comparison intrinsic.");
10113 case Intrinsic::ppc_altivec_vcmpneb_p:
10114 CompareOpc = 7;
10115 break;
10116 case Intrinsic::ppc_altivec_vcmpneh_p:
10117 CompareOpc = 71;
10118 break;
10119 case Intrinsic::ppc_altivec_vcmpnew_p:
10120 CompareOpc = 135;
10121 break;
10122 case Intrinsic::ppc_altivec_vcmpnezb_p:
10123 CompareOpc = 263;
10124 break;
10125 case Intrinsic::ppc_altivec_vcmpnezh_p:
10126 CompareOpc = 327;
10127 break;
10128 case Intrinsic::ppc_altivec_vcmpnezw_p:
10129 CompareOpc = 391;
10130 break;
10131 }
10132 isDot = true;
10133 } else
10134 return false;
10135 break;
10136 case Intrinsic::ppc_altivec_vcmpgefp_p:
10137 CompareOpc = 454;
10138 isDot = true;
10139 break;
10140 case Intrinsic::ppc_altivec_vcmpgtfp_p:
10141 CompareOpc = 710;
10142 isDot = true;
10143 break;
10144 case Intrinsic::ppc_altivec_vcmpgtsb_p:
10145 CompareOpc = 774;
10146 isDot = true;
10147 break;
10148 case Intrinsic::ppc_altivec_vcmpgtsh_p:
10149 CompareOpc = 838;
10150 isDot = true;
10151 break;
10152 case Intrinsic::ppc_altivec_vcmpgtsw_p:
10153 CompareOpc = 902;
10154 isDot = true;
10155 break;
10156 case Intrinsic::ppc_altivec_vcmpgtsd_p:
10157 if (Subtarget.hasVSX() || Subtarget.hasP8Altivec()) {
10158 CompareOpc = 967;
10159 isDot = true;
10160 } else
10161 return false;
10162 break;
10163 case Intrinsic::ppc_altivec_vcmpgtub_p:
10164 CompareOpc = 518;
10165 isDot = true;
10166 break;
10167 case Intrinsic::ppc_altivec_vcmpgtuh_p:
10168 CompareOpc = 582;
10169 isDot = true;
10170 break;
10171 case Intrinsic::ppc_altivec_vcmpgtuw_p:
10172 CompareOpc = 646;
10173 isDot = true;
10174 break;
10175 case Intrinsic::ppc_altivec_vcmpgtud_p:
10176 if (Subtarget.hasVSX() || Subtarget.hasP8Altivec()) {
10177 CompareOpc = 711;
10178 isDot = true;
10179 } else
10180 return false;
10181 break;
10182
10183 case Intrinsic::ppc_altivec_vcmpequq:
10184 case Intrinsic::ppc_altivec_vcmpgtsq:
10185 case Intrinsic::ppc_altivec_vcmpgtuq:
10186 if (!Subtarget.isISA3_1())
10187 return false;
10188 switch (IntrinsicID) {
10189 default:
10190 llvm_unreachable("Unknown comparison intrinsic.");
10191 case Intrinsic::ppc_altivec_vcmpequq:
10192 CompareOpc = 455;
10193 break;
10194 case Intrinsic::ppc_altivec_vcmpgtsq:
10195 CompareOpc = 903;
10196 break;
10197 case Intrinsic::ppc_altivec_vcmpgtuq:
10198 CompareOpc = 647;
10199 break;
10200 }
10201 break;
10202
10203 // VSX predicate comparisons use the same infrastructure
10204 case Intrinsic::ppc_vsx_xvcmpeqdp_p:
10205 case Intrinsic::ppc_vsx_xvcmpgedp_p:
10206 case Intrinsic::ppc_vsx_xvcmpgtdp_p:
10207 case Intrinsic::ppc_vsx_xvcmpeqsp_p:
10208 case Intrinsic::ppc_vsx_xvcmpgesp_p:
10209 case Intrinsic::ppc_vsx_xvcmpgtsp_p:
10210 if (Subtarget.hasVSX()) {
10211 switch (IntrinsicID) {
10212 case Intrinsic::ppc_vsx_xvcmpeqdp_p:
10213 CompareOpc = 99;
10214 break;
10215 case Intrinsic::ppc_vsx_xvcmpgedp_p:
10216 CompareOpc = 115;
10217 break;
10218 case Intrinsic::ppc_vsx_xvcmpgtdp_p:
10219 CompareOpc = 107;
10220 break;
10221 case Intrinsic::ppc_vsx_xvcmpeqsp_p:
10222 CompareOpc = 67;
10223 break;
10224 case Intrinsic::ppc_vsx_xvcmpgesp_p:
10225 CompareOpc = 83;
10226 break;
10227 case Intrinsic::ppc_vsx_xvcmpgtsp_p:
10228 CompareOpc = 75;
10229 break;
10230 }
10231 isDot = true;
10232 } else
10233 return false;
10234 break;
10235
10236 // Normal Comparisons.
10237 case Intrinsic::ppc_altivec_vcmpbfp:
10238 CompareOpc = 966;
10239 break;
10240 case Intrinsic::ppc_altivec_vcmpeqfp:
10241 CompareOpc = 198;
10242 break;
10243 case Intrinsic::ppc_altivec_vcmpequb:
10244 CompareOpc = 6;
10245 break;
10246 case Intrinsic::ppc_altivec_vcmpequh:
10247 CompareOpc = 70;
10248 break;
10249 case Intrinsic::ppc_altivec_vcmpequw:
10250 CompareOpc = 134;
10251 break;
10252 case Intrinsic::ppc_altivec_vcmpequd:
10253 if (Subtarget.hasP8Altivec())
10254 CompareOpc = 199;
10255 else
10256 return false;
10257 break;
10258 case Intrinsic::ppc_altivec_vcmpneb:
10259 case Intrinsic::ppc_altivec_vcmpneh:
10260 case Intrinsic::ppc_altivec_vcmpnew:
10261 case Intrinsic::ppc_altivec_vcmpnezb:
10262 case Intrinsic::ppc_altivec_vcmpnezh:
10263 case Intrinsic::ppc_altivec_vcmpnezw:
10264 if (Subtarget.hasP9Altivec())
10265 switch (IntrinsicID) {
10266 default:
10267 llvm_unreachable("Unknown comparison intrinsic.");
10268 case Intrinsic::ppc_altivec_vcmpneb:
10269 CompareOpc = 7;
10270 break;
10271 case Intrinsic::ppc_altivec_vcmpneh:
10272 CompareOpc = 71;
10273 break;
10274 case Intrinsic::ppc_altivec_vcmpnew:
10275 CompareOpc = 135;
10276 break;
10277 case Intrinsic::ppc_altivec_vcmpnezb:
10278 CompareOpc = 263;
10279 break;
10280 case Intrinsic::ppc_altivec_vcmpnezh:
10281 CompareOpc = 327;
10282 break;
10283 case Intrinsic::ppc_altivec_vcmpnezw:
10284 CompareOpc = 391;
10285 break;
10286 }
10287 else
10288 return false;
10289 break;
10290 case Intrinsic::ppc_altivec_vcmpgefp:
10291 CompareOpc = 454;
10292 break;
10293 case Intrinsic::ppc_altivec_vcmpgtfp:
10294 CompareOpc = 710;
10295 break;
10296 case Intrinsic::ppc_altivec_vcmpgtsb:
10297 CompareOpc = 774;
10298 break;
10299 case Intrinsic::ppc_altivec_vcmpgtsh:
10300 CompareOpc = 838;
10301 break;
10302 case Intrinsic::ppc_altivec_vcmpgtsw:
10303 CompareOpc = 902;
10304 break;
10305 case Intrinsic::ppc_altivec_vcmpgtsd:
10306 if (Subtarget.hasP8Altivec())
10307 CompareOpc = 967;
10308 else
10309 return false;
10310 break;
10311 case Intrinsic::ppc_altivec_vcmpgtub:
10312 CompareOpc = 518;
10313 break;
10314 case Intrinsic::ppc_altivec_vcmpgtuh:
10315 CompareOpc = 582;
10316 break;
10317 case Intrinsic::ppc_altivec_vcmpgtuw:
10318 CompareOpc = 646;
10319 break;
10320 case Intrinsic::ppc_altivec_vcmpgtud:
10321 if (Subtarget.hasP8Altivec())
10322 CompareOpc = 711;
10323 else
10324 return false;
10325 break;
10326 case Intrinsic::ppc_altivec_vcmpequq_p:
10327 case Intrinsic::ppc_altivec_vcmpgtsq_p:
10328 case Intrinsic::ppc_altivec_vcmpgtuq_p:
10329 if (!Subtarget.isISA3_1())
10330 return false;
10331 switch (IntrinsicID) {
10332 default:
10333 llvm_unreachable("Unknown comparison intrinsic.");
10334 case Intrinsic::ppc_altivec_vcmpequq_p:
10335 CompareOpc = 455;
10336 break;
10337 case Intrinsic::ppc_altivec_vcmpgtsq_p:
10338 CompareOpc = 903;
10339 break;
10340 case Intrinsic::ppc_altivec_vcmpgtuq_p:
10341 CompareOpc = 647;
10342 break;
10343 }
10344 isDot = true;
10345 break;
10346 }
10347 return true;
10348}
10349
10350/// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
10351/// lower, do it, otherwise return null.
10352SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
10353 SelectionDAG &DAG) const {
10354 unsigned IntrinsicID =
10355 cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
10356
10357 SDLoc dl(Op);
10358
10359 switch (IntrinsicID) {
10360 case Intrinsic::thread_pointer:
10361 // Reads the thread pointer register, used for __builtin_thread_pointer.
10362 if (Subtarget.isPPC64())
10363 return DAG.getRegister(PPC::X13, MVT::i64);
10364 return DAG.getRegister(PPC::R2, MVT::i32);
10365
10366 case Intrinsic::ppc_mma_disassemble_acc:
10367 case Intrinsic::ppc_vsx_disassemble_pair: {
10368 int NumVecs = 2;
10369 SDValue WideVec = Op.getOperand(1);
10370 if (IntrinsicID == Intrinsic::ppc_mma_disassemble_acc) {
10371 NumVecs = 4;
10372 WideVec = DAG.getNode(PPCISD::XXMFACC, dl, MVT::v512i1, WideVec);
10373 }
10374 SmallVector<SDValue, 4> RetOps;
10375 for (int VecNo = 0; VecNo < NumVecs; VecNo++) {
10376 SDValue Extract = DAG.getNode(
10377 PPCISD::EXTRACT_VSX_REG, dl, MVT::v16i8, WideVec,
10378 DAG.getConstant(Subtarget.isLittleEndian() ? NumVecs - 1 - VecNo
10379 : VecNo,
10380 dl, getPointerTy(DAG.getDataLayout())));
10381 RetOps.push_back(Extract);
10382 }
10383 return DAG.getMergeValues(RetOps, dl);
10384 }
10385 }
10386
10387 // If this is a lowered altivec predicate compare, CompareOpc is set to the
10388 // opcode number of the comparison.
10389 int CompareOpc;
10390 bool isDot;
10391 if (!getVectorCompareInfo(Op, CompareOpc, isDot, Subtarget))
10392 return SDValue(); // Don't custom lower most intrinsics.
10393
10394 // If this is a non-dot comparison, make the VCMP node and we are done.
10395 if (!isDot) {
10396 SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(),
10397 Op.getOperand(1), Op.getOperand(2),
10398 DAG.getConstant(CompareOpc, dl, MVT::i32));
10399 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp);
10400 }
10401
10402 // Create the PPCISD altivec 'dot' comparison node.
10403 SDValue Ops[] = {
10404 Op.getOperand(2), // LHS
10405 Op.getOperand(3), // RHS
10406 DAG.getConstant(CompareOpc, dl, MVT::i32)
10407 };
10408 EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue };
10409 SDValue CompNode = DAG.getNode(PPCISD::VCMP_rec, dl, VTs, Ops);
10410
10411 // Now that we have the comparison, emit a copy from the CR to a GPR.
10412 // This is flagged to the above dot comparison.
10413 SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32,
10414 DAG.getRegister(PPC::CR6, MVT::i32),
10415 CompNode.getValue(1));
10416
10417 // Unpack the result based on how the target uses it.
10418 unsigned BitNo; // Bit # of CR6.
10419 bool InvertBit; // Invert result?
10420 switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
10421 default: // Can't happen, don't crash on invalid number though.
10422 case 0: // Return the value of the EQ bit of CR6.
10423 BitNo = 0; InvertBit = false;
10424 break;
10425 case 1: // Return the inverted value of the EQ bit of CR6.
10426 BitNo = 0; InvertBit = true;
10427 break;
10428 case 2: // Return the value of the LT bit of CR6.
10429 BitNo = 2; InvertBit = false;
10430 break;
10431 case 3: // Return the inverted value of the LT bit of CR6.
10432 BitNo = 2; InvertBit = true;
10433 break;
10434 }
10435
10436 // Shift the bit into the low position.
10437 Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags,
10438 DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32));
10439 // Isolate the bit.
10440 Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags,
10441 DAG.getConstant(1, dl, MVT::i32));
10442
10443 // If we are supposed to, toggle the bit.
10444 if (InvertBit)
10445 Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
10446 DAG.getConstant(1, dl, MVT::i32));
10447 return Flags;
10448}
10449
10450SDValue PPCTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
10451 SelectionDAG &DAG) const {
10452 // SelectionDAGBuilder::visitTargetIntrinsic may insert one extra chain to
10453 // the beginning of the argument list.
10454 int ArgStart = isa<ConstantSDNode>(Op.getOperand(0)) ? 0 : 1;
10455 SDLoc DL(Op);
10456 switch (cast<ConstantSDNode>(Op.getOperand(ArgStart))->getZExtValue()) {
10457 case Intrinsic::ppc_cfence: {
10458 assert(ArgStart == 1 && "llvm.ppc.cfence must carry a chain argument.");
10459 assert(Subtarget.isPPC64() && "Only 64-bit is supported for now.");
10460 SDValue Val = Op.getOperand(ArgStart + 1);
10461 EVT Ty = Val.getValueType();
10462 if (Ty == MVT::i128) {
10463 // FIXME: Testing one of two paired registers is sufficient to guarantee
10464 // ordering?
10465 Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i64, Val);
10466 }
10467 return SDValue(
10468 DAG.getMachineNode(PPC::CFENCE8, DL, MVT::Other,
10469 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Val),
10470 Op.getOperand(0)),
10471 0);
10472 }
10473 default:
10474 break;
10475 }
10476 return SDValue();
10477}
10478
10479// Lower scalar BSWAP64 to xxbrd.
10480SDValue PPCTargetLowering::LowerBSWAP(SDValue Op, SelectionDAG &DAG) const {
10481 SDLoc dl(Op);
10482 if (!Subtarget.isPPC64())
10483 return Op;
10484 // MTVSRDD
10485 Op = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, Op.getOperand(0),
10486 Op.getOperand(0));
10487 // XXBRD
10488 Op = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Op);
10489 // MFVSRD
10490 int VectorIndex = 0;
10491 if (Subtarget.isLittleEndian())
10492 VectorIndex = 1;
10493 Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Op,
10494 DAG.getTargetConstant(VectorIndex, dl, MVT::i32));
10495 return Op;
10496}
10497
10498// ATOMIC_CMP_SWAP for i8/i16 needs to zero-extend its input since it will be
10499// compared to a value that is atomically loaded (atomic loads zero-extend).
10500SDValue PPCTargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op,
10501 SelectionDAG &DAG) const {
10502 assert(Op.getOpcode() == ISD::ATOMIC_CMP_SWAP &&
10503 "Expecting an atomic compare-and-swap here.");
10504 SDLoc dl(Op);
10505 auto *AtomicNode = cast<AtomicSDNode>(Op.getNode());
10506 EVT MemVT = AtomicNode->getMemoryVT();
10507 if (MemVT.getSizeInBits() >= 32)
10508 return Op;
10509
10510 SDValue CmpOp = Op.getOperand(2);
10511 // If this is already correctly zero-extended, leave it alone.
10512 auto HighBits = APInt::getHighBitsSet(32, 32 - MemVT.getSizeInBits());
10513 if (DAG.MaskedValueIsZero(CmpOp, HighBits))
10514 return Op;
10515
10516 // Clear the high bits of the compare operand.
10517 unsigned MaskVal = (1 << MemVT.getSizeInBits()) - 1;
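  // For i8 this mask is 0xFF and for i16 it is 0xFFFF, so the AND below
  // mirrors the zero-extension performed by the atomic load.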
10518 SDValue NewCmpOp =
10519 DAG.getNode(ISD::AND, dl, MVT::i32, CmpOp,
10520 DAG.getConstant(MaskVal, dl, MVT::i32));
10521
10522 // Replace the existing compare operand with the properly zero-extended one.
10523 SmallVector<SDValue, 4> Ops;
10524 for (int i = 0, e = AtomicNode->getNumOperands(); i < e; i++)
10525 Ops.push_back(AtomicNode->getOperand(i));
10526 Ops[2] = NewCmpOp;
10527 MachineMemOperand *MMO = AtomicNode->getMemOperand();
10528 SDVTList Tys = DAG.getVTList(MVT::i32, MVT::Other);
10529 auto NodeTy =
10530 (MemVT == MVT::i8) ? PPCISD::ATOMIC_CMP_SWAP_8 : PPCISD::ATOMIC_CMP_SWAP_16;
10531 return DAG.getMemIntrinsicNode(NodeTy, dl, Tys, Ops, MemVT, MMO);
10532}
10533
10534SDValue PPCTargetLowering::LowerATOMIC_LOAD_STORE(SDValue Op,
10535 SelectionDAG &DAG) const {
10536 AtomicSDNode *N = cast<AtomicSDNode>(Op.getNode());
10537 EVT MemVT = N->getMemoryVT();
10538 assert(MemVT.getSimpleVT() == MVT::i128 &&
10539 "Expect quadword atomic operations");
10540 SDLoc dl(N);
10541 unsigned Opc = N->getOpcode();
10542 switch (Opc) {
10543 case ISD::ATOMIC_LOAD: {
10544 // Lower quadword atomic load to int_ppc_atomic_load_i128, which will be
10545 // lowered to ppc instructions by the pattern-matching instruction selector.
10546 SDVTList Tys = DAG.getVTList(MVT::i64, MVT::i64, MVT::Other);
10547 SmallVector<SDValue, 4> Ops{
10548 N->getOperand(0),
10549 DAG.getConstant(Intrinsic::ppc_atomic_load_i128, dl, MVT::i32)};
10550 for (int I = 1, E = N->getNumOperands(); I < E; ++I)
10551 Ops.push_back(N->getOperand(I));
10552 SDValue LoadedVal = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl, Tys,
10553 Ops, MemVT, N->getMemOperand());
10554 SDValue ValLo = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i128, LoadedVal);
10555 SDValue ValHi =
10556 DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i128, LoadedVal.getValue(1));
10557 ValHi = DAG.getNode(ISD::SHL, dl, MVT::i128, ValHi,
10558 DAG.getConstant(64, dl, MVT::i32));
10559 SDValue Val =
10560 DAG.getNode(ISD::OR, dl, {MVT::i128, MVT::Other}, {ValLo, ValHi});
10561 return DAG.getNode(ISD::MERGE_VALUES, dl, {MVT::i128, MVT::Other},
10562 {Val, LoadedVal.getValue(2)});
10563 }
10564 case ISD::ATOMIC_STORE: {
10565 // Lower quadword atomic store to int_ppc_atomic_store_i128, which will be
10566 // lowered to ppc instructions by the pattern-matching instruction selector.
10567 SDVTList Tys = DAG.getVTList(MVT::Other);
10568 SmallVector<SDValue, 4> Ops{
10569 N->getOperand(0),
10570 DAG.getConstant(Intrinsic::ppc_atomic_store_i128, dl, MVT::i32)};
10571 SDValue Val = N->getOperand(2);
10572 SDValue ValLo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i64, Val);
10573 SDValue ValHi = DAG.getNode(ISD::SRL, dl, MVT::i128, Val,
10574 DAG.getConstant(64, dl, MVT::i32));
10575 ValHi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i64, ValHi);
10576 Ops.push_back(ValLo);
10577 Ops.push_back(ValHi);
10578 Ops.push_back(N->getOperand(1));
10579 return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, dl, Tys, Ops, MemVT,
10580 N->getMemOperand());
10581 }
10582 default:
10583 llvm_unreachable("Unexpected atomic opcode");
10584 }
10585}
10586
10587SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
10588 SelectionDAG &DAG) const {
10589 SDLoc dl(Op);
10590 // Create a stack slot that is 16-byte aligned.
10591 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
10592 int FrameIdx = MFI.CreateStackObject(16, Align(16), false);
10593 EVT PtrVT = getPointerTy(DAG.getDataLayout());
10594 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
10595
10596 // Store the input value into Value#0 of the stack slot.
10597 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
10598 MachinePointerInfo());
10599 // Load it out.
10600 return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo());
10601}
10602
10603SDValue PPCTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
10604 SelectionDAG &DAG) const {
10605 assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT &&
10606 "Should only be called for ISD::INSERT_VECTOR_ELT");
10607
10608 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(2));
10609
10610 EVT VT = Op.getValueType();
10611 SDLoc dl(Op);
10612 SDValue V1 = Op.getOperand(0);
10613 SDValue V2 = Op.getOperand(1);
10614 SDValue V3 = Op.getOperand(2);
10615
10616 if (VT == MVT::v2f64 && C)
10617 return Op;
10618
10619 if (Subtarget.isISA3_1()) {
10620 if ((VT == MVT::v2i64 || VT == MVT::v2f64) && !Subtarget.isPPC64())
10621 return SDValue();
10622 // On P10, we have legal lowering for constant and variable indices for
10623 // integer vectors.
10624 if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
10625 VT == MVT::v2i64)
10626 return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, V2, V3);
10627 // For f32 and f64 vectors, we have legal lowering for variable indices.
10628 // For f32 we also have legal lowering when the element is loaded from
10629 // memory.
10630 if (VT == MVT::v4f32 || VT == MVT::v2f64) {
10631 if (!C || (VT == MVT::v4f32 && dyn_cast<LoadSDNode>(V2)))
10632 return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, V2, V3);
10633 return Op;
10634 }
10635 }
10636
10637 // Before P10, we have legal lowering for constant indices but not for
10638 // variable ones.
10639 if (!C)
10640 return SDValue();
10641
10642 // We can use MTVSRZ + VECINSERT for v8i16 and v16i8 types.
10643 if (VT == MVT::v8i16 || VT == MVT::v16i8) {
10644 SDValue Mtvsrz = DAG.getNode(PPCISD::MTVSRZ, dl, VT, V2);
10645 unsigned BytesInEachElement = VT.getVectorElementType().getSizeInBits() / 8;
10646 unsigned InsertAtElement = C->getZExtValue();
10647 unsigned InsertAtByte = InsertAtElement * BytesInEachElement;
10648 if (Subtarget.isLittleEndian()) {
10649 InsertAtByte = (16 - BytesInEachElement) - InsertAtByte;
10650 }
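    // For example, inserting into element 0 of a v8i16 targets byte 0 on big
    // endian but byte 14 after the little-endian adjustment above.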
10651 return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, Mtvsrz,
10652 DAG.getConstant(InsertAtByte, dl, MVT::i32));
10653 }
10654 return Op;
10655}
10656
10657SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op,
10658 SelectionDAG &DAG) const {
10659 SDLoc dl(Op);
10660 LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
10661 SDValue LoadChain = LN->getChain();
10662 SDValue BasePtr = LN->getBasePtr();
10663 EVT VT = Op.getValueType();
10664
10665 if (VT != MVT::v256i1 && VT != MVT::v512i1)
10666 return Op;
10667
10668 // Type v256i1 is used for pairs and v512i1 is used for accumulators.
10669 // Here we create 2 or 4 v16i8 loads to load the pair or accumulator value in
10670 // 2 or 4 vsx registers.
10671 assert((VT != MVT::v512i1 || Subtarget.hasMMA()) &&
10672 "Type unsupported without MMA");
10673 assert((VT != MVT::v256i1 || Subtarget.pairedVectorMemops()) &&
10674 "Type unsupported without paired vector support");
10675 Align Alignment = LN->getAlign();
10676 SmallVector<SDValue, 4> Loads;
10677 SmallVector<SDValue, 4> LoadChains;
10678 unsigned NumVecs = VT.getSizeInBits() / 128;
10679 for (unsigned Idx = 0; Idx < NumVecs; ++Idx) {
10680 SDValue Load =
10681 DAG.getLoad(MVT::v16i8, dl, LoadChain, BasePtr,
10682 LN->getPointerInfo().getWithOffset(Idx * 16),
10683 commonAlignment(Alignment, Idx * 16),
10684 LN->getMemOperand()->getFlags(), LN->getAAInfo());
10685 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
10686 DAG.getConstant(16, dl, BasePtr.getValueType()));
10687 Loads.push_back(Load);
10688 LoadChains.push_back(Load.getValue(1));
10689 }
10690 if (Subtarget.isLittleEndian()) {
10691 std::reverse(Loads.begin(), Loads.end());
10692 std::reverse(LoadChains.begin(), LoadChains.end());
10693 }
10694 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
10695 SDValue Value =
10696 DAG.getNode(VT == MVT::v512i1 ? PPCISD::ACC_BUILD : PPCISD::PAIR_BUILD,
10697 dl, VT, Loads);
10698 SDValue RetOps[] = {Value, TF};
10699 return DAG.getMergeValues(RetOps, dl);
10700}
10701
10702SDValue PPCTargetLowering::LowerVectorStore(SDValue Op,
10703 SelectionDAG &DAG) const {
10704 SDLoc dl(Op);
10705 StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
10706 SDValue StoreChain = SN->getChain();
10707 SDValue BasePtr = SN->getBasePtr();
10708 SDValue Value = SN->getValue();
10709 EVT StoreVT = Value.getValueType();
10710
10711 if (StoreVT != MVT::v256i1 && StoreVT != MVT::v512i1)
10712 return Op;
10713
10714 // Type v256i1 is used for pairs and v512i1 is used for accumulators.
10715 // Here we create 2 or 4 v16i8 stores to store the pair's or accumulator's
10716 // underlying registers individually.
10717 assert((StoreVT != MVT::v512i1 || Subtarget.hasMMA()) &&
10718 "Type unsupported without MMA");
10719 assert((StoreVT != MVT::v256i1 || Subtarget.pairedVectorMemops()) &&
10720 "Type unsupported without paired vector support");
10721 Align Alignment = SN->getAlign();
10722 SmallVector<SDValue, 4> Stores;
10723 unsigned NumVecs = 2;
10724 if (StoreVT == MVT::v512i1) {
10725 Value = DAG.getNode(PPCISD::XXMFACC, dl, MVT::v512i1, Value);
10726 NumVecs = 4;
10727 }
10728 for (unsigned Idx = 0; Idx < NumVecs; ++Idx) {
10729 unsigned VecNum = Subtarget.isLittleEndian() ? NumVecs - 1 - Idx : Idx;
10730 SDValue Elt = DAG.getNode(PPCISD::EXTRACT_VSX_REG, dl, MVT::v16i8, Value,
10731 DAG.getConstant(VecNum, dl, getPointerTy(DAG.getDataLayout())));
10732 SDValue Store =
10733 DAG.getStore(StoreChain, dl, Elt, BasePtr,
10734 SN->getPointerInfo().getWithOffset(Idx * 16),
10735 commonAlignment(Alignment, Idx * 16),
10736 SN->getMemOperand()->getFlags(), SN->getAAInfo());
10737 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
10738 DAG.getConstant(16, dl, BasePtr.getValueType()));
10739 Stores.push_back(Store);
10740 }
10741 SDValue TF = DAG.getTokenFactor(dl, Stores);
10742 return TF;
10743}
10744
10745SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
10746 SDLoc dl(Op);
10747 if (Op.getValueType() == MVT::v4i32) {
10748 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
10749
10750 SDValue Zero = getCanonicalConstSplat(0, 1, MVT::v4i32, DAG, dl);
10751 // +16 as shift amt.
10752 SDValue Neg16 = getCanonicalConstSplat(-16, 4, MVT::v4i32, DAG, dl);
10753 SDValue RHSSwap = // = vrlw RHS, 16
10754 BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl);
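    // With each 32-bit word split as a = (aHi << 16) | aLo, the product mod
    // 2^32 is aLo*bLo + ((aHi*bLo + aLo*bHi) << 16); vmulouh supplies the
    // first term and vmsumuhm on the rotated RHS supplies the second.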
10755
10756 // Shrinkify inputs to v8i16.
10757 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS);
10758 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS);
10759 RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap);
10760
10761 // Low parts multiplied together, generating 32-bit results (we ignore the
10762 // top parts).
10763 SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
10764 LHS, RHS, DAG, dl, MVT::v4i32);
10765
10766 SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
10767 LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32);
10768 // Shift the high parts up 16 bits.
10769 HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd,
10770 Neg16, DAG, dl);
10771 return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd);
10772 } else if (Op.getValueType() == MVT::v16i8) {
10773 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
10774 bool isLittleEndian = Subtarget.isLittleEndian();
10775
10776 // Multiply the even 8-bit parts, producing 16-bit sums.
10777 SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
10778 LHS, RHS, DAG, dl, MVT::v8i16);
10779 EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts);
10780
10781 // Multiply the odd 8-bit parts, producing 16-bit sums.
10782 SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
10783 LHS, RHS, DAG, dl, MVT::v8i16);
10784 OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts);
10785
10786 // Merge the results together. Because vmuleub and vmuloub are
10787 // instructions with a big-endian bias, we must reverse the
10788 // element numbering and reverse the meaning of "odd" and "even"
10789 // when generating little endian code.
10790 int Ops[16];
10791 for (unsigned i = 0; i != 8; ++i) {
10792 if (isLittleEndian) {
10793 Ops[i*2 ] = 2*i;
10794 Ops[i*2+1] = 2*i+16;
10795 } else {
10796 Ops[i*2 ] = 2*i+1;
10797 Ops[i*2+1] = 2*i+1+16;
10798 }
10799 }
10800 if (isLittleEndian)
10801 return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops);
10802 else
10803 return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops);
10804 } else {
10805 llvm_unreachable("Unknown mul to lower!");
10806 }
10807}
10808
10809SDValue PPCTargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
10810 bool IsStrict = Op->isStrictFPOpcode();
10811 if (Op.getOperand(IsStrict ? 1 : 0).getValueType() == MVT::f128 &&
10812 !Subtarget.hasP9Vector())
10813 return SDValue();
10814
10815 return Op;
10816}
10817
10818 // Custom lowering for fpext v2f32 to v2f64
10819SDValue PPCTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
10820
10821 assert(Op.getOpcode() == ISD::FP_EXTEND &&
10822 "Should only be called for ISD::FP_EXTEND");
10823
10824 // FIXME: handle extends from half precision float vectors on P9.
10825 // We only want to custom lower an extend from v2f32 to v2f64.
10826 if (Op.getValueType() != MVT::v2f64 ||
10827 Op.getOperand(0).getValueType() != MVT::v2f32)
10828 return SDValue();
10829
10830 SDLoc dl(Op);
10831 SDValue Op0 = Op.getOperand(0);
10832
10833 switch (Op0.getOpcode()) {
10834 default:
10835 return SDValue();
10836 case ISD::EXTRACT_SUBVECTOR: {
10837 assert(Op0.getNumOperands() == 2 &&
10838 isa<ConstantSDNode>(Op0->getOperand(1)) &&
10839 "Node should have 2 operands with second one being a constant!");
10840
10841 if (Op0.getOperand(0).getValueType() != MVT::v4f32)
10842 return SDValue();
10843
10844 // Custom lower is only done for high or low doubleword.
10845 int Idx = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
10846 if (Idx % 2 != 0)
10847 return SDValue();
10848
10849 // Since input is v4f32, at this point Idx is either 0 or 2.
10850 // Shift to get the doubleword position we want.
10851 int DWord = Idx >> 1;
10852
10853 // High and low word positions are different on little endian.
10854 if (Subtarget.isLittleEndian())
10855 DWord ^= 0x1;
10856
10857 return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64,
10858 Op0.getOperand(0), DAG.getConstant(DWord, dl, MVT::i32));
10859 }
10860 case ISD::FADD:
10861 case ISD::FMUL:
10862 case ISD::FSUB: {
10863 SDValue NewLoad[2];
10864 for (unsigned i = 0, ie = Op0.getNumOperands(); i != ie; ++i) {
10865 // Ensure both inputs are loads.
10866 SDValue LdOp = Op0.getOperand(i);
10867 if (LdOp.getOpcode() != ISD::LOAD)
10868 return SDValue();
10869 // Generate new load node.
10870 LoadSDNode *LD = cast<LoadSDNode>(LdOp);
10871 SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
10872 NewLoad[i] = DAG.getMemIntrinsicNode(
10873 PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
10874 LD->getMemoryVT(), LD->getMemOperand());
10875 }
10876 SDValue NewOp =
10877 DAG.getNode(Op0.getOpcode(), SDLoc(Op0), MVT::v4f32, NewLoad[0],
10878 NewLoad[1], Op0.getNode()->getFlags());
10879 return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewOp,
10880 DAG.getConstant(0, dl, MVT::i32));
10881 }
10882 case ISD::LOAD: {
10883 LoadSDNode *LD = cast<LoadSDNode>(Op0);
10884 SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
10885 SDValue NewLd = DAG.getMemIntrinsicNode(
10886 PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
10887 LD->getMemoryVT(), LD->getMemOperand());
10888 return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewLd,
10889 DAG.getConstant(0, dl, MVT::i32));
10890 }
10891 }
10892 llvm_unreachable("ERROR: Should return for all cases within switch.");
10893}
10894
10895/// LowerOperation - Provide custom lowering hooks for some operations.
10896///
10897SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
10898 switch (Op.getOpcode()) {
10899 default: llvm_unreachable("Wasn't expecting to be able to lower this!");
10900 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
10901 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
10902 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
10903 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
10904 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
10905 case ISD::STRICT_FSETCC:
10906 case ISD::STRICT_FSETCCS:
10907 case ISD::SETCC: return LowerSETCC(Op, DAG);
10908 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
10909 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
10910
10911 case ISD::INLINEASM:
10912 case ISD::INLINEASM_BR: return LowerINLINEASM(Op, DAG);
10913 // Variable argument lowering.
10914 case ISD::VASTART: return LowerVASTART(Op, DAG);
10915 case ISD::VAARG: return LowerVAARG(Op, DAG);
10916 case ISD::VACOPY: return LowerVACOPY(Op, DAG);
10917
10918 case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG);
10919 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
10920 case ISD::GET_DYNAMIC_AREA_OFFSET:
10921 return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
10922
10923 // Exception handling lowering.
10924 case ISD::EH_DWARF_CFA: return LowerEH_DWARF_CFA(Op, DAG);
10925 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
10926 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
10927
10928 case ISD::LOAD: return LowerLOAD(Op, DAG);
10929 case ISD::STORE: return LowerSTORE(Op, DAG);
10930 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG);
10931 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
10932 case ISD::STRICT_FP_TO_UINT:
10933 case ISD::STRICT_FP_TO_SINT:
10934 case ISD::FP_TO_UINT:
10935 case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, SDLoc(Op));
10936 case ISD::STRICT_UINT_TO_FP:
10937 case ISD::STRICT_SINT_TO_FP:
10938 case ISD::UINT_TO_FP:
10939 case ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG);
10940 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
10941
10942 // Lower 64-bit shifts.
10943 case ISD::SHL_PARTS: return LowerSHL_PARTS(Op, DAG);
10944 case ISD::SRL_PARTS: return LowerSRL_PARTS(Op, DAG);
10945 case ISD::SRA_PARTS: return LowerSRA_PARTS(Op, DAG);
10946
10947 case ISD::FSHL: return LowerFunnelShift(Op, DAG);
10948 case ISD::FSHR: return LowerFunnelShift(Op, DAG);
10949
10950 // Vector-related lowering.
10951 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
10952 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
10953 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
10954 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
10955 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
10956 case ISD::MUL: return LowerMUL(Op, DAG);
10957 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
10958 case ISD::STRICT_FP_ROUND:
10959 case ISD::FP_ROUND:
10960 return LowerFP_ROUND(Op, DAG);
10961 case ISD::ROTL: return LowerROTL(Op, DAG);
10962
10963 // For counter-based loop handling.
10964 case ISD::INTRINSIC_W_CHAIN: return SDValue();
10965
10966 case ISD::BITCAST: return LowerBITCAST(Op, DAG);
10967
10968 // Frame & Return address.
10969 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
10970 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
10971
10972 case ISD::INTRINSIC_VOID:
10973 return LowerINTRINSIC_VOID(Op, DAG);
10974 case ISD::BSWAP:
10975 return LowerBSWAP(Op, DAG);
10976 case ISD::ATOMIC_CMP_SWAP:
10977 return LowerATOMIC_CMP_SWAP(Op, DAG);
10978 case ISD::ATOMIC_STORE:
10979 return LowerATOMIC_LOAD_STORE(Op, DAG);
10980 }
10981}
10982
10983void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
10984 SmallVectorImpl<SDValue>&Results,
10985 SelectionDAG &DAG) const {
10986 SDLoc dl(N);
10987 switch (N->getOpcode()) {
10988 default:
10989 llvm_unreachable("Do not know how to custom type legalize this operation!")__builtin_unreachable();
10990 case ISD::ATOMIC_LOAD: {
10991 SDValue Res = LowerATOMIC_LOAD_STORE(SDValue(N, 0), DAG);
10992 Results.push_back(Res);
10993 Results.push_back(Res.getValue(1));
10994 break;
10995 }
10996 case ISD::READCYCLECOUNTER: {
10997 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
10998 SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0));
10999
11000 Results.push_back(
11001 DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, RTB, RTB.getValue(1)));
11002 Results.push_back(RTB.getValue(2));
11003 break;
11004 }
11005 case ISD::INTRINSIC_W_CHAIN: {
11006 if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() !=
11007 Intrinsic::loop_decrement)
11008 break;
11009
11010 assert(N->getValueType(0) == MVT::i1 &&
11011        "Unexpected result type for CTR decrement intrinsic");
11012 EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
11013 N->getValueType(0));
11014 SDVTList VTs = DAG.getVTList(SVT, MVT::Other);
11015 SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0),
11016 N->getOperand(1));
11017
11018 Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewInt));
11019 Results.push_back(NewInt.getValue(1));
11020 break;
11021 }
11022 case ISD::VAARG: {
11023 if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64())
11024 return;
11025
11026 EVT VT = N->getValueType(0);
11027
11028 if (VT == MVT::i64) {
11029 SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG);
11030
11031 Results.push_back(NewNode);
11032 Results.push_back(NewNode.getValue(1));
11033 }
11034 return;
11035 }
11036 case ISD::STRICT_FP_TO_SINT:
11037 case ISD::STRICT_FP_TO_UINT:
11038 case ISD::FP_TO_SINT:
11039 case ISD::FP_TO_UINT:
11040 // LowerFP_TO_INT() can only handle f32 and f64.
11041 if (N->getOperand(N->isStrictFPOpcode() ? 1 : 0).getValueType() ==
11042 MVT::ppcf128)
11043 return;
11044 Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
11045 return;
11046 case ISD::TRUNCATE: {
11047 if (!N->getValueType(0).isVector())
11048 return;
11049 SDValue Lowered = LowerTRUNCATEVector(SDValue(N, 0), DAG);
11050 if (Lowered)
11051 Results.push_back(Lowered);
11052 return;
11053 }
11054 case ISD::FSHL:
11055 case ISD::FSHR:
11056 // Don't handle funnel shifts here.
11057 return;
11058 case ISD::BITCAST:
11059 // Don't handle bitcast here.
11060 return;
11061 case ISD::FP_EXTEND:
11062 SDValue Lowered = LowerFP_EXTEND(SDValue(N, 0), DAG);
11063 if (Lowered)
11064 Results.push_back(Lowered);
11065 return;
11066 }
11067}
11068
11069//===----------------------------------------------------------------------===//
11070// Other Lowering Code
11071//===----------------------------------------------------------------------===//
11072
11073static Instruction *callIntrinsic(IRBuilderBase &Builder, Intrinsic::ID Id) {
11074 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
11075 Function *Func = Intrinsic::getDeclaration(M, Id);
11076 return Builder.CreateCall(Func, {});
11077}
11078
11079 // The mappings for emitLeading/TrailingFence are taken from
11080// http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
11081Instruction *PPCTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
11082 Instruction *Inst,
11083 AtomicOrdering Ord) const {
11084 if (Ord == AtomicOrdering::SequentiallyConsistent)
11085 return callIntrinsic(Builder, Intrinsic::ppc_sync);
11086 if (isReleaseOrStronger(Ord))
11087 return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
11088 return nullptr;
11089}
11090
11091Instruction *PPCTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
11092 Instruction *Inst,
11093 AtomicOrdering Ord) const {
11094 if (Inst->hasAtomicLoad() && isAcquireOrStronger(Ord)) {
11095 // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
11096 // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html
11097 // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification.
11098 if (isa<LoadInst>(Inst) && Subtarget.isPPC64())
11099 return Builder.CreateCall(
11100 Intrinsic::getDeclaration(
11101 Builder.GetInsertBlock()->getParent()->getParent(),
11102 Intrinsic::ppc_cfence, {Inst->getType()}),
11103 {Inst});
11104 // FIXME: Can use isync for rmw operation.
11105 return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
11106 }
11107 return nullptr;
11108}
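// Taken together, the two hooks above give the standard PPC mapping (a
// summary sketch of the code above, not an exhaustive table):
//   seq_cst:  sync before the access; lwsync after, or ppc_cfence for
//             atomic loads on PPC64
//   release:  lwsync before; no trailing fence
//   acquire:  no leading fence; lwsync after, or ppc_cfence for atomic
//             loads on PPC64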
11109
11110MachineBasicBlock *
11111PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB,
11112 unsigned AtomicSize,
11113 unsigned BinOpcode,
11114 unsigned CmpOpcode,
11115 unsigned CmpPred) const {
11116 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
11117 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11118
11119 auto LoadMnemonic = PPC::LDARX;
11120 auto StoreMnemonic = PPC::STDCX;
11121 switch (AtomicSize) {
11122 default:
11123 llvm_unreachable("Unexpected size of atomic entity")__builtin_unreachable();
11124 case 1:
11125 LoadMnemonic = PPC::LBARX;
11126 StoreMnemonic = PPC::STBCX;
11127 assert(Subtarget.hasPartwordAtomics() && "Partword atomics are required for sizes below 4");
11128 break;
11129 case 2:
11130 LoadMnemonic = PPC::LHARX;
11131 StoreMnemonic = PPC::STHCX;
11132 assert(Subtarget.hasPartwordAtomics() && "Partword atomics are required for sizes below 4");
11133 break;
11134 case 4:
11135 LoadMnemonic = PPC::LWARX;
11136 StoreMnemonic = PPC::STWCX;
11137 break;
11138 case 8:
11139 LoadMnemonic = PPC::LDARX;
11140 StoreMnemonic = PPC::STDCX;
11141 break;
11142 }
11143
11144 const BasicBlock *LLVM_BB = BB->getBasicBlock();
11145 MachineFunction *F = BB->getParent();
11146 MachineFunction::iterator It = ++BB->getIterator();
11147
11148 Register dest = MI.getOperand(0).getReg();
11149 Register ptrA = MI.getOperand(1).getReg();
11150 Register ptrB = MI.getOperand(2).getReg();
11151 Register incr = MI.getOperand(3).getReg();
11152 DebugLoc dl = MI.getDebugLoc();
11153
11154 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
11155 MachineBasicBlock *loop2MBB =
11156 CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
11157 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
11158 F->insert(It, loopMBB);
11159 if (CmpOpcode)
11160 F->insert(It, loop2MBB);
11161 F->insert(It, exitMBB);
11162 exitMBB->splice(exitMBB->begin(), BB,
11163 std::next(MachineBasicBlock::iterator(MI)), BB->end());
11164 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
11165
11166 MachineRegisterInfo &RegInfo = F->getRegInfo();
11167 Register TmpReg = (!BinOpcode) ? incr :
11168 RegInfo.createVirtualRegister( AtomicSize == 8 ? &PPC::G8RCRegClass
11169 : &PPC::GPRCRegClass);
11170
11171 // thisMBB:
11172 // ...
11173 // fallthrough --> loopMBB
11174 BB->addSuccessor(loopMBB);
11175
11176 // loopMBB:
11177 // l[wd]arx dest, ptr
11178 // add r0, dest, incr
11179 // st[wd]cx. r0, ptr
11180 // bne- loopMBB
11181 // fallthrough --> exitMBB
11182
11183 // For max/min...
11184 // loopMBB:
11185 // l[wd]arx dest, ptr
11186 // cmpl?[wd] incr, dest
11187 // bgt exitMBB
11188 // loop2MBB:
11189 // st[wd]cx. dest, ptr
11190 // bne- loopMBB
11191 // fallthrough --> exitMBB
11192
11193 BB = loopMBB;
11194 BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
11195 .addReg(ptrA).addReg(ptrB);
11196 if (BinOpcode)
11197 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest);
11198 if (CmpOpcode) {
11199 // Signed comparisons of byte or halfword values must be sign-extended.
11200 if (CmpOpcode == PPC::CMPW && AtomicSize < 4) {
11201 Register ExtReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
11202 BuildMI(BB, dl, TII->get(AtomicSize == 1 ? PPC::EXTSB : PPC::EXTSH),
11203 ExtReg).addReg(dest);
11204 BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
11205 .addReg(incr).addReg(ExtReg);
11206 } else
11207 BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
11208 .addReg(incr).addReg(dest);
11209
11210 BuildMI(BB, dl, TII->get(PPC::BCC))
11211 .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB);
11212 BB->addSuccessor(loop2MBB);
11213 BB->addSuccessor(exitMBB);
11214 BB = loop2MBB;
11215 }
11216 BuildMI(BB, dl, TII->get(StoreMnemonic))
11217 .addReg(TmpReg).addReg(ptrA).addReg(ptrB);
11218 BuildMI(BB, dl, TII->get(PPC::BCC))
11219 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
11220 BB->addSuccessor(loopMBB);
11221 BB->addSuccessor(exitMBB);
11222
11223 // exitMBB:
11224 // ...
11225 BB = exitMBB;
11226 return BB;
11227}
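// A concrete instance of the min/max path (a sketch assuming
// ATOMIC_LOAD_MIN_I32, i.e. AtomicSize == 4, BinOpcode == 0,
// CmpOpcode == PPC::CMPW, CmpPred == PPC::PRED_GE), as built above:
//   loopMBB:
//     lwarx  dest, ptrA, ptrB
//     cmpw   cr0, incr, dest
//     bge    cr0, exitMBB
//   loop2MBB:
//     stwcx. incr, ptrA, ptrB
//     bne-   loopMBB
//     fallthrough --> exitMBB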
11228
11229static bool isSignExtended(MachineInstr &MI, const PPCInstrInfo *TII) {
11230 switch(MI.getOpcode()) {
11231 default:
11232 return false;
11233 case PPC::COPY:
11234 return TII->isSignExtended(MI);
11235 case PPC::LHA:
11236 case PPC::LHA8:
11237 case PPC::LHAU:
11238 case PPC::LHAU8:
11239 case PPC::LHAUX:
11240 case PPC::LHAUX8:
11241 case PPC::LHAX:
11242 case PPC::LHAX8:
11243 case PPC::LWA:
11244 case PPC::LWAUX:
11245 case PPC::LWAX:
11246 case PPC::LWAX_32:
11247 case PPC::LWA_32:
11248 case PPC::PLHA:
11249 case PPC::PLHA8:
11250 case PPC::PLHA8pc:
11251 case PPC::PLHApc:
11252 case PPC::PLWA:
11253 case PPC::PLWA8:
11254 case PPC::PLWA8pc:
11255 case PPC::PLWApc:
11256 case PPC::EXTSB:
11257 case PPC::EXTSB8:
11258 case PPC::EXTSB8_32_64:
11259 case PPC::EXTSB8_rec:
11260 case PPC::EXTSB_rec:
11261 case PPC::EXTSH:
11262 case PPC::EXTSH8:
11263 case PPC::EXTSH8_32_64:
11264 case PPC::EXTSH8_rec:
11265 case PPC::EXTSH_rec:
11266 case PPC::EXTSW:
11267 case PPC::EXTSWSLI:
11268 case PPC::EXTSWSLI_32_64:
11269 case PPC::EXTSWSLI_32_64_rec:
11270 case PPC::EXTSWSLI_rec:
11271 case PPC::EXTSW_32:
11272 case PPC::EXTSW_32_64:
11273 case PPC::EXTSW_32_64_rec:
11274 case PPC::EXTSW_rec:
11275 case PPC::SRAW:
11276 case PPC::SRAWI:
11277 case PPC::SRAWI_rec:
11278 case PPC::SRAW_rec:
11279 return true;
11280 }
11281 return false;
11282}
11283
11284MachineBasicBlock *PPCTargetLowering::EmitPartwordAtomicBinary(
11285 MachineInstr &MI, MachineBasicBlock *BB,
11286 bool is8bit, // operation
11287 unsigned BinOpcode, unsigned CmpOpcode, unsigned CmpPred) const {
11288 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
11289 const PPCInstrInfo *TII = Subtarget.getInstrInfo();
11290
11291 // If this is a signed comparison and the value being compared is not known
11292 // to be sign extended, sign extend it here.
11293 DebugLoc dl = MI.getDebugLoc();
11294 MachineFunction *F = BB->getParent();
11295 MachineRegisterInfo &RegInfo = F->getRegInfo();
11296 Register incr = MI.getOperand(3).getReg();
11297 bool IsSignExtended = Register::isVirtualRegister(incr) &&
11298 isSignExtended(*RegInfo.getVRegDef(incr), TII);
11299
11300 if (CmpOpcode == PPC::CMPW && !IsSignExtended) {
11301 Register ValueReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
11302 BuildMI(*BB, MI, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueReg)
11303 .addReg(MI.getOperand(3).getReg());
11304 MI.getOperand(3).setReg(ValueReg);
11305 }
11306 // If we support part-word atomic mnemonics, just use them.
11307 if (Subtarget.hasPartwordAtomics())
11308 return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode, CmpOpcode,
11309 CmpPred);
11310
11311 // In 64-bit mode we have to use 64-bit registers for addresses, even though
11312 // the lwarx/stwcx instructions only operate on 32-bit values. With the
11313 // 32-bit atomics we can use address registers without caring whether they're
11314 // 32 or 64, but here we're doing actual arithmetic on the addresses.
11315 bool is64bit = Subtarget.isPPC64();
11316 bool isLittleEndian = Subtarget.isLittleEndian();
11317 unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
11318
11319 const BasicBlock *LLVM_BB = BB->getBasicBlock();
11320 MachineFunction::iterator It = ++BB->getIterator();
11321
11322 Register dest = MI.getOperand(0).getReg();
11323 Register ptrA = MI.getOperand(1).getReg();
11324 Register ptrB = MI.getOperand(2).getReg();
11325
11326 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
11327 MachineBasicBlock *loop2MBB =
11328 CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
11329 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
11330 F->insert(It, loopMBB);
11331 if (CmpOpcode)
11332 F->insert(It, loop2MBB);
11333 F->insert(It, exitMBB);
11334 exitMBB->splice(exitMBB->begin(), BB,
11335 std::next(MachineBasicBlock::iterator(MI)), BB->end());
11336 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
11337
11338 const TargetRegisterClass *RC =
11339 is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
11340 const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
11341
11342 Register PtrReg = RegInfo.createVirtualRegister(RC);
11343 Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
11344 Register ShiftReg =
11345 isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
11346 Register Incr2Reg = RegInfo.createVirtualRegister(GPRC);
11347 Register MaskReg = RegInfo.createVirtualRegister(GPRC);
11348 Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
11349 Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
11350 Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
11351 Register Tmp3Reg = RegInfo.createVirtualRegister(GPRC);
11352 Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
11353 Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
11354 Register SrwDestReg = RegInfo.createVirtualRegister(GPRC);
11355 Register Ptr1Reg;
11356 Register TmpReg =
11357 (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(GPRC);
11358
11359 // thisMBB:
11360 // ...
11361 // fallthrough --> loopMBB
11362 BB->addSuccessor(loopMBB);
11363
11364 // The 4-byte load must be aligned, while a char or short may be
11365 // anywhere in the word. Hence all this nasty bookkeeping code.
11366 // add ptr1, ptrA, ptrB [copy if ptrA==0]
11367 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
11368 // xori shift, shift1, 24 [16]
11369 // rlwinm ptr, ptr1, 0, 0, 29
11370 // slw incr2, incr, shift
11371 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
11372 // slw mask, mask2, shift
11373 // loopMBB:
11374 // lwarx tmpDest, ptr
11375 // add tmp, tmpDest, incr2
11376 // andc tmp2, tmpDest, mask
11377 // and tmp3, tmp, mask
11378 // or tmp4, tmp3, tmp2
11379 // stwcx. tmp4, ptr
11380 // bne- loopMBB
11381 // fallthrough --> exitMBB
11382 // srw SrwDest, tmpDest, shift
11383 // rlwinm SrwDest, SrwDest, 0, 24 [16], 31
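// A worked example of the bookkeeping above (a sketch, assuming a
// big-endian byte atomic at address 0x1003): rlwinm computes
// shift1 = (ptr1 & 0x3) << 3 = 24; xori gives shift = 24 ^ 24 = 0, since
// on big endian byte 3 already sits in the least significant byte of the
// aligned word; rlwinm ptr, ptr1, 0, 0, 29 clears the low two bits,
// yielding the aligned address 0x1000. On little endian the xori is
// skipped and shift = 24 moves the byte into position instead.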
11384 if (ptrA != ZeroReg) {
11385 Ptr1Reg = RegInfo.createVirtualRegister(RC);
11386 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
11387 .addReg(ptrA)
11388 .addReg(ptrB);
11389 } else {
11390 Ptr1Reg = ptrB;
11391 }
11392 // We need to use a 32-bit subregister to avoid a register class mismatch in
11393 // 64-bit mode.
11394 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
11395 .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
11396 .addImm(3)
11397 .addImm(27)
11398 .addImm(is8bit ? 28 : 27);
11399 if (!isLittleEndian)
11400 BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
11401 .addReg(Shift1Reg)
11402 .addImm(is8bit ? 24 : 16);
11403 if (is64bit)
11404 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
11405 .addReg(Ptr1Reg)
11406 .addImm(0)
11407 .addImm(61);
11408 else
11409 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
11410 .addReg(Ptr1Reg)
11411 .addImm(0)
11412 .addImm(0)
11413 .addImm(29);
11414 BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg).addReg(incr).addReg(ShiftReg);
11415 if (is8bit)
11416 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
11417 else {
11418 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
11419 BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
11420 .addReg(Mask3Reg)
11421 .addImm(65535);
11422 }
11423 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
11424 .addReg(Mask2Reg)
11425 .addReg(ShiftReg);
11426
11427 BB = loopMBB;
11428 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
11429 .addReg(ZeroReg)
11430 .addReg(PtrReg);
11431 if (BinOpcode)
11432 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg)
11433 .addReg(Incr2Reg)
11434 .addReg(TmpDestReg);
11435 BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
11436 .addReg(TmpDestReg)
11437 .addReg(MaskReg);
11438 BuildMI(BB, dl, TII->get(PPC::AND), Tmp3Reg).addReg(TmpReg).addReg(MaskReg);
11439 if (CmpOpcode) {
11440 // For unsigned comparisons, we can directly compare the shifted values.
11441 // For signed comparisons, we shift and sign-extend.
11442 Register SReg = RegInfo.createVirtualRegister(GPRC);
11443 BuildMI(BB, dl, TII->get(PPC::AND), SReg)
11444 .addReg(TmpDestReg)
11445 .addReg(MaskReg);
11446 unsigned ValueReg = SReg;
11447 unsigned CmpReg = Incr2Reg;
11448 if (CmpOpcode == PPC::CMPW) {
11449 ValueReg = RegInfo.createVirtualRegister(GPRC);
11450 BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg)
11451 .addReg(SReg)
11452 .addReg(ShiftReg);
11453 Register ValueSReg = RegInfo.createVirtualRegister(GPRC);
11454 BuildMI(BB, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg)
11455 .addReg(ValueReg);
11456 ValueReg = ValueSReg;
11457 CmpReg = incr;
11458 }
11459 BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
11460 .addReg(CmpReg)
11461 .addReg(ValueReg);
11462 BuildMI(BB, dl, TII->get(PPC::BCC))
11463 .addImm(CmpPred)
11464 .addReg(PPC::CR0)
11465 .addMBB(exitMBB);
11466 BB->addSuccessor(loop2MBB);
11467 BB->addSuccessor(exitMBB);
11468 BB = loop2MBB;
11469 }
11470 BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg).addReg(Tmp3Reg).addReg(Tmp2Reg);
11471 BuildMI(BB, dl, TII->get(PPC::STWCX))
11472 .addReg(Tmp4Reg)
11473 .addReg(ZeroReg)
11474 .addReg(PtrReg);
11475 BuildMI(BB, dl, TII->get(PPC::BCC))
11476 .addImm(PPC::PRED_NE)
11477 .addReg(PPC::CR0)
11478 .addMBB(loopMBB);
11479 BB->addSuccessor(loopMBB);
11480 BB->addSuccessor(exitMBB);
11481
11482 // exitMBB:
11483 // ...
11484 BB = exitMBB;
11485 // Since the shift amount is not a constant, we need to clear
11486 // the upper bits with a separate RLWINM.
11487 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::RLWINM), dest)
11488 .addReg(SrwDestReg)
11489 .addImm(0)
11490 .addImm(is8bit ? 24 : 16)
11491 .addImm(31);
11492 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), SrwDestReg)
11493 .addReg(TmpDestReg)
11494 .addReg(ShiftReg);
11495 return BB;
11496}
11497
11498llvm::MachineBasicBlock *
11499PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
11500 MachineBasicBlock *MBB) const {
11501 DebugLoc DL = MI.getDebugLoc();
11502 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11503 const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
11504
11505 MachineFunction *MF = MBB->getParent();
11506 MachineRegisterInfo &MRI = MF->getRegInfo();
11507
11508 const BasicBlock *BB = MBB->getBasicBlock();
11509 MachineFunction::iterator I = ++MBB->getIterator();
11510
11511 Register DstReg = MI.getOperand(0).getReg();
11512 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
11513 assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
11514 Register mainDstReg = MRI.createVirtualRegister(RC);
11515 Register restoreDstReg = MRI.createVirtualRegister(RC);
11516
11517 MVT PVT = getPointerTy(MF->getDataLayout());
11518 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
11519        "Invalid Pointer Size!");
11520 // For v = setjmp(buf), we generate
11521 //
11522 // thisMBB:
11523 // SjLjSetup mainMBB
11524 // bl mainMBB
11525 // v_restore = 1
11526 // b sinkMBB
11527 //
11528 // mainMBB:
11529 // buf[LabelOffset] = LR
11530 // v_main = 0
11531 //
11532 // sinkMBB:
11533 // v = phi(main, restore)
11534 //
11535
11536 MachineBasicBlock *thisMBB = MBB;
11537 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
11538 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
11539 MF->insert(I, mainMBB);
11540 MF->insert(I, sinkMBB);
11541
11542 MachineInstrBuilder MIB;
11543
11544 // Transfer the remainder of BB and its successor edges to sinkMBB.
11545 sinkMBB->splice(sinkMBB->begin(), MBB,
11546 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
11547 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
11548
11549 // Note that the structure of the jmp_buf used here is not compatible
11550 // with that used by libc, and is not designed to be. Specifically, it
11551 // stores only those 'reserved' registers that LLVM does not otherwise
11552 // understand how to spill. Also, by convention, by the time this
11553 // intrinsic is called, Clang has already stored the frame address in the
11554 // first slot of the buffer and stack address in the third. Following the
11555 // X86 target code, we'll store the jump address in the second slot. We also
11556 // need to save the TOC pointer (R2) to handle jumps between shared
11557 // libraries, and that will be stored in the fourth slot. The thread
11558 // identifier (R13) is not affected.
11559
11560 // thisMBB:
11561 const int64_t LabelOffset = 1 * PVT.getStoreSize();
11562 const int64_t TOCOffset = 3 * PVT.getStoreSize();
11563 const int64_t BPOffset = 4 * PVT.getStoreSize();
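// Resulting buffer layout for PVT == MVT::i64 (an illustrative sketch;
// slots 0 and 2 are filled by Clang as described above, and slot 2's
// offset is used by the longjmp side as SPOffset):
//   slot 0, offset  0: frame address
//   slot 1, offset  8: jump address (LR), LabelOffset
//   slot 2, offset 16: stack address
//   slot 3, offset 24: TOC pointer (R2), TOCOffset
//   slot 4, offset 32: base pointer, BPOffset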
11564
11565 // Prepare the IP in a register.
11566 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
11567 Register LabelReg = MRI.createVirtualRegister(PtrRC);
11568 Register BufReg = MI.getOperand(1).getReg();
11569
11570 if (Subtarget.is64BitELFABI()) {
11571 setUsesTOCBasePtr(*MBB->getParent());
11572 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
11573 .addReg(PPC::X2)
11574 .addImm(TOCOffset)
11575 .addReg(BufReg)
11576 .cloneMemRefs(MI);
11577 }
11578
11579 // Naked functions never have a base pointer, and so we use r1. For all
11580 // other functions, this decision must be deferred until PEI.
11581 unsigned BaseReg;
11582 if (MF->getFunction().hasFnAttribute(Attribute::Naked))
11583 BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1;
11584 else
11585 BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP;
11586
11587 MIB = BuildMI(*thisMBB, MI, DL,
11588 TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW))
11589 .addReg(BaseReg)
11590 .addImm(BPOffset)
11591 .addReg(BufReg)
11592 .cloneMemRefs(MI);
11593
11594 // Setup
11595 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB);
11596 MIB.addRegMask(TRI->getNoPreservedMask());
11597
11598 BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1);
11599
11600 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup))
11601 .addMBB(mainMBB);
11602 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB);
11603
11604 thisMBB->addSuccessor(mainMBB, BranchProbability::getZero());
11605 thisMBB->addSuccessor(sinkMBB, BranchProbability::getOne());
11606
11607 // mainMBB:
11608 // mainDstReg = 0
11609 MIB =
11610 BuildMI(mainMBB, DL,
11611 TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg);
11612
11613 // Store IP
11614 if (Subtarget.isPPC64()) {
11615 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD))
11616 .addReg(LabelReg)
11617 .addImm(LabelOffset)
11618 .addReg(BufReg);
11619 } else {
11620 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW))
11621 .addReg(LabelReg)
11622 .addImm(LabelOffset)
11623 .addReg(BufReg);
11624 }
11625 MIB.cloneMemRefs(MI);
11626
11627 BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0);
11628 mainMBB->addSuccessor(sinkMBB);
11629
11630 // sinkMBB:
11631 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
11632 TII->get(PPC::PHI), DstReg)
11633 .addReg(mainDstReg).addMBB(mainMBB)
11634 .addReg(restoreDstReg).addMBB(thisMBB);
11635
11636 MI.eraseFromParent();
11637 return sinkMBB;
11638}
11639
11640MachineBasicBlock *
11641PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
11642 MachineBasicBlock *MBB) const {
11643 DebugLoc DL = MI.getDebugLoc();
11644 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11645
11646 MachineFunction *MF = MBB->getParent();
11647 MachineRegisterInfo &MRI = MF->getRegInfo();
11648
11649 MVT PVT = getPointerTy(MF->getDataLayout());
11650 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
11651        "Invalid Pointer Size!");
11652
11653 const TargetRegisterClass *RC =
11654 (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
11655 Register Tmp = MRI.createVirtualRegister(RC);
11656 // Since FP is only updated here but NOT referenced, it's treated as GPR.
11657 unsigned FP = (PVT == MVT::i64) ? PPC::X31 : PPC::R31;
11658 unsigned SP = (PVT == MVT::i64) ? PPC::X1 : PPC::R1;
11659 unsigned BP =
11660 (PVT == MVT::i64)
11661 ? PPC::X30
11662 : (Subtarget.isSVR4ABI() && isPositionIndependent() ? PPC::R29
11663 : PPC::R30);
11664
11665 MachineInstrBuilder MIB;
11666
11667 const int64_t LabelOffset = 1 * PVT.getStoreSize();
11668 const int64_t SPOffset = 2 * PVT.getStoreSize();
11669 const int64_t TOCOffset = 3 * PVT.getStoreSize();
11670 const int64_t BPOffset = 4 * PVT.getStoreSize();
11671
11672 Register BufReg = MI.getOperand(0).getReg();
11673
11674 // Reload FP (the jumped-to function may not have had a
11675 // frame pointer, and if so, then its r31 will be restored
11676 // as necessary).
11677 if (PVT == MVT::i64) {
11678 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP)
11679 .addImm(0)
11680 .addReg(BufReg);
11681 } else {
11682 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP)
11683 .addImm(0)
11684 .addReg(BufReg);
11685 }
11686 MIB.cloneMemRefs(MI);
11687
11688 // Reload IP
11689 if (PVT == MVT::i64) {
11690 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp)
11691 .addImm(LabelOffset)
11692 .addReg(BufReg);
11693 } else {
11694 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp)
11695 .addImm(LabelOffset)
11696 .addReg(BufReg);
11697 }
11698 MIB.cloneMemRefs(MI);
11699
11700 // Reload SP
11701 if (PVT == MVT::i64) {
11702 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP)
11703 .addImm(SPOffset)
11704 .addReg(BufReg);
11705 } else {
11706 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP)
11707 .addImm(SPOffset)
11708 .addReg(BufReg);
11709 }
11710 MIB.cloneMemRefs(MI);
11711
11712 // Reload BP
11713 if (PVT == MVT::i64) {
11714 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP)
11715 .addImm(BPOffset)
11716 .addReg(BufReg);
11717 } else {
11718 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP)
11719 .addImm(BPOffset)
11720 .addReg(BufReg);
11721 }
11722 MIB.cloneMemRefs(MI);
11723
11724 // Reload TOC
11725 if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) {
11726 setUsesTOCBasePtr(*MBB->getParent());
11727 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2)
11728 .addImm(TOCOffset)
11729 .addReg(BufReg)
11730 .cloneMemRefs(MI);
11731 }
11732
11733 // Jump
11734 BuildMI(*MBB, MI, DL,
11735 TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp);
11736 BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR));
11737
11738 MI.eraseFromParent();
11739 return MBB;
11740}
11741
11742bool PPCTargetLowering::hasInlineStackProbe(MachineFunction &MF) const {
11743 // If the function specifically requests inline stack probes, emit them.
11744 if (MF.getFunction().hasFnAttribute("probe-stack"))
11745 return MF.getFunction().getFnAttribute("probe-stack").getValueAsString() ==
11746 "inline-asm";
11747 return false;
11748}
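// A usage note (an assumption about the front end, not taken from this
// file): clang's -fstack-clash-protection attaches
// "probe-stack"="inline-asm" to functions, which makes the hook above
// return true and enables emitProbedAlloca below.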
11749
11750unsigned PPCTargetLowering::getStackProbeSize(MachineFunction &MF) const {
11751 const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
11752 unsigned StackAlign = TFI->getStackAlignment();
11753 assert(StackAlign >= 1 && isPowerOf2_32(StackAlign) &&
11754        "Unexpected stack alignment");
11755 // The default stack probe size is 4096 if the function has no
11756 // stack-probe-size attribute.
11757 unsigned StackProbeSize = 4096;
11758 const Function &Fn = MF.getFunction();
11759 if (Fn.hasFnAttribute("stack-probe-size"))
11760 Fn.getFnAttribute("stack-probe-size")
11761 .getValueAsString()
11762 .getAsInteger(0, StackProbeSize);
11763 // Round down to the stack alignment.
11764 StackProbeSize &= ~(StackAlign - 1);
11765 return StackProbeSize ? StackProbeSize : StackAlign;
11766}
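// A minimal standalone sketch (not from this file) of the rounding in
// getStackProbeSize, assuming StackAlign is a power of two as the assert
// above guarantees:
static unsigned roundProbeSize(unsigned Requested, unsigned StackAlign) {
  unsigned Size = Requested & ~(StackAlign - 1); // round down to alignment
  return Size ? Size : StackAlign;               // never probe less than align
}
// roundProbeSize(4100, 16) == 4096; roundProbeSize(8, 16) == 16.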
11767
11768 // Lower dynamic stack allocation with probing. `emitProbedAlloca` is split
11769 // into three phases. In the first phase, it uses the pseudo instruction
11770 // PREPARE_PROBED_ALLOCA to get the future result of the actual FramePointer
11771 // and FinalStackPtr. In the second phase, it generates a loop for probing
11772 // blocks. Finally, it uses the pseudo instruction DYNAREAOFFSET to get the
11773 // future result of MaxCallFrameSize so that it can calculate the correct data area pointer.
11774MachineBasicBlock *
11775PPCTargetLowering::emitProbedAlloca(MachineInstr &MI,
11776 MachineBasicBlock *MBB) const {
11777 const bool isPPC64 = Subtarget.isPPC64();
11778 MachineFunction *MF = MBB->getParent();
11779 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11780 DebugLoc DL = MI.getDebugLoc();
11781 const unsigned ProbeSize = getStackProbeSize(*MF);
11782 const BasicBlock *ProbedBB = MBB->getBasicBlock();
11783 MachineRegisterInfo &MRI = MF->getRegInfo();
11784 // The CFG of the probing loop looks as follows:
11785 // +-----+
11786 // | MBB |
11787 // +--+--+
11788 // |
11789 // +----v----+
11790 // +--->+ TestMBB +---+
11791 // | +----+----+ |
11792 // | | |
11793 // | +-----v----+ |
11794 // +---+ BlockMBB | |
11795 // +----------+ |
11796 // |
11797 // +---------+ |
11798 // | TailMBB +<--+
11799 // +---------+
11800 // In MBB, calculate the previous frame pointer and the final stack pointer.
11801 // In TestMBB, test whether SP equals the final stack pointer; if so, jump to
11802 // TailMBB. In BlockMBB, update SP atomically and jump back to TestMBB.
11803 // TailMBB is spliced via \p MI.
11804 MachineBasicBlock *TestMBB = MF->CreateMachineBasicBlock(ProbedBB);
11805 MachineBasicBlock *TailMBB = MF->CreateMachineBasicBlock(ProbedBB);
11806 MachineBasicBlock *BlockMBB = MF->CreateMachineBasicBlock(ProbedBB);
11807
11808 MachineFunction::iterator MBBIter = ++MBB->getIterator();
11809 MF->insert(MBBIter, TestMBB);
11810 MF->insert(MBBIter, BlockMBB);
11811 MF->insert(MBBIter, TailMBB);
11812
11813 const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
11814 const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
11815
11816 Register DstReg = MI.getOperand(0).getReg();
11817 Register NegSizeReg = MI.getOperand(1).getReg();
11818 Register SPReg = isPPC64 ? PPC::X1 : PPC::R1;
11819 Register FinalStackPtr = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11820 Register FramePointer = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11821 Register ActualNegSizeReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11822
11823 // Since the value of NegSizeReg might be realigned during prologue/epilogue
11824 // insertion, insert a PREPARE_PROBED_ALLOCA pseudo instruction to get the
11825 // actual FramePointer and NegSize.
11826 unsigned ProbeOpc;
11827 if (!MRI.hasOneNonDBGUse(NegSizeReg))
11828 ProbeOpc =
11829 isPPC64 ? PPC::PREPARE_PROBED_ALLOCA_64 : PPC::PREPARE_PROBED_ALLOCA_32;
11830 else
11831 // When NegSizeReg has only one use (the current MI, which this pseudo
11832 // replaces), use the NEGSIZE_SAME_REG variant so that ActualNegSizeReg
11833 // and NegSizeReg are allocated to the same physical register, avoiding a
11834 // redundant copy.
11835 ProbeOpc = isPPC64 ? PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_64
11836 : PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_32;
11837 BuildMI(*MBB, {MI}, DL, TII->get(ProbeOpc), FramePointer)
11838 .addDef(ActualNegSizeReg)
11839 .addReg(NegSizeReg)
11840 .add(MI.getOperand(2))
11841 .add(MI.getOperand(3));
11842
11843 // Calculate the final stack pointer, which equals SP + ActualNegSize.
11844 BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::ADD8 : PPC::ADD4),
11845 FinalStackPtr)
11846 .addReg(SPReg)
11847 .addReg(ActualNegSizeReg);
11848
11849 // Materialize a scratch register for update.
11850 int64_t NegProbeSize = -(int64_t)ProbeSize;
11851 assert(isInt<32>(NegProbeSize) && "Unhandled probe size!");
11852 Register ScratchReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11853 if (!isInt<16>(NegProbeSize)) {
11854 Register TempReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11855 BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::LIS8 : PPC::LIS), TempReg)
11856 .addImm(NegProbeSize >> 16);
11857 BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::ORI8 : PPC::ORI),
11858 ScratchReg)
11859 .addReg(TempReg)
11860 .addImm(NegProbeSize & 0xFFFF);
11861 } else
11862 BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::LI8 : PPC::LI), ScratchReg)
11863 .addImm(NegProbeSize);
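// For example (a sketch assuming ProbeSize == 65536, so NegProbeSize ==
// -65536 == 0xFFFF0000): the value does not fit in 16 bits, so it is
// materialized as lis ScratchReg, -1 followed by
// ori ScratchReg, ScratchReg, 0.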
11864
11865 {
11866 // Probing leading residual part.
11867 Register Div = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11868 BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::DIVD : PPC::DIVW), Div)
11869 .addReg(ActualNegSizeReg)
11870 .addReg(ScratchReg);
11871 Register Mul = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11872 BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::MULLD : PPC::MULLW), Mul)
11873 .addReg(Div)
11874 .addReg(ScratchReg);
11875 Register NegMod = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11876 BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::SUBF8 : PPC::SUBF), NegMod)
11877 .addReg(Mul)
11878 .addReg(ActualNegSizeReg);
11879 BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::STDUX : PPC::STWUX), SPReg)
11880 .addReg(FramePointer)
11881 .addReg(SPReg)
11882 .addReg(NegMod);
11883 }
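// Numerically (a sketch assuming ProbeSize == 4096 and a 9000-byte
// allocation, i.e. ActualNegSize == -9000): Div = -9000 / -4096 = 2,
// Mul = 2 * -4096 = -8192, NegMod = -9000 - (-8192) = -808, so the
// st[dw]ux above probes the 808-byte leading residual and leaves two
// full 4096-byte blocks for the loop in BlockMBB.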
11884
11885 {
11886 // The remaining part should be a multiple of ProbeSize.
11887 Register CmpResult = MRI.createVirtualRegister(&PPC::CRRCRegClass);
11888 BuildMI(TestMBB, DL, TII->get(isPPC64 ? PPC::CMPD : PPC::CMPW), CmpResult)
11889 .addReg(SPReg)
11890 .addReg(FinalStackPtr);
11891 BuildMI(TestMBB, DL, TII->get(PPC::BCC))
11892 .addImm(PPC::PRED_EQ)
11893 .addReg(CmpResult)
11894 .addMBB(TailMBB);
11895 TestMBB->addSuccessor(BlockMBB);
11896 TestMBB->addSuccessor(TailMBB);
11897 }
11898
11899 {
11900 // Touch the block.
11901 // |P...|P...|P...
11902 BuildMI(BlockMBB, DL, TII->get(isPPC64 ? PPC::STDUX : PPC::STWUX), SPReg)
11903 .addReg(FramePointer)
11904 .addReg(SPReg)
11905 .addReg(ScratchReg);
11906 BuildMI(BlockMBB, DL, TII->get(PPC::B)).addMBB(TestMBB);
11907 BlockMBB->addSuccessor(TestMBB);
11908 }
11909
11910 // Calculation of MaxCallFrameSize is deferred to prologue/epilogue
11911 // insertion; use the DYNAREAOFFSET pseudo instruction to get the future result.
11912 Register MaxCallFrameSizeReg =
11913 MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11914 BuildMI(TailMBB, DL,
11915 TII->get(isPPC64 ? PPC::DYNAREAOFFSET8 : PPC::DYNAREAOFFSET),
11916 MaxCallFrameSizeReg)
11917 .add(MI.getOperand(2))
11918 .add(MI.getOperand(3));
11919 BuildMI(TailMBB, DL, TII->get(isPPC64 ? PPC::ADD8 : PPC::ADD4), DstReg)
11920 .addReg(SPReg)
11921 .addReg(MaxCallFrameSizeReg);
11922
11923 // Splice instructions after MI to TailMBB.
11924 TailMBB->splice(TailMBB->end(), MBB,
11925 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
11926 TailMBB->transferSuccessorsAndUpdatePHIs(MBB);
11927 MBB->addSuccessor(TestMBB);
11928
11929 // Delete the pseudo instruction.
11930 MI.eraseFromParent();
11931
11932 ++NumDynamicAllocaProbed;
11933 return TailMBB;
11934}
11935
11936MachineBasicBlock *
11937PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
11938 MachineBasicBlock *BB) const {
11939 if (MI.getOpcode() == TargetOpcode::STACKMAP ||
11940 MI.getOpcode() == TargetOpcode::PATCHPOINT) {
11941 if (Subtarget.is64BitELFABI() &&
11942 MI.getOpcode() == TargetOpcode::PATCHPOINT &&
11943 !Subtarget.isUsingPCRelativeCalls()) {
11944 // Call lowering should have added an r2 operand to indicate a dependence
11945 // on the TOC base pointer value. It can't, however, because there is no
11946 // way to mark the dependence as implicit there, and so the stackmap code
11947 // will confuse it with a regular operand. Instead, add the dependence
11948 // here.
11949 MI.addOperand(MachineOperand::CreateReg(PPC::X2, false, true));
11950 }
11951
11952 return emitPatchPoint(MI, BB);
11953 }
11954
11955 if (MI.getOpcode() == PPC::EH_SjLj_SetJmp32 ||
11956 MI.getOpcode() == PPC::EH_SjLj_SetJmp64) {
11957 return emitEHSjLjSetJmp(MI, BB);
11958 } else if (MI.getOpcode() == PPC::EH_SjLj_LongJmp32 ||
11959 MI.getOpcode() == PPC::EH_SjLj_LongJmp64) {
11960 return emitEHSjLjLongJmp(MI, BB);
11961 }
11962
11963 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11964
11965 // To "insert" these instructions we actually have to insert their
11966 // control-flow patterns.
11967 const BasicBlock *LLVM_BB = BB->getBasicBlock();
11968 MachineFunction::iterator It = ++BB->getIterator();
11969
11970 MachineFunction *F = BB->getParent();
11971
11972 if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
11973 MI.getOpcode() == PPC::SELECT_CC_I8 || MI.getOpcode() == PPC::SELECT_I4 ||
11974 MI.getOpcode() == PPC::SELECT_I8) {
11975 SmallVector<MachineOperand, 2> Cond;
11976 if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
11977 MI.getOpcode() == PPC::SELECT_CC_I8)
11978 Cond.push_back(MI.getOperand(4));
11979 else
11980 Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET));
11981 Cond.push_back(MI.getOperand(1));
11982
11983 DebugLoc dl = MI.getDebugLoc();
11984 TII->insertSelect(*BB, MI, dl, MI.getOperand(0).getReg(), Cond,
11985 MI.getOperand(2).getReg(), MI.getOperand(3).getReg());
11986 } else if (MI.getOpcode() == PPC::SELECT_CC_F4 ||
11987 MI.getOpcode() == PPC::SELECT_CC_F8 ||
11988 MI.getOpcode() == PPC::SELECT_CC_F16 ||
11989 MI.getOpcode() == PPC::SELECT_CC_VRRC ||
11990 MI.getOpcode() == PPC::SELECT_CC_VSFRC ||
11991 MI.getOpcode() == PPC::SELECT_CC_VSSRC ||
11992 MI.getOpcode() == PPC::SELECT_CC_VSRC ||
11993 MI.getOpcode() == PPC::SELECT_CC_SPE4 ||
11994 MI.getOpcode() == PPC::SELECT_CC_SPE ||
11995 MI.getOpcode() == PPC::SELECT_F4 ||
11996 MI.getOpcode() == PPC::SELECT_F8 ||
11997 MI.getOpcode() == PPC::SELECT_F16 ||
11998 MI.getOpcode() == PPC::SELECT_SPE ||
11999 MI.getOpcode() == PPC::SELECT_SPE4 ||
12000 MI.getOpcode() == PPC::SELECT_VRRC ||
12001 MI.getOpcode() == PPC::SELECT_VSFRC ||
12002 MI.getOpcode() == PPC::SELECT_VSSRC ||
12003 MI.getOpcode() == PPC::SELECT_VSRC) {
12004 // The incoming instruction knows the destination vreg to set, the
12005 // condition code register to branch on, the true/false values to
12006 // select between, and a branch opcode to use.
12007
12008 // thisMBB:
12009 // ...
12010 // TrueVal = ...
12011 // cmpTY ccX, r1, r2
12012 // bCC copy1MBB
12013 // fallthrough --> copy0MBB
12014 MachineBasicBlock *thisMBB = BB;
12015 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
12016 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
12017 DebugLoc dl = MI.getDebugLoc();
12018 F->insert(It, copy0MBB);
12019 F->insert(It, sinkMBB);
12020
12021 // Transfer the remainder of BB and its successor edges to sinkMBB.
12022 sinkMBB->splice(sinkMBB->begin(), BB,
12023 std::next(MachineBasicBlock::iterator(MI)), BB->end());
12024 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
12025
12026 // Next, add the true and fallthrough blocks as its successors.
12027 BB->addSuccessor(copy0MBB);
12028 BB->addSuccessor(sinkMBB);
12029
12030 if (MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8 ||
12031 MI.getOpcode() == PPC::SELECT_F4 || MI.getOpcode() == PPC::SELECT_F8 ||
12032 MI.getOpcode() == PPC::SELECT_F16 ||
12033 MI.getOpcode() == PPC::SELECT_SPE4 ||
12034 MI.getOpcode() == PPC::SELECT_SPE ||
12035 MI.getOpcode() == PPC::SELECT_VRRC ||
12036 MI.getOpcode() == PPC::SELECT_VSFRC ||
12037 MI.getOpcode() == PPC::SELECT_VSSRC ||
12038 MI.getOpcode() == PPC::SELECT_VSRC) {
12039 BuildMI(BB, dl, TII->get(PPC::BC))
12040 .addReg(MI.getOperand(1).getReg())
12041 .addMBB(sinkMBB);
12042 } else {
12043 unsigned SelectPred = MI.getOperand(4).getImm();
12044 BuildMI(BB, dl, TII->get(PPC::BCC))
12045 .addImm(SelectPred)
12046 .addReg(MI.getOperand(1).getReg())
12047 .addMBB(sinkMBB);
12048 }
12049
12050 // copy0MBB:
12051 // %FalseValue = ...
12052 // # fallthrough to sinkMBB
12053 BB = copy0MBB;
12054
12055 // Update machine-CFG edges
12056 BB->addSuccessor(sinkMBB);
12057
12058 // sinkMBB:
12059 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
12060 // ...
12061 BB = sinkMBB;
12062 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::PHI), MI.getOperand(0).getReg())
12063 .addReg(MI.getOperand(3).getReg())
12064 .addMBB(copy0MBB)
12065 .addReg(MI.getOperand(2).getReg())
12066 .addMBB(thisMBB);
12067 } else if (MI.getOpcode() == PPC::ReadTB) {
12068 // To read the 64-bit time-base register on a 32-bit target, we read the
12069 // two halves. Should the counter have wrapped while it was being read, we
12070 // need to try again.
12071 // ...
12072 // readLoop:
12073 // mfspr Rx,TBU # load from TBU
12074 // mfspr Ry,TB # load from TB
12075 // mfspr Rz,TBU # load from TBU
12076 // cmpw crX,Rx,Rz # check if 'old'='new'
12077 // bne readLoop # branch if they're not equal
12078 // ...
12079
12080 MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB);
12081 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
12082 DebugLoc dl = MI.getDebugLoc();
12083 F->insert(It, readMBB);
12084 F->insert(It, sinkMBB);
12085
12086 // Transfer the remainder of BB and its successor edges to sinkMBB.
12087 sinkMBB->splice(sinkMBB->begin(), BB,
12088 std::next(MachineBasicBlock::iterator(MI)), BB->end());
12089 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
12090
12091 BB->addSuccessor(readMBB);
12092 BB = readMBB;
12093
12094 MachineRegisterInfo &RegInfo = F->getRegInfo();
12095 Register ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
12096 Register LoReg = MI.getOperand(0).getReg();
12097 Register HiReg = MI.getOperand(1).getReg();
12098
12099 BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269);
12100 BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268);
12101 BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269);
12102
12103 Register CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
12104
12105 BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg)
12106 .addReg(HiReg)
12107 .addReg(ReadAgainReg);
12108 BuildMI(BB, dl, TII->get(PPC::BCC))
12109 .addImm(PPC::PRED_NE)
12110 .addReg(CmpReg)
12111 .addMBB(readMBB);
12112
12113 BB->addSuccessor(readMBB);
12114 BB->addSuccessor(sinkMBB);
12115 } else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8)
12116 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4);
12117 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16)
12118 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4);
12119 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
12120 BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4);
12121 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)
12122 BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8);
12123
12124 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8)
12125 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND);
12126 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16)
12127 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND);
12128 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
12129 BB = EmitAtomicBinary(MI, BB, 4, PPC::AND);
12130 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64)
12131 BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8);
12132
12133 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8)
12134 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR);
12135 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16)
12136 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR);
12137 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
12138 BB = EmitAtomicBinary(MI, BB, 4, PPC::OR);
12139 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64)
12140 BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8);
12141
12142 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8)
12143 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR);
12144 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16)
12145 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR);
12146 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
12147 BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR);
12148 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)
12149 BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8);
12150
12151 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
12152 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND);
12153 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
12154 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND);
12155 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
12156 BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND);
12157 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
12158 BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8);
12159
12160 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
12161 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF);
12162 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16)
12163 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF);
12164 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
12165 BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF);
12166 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
12167 BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8);
12168
12169 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I8)
12170 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_GE);
12171 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I16)
12172 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_GE);
12173 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I32)
12174 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_GE);
12175 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I64)
12176 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_GE);
12177
12178 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I8)
12179 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_LE);
12180 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I16)
12181 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_LE);
12182 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I32)
12183 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_LE);
12184 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I64)
12185 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_LE);
12186
12187 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8)
12188 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_GE);
12189 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16)
12190 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_GE);
12191 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32)
12192 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_GE);
12193 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64)
12194 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_GE);
12195
12196 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8)
12197 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_LE);
12198 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16)
12199 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_LE);
12200 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32)
12201 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_LE);
12202 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64)
12203 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_LE);
12204
12205 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8)
12206 BB = EmitPartwordAtomicBinary(MI, BB, true, 0);
12207 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16)
12208 BB = EmitPartwordAtomicBinary(MI, BB, false, 0);
12209 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I32)
12210 BB = EmitAtomicBinary(MI, BB, 4, 0);
12211 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I64)
12212 BB = EmitAtomicBinary(MI, BB, 8, 0);
12213 else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
12214 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 ||
12215 (Subtarget.hasPartwordAtomics() &&
12216 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) ||
12217 (Subtarget.hasPartwordAtomics() &&
12218 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) {
12219 bool is64bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;
12220
12221 auto LoadMnemonic = PPC::LDARX;
12222 auto StoreMnemonic = PPC::STDCX;
12223 switch (MI.getOpcode()) {
12224 default:
12225 llvm_unreachable("Compare and swap of unknown size")__builtin_unreachable();
12226 case PPC::ATOMIC_CMP_SWAP_I8:
12227 LoadMnemonic = PPC::LBARX;
12228 StoreMnemonic = PPC::STBCX;
12229 assert(Subtarget.hasPartwordAtomics() && "No support for partword atomics.");
12230 break;
12231 case PPC::ATOMIC_CMP_SWAP_I16:
12232 LoadMnemonic = PPC::LHARX;
12233 StoreMnemonic = PPC::STHCX;
12234 assert(Subtarget.hasPartwordAtomics() && "No support for partword atomics.");
12235 break;
12236 case PPC::ATOMIC_CMP_SWAP_I32:
12237 LoadMnemonic = PPC::LWARX;
12238 StoreMnemonic = PPC::STWCX;
12239 break;
12240 case PPC::ATOMIC_CMP_SWAP_I64:
12241 LoadMnemonic = PPC::LDARX;
12242 StoreMnemonic = PPC::STDCX;
12243 break;
12244 }
12245 Register dest = MI.getOperand(0).getReg();
12246 Register ptrA = MI.getOperand(1).getReg();
12247 Register ptrB = MI.getOperand(2).getReg();
12248 Register oldval = MI.getOperand(3).getReg();
12249 Register newval = MI.getOperand(4).getReg();
12250 DebugLoc dl = MI.getDebugLoc();
12251
12252 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
12253 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
12254 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
12255 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
12256 F->insert(It, loop1MBB);
12257 F->insert(It, loop2MBB);
12258 F->insert(It, midMBB);
12259 F->insert(It, exitMBB);
12260 exitMBB->splice(exitMBB->begin(), BB,
12261 std::next(MachineBasicBlock::iterator(MI)), BB->end());
12262 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
12263
12264 // thisMBB:
12265 // ...
12266 // fallthrough --> loopMBB
12267 BB->addSuccessor(loop1MBB);
12268
12269 // loop1MBB:
12270 // l[bhwd]arx dest, ptr
12271 // cmp[wd] dest, oldval
12272 // bne- midMBB
12273 // loop2MBB:
12274 // st[bhwd]cx. newval, ptr
12275 // bne- loopMBB
12276 // b exitBB
12277 // midMBB:
12278 // st[bhwd]cx. dest, ptr
12279 // exitBB:
12280 BB = loop1MBB;
12281 BuildMI(BB, dl, TII->get(LoadMnemonic), dest).addReg(ptrA).addReg(ptrB);
12282 BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0)
12283 .addReg(oldval)
12284 .addReg(dest);
12285 BuildMI(BB, dl, TII->get(PPC::BCC))
12286 .addImm(PPC::PRED_NE)
12287 .addReg(PPC::CR0)
12288 .addMBB(midMBB);
12289 BB->addSuccessor(loop2MBB);
12290 BB->addSuccessor(midMBB);
12291
12292 BB = loop2MBB;
12293 BuildMI(BB, dl, TII->get(StoreMnemonic))
12294 .addReg(newval)
12295 .addReg(ptrA)
12296 .addReg(ptrB);
12297 BuildMI(BB, dl, TII->get(PPC::BCC))
12298 .addImm(PPC::PRED_NE)
12299 .addReg(PPC::CR0)
12300 .addMBB(loop1MBB);
12301 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
12302 BB->addSuccessor(loop1MBB);
12303 BB->addSuccessor(exitMBB);
12304
12305 BB = midMBB;
12306 BuildMI(BB, dl, TII->get(StoreMnemonic))
12307 .addReg(dest)
12308 .addReg(ptrA)
12309 .addReg(ptrB);
12310 BB->addSuccessor(exitMBB);
12311
12312 // exitMBB:
12313 // ...
12314 BB = exitMBB;
12315 } else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
12316 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
12317 // We must use 64-bit registers for addresses when targeting 64-bit,
12318 // since we're actually doing arithmetic on them. Other registers
12319 // can be 32-bit.
12320 bool is64bit = Subtarget.isPPC64();
12321 bool isLittleEndian = Subtarget.isLittleEndian();
12322 bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;
12323
12324 Register dest = MI.getOperand(0).getReg();
12325 Register ptrA = MI.getOperand(1).getReg();
12326 Register ptrB = MI.getOperand(2).getReg();
12327 Register oldval = MI.getOperand(3).getReg();
12328 Register newval = MI.getOperand(4).getReg();
12329 DebugLoc dl = MI.getDebugLoc();
12330
12331 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
12332 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
12333 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
12334 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
12335 F->insert(It, loop1MBB);
12336 F->insert(It, loop2MBB);
12337 F->insert(It, midMBB);
12338 F->insert(It, exitMBB);
12339 exitMBB->splice(exitMBB->begin(), BB,
12340 std::next(MachineBasicBlock::iterator(MI)), BB->end());
12341 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
12342
12343 MachineRegisterInfo &RegInfo = F->getRegInfo();
12344 const TargetRegisterClass *RC =
12345 is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
12346 const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
12347
12348 Register PtrReg = RegInfo.createVirtualRegister(RC);
12349 Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
12350 Register ShiftReg =
12351 isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
12352 Register NewVal2Reg = RegInfo.createVirtualRegister(GPRC);
12353 Register NewVal3Reg = RegInfo.createVirtualRegister(GPRC);
12354 Register OldVal2Reg = RegInfo.createVirtualRegister(GPRC);
12355 Register OldVal3Reg = RegInfo.createVirtualRegister(GPRC);
12356 Register MaskReg = RegInfo.createVirtualRegister(GPRC);
12357 Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
12358 Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
12359 Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
12360 Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
12361 Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
12362 Register Ptr1Reg;
12363 Register TmpReg = RegInfo.createVirtualRegister(GPRC);
12364 Register ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
12365 // thisMBB:
12366 // ...
12367 // fallthrough --> loopMBB
12368 BB->addSuccessor(loop1MBB);
12369
12370 // The 4-byte load must be aligned, while a char or short may be
12371 // anywhere in the word. Hence all this nasty bookkeeping code.
12372 // add ptr1, ptrA, ptrB [copy if ptrA==0]
12373 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
12374 // xori shift, shift1, 24 [16]
12375 // rlwinm ptr, ptr1, 0, 0, 29
12376 // slw newval2, newval, shift
12377 // slw oldval2, oldval, shift
12378 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
12379 // slw mask, mask2, shift
12380 // and newval3, newval2, mask
12381 // and oldval3, oldval2, mask
12382 // loop1MBB:
12383 // lwarx tmpDest, ptr
12384 // and tmp, tmpDest, mask
12385 // cmpw tmp, oldval3
12386 // bne- midMBB
12387 // loop2MBB:
12388 // andc tmp2, tmpDest, mask
12389 // or tmp4, tmp2, newval3
12390 // stwcx. tmp4, ptr
12391 // bne- loop1MBB
12392 // b exitBB
12393 // midMBB:
12394 // stwcx. tmpDest, ptr
12395 // exitBB:
12396 // srw dest, tmpDest, shift
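// For example (little-endian, 8-bit case): for a byte at offset 1 within
// its aligned word, the rlwinm yields shift1 = (ptr1 & 0x3) << 3 = 8, so
// mask = 0xFF << 8 and the compare-and-swap operates on bits 8..15 of the
// word. On big-endian, the xori (8 ^ 24 = 16 here) converts that into the
// matching big-endian shift amount.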
12397 if (ptrA != ZeroReg) {
12398 Ptr1Reg = RegInfo.createVirtualRegister(RC);
12399 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
12400 .addReg(ptrA)
12401 .addReg(ptrB);
12402 } else {
12403 Ptr1Reg = ptrB;
12404 }
12405
12406 // We need to use a 32-bit subregister to avoid a register class mismatch
12407 // in 64-bit mode.
12408 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
12409 .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
12410 .addImm(3)
12411 .addImm(27)
12412 .addImm(is8bit ? 28 : 27);
12413 if (!isLittleEndian)
12414 BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
12415 .addReg(Shift1Reg)
12416 .addImm(is8bit ? 24 : 16);
12417 if (is64bit)
12418 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
12419 .addReg(Ptr1Reg)
12420 .addImm(0)
12421 .addImm(61);
12422 else
12423 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
12424 .addReg(Ptr1Reg)
12425 .addImm(0)
12426 .addImm(0)
12427 .addImm(29);
12428 BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg)
12429 .addReg(newval)
12430 .addReg(ShiftReg);
12431 BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg)
12432 .addReg(oldval)
12433 .addReg(ShiftReg);
12434 if (is8bit)
12435 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
12436 else {
12437 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
12438 BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
12439 .addReg(Mask3Reg)
12440 .addImm(65535);
12441 }
12442 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
12443 .addReg(Mask2Reg)
12444 .addReg(ShiftReg);
12445 BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg)
12446 .addReg(NewVal2Reg)
12447 .addReg(MaskReg);
12448 BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg)
12449 .addReg(OldVal2Reg)
12450 .addReg(MaskReg);
12451
12452 BB = loop1MBB;
12453 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
12454 .addReg(ZeroReg)
12455 .addReg(PtrReg);
12456 BuildMI(BB, dl, TII->get(PPC::AND), TmpReg)
12457 .addReg(TmpDestReg)
12458 .addReg(MaskReg);
12459 BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0)
12460 .addReg(TmpReg)
12461 .addReg(OldVal3Reg);
12462 BuildMI(BB, dl, TII->get(PPC::BCC))
12463 .addImm(PPC::PRED_NE)
12464 .addReg(PPC::CR0)
12465 .addMBB(midMBB);
12466 BB->addSuccessor(loop2MBB);
12467 BB->addSuccessor(midMBB);
12468
12469 BB = loop2MBB;
12470 BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
12471 .addReg(TmpDestReg)
12472 .addReg(MaskReg);
12473 BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg)
12474 .addReg(Tmp2Reg)
12475 .addReg(NewVal3Reg);
12476 BuildMI(BB, dl, TII->get(PPC::STWCX))
12477 .addReg(Tmp4Reg)
12478 .addReg(ZeroReg)
12479 .addReg(PtrReg);
12480 BuildMI(BB, dl, TII->get(PPC::BCC))
12481 .addImm(PPC::PRED_NE)
12482 .addReg(PPC::CR0)
12483 .addMBB(loop1MBB);
12484 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
12485 BB->addSuccessor(loop1MBB);
12486 BB->addSuccessor(exitMBB);
12487
12488 BB = midMBB;
12489 BuildMI(BB, dl, TII->get(PPC::STWCX))
12490 .addReg(TmpDestReg)
12491 .addReg(ZeroReg)
12492 .addReg(PtrReg);
12493 BB->addSuccessor(exitMBB);
12494
12495 // exitMBB:
12496 // ...
12497 BB = exitMBB;
12498 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest)
12499 .addReg(TmpReg)
12500 .addReg(ShiftReg);
12501 } else if (MI.getOpcode() == PPC::FADDrtz) {
12502 // This pseudo performs an FADD with rounding mode temporarily forced
12503 // to round-to-zero. We emit this via custom inserter since the FPSCR
12504 // is not modeled at the SelectionDAG level.
12505 Register Dest = MI.getOperand(0).getReg();
12506 Register Src1 = MI.getOperand(1).getReg();
12507 Register Src2 = MI.getOperand(2).getReg();
12508 DebugLoc dl = MI.getDebugLoc();
12509
12510 MachineRegisterInfo &RegInfo = F->getRegInfo();
12511 Register MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
12512
12513 // Save FPSCR value.
12514 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg);
12515
12516 // Set rounding mode to round-to-zero.
12517 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1))
12518 .addImm(31)
12519 .addReg(PPC::RM, RegState::ImplicitDefine);
12520
12521 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0))
12522 .addImm(30)
12523 .addReg(PPC::RM, RegState::ImplicitDefine);
12524
12525 // Perform addition.
12526 auto MIB = BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest)
12527 .addReg(Src1)
12528 .addReg(Src2);
12529 if (MI.getFlag(MachineInstr::NoFPExcept))
12530 MIB.setMIFlag(MachineInstr::NoFPExcept);
12531
12532 // Restore FPSCR value.
12533 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg);
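// Together, the MTFSB1 on bit 31 and MTFSB0 on bit 30 leave the FPSCR
// rounding-mode field (bits 62:63) at 0b01, i.e. round toward zero; the
// MTFSFb then restores the mode saved by MFFS.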
12534 } else if (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
12535 MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT ||
12536 MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
12537 MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8) {
12538 unsigned Opcode = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
12539 MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8)
12540 ? PPC::ANDI8_rec
12541 : PPC::ANDI_rec;
12542 bool IsEQ = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
12543 MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8);
12544
12545 MachineRegisterInfo &RegInfo = F->getRegInfo();
12546 Register Dest = RegInfo.createVirtualRegister(
12547 Opcode == PPC::ANDI_rec ? &PPC::GPRCRegClass : &PPC::G8RCRegClass);
12548
12549 DebugLoc Dl = MI.getDebugLoc();
12550 BuildMI(*BB, MI, Dl, TII->get(Opcode), Dest)
12551 .addReg(MI.getOperand(1).getReg())
12552 .addImm(1);
12553 BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
12554 MI.getOperand(0).getReg())
12555 .addReg(IsEQ ? PPC::CR0EQ : PPC::CR0GT);
12556 } else if (MI.getOpcode() == PPC::TCHECK_RET) {
12557 DebugLoc Dl = MI.getDebugLoc();
12558 MachineRegisterInfo &RegInfo = F->getRegInfo();
12559 Register CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
12560 BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg);
12561 BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
12562 MI.getOperand(0).getReg())
12563 .addReg(CRReg);
12564 } else if (MI.getOpcode() == PPC::TBEGIN_RET) {
12565 DebugLoc Dl = MI.getDebugLoc();
12566 unsigned Imm = MI.getOperand(1).getImm();
12567 BuildMI(*BB, MI, Dl, TII->get(PPC::TBEGIN)).addImm(Imm);
12568 BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
12569 MI.getOperand(0).getReg())
12570 .addReg(PPC::CR0EQ);
12571 } else if (MI.getOpcode() == PPC::SETRNDi) {
12572 DebugLoc dl = MI.getDebugLoc();
12573 Register OldFPSCRReg = MI.getOperand(0).getReg();
12574
12575 // Save FPSCR value.
12576 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
12577
12578 // The floating-point rounding mode is in bits 62:63 of FPSCR, and has
12579 // the following settings:
12580 // 00 Round to nearest
12581 // 01 Round to 0
12582 // 10 Round to +inf
12583 // 11 Round to -inf
12584
12585 // When the operand is an immediate, use its two least significant bits to
12586 // set bits 62:63 of FPSCR.
12587 unsigned Mode = MI.getOperand(1).getImm();
12588 BuildMI(*BB, MI, dl, TII->get((Mode & 1) ? PPC::MTFSB1 : PPC::MTFSB0))
12589 .addImm(31)
12590 .addReg(PPC::RM, RegState::ImplicitDefine);
12591
12592 BuildMI(*BB, MI, dl, TII->get((Mode & 2) ? PPC::MTFSB1 : PPC::MTFSB0))
12593 .addImm(30)
12594 .addReg(PPC::RM, RegState::ImplicitDefine);
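// For example, Mode == 1 emits MTFSB1 on bit 31 and MTFSB0 on bit 30,
// leaving FPSCR[62:63] = 0b01, i.e. round toward zero per the table above.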
12595 } else if (MI.getOpcode() == PPC::SETRND) {
12596 DebugLoc dl = MI.getDebugLoc();
12597
12598 // Copy a register from F8RCRegClass (SrcReg) to G8RCRegClass (DestReg),
12599 // or the other way around. If the target doesn't have DirectMove, we have
12600 // to go through the stack to do the conversion, because without
12601 // instructions like mtvsrd or mfvsrd there is no way to move the value
12602 // between the two register classes directly.
12603 auto copyRegFromG8RCOrF8RC = [&] (unsigned DestReg, unsigned SrcReg) {
12604 if (Subtarget.hasDirectMove()) {
12605 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), DestReg)
12606 .addReg(SrcReg);
12607 } else {
12608 // Use stack to do the register copy.
12609 unsigned StoreOp = PPC::STD, LoadOp = PPC::LFD;
12610 MachineRegisterInfo &RegInfo = F->getRegInfo();
12611 const TargetRegisterClass *RC = RegInfo.getRegClass(SrcReg);
12612 if (RC == &PPC::F8RCRegClass) {
12613 // Copy register from F8RCRegClass to G8RCRegclass.
12614 assert((RegInfo.getRegClass(DestReg) == &PPC::G8RCRegClass) &&
12615 "Unsupported RegClass.");
12616
12617 StoreOp = PPC::STFD;
12618 LoadOp = PPC::LD;
12619 } else {
12620 // Copy register from G8RCRegClass to F8RCRegclass.
12621 assert((RegInfo.getRegClass(SrcReg) == &PPC::G8RCRegClass) &&
12622 (RegInfo.getRegClass(DestReg) == &PPC::F8RCRegClass) &&
12623 "Unsupported RegClass.");
12624 }
12625
12626 MachineFrameInfo &MFI = F->getFrameInfo();
12627 int FrameIdx = MFI.CreateStackObject(8, Align(8), false);
12628
12629 MachineMemOperand *MMOStore = F->getMachineMemOperand(
12630 MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
12631 MachineMemOperand::MOStore, MFI.getObjectSize(FrameIdx),
12632 MFI.getObjectAlign(FrameIdx));
12633
12634 // Store the SrcReg into the stack.
12635 BuildMI(*BB, MI, dl, TII->get(StoreOp))
12636 .addReg(SrcReg)
12637 .addImm(0)
12638 .addFrameIndex(FrameIdx)
12639 .addMemOperand(MMOStore);
12640
12641 MachineMemOperand *MMOLoad = F->getMachineMemOperand(
12642 MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
12643 MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIdx),
12644 MFI.getObjectAlign(FrameIdx));
12645
12646 // Load from the stack where SrcReg is stored, and save to DestReg,
12647 // so we have done the RegClass conversion from RegClass::SrcReg to
12648 // RegClass::DestReg.
12649 BuildMI(*BB, MI, dl, TII->get(LoadOp), DestReg)
12650 .addImm(0)
12651 .addFrameIndex(FrameIdx)
12652 .addMemOperand(MMOLoad);
12653 }
12654 };
12655
12656 Register OldFPSCRReg = MI.getOperand(0).getReg();
12657
12658 // Save FPSCR value.
12659 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
12660
12661 // When the operand is a gprc register, use its two least significant bits
12662 // together with the mtfsf instruction to set bits 62:63 of FPSCR.
12663 //
12664 // copy OldFPSCRTmpReg, OldFPSCRReg
12665 // (INSERT_SUBREG ExtSrcReg, (IMPLICIT_DEF ImDefReg), SrcOp, 1)
12666 // rldimi NewFPSCRTmpReg, ExtSrcReg, OldFPSCRReg, 0, 62
12667 // copy NewFPSCRReg, NewFPSCRTmpReg
12668 // mtfsf 255, NewFPSCRReg
12669 MachineOperand SrcOp = MI.getOperand(1);
12670 MachineRegisterInfo &RegInfo = F->getRegInfo();
12671 Register OldFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12672
12673 copyRegFromG8RCOrF8RC(OldFPSCRTmpReg, OldFPSCRReg);
12674
12675 Register ImDefReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12676 Register ExtSrcReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12677
12678 // The first operand of INSERT_SUBREG should be a register that has
12679 // subregisters. Since we only care about its RegClass, we can use an
12680 // IMPLICIT_DEF register.
12681 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::IMPLICIT_DEF), ImDefReg);
12682 BuildMI(*BB, MI, dl, TII->get(PPC::INSERT_SUBREG), ExtSrcReg)
12683 .addReg(ImDefReg)
12684 .add(SrcOp)
12685 .addImm(1);
12686
12687 Register NewFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12688 BuildMI(*BB, MI, dl, TII->get(PPC::RLDIMI), NewFPSCRTmpReg)
12689 .addReg(OldFPSCRTmpReg)
12690 .addReg(ExtSrcReg)
12691 .addImm(0)
12692 .addImm(62);
12693
12694 Register NewFPSCRReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
12695 copyRegFromG8RCOrF8RC(NewFPSCRReg, NewFPSCRTmpReg);
12696
12697 // The mask 255 means that bits 32:63 of NewFPSCRReg are written into bits
12698 // 32:63 of FPSCR.
12699 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSF))
12700 .addImm(255)
12701 .addReg(NewFPSCRReg)
12702 .addImm(0)
12703 .addImm(0);
12704 } else if (MI.getOpcode() == PPC::SETFLM) {
12705 DebugLoc Dl = MI.getDebugLoc();
12706
12707 // The result of setflm is the previous FPSCR content, so we need to save it first.
12708 Register OldFPSCRReg = MI.getOperand(0).getReg();
12709 BuildMI(*BB, MI, Dl, TII->get(PPC::MFFS), OldFPSCRReg);
12710
12711 // Put bits 32:63 of NewFPSCRReg into FPSCR.
12712 Register NewFPSCRReg = MI.getOperand(1).getReg();
12713 BuildMI(*BB, MI, Dl, TII->get(PPC::MTFSF))
12714 .addImm(255)
12715 .addReg(NewFPSCRReg)
12716 .addImm(0)
12717 .addImm(0);
12718 } else if (MI.getOpcode() == PPC::PROBED_ALLOCA_32 ||
12719 MI.getOpcode() == PPC::PROBED_ALLOCA_64) {
12720 return emitProbedAlloca(MI, BB);
12721 } else if (MI.getOpcode() == PPC::SPLIT_QUADWORD) {
12722 DebugLoc DL = MI.getDebugLoc();
12723 Register Src = MI.getOperand(2).getReg();
12724 Register Lo = MI.getOperand(0).getReg();
12725 Register Hi = MI.getOperand(1).getReg();
12726 BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY))
12727 .addDef(Lo)
12728 .addUse(Src, 0, PPC::sub_gp8_x1);
12729 BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY))
12730 .addDef(Hi)
12731 .addUse(Src, 0, PPC::sub_gp8_x0);
12732 } else if (MI.getOpcode() == PPC::LQX_PSEUDO ||
12733 MI.getOpcode() == PPC::STQX_PSEUDO) {
12734 DebugLoc DL = MI.getDebugLoc();
12735 // Ptr is used both as the ptr_rc_no_r0 part of LQ/STQ's memory operand
12736 // and as the result of adding RA and RB, so it has to be
12737 // g8rc_and_g8rc_nox0.
12738 Register Ptr =
12739 F->getRegInfo().createVirtualRegister(&PPC::G8RC_and_G8RC_NOX0RegClass);
12740 Register Val = MI.getOperand(0).getReg();
12741 Register RA = MI.getOperand(1).getReg();
12742 Register RB = MI.getOperand(2).getReg();
12743 BuildMI(*BB, MI, DL, TII->get(PPC::ADD8), Ptr).addReg(RA).addReg(RB);
12744 BuildMI(*BB, MI, DL,
12745 MI.getOpcode() == PPC::LQX_PSEUDO ? TII->get(PPC::LQ)
12746 : TII->get(PPC::STQ))
12747 .addReg(Val, MI.getOpcode() == PPC::LQX_PSEUDO ? RegState::Define : 0)
12748 .addImm(0)
12749 .addReg(Ptr);
12750 } else {
12751 llvm_unreachable("Unexpected instr type to insert");
12752 }
12753
12754 MI.eraseFromParent(); // The pseudo instruction is gone now.
12755 return BB;
12756}
12757
12758//===----------------------------------------------------------------------===//
12759// Target Optimization Hooks
12760//===----------------------------------------------------------------------===//
12761
12762static int getEstimateRefinementSteps(EVT VT, const PPCSubtarget &Subtarget) {
12763 // For the estimates, convergence is quadratic, so we essentially double the
12764 // number of digits correct after every iteration. For both FRE and FRSQRTE,
12765 // the minimum architected relative accuracy is 2^-5. When hasRecipPrec(),
12766 // this is 2^-14. IEEE float has 23 digits and double has 52 digits.
12767 int RefinementSteps = Subtarget.hasRecipPrec() ? 1 : 3;
12768 if (VT.getScalarType() == MVT::f64)
12769 RefinementSteps++;
12770 return RefinementSteps;
12771}
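// A minimal standalone sketch (not part of the LLVM sources) of where these
// step counts come from: convergence is quadratic, so each iteration doubles
// the number of correct bits, and we iterate until the estimate covers the
// significand of the type.
static int sketchRefinementSteps(int InitialBits, int SignificandBits) {
  int Steps = 0;
  for (int Bits = InitialBits; Bits < SignificandBits; Bits *= 2)
    ++Steps; // one more Newton-Raphson iteration
  return Steps;
}
// E.g. sketchRefinementSteps(5, 23) == 3 and sketchRefinementSteps(14, 23)
// == 1, matching the 3-vs-1 choice above; doubles (52 bits) need one more.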
12772
12773SDValue PPCTargetLowering::getSqrtInputTest(SDValue Op, SelectionDAG &DAG,
12774 const DenormalMode &Mode) const {
12775 // We only have VSX Vector Test for software Square Root.
12776 EVT VT = Op.getValueType();
12777 if (!isTypeLegal(MVT::i1) ||
12778 (VT != MVT::f64 &&
12779 ((VT != MVT::v2f64 && VT != MVT::v4f32) || !Subtarget.hasVSX())))
12780 return TargetLowering::getSqrtInputTest(Op, DAG, Mode);
12781
12782 SDLoc DL(Op);
12783 // The output register of FTSQRT is a CR field.
12784 SDValue FTSQRT = DAG.getNode(PPCISD::FTSQRT, DL, MVT::i32, Op);
12785 // ftsqrt BF,FRB
12786 // Let e_b be the unbiased exponent of the double-precision
12787 // floating-point operand in register FRB.
12788 // fe_flag is set to 1 if either of the following conditions occurs.
12789 // - The double-precision floating-point operand in register FRB is a zero,
12790 // a NaN, an infinity, or a negative value.
12791 // - e_b is less than or equal to -970.
12792 // Otherwise fe_flag is set to 0.
12793 // Both VSX and non-VSX versions would set the EQ bit in the CR if the number is
12794 // not eligible for iteration. (zero/negative/infinity/nan or unbiased
12795 // exponent is less than -970)
12796 SDValue SRIdxVal = DAG.getTargetConstant(PPC::sub_eq, DL, MVT::i32);
12797 return SDValue(DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, MVT::i1,
12798 FTSQRT, SRIdxVal),
12799 0);
12800}
12801
12802SDValue
12803PPCTargetLowering::getSqrtResultForDenormInput(SDValue Op,
12804 SelectionDAG &DAG) const {
12805 // We only have VSX Vector Square Root.
12806 EVT VT = Op.getValueType();
12807 if (VT != MVT::f64 &&
12808 ((VT != MVT::v2f64 && VT != MVT::v4f32) || !Subtarget.hasVSX()))
12809 return TargetLowering::getSqrtResultForDenormInput(Op, DAG);
12810
12811 return DAG.getNode(PPCISD::FSQRT, SDLoc(Op), VT, Op);
12812}
12813
12814SDValue PPCTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
12815 int Enabled, int &RefinementSteps,
12816 bool &UseOneConstNR,
12817 bool Reciprocal) const {
12818 EVT VT = Operand.getValueType();
12819 if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
12820 (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
12821 (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
12822 (VT == MVT::v2f64 && Subtarget.hasVSX())) {
12823 if (RefinementSteps == ReciprocalEstimate::Unspecified)
12824 RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
12825
12826 // The Newton-Raphson computation with a single constant does not provide
12827 // enough accuracy on some CPUs.
12828 UseOneConstNR = !Subtarget.needsTwoConstNR();
12829 return DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand);
12830 }
12831 return SDValue();
12832}
12833
12834SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
12835 int Enabled,
12836 int &RefinementSteps) const {
12837 EVT VT = Operand.getValueType();
12838 if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
12839 (VT == MVT::f64 && Subtarget.hasFRE()) ||
12840 (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
12841 (VT == MVT::v2f64 && Subtarget.hasVSX())) {
12842 if (RefinementSteps == ReciprocalEstimate::Unspecified)
12843 RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
12844 return DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand);
12845 }
12846 return SDValue();
12847}
12848
12849unsigned PPCTargetLowering::combineRepeatedFPDivisors() const {
12850 // Note: This functionality is used only when unsafe-fp-math is enabled, and
12851 // on cores with reciprocal estimates (which are used when unsafe-fp-math is
12852 // enabled for division), this functionality is redundant with the default
12853 // combiner logic (once the division -> reciprocal/multiply transformation
12854 // has taken place). As a result, this matters more for older cores than for
12855 // newer ones.
12856
12857 // Combine multiple FDIVs with the same divisor into multiple FMULs by the
12858 // reciprocal if there are two or more FDIVs (for embedded cores with only
12859 // one FP pipeline) or three or more FDIVs (for generic OOO cores).
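// For example, on a generic core:
//   x = a / d; y = b / d; z = c / d;
// becomes
//   r = 1.0 / d; x = a * r; y = b * r; z = c * r;
// trading three divisions for one division plus three multiplies.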
12860 switch (Subtarget.getCPUDirective()) {
12861 default:
12862 return 3;
12863 case PPC::DIR_440:
12864 case PPC::DIR_A2:
12865 case PPC::DIR_E500:
12866 case PPC::DIR_E500mc:
12867 case PPC::DIR_E5500:
12868 return 2;
12869 }
12870}
12871
12872// isConsecutiveLSLoc needs to work even if all adds have not yet been
12873// collapsed, and so we need to look through chains of them.
12874static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base,
12875 int64_t& Offset, SelectionDAG &DAG) {
12876 if (DAG.isBaseWithConstantOffset(Loc)) {
12877 Base = Loc.getOperand(0);
12878 Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();
12879
12880 // The base might itself be a base plus an offset, and if so, accumulate
12881 // that as well.
12882 getBaseWithConstantOffset(Loc.getOperand(0), Base, Offset, DAG);
12883 }
12884}
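// For example, given Loc = (add (add %x, 8), 16), the recursive walk above
// produces Base = %x and Offset = 24.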
12885
12886static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base,
12887 unsigned Bytes, int Dist,
12888 SelectionDAG &DAG) {
12889 if (VT.getSizeInBits() / 8 != Bytes)
12890 return false;
12891
12892 SDValue BaseLoc = Base->getBasePtr();
12893 if (Loc.getOpcode() == ISD::FrameIndex) {
12894 if (BaseLoc.getOpcode() != ISD::FrameIndex)
12895 return false;
12896 const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
12897 int FI = cast<FrameIndexSDNode>(Loc)->getIndex();
12898 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
12899 int FS = MFI.getObjectSize(FI);
12900 int BFS = MFI.getObjectSize(BFI);
12901 if (FS != BFS || FS != (int)Bytes) return false;
12902 return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist*Bytes);
12903 }
12904
12905 SDValue Base1 = Loc, Base2 = BaseLoc;
12906 int64_t Offset1 = 0, Offset2 = 0;
12907 getBaseWithConstantOffset(Loc, Base1, Offset1, DAG);
12908 getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG);
12909 if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes))
12910 return true;
12911
12912 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12913 const GlobalValue *GV1 = nullptr;
12914 const GlobalValue *GV2 = nullptr;
12915 Offset1 = 0;
12916 Offset2 = 0;
12917 bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
12918 bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
12919 if (isGA1 && isGA2 && GV1 == GV2)
12920 return Offset1 == (Offset2 + Dist*Bytes);
12921 return false;
12922}
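// For example, a 4-byte load from (%base + 4) is consecutive (Dist = 1,
// Bytes = 4) to a 4-byte load from %base: Base1 == Base2 and
// Offset1 == Offset2 + Dist * Bytes.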
12923
12924// Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does
12925// not enforce equality of the chain operands.
12926static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
12927 unsigned Bytes, int Dist,
12928 SelectionDAG &DAG) {
12929 if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) {
12930 EVT VT = LS->getMemoryVT();
12931 SDValue Loc = LS->getBasePtr();
12932 return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG);
12933 }
12934
12935 if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
12936 EVT VT;
12937 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
12938 default: return false;
12939 case Intrinsic::ppc_altivec_lvx:
12940 case Intrinsic::ppc_altivec_lvxl:
12941 case Intrinsic::ppc_vsx_lxvw4x:
12942 case Intrinsic::ppc_vsx_lxvw4x_be:
12943 VT = MVT::v4i32;
12944 break;
12945 case Intrinsic::ppc_vsx_lxvd2x:
12946 case Intrinsic::ppc_vsx_lxvd2x_be:
12947 VT = MVT::v2f64;
12948 break;
12949 case Intrinsic::ppc_altivec_lvebx:
12950 VT = MVT::i8;
12951 break;
12952 case Intrinsic::ppc_altivec_lvehx:
12953 VT = MVT::i16;
12954 break;
12955 case Intrinsic::ppc_altivec_lvewx:
12956 VT = MVT::i32;
12957 break;
12958 }
12959
12960 return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG);
12961 }
12962
12963 if (N->getOpcode() == ISD::INTRINSIC_VOID) {
12964 EVT VT;
12965 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
12966 default: return false;
12967 case Intrinsic::ppc_altivec_stvx:
12968 case Intrinsic::ppc_altivec_stvxl:
12969 case Intrinsic::ppc_vsx_stxvw4x:
12970 VT = MVT::v4i32;
12971 break;
12972 case Intrinsic::ppc_vsx_stxvd2x:
12973 VT = MVT::v2f64;
12974 break;
12975 case Intrinsic::ppc_vsx_stxvw4x_be:
12976 VT = MVT::v4i32;
12977 break;
12978 case Intrinsic::ppc_vsx_stxvd2x_be:
12979 VT = MVT::v2f64;
12980 break;
12981 case Intrinsic::ppc_altivec_stvebx:
12982 VT = MVT::i8;
12983 break;
12984 case Intrinsic::ppc_altivec_stvehx:
12985 VT = MVT::i16;
12986 break;
12987 case Intrinsic::ppc_altivec_stvewx:
12988 VT = MVT::i32;
12989 break;
12990 }
12991
12992 return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG);
12993 }
12994
12995 return false;
12996}
12997
12998 // Return true if there is a nearby consecutive load to the one provided
12999 // (regardless of alignment). We search up and down the chain, looking through
13000// token factors and other loads (but nothing else). As a result, a true result
13001// indicates that it is safe to create a new consecutive load adjacent to the
13002// load provided.
13003static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
13004 SDValue Chain = LD->getChain();
13005 EVT VT = LD->getMemoryVT();
13006
13007 SmallSet<SDNode *, 16> LoadRoots;
13008 SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
13009 SmallSet<SDNode *, 16> Visited;
13010
13011 // First, search up the chain, branching to follow all token-factor operands.
13012 // If we find a consecutive load, then we're done, otherwise, record all
13013 // nodes just above the top-level loads and token factors.
13014 while (!Queue.empty()) {
13015 SDNode *ChainNext = Queue.pop_back_val();
13016 if (!Visited.insert(ChainNext).second)
13017 continue;
13018
13019 if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
13020 if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
13021 return true;
13022
13023 if (!Visited.count(ChainLD->getChain().getNode()))
13024 Queue.push_back(ChainLD->getChain().getNode());
13025 } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
13026 for (const SDUse &O : ChainNext->ops())
13027 if (!Visited.count(O.getNode()))
13028 Queue.push_back(O.getNode());
13029 } else
13030 LoadRoots.insert(ChainNext);
13031 }
13032
13033 // Second, search down the chain, starting from the top-level nodes recorded
13034 // in the first phase. These top-level nodes are the nodes just above all
13035 // loads and token factors. Starting with their uses, recursively look through
13036 // all loads (just the chain uses) and token factors to find a consecutive
13037 // load.
13038 Visited.clear();
13039 Queue.clear();
13040
13041 for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(),
13042 IE = LoadRoots.end(); I != IE; ++I) {
13043 Queue.push_back(*I);
13044
13045 while (!Queue.empty()) {
13046 SDNode *LoadRoot = Queue.pop_back_val();
13047 if (!Visited.insert(LoadRoot).second)
13048 continue;
13049
13050 if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot))
13051 if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
13052 return true;
13053
13054 for (SDNode::use_iterator UI = LoadRoot->use_begin(),
13055 UE = LoadRoot->use_end(); UI != UE; ++UI)
13056 if (((isa<MemSDNode>(*UI) &&
13057 cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) ||
13058 UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI))
13059 Queue.push_back(*UI);
13060 }
13061 }
13062
13063 return false;
13064}
13065
13066/// This function is called when we have proved that a SETCC node can be replaced
13067/// by subtraction (and other supporting instructions) so that the result of
13068/// comparison is kept in a GPR instead of CR. This function is purely for
13069/// codegen purposes and has some flags to guide the codegen process.
13070static SDValue generateEquivalentSub(SDNode *N, int Size, bool Complement,
13071 bool Swap, SDLoc &DL, SelectionDAG &DAG) {
13072 assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
13073
13074 // Zero extend the operands to the largest legal integer. Originally, they
13075 // must be of a strictly smaller size.
13076 auto Op0 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(0),
13077 DAG.getConstant(Size, DL, MVT::i32));
13078 auto Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1),
13079 DAG.getConstant(Size, DL, MVT::i32));
13080
13081 // Swap if needed. Depends on the condition code.
13082 if (Swap)
13083 std::swap(Op0, Op1);
13084
13085 // Subtract extended integers.
13086 auto SubNode = DAG.getNode(ISD::SUB, DL, MVT::i64, Op0, Op1);
13087
13088 // Move the sign bit to the least significant position and zero out the rest.
13089 // Now the least significant bit carries the result of the original comparison.
13090 auto Shifted = DAG.getNode(ISD::SRL, DL, MVT::i64, SubNode,
13091 DAG.getConstant(Size - 1, DL, MVT::i32));
13092 auto Final = Shifted;
13093
13094 // Complement the result if needed. Based on the condition code.
13095 if (Complement)
13096 Final = DAG.getNode(ISD::XOR, DL, MVT::i64, Shifted,
13097 DAG.getConstant(1, DL, MVT::i64));
13098
13099 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Final);
13100}
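// A scalar sketch (not part of the source) of the SETULT case for 32-bit
// operands on a 64-bit target: the 64-bit difference of the zero-extended
// operands has its sign bit set exactly when the first operand is smaller,
// so the srl by Size - 1 extracts the comparison result.
static uint64_t sketchSetULT(uint32_t A, uint32_t B) {
  uint64_t Sub = uint64_t(A) - uint64_t(B); // sub of zero-extended operands
  return Sub >> 63;                         // 1 iff A < B (unsigned)
}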
13101
13102SDValue PPCTargetLowering::ConvertSETCCToSubtract(SDNode *N,
13103 DAGCombinerInfo &DCI) const {
13104 assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
13105
13106 SelectionDAG &DAG = DCI.DAG;
13107 SDLoc DL(N);
13108
13109 // Size of integers being compared has a critical role in the following
13110 // analysis, so we prefer to do this when all types are legal.
13111 if (!DCI.isAfterLegalizeDAG())
13112 return SDValue();
13113
13114 // If all users of the SETCC extend its value to a legal integer type,
13115 // then we replace the SETCC with a subtraction.
13116 for (SDNode::use_iterator UI = N->use_begin(),
13117 UE = N->use_end(); UI != UE; ++UI) {
13118 if (UI->getOpcode() != ISD::ZERO_EXTEND)
13119 return SDValue();
13120 }
13121
13122 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
13123 auto OpSize = N->getOperand(0).getValueSizeInBits();
13124
13125 unsigned Size = DAG.getDataLayout().getLargestLegalIntTypeSizeInBits();
13126
13127 if (OpSize < Size) {
13128 switch (CC) {
13129 default: break;
13130 case ISD::SETULT:
13131 return generateEquivalentSub(N, Size, false, false, DL, DAG);
13132 case ISD::SETULE:
13133 return generateEquivalentSub(N, Size, true, true, DL, DAG);
13134 case ISD::SETUGT:
13135 return generateEquivalentSub(N, Size, false, true, DL, DAG);
13136 case ISD::SETUGE:
13137 return generateEquivalentSub(N, Size, true, false, DL, DAG);
13138 }
13139 }
13140
13141 return SDValue();
13142}
13143
13144SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
13145 DAGCombinerInfo &DCI) const {
13146 SelectionDAG &DAG = DCI.DAG;
13147 SDLoc dl(N);
13148
13149 assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");
13150 // If we're tracking CR bits, we need to be careful that we don't have:
13151 // trunc(binary-ops(zext(x), zext(y)))
13152 // or
13153 // trunc(binary-ops(binary-ops(zext(x), zext(y)), ...))
13154 // such that we're unnecessarily moving things into GPRs when it would be
13155 // better to keep them in CR bits.
13156
13157 // Note that trunc here can be an actual i1 trunc, or can be the effective
13158 // truncation that comes from a setcc or select_cc.
13159 if (N->getOpcode() == ISD::TRUNCATE &&
13160 N->getValueType(0) != MVT::i1)
13161 return SDValue();
13162
13163 if (N->getOperand(0).getValueType() != MVT::i32 &&
13164 N->getOperand(0).getValueType() != MVT::i64)
13165 return SDValue();
13166
13167 if (N->getOpcode() == ISD::SETCC ||
13168 N->getOpcode() == ISD::SELECT_CC) {
13169 // If we're looking at a comparison, then we need to make sure that the
13170 // high bits (all except for the first) don't affect the result.
13171 ISD::CondCode CC =
13172 cast<CondCodeSDNode>(N->getOperand(
13173 N->getOpcode() == ISD::SETCC ? 2 : 4))->get();
13174 unsigned OpBits = N->getOperand(0).getValueSizeInBits();
13175
13176 if (ISD::isSignedIntSetCC(CC)) {
13177 if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits ||
13178 DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits)
13179 return SDValue();
13180 } else if (ISD::isUnsignedIntSetCC(CC)) {
13181 if (!DAG.MaskedValueIsZero(N->getOperand(0),
13182 APInt::getHighBitsSet(OpBits, OpBits-1)) ||
13183 !DAG.MaskedValueIsZero(N->getOperand(1),
13184 APInt::getHighBitsSet(OpBits, OpBits-1)))
13185 return (N->getOpcode() == ISD::SETCC ? ConvertSETCCToSubtract(N, DCI)
13186 : SDValue());
13187 } else {
13188 // This is neither a signed nor an unsigned comparison, just make sure
13189 // that the high bits are equal.
13190 KnownBits Op1Known = DAG.computeKnownBits(N->getOperand(0));
13191 KnownBits Op2Known = DAG.computeKnownBits(N->getOperand(1));
13192
13193 // We don't really care about what is known about the first bit (if
13194 // anything), so pretend that it is known zero for both to ensure they can
13195 // be compared as constants.
13196 Op1Known.Zero.setBit(0); Op1Known.One.clearBit(0);
13197 Op2Known.Zero.setBit(0); Op2Known.One.clearBit(0);
13198
13199 if (!Op1Known.isConstant() || !Op2Known.isConstant() ||
13200 Op1Known.getConstant() != Op2Known.getConstant())
13201 return SDValue();
13202 }
13203 }
13204
13205 // We now know that the higher-order bits are irrelevant; we just need to
13206 // make sure that all of the intermediate operations are bit operations, and
13207 // all inputs are extensions.
13208 if (N->getOperand(0).getOpcode() != ISD::AND &&
13209 N->getOperand(0).getOpcode() != ISD::OR &&
13210 N->getOperand(0).getOpcode() != ISD::XOR &&
13211 N->getOperand(0).getOpcode() != ISD::SELECT &&
13212 N->getOperand(0).getOpcode() != ISD::SELECT_CC &&
13213 N->getOperand(0).getOpcode() != ISD::TRUNCATE &&
13214 N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND &&
13215 N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND &&
13216 N->getOperand(0).getOpcode() != ISD::ANY_EXTEND)
13217 return SDValue();
13218
13219 if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) &&
13220 N->getOperand(1).getOpcode() != ISD::AND &&
13221 N->getOperand(1).getOpcode() != ISD::OR &&
13222 N->getOperand(1).getOpcode() != ISD::XOR &&
13223 N->getOperand(1).getOpcode() != ISD::SELECT &&
13224 N->getOperand(1).getOpcode() != ISD::SELECT_CC &&
13225 N->getOperand(1).getOpcode() != ISD::TRUNCATE &&
13226 N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND &&
13227 N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND &&
13228 N->getOperand(1).getOpcode() != ISD::ANY_EXTEND)
13229 return SDValue();
13230
13231 SmallVector<SDValue, 4> Inputs;
13232 SmallVector<SDValue, 8> BinOps, PromOps;
13233 SmallPtrSet<SDNode *, 16> Visited;
13234
13235 for (unsigned i = 0; i < 2; ++i) {
13236 if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
13237 N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
13238 N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
13239 N->getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
13240 isa<ConstantSDNode>(N->getOperand(i)))
13241 Inputs.push_back(N->getOperand(i));
13242 else
13243 BinOps.push_back(N->getOperand(i));
13244
13245 if (N->getOpcode() == ISD::TRUNCATE)
13246 break;
13247 }
13248
13249 // Visit all inputs, collect all binary operations (and, or, xor and
13250 // select) that are all fed by extensions.
13251 while (!BinOps.empty()) {
13252 SDValue BinOp = BinOps.pop_back_val();
13253
13254 if (!Visited.insert(BinOp.getNode()).second)
13255 continue;
13256
13257 PromOps.push_back(BinOp);
13258
13259 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
13260 // The condition of the select is not promoted.
13261 if (BinOp.getOpcode() == ISD::SELECT && i == 0)
13262 continue;
13263 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
13264 continue;
13265
13266 if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
13267 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
13268 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
13269 BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
13270 isa<ConstantSDNode>(BinOp.getOperand(i))) {
13271 Inputs.push_back(BinOp.getOperand(i));
13272 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
13273 BinOp.getOperand(i).getOpcode() == ISD::OR ||
13274 BinOp.getOperand(i).getOpcode() == ISD::XOR ||
13275 BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
13276 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC ||
13277 BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
13278 BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
13279 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
13280 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) {
13281 BinOps.push_back(BinOp.getOperand(i));
13282 } else {
13283 // We have an input that is not an extension or another binary
13284 // operation; we'll abort this transformation.
13285 return SDValue();
13286 }
13287 }
13288 }
13289
13290 // Make sure that this is a self-contained cluster of operations (which
13291 // is not quite the same thing as saying that everything has only one
13292 // use).
13293 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13294 if (isa<ConstantSDNode>(Inputs[i]))
13295 continue;
13296
13297 for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
13298 UE = Inputs[i].getNode()->use_end();
13299 UI != UE; ++UI) {
13300 SDNode *User = *UI;
13301 if (User != N && !Visited.count(User))
13302 return SDValue();
13303
13304 // Make sure that we're not going to promote the non-output-value
13305 // operand(s) or SELECT or SELECT_CC.
13306 // FIXME: Although we could sometimes handle this, and it does occur in
13307 // practice that one of the condition inputs to the select is also one of
13308 // the outputs, we currently can't deal with this.
13309 if (User->getOpcode() == ISD::SELECT) {
13310 if (User->getOperand(0) == Inputs[i])
13311 return SDValue();
13312 } else if (User->getOpcode() == ISD::SELECT_CC) {
13313 if (User->getOperand(0) == Inputs[i] ||
13314 User->getOperand(1) == Inputs[i])
13315 return SDValue();
13316 }
13317 }
13318 }
13319
13320 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
13321 for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
13322 UE = PromOps[i].getNode()->use_end();
13323 UI != UE; ++UI) {
13324 SDNode *User = *UI;
13325 if (User != N && !Visited.count(User))
13326 return SDValue();
13327
13328 // Make sure that we're not going to promote the non-output-value
13329 // operand(s) or SELECT or SELECT_CC.
13330 // FIXME: Although we could sometimes handle this, and it does occur in
13331 // practice that one of the condition inputs to the select is also one of
13332 // the outputs, we currently can't deal with this.
13333 if (User->getOpcode() == ISD::SELECT) {
13334 if (User->getOperand(0) == PromOps[i])
13335 return SDValue();
13336 } else if (User->getOpcode() == ISD::SELECT_CC) {
13337 if (User->getOperand(0) == PromOps[i] ||
13338 User->getOperand(1) == PromOps[i])
13339 return SDValue();
13340 }
13341 }
13342 }
13343
13344 // Replace all inputs with the extension operand.
13345 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13346 // Constants may have users outside the cluster of to-be-promoted nodes,
13347 // and so we need to replace those as we do the promotions.
13348 if (isa<ConstantSDNode>(Inputs[i]))
13349 continue;
13350 else
13351 DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0));
13352 }
13353
13354 std::list<HandleSDNode> PromOpHandles;
13355 for (auto &PromOp : PromOps)
13356 PromOpHandles.emplace_back(PromOp);
13357
13358 // Replace all operations (these are all the same, but have a different
13359 // (i1) return type). DAG.getNode will validate that the types of
13360 // a binary operator match, so go through the list in reverse so that
13361 // we've likely promoted both operands first. Any intermediate truncations or
13362 // extensions disappear.
13363 while (!PromOpHandles.empty()) {
13364 SDValue PromOp = PromOpHandles.back().getValue();
13365 PromOpHandles.pop_back();
13366
13367 if (PromOp.getOpcode() == ISD::TRUNCATE ||
13368 PromOp.getOpcode() == ISD::SIGN_EXTEND ||
13369 PromOp.getOpcode() == ISD::ZERO_EXTEND ||
13370 PromOp.getOpcode() == ISD::ANY_EXTEND) {
13371 if (!isa<ConstantSDNode>(PromOp.getOperand(0)) &&
13372 PromOp.getOperand(0).getValueType() != MVT::i1) {
13373 // The operand is not yet ready (see comment below).
13374 PromOpHandles.emplace_front(PromOp);
13375 continue;
13376 }
13377
13378 SDValue RepValue = PromOp.getOperand(0);
13379 if (isa<ConstantSDNode>(RepValue))
13380 RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue);
13381
13382 DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue);
13383 continue;
13384 }
13385
13386 unsigned C;
13387 switch (PromOp.getOpcode()) {
13388 default: C = 0; break;
13389 case ISD::SELECT: C = 1; break;
13390 case ISD::SELECT_CC: C = 2; break;
13391 }
13392
13393 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
13394 PromOp.getOperand(C).getValueType() != MVT::i1) ||
13395 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
13396 PromOp.getOperand(C+1).getValueType() != MVT::i1)) {
13397 // The to-be-promoted operands of this node have not yet been
13398 // promoted (this should be rare because we're going through the
13399 // list backward, but if one of the operands has several users in
13400 // this cluster of to-be-promoted nodes, it is possible).
13401 PromOpHandles.emplace_front(PromOp);
13402 continue;
13403 }
13404
13405 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
13406 PromOp.getNode()->op_end());
13407
13408 // If there are any constant inputs, make sure they're replaced now.
13409 for (unsigned i = 0; i < 2; ++i)
13410 if (isa<ConstantSDNode>(Ops[C+i]))
13411 Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]);
13412
13413 DAG.ReplaceAllUsesOfValueWith(PromOp,
13414 DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops));
13415 }
13416
13417 // Now we're left with the initial truncation itself.
13418 if (N->getOpcode() == ISD::TRUNCATE)
13419 return N->getOperand(0);
13420
13421 // Otherwise, this is a comparison. The operands to be compared have just
13422 // changed type (to i1), but everything else is the same.
13423 return SDValue(N, 0);
13424}
13425
13426SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
13427 DAGCombinerInfo &DCI) const {
13428 SelectionDAG &DAG = DCI.DAG;
13429 SDLoc dl(N);
13430
13431 // If we're tracking CR bits, we need to be careful that we don't have:
13432 // zext(binary-ops(trunc(x), trunc(y)))
13433 // or
13434 // zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...))
13435 // such that we're unnecessarily moving things into CR bits that can more
13436 // efficiently stay in GPRs. Note that if we're not certain that the high
13437 // bits are set as required by the final extension, we still may need to do
13438 // some masking to get the proper behavior.
13439
13440 // This same functionality is important on PPC64 when dealing with
13441 // 32-to-64-bit extensions; these occur often when 32-bit values are used as
13442 // the return values of functions. Because it is so similar, it is handled
13443 // here as well.
13444
13445 if (N->getValueType(0) != MVT::i32 &&
13446 N->getValueType(0) != MVT::i64)
13447 return SDValue();
13448
13449 if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) ||
13450 (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64())))
13451 return SDValue();
13452
13453 if (N->getOperand(0).getOpcode() != ISD::AND &&
13454 N->getOperand(0).getOpcode() != ISD::OR &&
13455 N->getOperand(0).getOpcode() != ISD::XOR &&
13456 N->getOperand(0).getOpcode() != ISD::SELECT &&
13457 N->getOperand(0).getOpcode() != ISD::SELECT_CC)
13458 return SDValue();
13459
13460 SmallVector<SDValue, 4> Inputs;
13461 SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps;
13462 SmallPtrSet<SDNode *, 16> Visited;
13463
13464 // Visit all inputs, collect all binary operations (and, or, xor and
13465 // select) that are all fed by truncations.
13466 while (!BinOps.empty()) {
13467 SDValue BinOp = BinOps.pop_back_val();
13468
13469 if (!Visited.insert(BinOp.getNode()).second)
13470 continue;
13471
13472 PromOps.push_back(BinOp);
13473
13474 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
13475 // The condition of the select is not promoted.
13476 if (BinOp.getOpcode() == ISD::SELECT && i == 0)
13477 continue;
13478 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
13479 continue;
13480
13481 if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
13482 isa<ConstantSDNode>(BinOp.getOperand(i))) {
13483 Inputs.push_back(BinOp.getOperand(i));
13484 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
13485 BinOp.getOperand(i).getOpcode() == ISD::OR ||
13486 BinOp.getOperand(i).getOpcode() == ISD::XOR ||
13487 BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
13488 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) {
13489 BinOps.push_back(BinOp.getOperand(i));
13490 } else {
13491 // We have an input that is not a truncation or another binary
13492 // operation; we'll abort this transformation.
13493 return SDValue();
13494 }
13495 }
13496 }
13497
13498 // The operands of a select that must be truncated when the select is
13499 // promoted because the operand is actually part of the to-be-promoted set.
13500 DenseMap<SDNode *, EVT> SelectTruncOp[2];
13501
13502 // Make sure that this is a self-contained cluster of operations (which
13503 // is not quite the same thing as saying that everything has only one
13504 // use).
13505 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13506 if (isa<ConstantSDNode>(Inputs[i]))
13507 continue;
13508
13509 for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
13510 UE = Inputs[i].getNode()->use_end();
13511 UI != UE; ++UI) {
13512 SDNode *User = *UI;
13513 if (User != N && !Visited.count(User))
13514 return SDValue();
13515
13516 // If we're going to promote the non-output-value operand(s) or SELECT or
13517 // SELECT_CC, record them for truncation.
13518 if (User->getOpcode() == ISD::SELECT) {
13519 if (User->getOperand(0) == Inputs[i])
13520 SelectTruncOp[0].insert(std::make_pair(User,
13521 User->getOperand(0).getValueType()));
13522 } else if (User->getOpcode() == ISD::SELECT_CC) {
13523 if (User->getOperand(0) == Inputs[i])
13524 SelectTruncOp[0].insert(std::make_pair(User,
13525 User->getOperand(0).getValueType()));
13526 if (User->getOperand(1) == Inputs[i])
13527 SelectTruncOp[1].insert(std::make_pair(User,
13528 User->getOperand(1).getValueType()));
13529 }
13530 }
13531 }
13532
13533 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
13534 for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
13535 UE = PromOps[i].getNode()->use_end();
13536 UI != UE; ++UI) {
13537 SDNode *User = *UI;
13538 if (User != N && !Visited.count(User))
13539 return SDValue();
13540
13541 // If we're going to promote the non-output-value operand(s) or SELECT or
13542 // SELECT_CC, record them for truncation.
13543 if (User->getOpcode() == ISD::SELECT) {
13544 if (User->getOperand(0) == PromOps[i])
13545 SelectTruncOp[0].insert(std::make_pair(User,
13546 User->getOperand(0).getValueType()));
13547 } else if (User->getOpcode() == ISD::SELECT_CC) {
13548 if (User->getOperand(0) == PromOps[i])
13549 SelectTruncOp[0].insert(std::make_pair(User,
13550 User->getOperand(0).getValueType()));
13551 if (User->getOperand(1) == PromOps[i])
13552 SelectTruncOp[1].insert(std::make_pair(User,
13553 User->getOperand(1).getValueType()));
13554 }
13555 }
13556 }
13557
13558 unsigned PromBits = N->getOperand(0).getValueSizeInBits();
13559 bool ReallyNeedsExt = false;
13560 if (N->getOpcode() != ISD::ANY_EXTEND) {
13561 // If any of the inputs is not already sign/zero-extended, then
13562 // we'll still need to do that at the end.
13563 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13564 if (isa<ConstantSDNode>(Inputs[i]))
13565 continue;
13566
13567 unsigned OpBits =
13568 Inputs[i].getOperand(0).getValueSizeInBits();
13569 assert(PromBits < OpBits && "Truncation not to a smaller bit count?");
13570
13571 if ((N->getOpcode() == ISD::ZERO_EXTEND &&
13572 !DAG.MaskedValueIsZero(Inputs[i].getOperand(0),
13573 APInt::getHighBitsSet(OpBits,
13574 OpBits-PromBits))) ||
13575 (N->getOpcode() == ISD::SIGN_EXTEND &&
13576 DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) <
13577 (OpBits-(PromBits-1)))) {
13578 ReallyNeedsExt = true;
13579 break;
13580 }
13581 }
13582 }
13583
13584 // Replace all inputs, either with the truncation operand, or a
13585 // truncation or extension to the final output type.
13586 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13587 // Constant inputs need to be replaced with the to-be-promoted nodes that
13588 // use them because they might have users outside of the cluster of
13589 // promoted nodes.
13590 if (isa<ConstantSDNode>(Inputs[i]))
13591 continue;
13592
13593 SDValue InSrc = Inputs[i].getOperand(0);
13594 if (Inputs[i].getValueType() == N->getValueType(0))
13595 DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc);
13596 else if (N->getOpcode() == ISD::SIGN_EXTEND)
13597 DAG.ReplaceAllUsesOfValueWith(Inputs[i],
13598 DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0)));
13599 else if (N->getOpcode() == ISD::ZERO_EXTEND)
13600 DAG.ReplaceAllUsesOfValueWith(Inputs[i],
13601 DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0)));
13602 else
13603 DAG.ReplaceAllUsesOfValueWith(Inputs[i],
13604 DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0)));
13605 }
13606
13607 std::list<HandleSDNode> PromOpHandles;
13608 for (auto &PromOp : PromOps)
13609 PromOpHandles.emplace_back(PromOp);
13610
13611 // Replace all operations (these are all the same, but have a different
13612 // (promoted) return type). DAG.getNode will validate that the types of
13613 // a binary operator match, so go through the list in reverse so that
13614 // we've likely promoted both operands first.
13615 while (!PromOpHandles.empty()) {
13616 SDValue PromOp = PromOpHandles.back().getValue();
13617 PromOpHandles.pop_back();
13618
13619 unsigned C;
13620 switch (PromOp.getOpcode()) {
13621 default: C = 0; break;
13622 case ISD::SELECT: C = 1; break;
13623 case ISD::SELECT_CC: C = 2; break;
13624 }
13625
13626 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
13627 PromOp.getOperand(C).getValueType() != N->getValueType(0)) ||
13628 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
13629 PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) {
13630 // The to-be-promoted operands of this node have not yet been
13631 // promoted (this should be rare because we're going through the
13632 // list backward, but if one of the operands has several users in
13633 // this cluster of to-be-promoted nodes, it is possible).
13634 PromOpHandles.emplace_front(PromOp);
13635 continue;
13636 }
13637
13638 // For SELECT and SELECT_CC nodes, we do a similar check for any
13639 // to-be-promoted comparison inputs.
13640 if (PromOp.getOpcode() == ISD::SELECT ||
13641 PromOp.getOpcode() == ISD::SELECT_CC) {
13642 if ((SelectTruncOp[0].count(PromOp.getNode()) &&
13643 PromOp.getOperand(0).getValueType() != N->getValueType(0)) ||
13644 (SelectTruncOp[1].count(PromOp.getNode()) &&
13645 PromOp.getOperand(1).getValueType() != N->getValueType(0))) {
13646 PromOpHandles.emplace_front(PromOp);
13647 continue;
13648 }
13649 }
13650
13651 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
13652 PromOp.getNode()->op_end());
13653
13654 // If this node has constant inputs, then they'll need to be promoted here.
13655 for (unsigned i = 0; i < 2; ++i) {
13656 if (!isa<ConstantSDNode>(Ops[C+i]))
13657 continue;
13658 if (Ops[C+i].getValueType() == N->getValueType(0))
13659 continue;
13660
13661 if (N->getOpcode() == ISD::SIGN_EXTEND)
13662 Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
13663 else if (N->getOpcode() == ISD::ZERO_EXTEND)
13664 Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
13665 else
13666 Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
13667 }
13668
13669 // If we've promoted the comparison inputs of a SELECT or SELECT_CC,
13670 // truncate them again to the original value type.
13671 if (PromOp.getOpcode() == ISD::SELECT ||
13672 PromOp.getOpcode() == ISD::SELECT_CC) {
13673 auto SI0 = SelectTruncOp[0].find(PromOp.getNode());
13674 if (SI0 != SelectTruncOp[0].end())
13675 Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]);
13676 auto SI1 = SelectTruncOp[1].find(PromOp.getNode());
13677 if (SI1 != SelectTruncOp[1].end())
13678 Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]);
13679 }
13680
13681 DAG.ReplaceAllUsesOfValueWith(PromOp,
13682 DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops));
13683 }
13684
13685 // Now we're left with the initial extension itself.
13686 if (!ReallyNeedsExt)
13687 return N->getOperand(0);
13688
13689 // To zero extend, just mask off everything except for the first bit (in the
13690 // i1 case).
13691 if (N->getOpcode() == ISD::ZERO_EXTEND)
13692 return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0),
13693 DAG.getConstant(APInt::getLowBitsSet(
13694 N->getValueSizeInBits(0), PromBits),
13695 dl, N->getValueType(0)));
13696
13697 assert(N->getOpcode() == ISD::SIGN_EXTEND &&
13698 "Invalid extension type");
13699 EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout());
13700 SDValue ShiftCst =
13701 DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy);
13702 return DAG.getNode(
13703 ISD::SRA, dl, N->getValueType(0),
13704 DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0), ShiftCst),
13705 ShiftCst);
13706}
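// A minimal sketch (not part of the source) of the closing shl/sra pair for
// the i1-in-i32 case: shifting the low bit into the sign position and
// arithmetic-shifting it back replicates it across the register (assumes
// the usual two's-complement behavior).
static int32_t sketchSignExtendI1(uint32_t X) {
  return int32_t(X << 31) >> 31; // 0 -> 0, 1 -> -1
}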
13707
13708SDValue PPCTargetLowering::combineSetCC(SDNode *N,
13709 DAGCombinerInfo &DCI) const {
13710 assert(N->getOpcode() == ISD::SETCC &&
13711 "Should be called with a SETCC node");
13712
13713 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
13714 if (CC == ISD::SETNE || CC == ISD::SETEQ) {
13715 SDValue LHS = N->getOperand(0);
13716 SDValue RHS = N->getOperand(1);
13717
13718 // If there is a '0 - y' pattern, canonicalize the pattern to the RHS.
13719 if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) &&
13720 LHS.hasOneUse())
13721 std::swap(LHS, RHS);
13722
13723 // x == 0-y --> x+y == 0
13724 // x != 0-y --> x+y != 0
13725 if (RHS.getOpcode() == ISD::SUB && isNullConstant(RHS.getOperand(0)) &&
13726 RHS.hasOneUse()) {
13727 SDLoc DL(N);
13728 SelectionDAG &DAG = DCI.DAG;
13729 EVT VT = N->getValueType(0);
13730 EVT OpVT = LHS.getValueType();
13731 SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, LHS, RHS.getOperand(1));
13732 return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
13733 }
13734 }
13735
13736 return DAGCombineTruncBoolExt(N, DCI);
13737}
13738
13739// Is this an extending load from an f32 to an f64?
13740static bool isFPExtLoad(SDValue Op) {
13741 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode()))
13742 return LD->getExtensionType() == ISD::EXTLOAD &&
13743 Op.getValueType() == MVT::f64;
13744 return false;
13745}
13746
13747 /// Reduces the number of fp-to-int conversions when building a vector.
13748///
13749/// If this vector is built out of floating to integer conversions,
13750/// transform it to a vector built out of floating point values followed by a
13751/// single floating to integer conversion of the vector.
13752/// Namely (build_vector (fptosi $A), (fptosi $B), ...)
13753/// becomes (fptosi (build_vector ($A, $B, ...)))
13754SDValue PPCTargetLowering::
13755combineElementTruncationToVectorTruncation(SDNode *N,
13756 DAGCombinerInfo &DCI) const {
13757 assert(N->getOpcode() == ISD::BUILD_VECTOR &&
13758 "Should be called with a BUILD_VECTOR node");
13759
13760 SelectionDAG &DAG = DCI.DAG;
13761 SDLoc dl(N);
13762
13763 SDValue FirstInput = N->getOperand(0);
13764 assert(FirstInput.getOpcode() == PPCISD::MFVSR &&
13765 "The input operand must be an fp-to-int conversion.");
13766
13767 // This combine happens after legalization so the fp_to_[su]i nodes are
13768 // already converted to PPCSISD nodes.
13769 unsigned FirstConversion = FirstInput.getOperand(0).getOpcode();
13770 if (FirstConversion == PPCISD::FCTIDZ ||
13771 FirstConversion == PPCISD::FCTIDUZ ||
13772 FirstConversion == PPCISD::FCTIWZ ||
13773 FirstConversion == PPCISD::FCTIWUZ) {
13774 bool IsSplat = true;
13775 bool Is32Bit = FirstConversion == PPCISD::FCTIWZ ||
13776 FirstConversion == PPCISD::FCTIWUZ;
13777 EVT SrcVT = FirstInput.getOperand(0).getValueType();
13778 SmallVector<SDValue, 4> Ops;
13779 EVT TargetVT = N->getValueType(0);
13780 for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
13781 SDValue NextOp = N->getOperand(i);
13782 if (NextOp.getOpcode() != PPCISD::MFVSR)
13783 return SDValue();
13784 unsigned NextConversion = NextOp.getOperand(0).getOpcode();
13785 if (NextConversion != FirstConversion)
13786 return SDValue();
13787 // If we are converting to 32-bit integers, we need to add an FP_ROUND.
13788 // This is not valid if the input was originally double precision. It is
13789 // also not profitable unless the input is an extending load, in which
13790 // case this combine lets us combine the consecutive loads.
13791 if (Is32Bit && !isFPExtLoad(NextOp.getOperand(0).getOperand(0)))
13792 return SDValue();
13793 if (N->getOperand(i) != FirstInput)
13794 IsSplat = false;
13795 }
13796
13797 // If this is a splat, we leave it as-is since there will be only a single
13798 // fp-to-int conversion followed by a splat of the integer. This is better
13799 // for 32-bit and smaller ints and neutral for 64-bit ints.
13800 if (IsSplat)
13801 return SDValue();
13802
13803 // Now that we know we have the right type of node, get its operands
13804 for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
13805 SDValue In = N->getOperand(i).getOperand(0);
13806 if (Is32Bit) {
13807 // For 32-bit values, we need to add an FP_ROUND node (if we made it
13808 // here, we know that all inputs are extending loads so this is safe).
13809 if (In.isUndef())
13810 Ops.push_back(DAG.getUNDEF(SrcVT));
13811 else {
13812 SDValue Trunc = DAG.getNode(ISD::FP_ROUND, dl,
13813 MVT::f32, In.getOperand(0),
13814 DAG.getIntPtrConstant(1, dl));
13815 Ops.push_back(Trunc);
13816 }
13817 } else
13818 Ops.push_back(In.isUndef() ? DAG.getUNDEF(SrcVT) : In.getOperand(0));
13819 }
13820
13821 unsigned Opcode;
13822 if (FirstConversion == PPCISD::FCTIDZ ||
13823 FirstConversion == PPCISD::FCTIWZ)
13824 Opcode = ISD::FP_TO_SINT;
13825 else
13826 Opcode = ISD::FP_TO_UINT;
13827
13828 EVT NewVT = TargetVT == MVT::v2i64 ? MVT::v2f64 : MVT::v4f32;
13829 SDValue BV = DAG.getBuildVector(NewVT, dl, Ops);
13830 return DAG.getNode(Opcode, dl, TargetVT, BV);
13831 }
13832 return SDValue();
13833}
13834
13835/// Reduce the number of loads when building a vector.
13836///
13837/// Building a vector out of multiple loads can be converted to a load
13838/// of the vector type if the loads are consecutive. If the loads are
13839/// consecutive but in descending order, a shuffle is added at the end
13840/// to reorder the vector.
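/// For example (an illustrative sketch; %p and the f32 element type are
/// assumed): (v4f32 (build_vector (load %p), (load %p+4), (load %p+8),
/// (load %p+12))) can be replaced by a single (v4f32 (load %p)); if the same
/// loads appear in descending address order instead, the vector load is
/// followed by a vector_shuffle that reverses the element order.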
13841static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG) {
13842 assert(N->getOpcode() == ISD::BUILD_VECTOR &&
13843        "Should be called with a BUILD_VECTOR node");
13844
13845 SDLoc dl(N);
13846
13847 // Return early for non-byte-sized types, as they can't be consecutive.
13848 if (!N->getValueType(0).getVectorElementType().isByteSized())
13849 return SDValue();
13850
13851 bool InputsAreConsecutiveLoads = true;
13852 bool InputsAreReverseConsecutive = true;
13853 unsigned ElemSize = N->getValueType(0).getScalarType().getStoreSize();
13854 SDValue FirstInput = N->getOperand(0);
13855 bool IsRoundOfExtLoad = false;
13856
13857 if (FirstInput.getOpcode() == ISD::FP_ROUND &&
13858 FirstInput.getOperand(0).getOpcode() == ISD::LOAD) {
13859 // The opcode was just checked to be ISD::LOAD, so cast<> cannot fail here.
13860 LoadSDNode *LD = cast<LoadSDNode>(FirstInput.getOperand(0));
      IsRoundOfExtLoad = LD->getExtensionType() == ISD::EXTLOAD;
13861 }
13862 // Not a build vector of (possibly fp_rounded) loads.
13863 if ((!IsRoundOfExtLoad && FirstInput.getOpcode() != ISD::LOAD) ||
13864 N->getNumOperands() == 1)
13865 return SDValue();
13866
13867 for (int i = 1, e = N->getNumOperands(); i < e; ++i) {
13868 // If any inputs are fp_round(extload), they all must be.
13869 if (IsRoundOfExtLoad && N->getOperand(i).getOpcode() != ISD::FP_ROUND)
13870 return SDValue();
13871
13872 SDValue NextInput = IsRoundOfExtLoad ? N->getOperand(i).getOperand(0) :
13873 N->getOperand(i);
13874 if (NextInput.getOpcode() != ISD::LOAD)
13875 return SDValue();
13876
13877 SDValue PreviousInput =
13878 IsRoundOfExtLoad ? N->getOperand(i-1).getOperand(0) : N->getOperand(i-1);
13879 LoadSDNode *LD1 = cast<LoadSDNode>(PreviousInput);
13880 LoadSDNode *LD2 = cast<LoadSDNode>(NextInput);
13881
13882 // If any inputs are fp_round(extload), they all must be.
13883 if (IsRoundOfExtLoad && LD2->getExtensionType() != ISD::EXTLOAD)
13884 return SDValue();
13885
13886 if (!isConsecutiveLS(LD2, LD1, ElemSize, 1, DAG))
13887 InputsAreConsecutiveLoads = false;
13888 if (!isConsecutiveLS(LD1, LD2, ElemSize, 1, DAG))
13889 InputsAreReverseConsecutive = false;
13890
13891 // Exit early if the loads are neither consecutive nor reverse consecutive.
13892 if (!InputsAreConsecutiveLoads && !InputsAreReverseConsecutive)
13893 return SDValue();
13894 }
13895
13896 assert(!(InputsAreConsecutiveLoads && InputsAreReverseConsecutive) &&
13897        "The loads cannot be both consecutive and reverse consecutive.");
13898
13899 SDValue FirstLoadOp =
13900 IsRoundOfExtLoad ? FirstInput.getOperand(0) : FirstInput;
13901 SDValue LastLoadOp =
13902 IsRoundOfExtLoad ? N->getOperand(N->getNumOperands()-1).getOperand(0) :
13903 N->getOperand(N->getNumOperands()-1);
13904
13905 LoadSDNode *LD1 = dyn_cast<LoadSDNode>(FirstLoadOp);
13906 LoadSDNode *LDL = dyn_cast<LoadSDNode>(LastLoadOp);
13907 if (InputsAreConsecutiveLoads) {
13908 assert(LD1 && "Input needs to be a LoadSDNode.");
13909 return DAG.getLoad(N->getValueType(0), dl, LD1->getChain(),
13910 LD1->getBasePtr(), LD1->getPointerInfo(),
13911 LD1->getAlignment());
13912 }
13913 if (InputsAreReverseConsecutive) {
13914 assert(LDL && "Input needs to be a LoadSDNode.");
13915 SDValue Load = DAG.getLoad(N->getValueType(0), dl, LDL->getChain(),
13916 LDL->getBasePtr(), LDL->getPointerInfo(),
13917 LDL->getAlignment());
13918 SmallVector<int, 16> Ops;
13919 for (int i = N->getNumOperands() - 1; i >= 0; i--)
13920 Ops.push_back(i);
13921
13922 return DAG.getVectorShuffle(N->getValueType(0), dl, Load,
13923 DAG.getUNDEF(N->getValueType(0)), Ops);
13924 }
13925 return SDValue();
13926}
13927
13928 // This function adds the vector_shuffle needed to get the elements
13929 // of the vector extract into the correct positions, as specified
13930 // by the CorrectElems encoding.
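// For example (an illustrative sketch): on LE the loop below consumes both
// encodings a nibble at a time from the low end, placing each source index
// (Elems & 0xF) at mask position (CorrectElems & 0xF).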
13931static SDValue addShuffleForVecExtend(SDNode *N, SelectionDAG &DAG,
13932 SDValue Input, uint64_t Elems,
13933 uint64_t CorrectElems) {
13934 SDLoc dl(N);
13935
13936 unsigned NumElems = Input.getValueType().getVectorNumElements();
13937 SmallVector<int, 16> ShuffleMask(NumElems, -1);
13938
13939 // Knowing the element indices being extracted from the original
13940 // vector and the order in which they're being inserted, just put
13941 // them at element indices required for the instruction.
13942 for (unsigned i = 0; i < N->getNumOperands(); i++) {
13943 if (DAG.getDataLayout().isLittleEndian())
13944 ShuffleMask[CorrectElems & 0xF] = Elems & 0xF;
13945 else
13946 ShuffleMask[(CorrectElems & 0xF0) >> 4] = (Elems & 0xF0) >> 4;
13947 CorrectElems = CorrectElems >> 8;
13948 Elems = Elems >> 8;
13949 }
13950
13951 SDValue Shuffle =
13952 DAG.getVectorShuffle(Input.getValueType(), dl, Input,
13953 DAG.getUNDEF(Input.getValueType()), ShuffleMask);
13954
13955 EVT VT = N->getValueType(0);
13956 SDValue Conv = DAG.getBitcast(VT, Shuffle);
13957
13958 EVT ExtVT = EVT::getVectorVT(*DAG.getContext(),
13959 Input.getValueType().getVectorElementType(),
13960 VT.getVectorNumElements());
13961 return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, VT, Conv,
13962 DAG.getValueType(ExtVT));
13963}
13964
13965 // Look for build vector patterns where the input operands are sign-extended
13966 // vector_extract elements of specific indices. If the correct indices aren't
13967 // used, add a vector shuffle to fix up the indices and create a
13968 // SIGN_EXTEND_INREG node, which selects the vector sign extend instructions
13969 // during instruction selection.
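// For example (an illustrative LE sketch; %v is an assumed name and the
// allowed indices come from the encodings below):
//   (v4i32 (build_vector (sext (extractelt v16i8:%v, 0)),
//                        (sext (extractelt %v, 4)),
//                        (sext (extractelt %v, 8)),
//                        (sext (extractelt %v, 12))))
// already uses the allowed byte->word indices 0x0,0x4,0x8,0xC, so only the
// SIGN_EXTEND_INREG is created; other index sets get a fix-up shuffle first.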
13970static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG) {
13971 // This array encodes the indices that the vector sign extend instructions
13972 // extract from when extending from one type to another for both BE and LE.
13973 // The right nibble of each byte corresponds to the LE indices,
13974 // and the left nibble of each byte corresponds to the BE indices.
13975 // For example: 0x3074B8FC byte->word
13976 // For LE: the allowed indices are: 0x0,0x4,0x8,0xC
13977 // For BE: the allowed indices are: 0x3,0x7,0xB,0xF
13978 // For example: 0x000070F8 byte->double word
13979 // For LE: the allowed indices are: 0x0,0x8
13980 // For BE: the allowed indices are: 0x7,0xF
13981 uint64_t TargetElems[] = {
13982 0x3074B8FC, // b->w
13983 0x000070F8, // b->d
13984 0x10325476, // h->w
13985 0x00003074, // h->d
13986 0x00001032, // w->d
13987 };
13988
13989 uint64_t Elems = 0;
13990 int Index;
13991 SDValue Input;
13992
13993 auto isSExtOfVecExtract = [&](SDValue Op) -> bool {
13994 if (!Op)
13995 return false;
13996 if (Op.getOpcode() != ISD::SIGN_EXTEND &&
13997 Op.getOpcode() != ISD::SIGN_EXTEND_INREG)
13998 return false;
13999
14000 // A SIGN_EXTEND_INREG might be fed by an ANY_EXTEND to produce a value
14001 // of the right width.
14002 SDValue Extract = Op.getOperand(0);
14003 if (Extract.getOpcode() == ISD::ANY_EXTEND)
14004 Extract = Extract.getOperand(0);
14005 if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
14006 return false;
14007
14008 ConstantSDNode *ExtOp = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
14009 if (!ExtOp)
14010 return false;
14011
14012 Index = ExtOp->getZExtValue();
14013 if (Input && Input != Extract.getOperand(0))
14014 return false;
14015
14016 if (!Input)
14017 Input = Extract.getOperand(0);
14018
14019 Elems = Elems << 8;
14020 Index = DAG.getDataLayout().isLittleEndian() ? Index : Index << 4;
14021 Elems |= Index;
14022
14023 return true;
14024 };
14025
14026 // If the build vector operands aren't sign-extended vector extracts
14027 // of the same input vector, then return.
14028 for (unsigned i = 0; i < N->getNumOperands(); i++) {
14029 if (!isSExtOfVecExtract(N->getOperand(i))) {
14030 return SDValue();
14031 }
14032 }
14033
14034 // If the vector extract indices are not correct, add the appropriate
14035 // vector_shuffle.
14036 int TgtElemArrayIdx;
14037 int InputSize = Input.getValueType().getScalarSizeInBits();
14038 int OutputSize = N->getValueType(0).getScalarSizeInBits();
14039 if (InputSize + OutputSize == 40)
14040 TgtElemArrayIdx = 0;
14041 else if (InputSize + OutputSize == 72)
14042 TgtElemArrayIdx = 1;
14043 else if (InputSize + OutputSize == 48)
14044 TgtElemArrayIdx = 2;
14045 else if (InputSize + OutputSize == 80)
14046 TgtElemArrayIdx = 3;
14047 else if (InputSize + OutputSize == 96)
14048 TgtElemArrayIdx = 4;
14049 else
14050 return SDValue();
14051
14052 uint64_t CorrectElems = TargetElems[TgtElemArrayIdx];
14053 CorrectElems = DAG.getDataLayout().isLittleEndian()
14054 ? CorrectElems & 0x0F0F0F0F0F0F0F0F
14055 : CorrectElems & 0xF0F0F0F0F0F0F0F0;
14056 if (Elems != CorrectElems) {
14057 return addShuffleForVecExtend(N, DAG, Input, Elems, CorrectElems);
14058 }
14059
14060 // Regular lowering will catch cases where a shuffle is not needed.
14061 return SDValue();
14062}
14063
14064// Look for the pattern of a load from a narrow width to i128, feeding
14065// into a BUILD_VECTOR of v1i128. Replace this sequence with a PPCISD node
14066// (LXVRZX). This node represents a zero extending load that will be matched
14067// to the Load VSX Vector Rightmost instructions.
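// For example (an illustrative sketch; %p is an assumed name):
//   (v1i128 (build_vector (i128 (zextload<i64> %p))))
// becomes an LXVRZX memory intrinsic node that carries the element width
// (here 64) as its third operand.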
14068static SDValue combineBVZEXTLOAD(SDNode *N, SelectionDAG &DAG) {
14069 SDLoc DL(N);
14070
14071 // This combine is only eligible for a BUILD_VECTOR of v1i128.
14072 if (N->getValueType(0) != MVT::v1i128)
14073 return SDValue();
14074
14075 SDValue Operand = N->getOperand(0);
14076 // Proceed with the transformation if the operand to the BUILD_VECTOR
14077 // is a load instruction.
14078 if (Operand.getOpcode() != ISD::LOAD)
14079 return SDValue();
14080
14081 auto *LD = cast<LoadSDNode>(Operand);
14082 EVT MemoryType = LD->getMemoryVT();
14083
14084 // This transformation is only valid if we are loading either a byte,
14085 // halfword, word, or doubleword.
14086 bool ValidLDType = MemoryType == MVT::i8 || MemoryType == MVT::i16 ||
14087 MemoryType == MVT::i32 || MemoryType == MVT::i64;
14088
14089 // Ensure that the load from the narrow width is being zero extended to i128.
14090 if (!ValidLDType ||
14091 (LD->getExtensionType() != ISD::ZEXTLOAD &&
14092 LD->getExtensionType() != ISD::EXTLOAD))
14093 return SDValue();
14094
14095 SDValue LoadOps[] = {
14096 LD->getChain(), LD->getBasePtr(),
14097 DAG.getIntPtrConstant(MemoryType.getScalarSizeInBits(), DL)};
14098
14099 return DAG.getMemIntrinsicNode(PPCISD::LXVRZX, DL,
14100 DAG.getVTList(MVT::v1i128, MVT::Other),
14101 LoadOps, MemoryType, LD->getMemOperand());
14102}
14103
14104SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N,
14105 DAGCombinerInfo &DCI) const {
14106 assert(N->getOpcode() == ISD::BUILD_VECTOR &&
14107        "Should be called with a BUILD_VECTOR node");
14108
14109 SelectionDAG &DAG = DCI.DAG;
14110 SDLoc dl(N);
14111
14112 if (!Subtarget.hasVSX())
14113 return SDValue();
14114
14115 // The target independent DAG combiner will leave a build_vector of
14116 // float-to-int conversions intact. We can generate MUCH better code for
14117 // a float-to-int conversion of a vector of floats.
14118 SDValue FirstInput = N->getOperand(0);
14119 if (FirstInput.getOpcode() == PPCISD::MFVSR) {
14120 SDValue Reduced = combineElementTruncationToVectorTruncation(N, DCI);
14121 if (Reduced)
14122 return Reduced;
14123 }
14124
14125 // If we're building a vector out of consecutive loads, just load that
14126 // vector type.
14127 SDValue Reduced = combineBVOfConsecutiveLoads(N, DAG);
14128 if (Reduced)
14129 return Reduced;
14130
14131 // If we're building a vector out of extended elements from another vector
14132 // we have P9 vector integer extend instructions. The code assumes legal
14133 // input types (i.e. it can't handle things like v4i16) so do not run before
14134 // legalization.
14135 if (Subtarget.hasP9Altivec() && !DCI.isBeforeLegalize()) {
14136 Reduced = combineBVOfVecSExt(N, DAG);
14137 if (Reduced)
14138 return Reduced;
14139 }
14140
14141 // On Power10, the Load VSX Vector Rightmost instructions can be utilized
14142 // if this is a BUILD_VECTOR of v1i128, and if the operand to the BUILD_VECTOR
14143 // is a load from <valid narrow width> to i128.
14144 if (Subtarget.isISA3_1()) {
14145 SDValue BVOfZLoad = combineBVZEXTLOAD(N, DAG);
14146 if (BVOfZLoad)
14147 return BVOfZLoad;
14148 }
14149
14150 if (N->getValueType(0) != MVT::v2f64)
14151 return SDValue();
14152
14153 // Looking for:
14154 // (build_vector ([su]int_to_fp (extractelt 0)), [su]int_to_fp (extractelt 1))
14155 if (FirstInput.getOpcode() != ISD::SINT_TO_FP &&
14156 FirstInput.getOpcode() != ISD::UINT_TO_FP)
14157 return SDValue();
14158 if (N->getOperand(1).getOpcode() != ISD::SINT_TO_FP &&
14159 N->getOperand(1).getOpcode() != ISD::UINT_TO_FP)
14160 return SDValue();
14161 if (FirstInput.getOpcode() != N->getOperand(1).getOpcode())
14162 return SDValue();
14163
14164 SDValue Ext1 = FirstInput.getOperand(0);
14165 SDValue Ext2 = N->getOperand(1).getOperand(0);
14166 if (Ext1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
14167     Ext2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
14168 return SDValue();
14169
14170 ConstantSDNode *Ext1Op = dyn_cast<ConstantSDNode>(Ext1.getOperand(1));
14171 ConstantSDNode *Ext2Op = dyn_cast<ConstantSDNode>(Ext2.getOperand(1));
14172 if (!Ext1Op || !Ext2Op)
14173 return SDValue();
14174 if (Ext1.getOperand(0).getValueType() != MVT::v4i32 ||
14175 Ext1.getOperand(0) != Ext2.getOperand(0))
14176 return SDValue();
14177
14178 int FirstElem = Ext1Op->getZExtValue();
14179 int SecondElem = Ext2Op->getZExtValue();
14180 int SubvecIdx;
14181 if (FirstElem == 0 && SecondElem == 1)
14182 SubvecIdx = Subtarget.isLittleEndian() ? 1 : 0;
14183 else if (FirstElem == 2 && SecondElem == 3)
14184 SubvecIdx = Subtarget.isLittleEndian() ? 0 : 1;
14185 else
14186 return SDValue();
14187
14188 SDValue SrcVec = Ext1.getOperand(0);
14189 auto NodeType = (N->getOperand(1).getOpcode() == ISD::SINT_TO_FP) ?
14190 PPCISD::SINT_VEC_TO_FP : PPCISD::UINT_VEC_TO_FP;
14191 return DAG.getNode(NodeType, dl, MVT::v2f64,
14192 SrcVec, DAG.getIntPtrConstant(SubvecIdx, dl));
14193}
14194
14195SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N,
14196 DAGCombinerInfo &DCI) const {
14197 assert((N->getOpcode() == ISD::SINT_TO_FP ||
14198         N->getOpcode() == ISD::UINT_TO_FP) &&
14199        "Need an int -> FP conversion node here");
14200
14201 if (useSoftFloat() || !Subtarget.has64BitSupport())
14202 return SDValue();
14203
14204 SelectionDAG &DAG = DCI.DAG;
14205 SDLoc dl(N);
14206 SDValue Op(N, 0);
14207
14208 // Don't handle ppc_fp128 here or conversions that are out of range
14209 // for the hardware.
14210 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
14211 return SDValue();
14212 if (!Op.getOperand(0).getValueType().isSimple())
14213 return SDValue();
14214 if (Op.getOperand(0).getValueType().getSimpleVT() <= MVT(MVT::i1) ||
14215 Op.getOperand(0).getValueType().getSimpleVT() > MVT(MVT::i64))
14216 return SDValue();
14217
14218 SDValue FirstOperand(Op.getOperand(0));
14219 bool SubWordLoad = FirstOperand.getOpcode() == ISD::LOAD &&
14220 (FirstOperand.getValueType() == MVT::i8 ||
14221 FirstOperand.getValueType() == MVT::i16);
14222 if (Subtarget.hasP9Vector() && Subtarget.hasP9Altivec() && SubWordLoad) {
14223 bool Signed = N->getOpcode() == ISD::SINT_TO_FP;
14224 bool DstDouble = Op.getValueType() == MVT::f64;
14225 unsigned ConvOp = Signed ?
14226 (DstDouble ? PPCISD::FCFID : PPCISD::FCFIDS) :
14227 (DstDouble ? PPCISD::FCFIDU : PPCISD::FCFIDUS);
14228 SDValue WidthConst =
14229 DAG.getIntPtrConstant(FirstOperand.getValueType() == MVT::i8 ? 1 : 2,
14230 dl, false);
14231 LoadSDNode *LDN = cast<LoadSDNode>(FirstOperand.getNode());
14232 SDValue Ops[] = { LDN->getChain(), LDN->getBasePtr(), WidthConst };
14233 SDValue Ld = DAG.getMemIntrinsicNode(PPCISD::LXSIZX, dl,
14234 DAG.getVTList(MVT::f64, MVT::Other),
14235 Ops, MVT::i8, LDN->getMemOperand());
14236
14237 // For signed conversion, we need to sign-extend the value in the VSR
14238 if (Signed) {
14239 SDValue ExtOps[] = { Ld, WidthConst };
14240 SDValue Ext = DAG.getNode(PPCISD::VEXTS, dl, MVT::f64, ExtOps);
14241 return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ext);
14242 } else
14243 return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ld);
14244 }
14245
14246
14247 // For i32 intermediate values, unfortunately, the conversion functions
14248 // leave the upper 32 bits of the value undefined. Within the set of
14249 // scalar instructions, we have no method for zero- or sign-extending the
14250 // value. Thus, we cannot handle i32 intermediate values here.
14251 if (Op.getOperand(0).getValueType() == MVT::i32)
14252 return SDValue();
14253
14254 assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
14255        "UINT_TO_FP is supported only with FPCVT");
14256
14257 // If we have FCFIDS, then use it when converting to single-precision.
14258 // Otherwise, convert to double-precision and then round.
14259 unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
14260 ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
14261 : PPCISD::FCFIDS)
14262 : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
14263 : PPCISD::FCFID);
14264 MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
14265 ? MVT::f32
14266 : MVT::f64;
14267
14268 // If we're converting from a float to an int and back to a float again,
14269 // then we don't need the store/load pair at all.
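// For example (an illustrative sketch; $A is an assumed name):
//   (f64 (sint_to_fp (i64 (fp_to_sint f64:$A))))
// stays in the FP domain as an FCTIDZ feeding an FCFID.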
14270 if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
14271 Subtarget.hasFPCVT()) ||
14272 (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) {
14273 SDValue Src = Op.getOperand(0).getOperand(0);
14274 if (Src.getValueType() == MVT::f32) {
14275 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
14276 DCI.AddToWorklist(Src.getNode());
14277 } else if (Src.getValueType() != MVT::f64) {
14278 // Make sure that we don't pick up a ppc_fp128 source value.
14279 return SDValue();
14280 }
14281
14282 unsigned FCTOp =
14283 Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
14284 PPCISD::FCTIDUZ;
14285
14286 SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src);
14287 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp);
14288
14289 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
14290 FP = DAG.getNode(ISD::FP_ROUND, dl,
14291 MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
14292 DCI.AddToWorklist(FP.getNode());
14293 }
14294
14295 return FP;
14296 }
14297
14298 return SDValue();
14299}
14300
14301// expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for
14302// builtins) into loads with swaps.
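// For example (an illustrative little endian sketch; %p is an assumed name):
// (v4i32 (load %p)) becomes (bitcast (xxswapd (lxvd2x %p))), with the swap's
// chain repackaged to match the original load's shape.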
14303SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N,
14304 DAGCombinerInfo &DCI) const {
14305 SelectionDAG &DAG = DCI.DAG;
14306 SDLoc dl(N);
14307 SDValue Chain;
14308 SDValue Base;
14309 MachineMemOperand *MMO;
14310
14311 switch (N->getOpcode()) {
14312 default:
14313 llvm_unreachable("Unexpected opcode for little endian VSX load");
14314 case ISD::LOAD: {
14315 LoadSDNode *LD = cast<LoadSDNode>(N);
14316 Chain = LD->getChain();
14317 Base = LD->getBasePtr();
14318 MMO = LD->getMemOperand();
14319 // If the MMO suggests this isn't a load of a full vector, leave
14320 // things alone. For a built-in, we have to make the change for
14321 // correctness, so if there is a size problem that will be a bug.
14322 if (MMO->getSize() < 16)
14323 return SDValue();
14324 break;
14325 }
14326 case ISD::INTRINSIC_W_CHAIN: {
14327 MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
14328 Chain = Intrin->getChain();
14329 // Similarly to the store case below, Intrin->getBasePtr() doesn't get
14330 // us what we want. Get operand 2 instead.
14331 Base = Intrin->getOperand(2);
14332 MMO = Intrin->getMemOperand();
14333 break;
14334 }
14335 }
14336
14337 MVT VecTy = N->getValueType(0).getSimpleVT();
14338
14339 // Do not expand to PPCISD::LXVD2X + PPCISD::XXSWAPD when the load is
14340 // aligned and the type is a vector with elements up to 4 bytes
14341 if (Subtarget.needsSwapsForVSXMemOps() && MMO->getAlign() >= Align(16) &&
14342 VecTy.getScalarSizeInBits() <= 32) {
14343 return SDValue();
14344 }
14345
14346 SDValue LoadOps[] = { Chain, Base };
14347 SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl,
14348 DAG.getVTList(MVT::v2f64, MVT::Other),
14349 LoadOps, MVT::v2f64, MMO);
14350
14351 DCI.AddToWorklist(Load.getNode());
14352 Chain = Load.getValue(1);
14353 SDValue Swap = DAG.getNode(
14354 PPCISD::XXSWAPD, dl, DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Load);
14355 DCI.AddToWorklist(Swap.getNode());
14356
14357 // Add a bitcast if the resulting load type doesn't match v2f64.
14358 if (VecTy != MVT::v2f64) {
14359 SDValue N = DAG.getNode(ISD::BITCAST, dl, VecTy, Swap);
14360 DCI.AddToWorklist(N.getNode());
14361 // Package {bitcast value, swap's chain} to match Load's shape.
14362 return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VecTy, MVT::Other),
14363 N, Swap.getValue(1));
14364 }
14365
14366 return Swap;
14367}
14368
14369// expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for
14370// builtins) into stores with swaps.
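// For example (an illustrative little endian sketch; $v and %p are assumed
// names): (store v4i32:$v, %p) becomes
// (stxvd2x (xxswapd (bitcast v2f64 $v)), %p).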
14371SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N,
14372 DAGCombinerInfo &DCI) const {
14373 SelectionDAG &DAG = DCI.DAG;
14374 SDLoc dl(N);
14375 SDValue Chain;
14376 SDValue Base;
14377 unsigned SrcOpnd;
14378 MachineMemOperand *MMO;
14379
14380 switch (N->getOpcode()) {
14381 default:
14382 llvm_unreachable("Unexpected opcode for little endian VSX store");
14383 case ISD::STORE: {
14384 StoreSDNode *ST = cast<StoreSDNode>(N);
14385 Chain = ST->getChain();
14386 Base = ST->getBasePtr();
14387 MMO = ST->getMemOperand();
14388 SrcOpnd = 1;
14389 // If the MMO suggests this isn't a store of a full vector, leave
14390 // things alone. For a built-in, we have to make the change for
14391 // correctness, so if there is a size problem that will be a bug.
14392 if (MMO->getSize() < 16)
14393 return SDValue();
14394 break;
14395 }
14396 case ISD::INTRINSIC_VOID: {
14397 MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
14398 Chain = Intrin->getChain();
14399 // Intrin->getBasePtr() oddly does not get what we want.
14400 Base = Intrin->getOperand(3);
14401 MMO = Intrin->getMemOperand();
14402 SrcOpnd = 2;
14403 break;
14404 }
14405 }
14406
14407 SDValue Src = N->getOperand(SrcOpnd);
14408 MVT VecTy = Src.getValueType().getSimpleVT();
14409
14410 // Do not expand to PPCISD::XXSWAPD and PPCISD::STXVD2X when the store is
14411 // aligned and the type is a vector with elements up to 4 bytes.
14412 if (Subtarget.needsSwapsForVSXMemOps() && MMO->getAlign() >= Align(16) &&
14413 VecTy.getScalarSizeInBits() <= 32) {
14414 return SDValue();
14415 }
14416
14417 // All stores are done as v2f64 with a possible bitcast.
14418 if (VecTy != MVT::v2f64) {
14419 Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src);
14420 DCI.AddToWorklist(Src.getNode());
14421 }
14422
14423 SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
14424 DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src);
14425 DCI.AddToWorklist(Swap.getNode());
14426 Chain = Swap.getValue(1);
14427 SDValue StoreOps[] = { Chain, Swap, Base };
14428 SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl,
14429 DAG.getVTList(MVT::Other),
14430 StoreOps, VecTy, MMO);
14431 DCI.AddToWorklist(Store.getNode());
14432 return Store;
14433}
14434
14435// Handle DAG combine for STORE (FP_TO_INT F).
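// For example (an illustrative sketch; $F and %p are assumed names):
// (store (fp_to_sint f64:$F), %p) becomes an FP_TO_SINT_IN_VSR conversion
// feeding a ST_VSR_SCAL_INT store, keeping the converted value in a VSR.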
14436SDValue PPCTargetLowering::combineStoreFPToInt(SDNode *N,
14437 DAGCombinerInfo &DCI) const {
14438
14439 SelectionDAG &DAG = DCI.DAG;
14440 SDLoc dl(N);
14441 unsigned Opcode = N->getOperand(1).getOpcode();
14442
14443 assert((Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT) &&
14444        "Not a FP_TO_INT Instruction!");
14445
14446 SDValue Val = N->getOperand(1).getOperand(0);
14447 EVT Op1VT = N->getOperand(1).getValueType();
14448 EVT ResVT = Val.getValueType();
14449
14450 if (!isTypeLegal(ResVT))
14451 return SDValue();
14452
14453 // Only perform the combine for conversions to i64/i32, or to i16/i8 on Power9.
14454 bool ValidTypeForStoreFltAsInt =
14455 (Op1VT == MVT::i32 || Op1VT == MVT::i64 ||
14456 (Subtarget.hasP9Vector() && (Op1VT == MVT::i16 || Op1VT == MVT::i8)));
14457
14458 if (ResVT == MVT::f128 && !Subtarget.hasP9Vector())
14459 return SDValue();
14460
14461 if (ResVT == MVT::ppcf128 || !Subtarget.hasP8Vector() ||
14462 cast<StoreSDNode>(N)->isTruncatingStore() || !ValidTypeForStoreFltAsInt)
14463 return SDValue();
14464
14465 // Extend f32 values to f64
14466 if (ResVT.getScalarSizeInBits() == 32) {
14467 Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
14468 DCI.AddToWorklist(Val.getNode());
14469 }
14470
14471 // Set signed or unsigned conversion opcode.
14472 unsigned ConvOpcode = (Opcode == ISD::FP_TO_SINT) ?
14473 PPCISD::FP_TO_SINT_IN_VSR :
14474 PPCISD::FP_TO_UINT_IN_VSR;
14475
14476 Val = DAG.getNode(ConvOpcode,
14477 dl, ResVT == MVT::f128 ? MVT::f128 : MVT::f64, Val);
14478 DCI.AddToWorklist(Val.getNode());
14479
14480 // Set number of bytes being converted.
14481 unsigned ByteSize = Op1VT.getScalarSizeInBits() / 8;
14482 SDValue Ops[] = { N->getOperand(0), Val, N->getOperand(2),
14483 DAG.getIntPtrConstant(ByteSize, dl, false),
14484 DAG.getValueType(Op1VT) };
14485
14486 Val = DAG.getMemIntrinsicNode(PPCISD::ST_VSR_SCAL_INT, dl,
14487 DAG.getVTList(MVT::Other), Ops,
14488 cast<StoreSDNode>(N)->getMemoryVT(),
14489 cast<StoreSDNode>(N)->getMemOperand());
14490
14491 DCI.AddToWorklist(Val.getNode());
14492 return Val;
14493}
14494
14495static bool isAlternatingShuffMask(const ArrayRef<int> &Mask, int NumElts) {
14496 // Check that the source of the elements keeps flipping between the two
14497 // vectors (i.e. Mask[i] < NumElts -> Mask[i+1] >= NumElts).
14498 bool PrevElemFromFirstVec = Mask[0] < NumElts;
14499 for (int i = 1, e = Mask.size(); i < e; i++) {
14500 if (PrevElemFromFirstVec && Mask[i] < NumElts)
14501 return false;
14502 if (!PrevElemFromFirstVec && Mask[i] >= NumElts)
14503 return false;
14504 PrevElemFromFirstVec = !PrevElemFromFirstVec;
14505 }
14506 return true;
14507}
14508
14509static bool isSplatBV(SDValue Op) {
14510 if (Op.getOpcode() != ISD::BUILD_VECTOR)
14511 return false;
14512 SDValue FirstOp;
14513
14514 // Find first non-undef input.
14515 for (int i = 0, e = Op.getNumOperands(); i < e; i++) {
14516 FirstOp = Op.getOperand(i);
14517 if (!FirstOp.isUndef())
14518 break;
14519 }
14520
14521 // All inputs are undef or the same as the first non-undef input.
14522 for (int i = 1, e = Op.getNumOperands(); i < e; i++)
14523 if (Op.getOperand(i) != FirstOp && !Op.getOperand(i).isUndef())
14524 return false;
14525 return true;
14526}
14527
14528static SDValue isScalarToVec(SDValue Op) {
14529 if (Op.getOpcode() == ISD::SCALAR_TO_VECTOR)
14530 return Op;
14531 if (Op.getOpcode() != ISD::BITCAST)
14532 return SDValue();
14533 Op = Op.getOperand(0);
14534 if (Op.getOpcode() == ISD::SCALAR_TO_VECTOR)
14535 return Op;
14536 return SDValue();
14537}
14538
14539// Fix up the shuffle mask to account for the fact that the result of
14540// scalar_to_vector is not in lane zero. This just takes all values in
14541// the ranges specified by the min/max indices and adds the number of
14542// elements required to ensure each element comes from the respective
14543// position in the valid lane.
14544// On little endian, that's just the corresponding element in the other
14545// half of the vector. On big endian, it is in the same half but right
14546// justified rather than left justified in that half.
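// For example (an illustrative sketch): for a v4i32 shuffle on LE where only
// the LHS comes from a scalar_to_vector (LHSMaxIdx = 1, HalfVec = 2), a mask
// entry of 0 becomes 2, since the scalar actually sits in the other half of
// the permuted vector.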
14547static void fixupShuffleMaskForPermutedSToV(SmallVectorImpl<int> &ShuffV,
14548 int LHSMaxIdx, int RHSMinIdx,
14549 int RHSMaxIdx, int HalfVec,
14550 unsigned ValidLaneWidth,
14551 const PPCSubtarget &Subtarget) {
14552 for (int i = 0, e = ShuffV.size(); i < e; i++) {
14553 int Idx = ShuffV[i];
14554 if ((Idx >= 0 && Idx < LHSMaxIdx) || (Idx >= RHSMinIdx && Idx < RHSMaxIdx))
14555 ShuffV[i] +=
14556 Subtarget.isLittleEndian() ? HalfVec : HalfVec - ValidLaneWidth;
14557 }
14558}
14559
14560// Replace a SCALAR_TO_VECTOR with a SCALAR_TO_VECTOR_PERMUTED except if
14561// the original is:
14562// (<n x Ty> (scalar_to_vector (Ty (extract_elt <n x Ty> %a, C))))
14563// In such a case, just change the shuffle mask to extract the element
14564// from the permuted index.
14565static SDValue getSToVPermuted(SDValue OrigSToV, SelectionDAG &DAG,
14566 const PPCSubtarget &Subtarget) {
14567 SDLoc dl(OrigSToV);
14568 EVT VT = OrigSToV.getValueType();
14569 assert(OrigSToV.getOpcode() == ISD::SCALAR_TO_VECTOR &&
14570        "Expecting a SCALAR_TO_VECTOR here");
14571 SDValue Input = OrigSToV.getOperand(0);
14572
14573 if (Input.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
14574 ConstantSDNode *Idx = dyn_cast<ConstantSDNode>(Input.getOperand(1));
14575 SDValue OrigVector = Input.getOperand(0);
14576
14577 // Can't handle non-const element indices or different vector types
14578 // for the input to the extract and the output of the scalar_to_vector.
14579 if (Idx && VT == OrigVector.getValueType()) {
14580 unsigned NumElts = VT.getVectorNumElements();
14581 assert(
14582     NumElts > 1 &&
14583     "Cannot produce a permuted scalar_to_vector for one element vector");
14584 SmallVector<int, 16> NewMask(NumElts, -1);
14585 unsigned ResultInElt = NumElts / 2;
14586 ResultInElt -= Subtarget.isLittleEndian() ? 0 : 1;
14587 NewMask[ResultInElt] = Idx->getZExtValue();
14588 return DAG.getVectorShuffle(VT, dl, OrigVector, OrigVector, NewMask);
14589 }
14590 }
14591 return DAG.getNode(PPCISD::SCALAR_TO_VECTOR_PERMUTED, dl, VT,
14592 OrigSToV.getOperand(0));
14593}
14594
14595// On little endian subtargets, combine shuffles such as:
14596// vector_shuffle<16,1,17,3,18,5,19,7,20,9,21,11,22,13,23,15>, <zero>, %b
14597// into:
14598// vector_shuffle<16,0,17,1,18,2,19,3,20,4,21,5,22,6,23,7>, <zero>, %b
14599// because the latter can be matched to a single instruction merge.
14600// Furthermore, SCALAR_TO_VECTOR on little endian always involves a permute
14601// to put the value into element zero. Adjust the shuffle mask so that the
14602// vector can remain in permuted form (to prevent a swap prior to a shuffle).
14603// On big endian targets, this is still useful for SCALAR_TO_VECTOR
14604// nodes with elements smaller than doubleword because all the ways
14605// of getting scalar data into a vector register put the value in the
14606// rightmost element of the left half of the vector.
14607SDValue PPCTargetLowering::combineVectorShuffle(ShuffleVectorSDNode *SVN,
14608 SelectionDAG &DAG) const {
14609 SDValue LHS = SVN->getOperand(0);
14610 SDValue RHS = SVN->getOperand(1);
14611 auto Mask = SVN->getMask();
14612 int NumElts = LHS.getValueType().getVectorNumElements();
14613 SDValue Res(SVN, 0);
14614 SDLoc dl(SVN);
14615 bool IsLittleEndian = Subtarget.isLittleEndian();
14616
14617 // On big endian targets this is only useful for subtargets with direct moves.
14618 // On little endian targets it would be useful for all subtargets with VSX.
14619 // However adding special handling for LE subtargets without direct moves
14620 // would be wasted effort since the minimum arch for LE is ISA 2.07 (Power8)
14621 // which includes direct moves.
14622 if (!Subtarget.hasDirectMove())
14623 return Res;
14624
14625 // If this is not a shuffle of a shuffle and the first element comes from
14626 // the second vector, canonicalize to the commuted form. This will make it
14627 // more likely to match one of the single instruction patterns.
14628 if (Mask[0] >= NumElts && LHS.getOpcode() != ISD::VECTOR_SHUFFLE &&
14629 RHS.getOpcode() != ISD::VECTOR_SHUFFLE) {
14630 std::swap(LHS, RHS);
14631 Res = DAG.getCommutedVectorShuffle(*SVN);
14632 Mask = cast<ShuffleVectorSDNode>(Res)->getMask();
14633 }
14634
14635 // Adjust the shuffle mask if either input vector comes from a
14636 // SCALAR_TO_VECTOR and keep the respective input vector in permuted
14637 // form (to prevent the need for a swap).
14638 SmallVector<int, 16> ShuffV(Mask.begin(), Mask.end());
14639 SDValue SToVLHS = isScalarToVec(LHS);
14640 SDValue SToVRHS = isScalarToVec(RHS);
14641 if (SToVLHS || SToVRHS) {
14642 int NumEltsIn = SToVLHS ? SToVLHS.getValueType().getVectorNumElements()
14643 : SToVRHS.getValueType().getVectorNumElements();
14644 int NumEltsOut = ShuffV.size();
14645 // The width of the "valid lane" (i.e. the lane that contains the value
14646 // that is vectorized) needs to be expressed in terms of the number of
14647 // elements of the shuffle. It is the ratio of the scalar sizes before and
14648 // after any bitcast.
14649 unsigned ValidLaneWidth =
14650 SToVLHS ? SToVLHS.getValueType().getScalarSizeInBits() /
14651 LHS.getValueType().getScalarSizeInBits()
14652 : SToVRHS.getValueType().getScalarSizeInBits() /
14653 RHS.getValueType().getScalarSizeInBits();
14654
14655 // Initially assume that neither input is permuted. These will be adjusted
14656 // accordingly if either input is.
14657 int LHSMaxIdx = -1;
14658 int RHSMinIdx = -1;
14659 int RHSMaxIdx = -1;
14660 int HalfVec = LHS.getValueType().getVectorNumElements() / 2;
14661
14662 // Get the permuted scalar to vector nodes for the source(s) that come from
14663 // ISD::SCALAR_TO_VECTOR.
14664 // On big endian systems, this only makes sense for element sizes smaller
14665 // than 64 bits since for 64-bit elements, all instructions already put
14666 // the value into element zero. Since scalar size of LHS and RHS may differ
14667 // after isScalarToVec, this should be checked using their own sizes.
14668 if (SToVLHS) {
14669 if (!IsLittleEndian && SToVLHS.getValueType().getScalarSizeInBits() >= 64)
14670 return Res;
14671 // Set up the values for the shuffle vector fixup.
14672 LHSMaxIdx = NumEltsOut / NumEltsIn;
14673 SToVLHS = getSToVPermuted(SToVLHS, DAG, Subtarget);
14674 if (SToVLHS.getValueType() != LHS.getValueType())
14675 SToVLHS = DAG.getBitcast(LHS.getValueType(), SToVLHS);
14676 LHS = SToVLHS;
14677 }
14678 if (SToVRHS) {
14679 if (!IsLittleEndian && SToVRHS.getValueType().getScalarSizeInBits() >= 64)
14680 return Res;
14681 RHSMinIdx = NumEltsOut;
14682 RHSMaxIdx = NumEltsOut / NumEltsIn + RHSMinIdx;
14683 SToVRHS = getSToVPermuted(SToVRHS, DAG, Subtarget);
14684 if (SToVRHS.getValueType() != RHS.getValueType())
14685 SToVRHS = DAG.getBitcast(RHS.getValueType(), SToVRHS);
14686 RHS = SToVRHS;
14687 }
14688
14689 // Fix up the shuffle mask to reflect where the desired element actually is.
14690 // The minimum and maximum indices that correspond to element zero for both
14691 // the LHS and RHS are computed and will control which shuffle mask entries
14692 // are to be changed. For example, if the RHS is permuted, any shuffle mask
14693 // entries in the range [RHSMinIdx,RHSMaxIdx) will be adjusted.
14694 fixupShuffleMaskForPermutedSToV(ShuffV, LHSMaxIdx, RHSMinIdx, RHSMaxIdx,
14695 HalfVec, ValidLaneWidth, Subtarget);
14696 Res = DAG.getVectorShuffle(SVN->getValueType(0), dl, LHS, RHS, ShuffV);
14697
14698 // We may have simplified away the shuffle. We won't be able to do anything
14699 // further with it here.
14700 if (!isa<ShuffleVectorSDNode>(Res))
14701 return Res;
14702 Mask = cast<ShuffleVectorSDNode>(Res)->getMask();
14703 }
14704
14705 SDValue TheSplat = IsLittleEndian ? RHS : LHS;
14706 // The common case after we commuted the shuffle is that the RHS is a splat
14707 // and we have elements coming in from the splat at indices that are not
14708 // conducive to using a merge.
14709 // Example:
14710 // vector_shuffle<0,17,1,19,2,21,3,23,4,25,5,27,6,29,7,31> t1, <zero>
14711 if (!isSplatBV(TheSplat))
14712 return Res;
14713
14714 // We are looking for a mask such that all even elements are from
14715 // one vector and all odd elements from the other.
14716 if (!isAlternatingShuffMask(Mask, NumElts))
14717 return Res;
14718
14719 // Adjust the mask so we are pulling in the same index from the splat
14720 // as the index from the interesting vector in consecutive elements.
14721 if (IsLittleEndian) {
14722 // Example (even elements from first vector):
14723 // vector_shuffle<0,16,1,17,2,18,3,19,4,20,5,21,6,22,7,23> t1, <zero>
14724 if (Mask[0] < NumElts)
14725 for (int i = 1, e = Mask.size(); i < e; i += 2)
14726 ShuffV[i] = (ShuffV[i - 1] + NumElts);
14727 // Example (odd elements from first vector):
14728 // vector_shuffle<16,0,17,1,18,2,19,3,20,4,21,5,22,6,23,7> t1, <zero>
14729 else
14730 for (int i = 0, e = Mask.size(); i < e; i += 2)
14731 ShuffV[i] = (ShuffV[i + 1] + NumElts);
14732 } else {
14733 // Example (even elements from first vector):
14734 // vector_shuffle<0,16,1,17,2,18,3,19,4,20,5,21,6,22,7,23> <zero>, t1
14735 if (Mask[0] < NumElts)
14736 for (int i = 0, e = Mask.size(); i < e; i += 2)
14737 ShuffV[i] = ShuffV[i + 1] - NumElts;
14738 // Example (odd elements from first vector):
14739 // vector_shuffle<16,0,17,1,18,2,19,3,20,4,21,5,22,6,23,7> <zero>, t1
14740 else
14741 for (int i = 1, e = Mask.size(); i < e; i += 2)
14742 ShuffV[i] = ShuffV[i - 1] - NumElts;
14743 }
14744
14745 // If the RHS has undefs, we need to remove them since we may have created
14746 // a shuffle that adds those instead of the splat value.
14747 SDValue SplatVal =
14748 cast<BuildVectorSDNode>(TheSplat.getNode())->getSplatValue();
14749 TheSplat = DAG.getSplatBuildVector(TheSplat.getValueType(), dl, SplatVal);
14750
14751 if (IsLittleEndian)
14752 RHS = TheSplat;
14753 else
14754 LHS = TheSplat;
14755 return DAG.getVectorShuffle(SVN->getValueType(0), dl, LHS, RHS, ShuffV);
14756}
14757
14758SDValue PPCTargetLowering::combineVReverseMemOP(ShuffleVectorSDNode *SVN,
14759 LSBaseSDNode *LSBase,
14760 DAGCombinerInfo &DCI) const {
14761 assert((ISD::isNormalLoad(LSBase) || ISD::isNormalStore(LSBase)) &&
14762        "Not a reverse memop pattern!");
14763
14764 auto IsElementReverse = [](const ShuffleVectorSDNode *SVN) -> bool {
14765 auto Mask = SVN->getMask();
14766 int i = 0;
14767 auto I = Mask.rbegin();
14768 auto E = Mask.rend();
14769
14770 for (; I != E; ++I) {
14771 if (*I != i)
14772 return false;
14773 i++;
14774 }
14775 return true;
14776 };
14777
14778 SelectionDAG &DAG = DCI.DAG;
14779 EVT VT = SVN->getValueType(0);
14780
14781 if (!isTypeLegal(VT) || !Subtarget.isLittleEndian() || !Subtarget.hasVSX())
14782 return SDValue();
14783
14784 // Before P9, the PPCVSXSwapRemoval pass hacks the element order instead
14785 // (see the comment in PPCVSXSwapRemoval.cpp). This combine conflicts with
14786 // that pass, so we don't do it for pre-P9 subtargets.
14787 if (!Subtarget.hasP9Vector())
14788 return SDValue();
14789
14790 if (!IsElementReverse(SVN))
14791 return SDValue();
14792
14793 if (LSBase->getOpcode() == ISD::LOAD) {
14794 // If result 0 of the load has any user other than the shufflevector
14795 // instruction, it is not profitable to replace the shufflevector with
14796 // a reverse load.
14797 for (SDNode::use_iterator UI = LSBase->use_begin(), UE = LSBase->use_end();
14798 UI != UE; ++UI)
14799 if (UI.getUse().getResNo() == 0 && UI->getOpcode() != ISD::VECTOR_SHUFFLE)
14800 return SDValue();
14801
14802 SDLoc dl(LSBase);
14803 SDValue LoadOps[] = {LSBase->getChain(), LSBase->getBasePtr()};
14804 return DAG.getMemIntrinsicNode(
14805 PPCISD::LOAD_VEC_BE, dl, DAG.getVTList(VT, MVT::Other), LoadOps,
14806 LSBase->getMemoryVT(), LSBase->getMemOperand());
14807 }
14808
14809 if (LSBase->getOpcode() == ISD::STORE) {
14810 // If there are other uses of the shuffle, the swap cannot be avoided.
14811 // Forcing the use of an X-Form (since swapped stores only have
14812 // X-Forms) without removing the swap is unprofitable.
14813 if (!SVN->hasOneUse())
14814 return SDValue();
14815
14816 SDLoc dl(LSBase);
14817 SDValue StoreOps[] = {LSBase->getChain(), SVN->getOperand(0),
14818 LSBase->getBasePtr()};
14819 return DAG.getMemIntrinsicNode(
14820 PPCISD::STORE_VEC_BE, dl, DAG.getVTList(MVT::Other), StoreOps,
14821 LSBase->getMemoryVT(), LSBase->getMemOperand());
14822 }
14823
14824 llvm_unreachable("Expected a load or store node here");
14825}
14826
14827SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
14828 DAGCombinerInfo &DCI) const {
14829 SelectionDAG &DAG = DCI.DAG;
14830 SDLoc dl(N);
14831 switch (N->getOpcode()) {
14832 default: break;
14833 case ISD::ADD:
14834 return combineADD(N, DCI);
14835 case ISD::SHL:
14836 return combineSHL(N, DCI);
14837 case ISD::SRA:
14838 return combineSRA(N, DCI);
14839 case ISD::SRL:
14840 return combineSRL(N, DCI);
14841 case ISD::MUL:
14842 return combineMUL(N, DCI);
14843 case ISD::FMA:
14844 case PPCISD::FNMSUB:
14845 return combineFMALike(N, DCI);
14846 case PPCISD::SHL:
14847 if (isNullConstant(N->getOperand(0))) // 0 << V -> 0.
14848 return N->getOperand(0);
14849 break;
14850 case PPCISD::SRL:
14851 if (isNullConstant(N->getOperand(0))) // 0 >>u V -> 0.
14852 return N->getOperand(0);
14853 break;
14854 case PPCISD::SRA:
14855 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
14856 if (C->isNullValue() || // 0 >>s V -> 0.
14857 C->isAllOnesValue()) // -1 >>s V -> -1.
14858 return N->getOperand(0);
14859 }
14860 break;
14861 case ISD::SIGN_EXTEND:
14862 case ISD::ZERO_EXTEND:
14863 case ISD::ANY_EXTEND:
14864 return DAGCombineExtBoolTrunc(N, DCI);
14865 case ISD::TRUNCATE:
14866 return combineTRUNCATE(N, DCI);
14867 case ISD::SETCC:
14868 if (SDValue CSCC = combineSetCC(N, DCI))
14869 return CSCC;
14870 LLVM_FALLTHROUGH;
14871 case ISD::SELECT_CC:
14872 return DAGCombineTruncBoolExt(N, DCI);
14873 case ISD::SINT_TO_FP:
14874 case ISD::UINT_TO_FP:
14875 return combineFPToIntToFP(N, DCI);
14876 case ISD::VECTOR_SHUFFLE:
14877 if (ISD::isNormalLoad(N->getOperand(0).getNode())) {
14878 LSBaseSDNode* LSBase = cast<LSBaseSDNode>(N->getOperand(0));
14879 return combineVReverseMemOP(cast<ShuffleVectorSDNode>(N), LSBase, DCI);
14880 }
14881 return combineVectorShuffle(cast<ShuffleVectorSDNode>(N), DCI.DAG);
14882 case ISD::STORE: {
14883
14884 EVT Op1VT = N->getOperand(1).getValueType();
14885 unsigned Opcode = N->getOperand(1).getOpcode();
14886
14887 if (Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT) {
14888 SDValue Val = combineStoreFPToInt(N, DCI);
14889 if (Val)
14890 return Val;
14891 }
14892
14893 if (Opcode == ISD::VECTOR_SHUFFLE && ISD::isNormalStore(N)) {
14894 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N->getOperand(1));
14895 SDValue Val = combineVReverseMemOP(SVN, cast<LSBaseSDNode>(N), DCI);
14896 if (Val)
14897 return Val;
14898 }
14899
14900 // Turn STORE (BSWAP) -> sthbrx/stwbrx.
14901 if (cast<StoreSDNode>(N)->isUnindexed() && Opcode == ISD::BSWAP &&
14902 N->getOperand(1).getNode()->hasOneUse() &&
14903 (Op1VT == MVT::i32 || Op1VT == MVT::i16 ||
14904 (Subtarget.hasLDBRX() && Subtarget.isPPC64() && Op1VT == MVT::i64))) {
14905
14906 // STBRX can only handle simple types and it makes no sense to store
14907 // fewer than two bytes in byte-reversed order.
14908 EVT mVT = cast<StoreSDNode>(N)->getMemoryVT();
14909 if (mVT.isExtended() || mVT.getSizeInBits() < 16)
14910 break;
14911
14912 SDValue BSwapOp = N->getOperand(1).getOperand(0);
14913 // Do an any-extend to 32-bits if this is a half-word input.
14914 if (BSwapOp.getValueType() == MVT::i16)
14915 BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);
14916
14917 // If the type of the BSWAP operand is wider than the stored memory
14918 // width, it needs to be shifted right before STBRX.
14919 if (Op1VT.bitsGT(mVT)) {
14920 int Shift = Op1VT.getSizeInBits() - mVT.getSizeInBits();
14921 BSwapOp = DAG.getNode(ISD::SRL, dl, Op1VT, BSwapOp,
14922 DAG.getConstant(Shift, dl, MVT::i32));
14923 // Need to truncate if this is a bswap of i64 stored as i32/i16.
14924 if (Op1VT == MVT::i64)
14925 BSwapOp = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BSwapOp);
14926 }
14927
14928 SDValue Ops[] = {
14929 N->getOperand(0), BSwapOp, N->getOperand(2), DAG.getValueType(mVT)
14930 };
14931 return
14932 DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
14933 Ops, cast<StoreSDNode>(N)->getMemoryVT(),
14934 cast<StoreSDNode>(N)->getMemOperand());
14935 }
14936
14937 // STORE Constant:i32<0> -> STORE<trunc to i32> Constant:i64<0>
14938 // This increases the chance of CSE for constant construction.
14939 if (Subtarget.isPPC64() && !DCI.isBeforeLegalize() &&
14940 isa<ConstantSDNode>(N->getOperand(1)) && Op1VT == MVT::i32) {
14941 // Need to sign-extend to 64 bits to handle negative values.
14942 EVT MemVT = cast<StoreSDNode>(N)->getMemoryVT();
14943 uint64_t Val64 = SignExtend64(N->getConstantOperandVal(1),
14944 MemVT.getSizeInBits());
14945 SDValue Const64 = DAG.getConstant(Val64, dl, MVT::i64);
14946
14947 // DAG.getTruncStore() can't be used here because it doesn't accept
14948 // the general (base + offset) addressing mode.
14949 // So we use UpdateNodeOperands and setTruncatingStore instead.
14950 DAG.UpdateNodeOperands(N, N->getOperand(0), Const64, N->getOperand(2),
14951 N->getOperand(3));
14952 cast<StoreSDNode>(N)->setTruncatingStore(true);
14953 return SDValue(N, 0);
14954 }
14955
14956 // For little endian, VSX stores require generating xxswapd/lxvd2x.
14957 // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
14958 if (Op1VT.isSimple()) {
14959 MVT StoreVT = Op1VT.getSimpleVT();
14960 if (Subtarget.needsSwapsForVSXMemOps() &&
14961 (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
14962 StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
14963 return expandVSXStoreForLE(N, DCI);
14964 }
14965 break;
14966 }
14967 case ISD::LOAD: {
14968 LoadSDNode *LD = cast<LoadSDNode>(N);
14969 EVT VT = LD->getValueType(0);
14970
14971 // For little endian, VSX loads require generating lxvd2x/xxswapd.
14972 // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
14973 if (VT.isSimple()) {
14974 MVT LoadVT = VT.getSimpleVT();
14975 if (Subtarget.needsSwapsForVSXMemOps() &&
14976 (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 ||
14977 LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32))
14978 return expandVSXLoadForLE(N, DCI);
14979 }
14980
14981 // We sometimes end up with a 64-bit integer load, from which we extract
14982 // two single-precision floating-point numbers. This happens with
14983 // std::complex<float>, and other similar structures, because of the way we
14984 // canonicalize structure copies. However, if we lack direct moves,
14985 // then the final bitcasts from the extracted integer values to the
14986 // floating-point numbers turn into store/load pairs. Even with direct moves,
14987 // just loading the two floating-point numbers is likely better.
14988 auto ReplaceTwoFloatLoad = [&]() {
14989 if (VT != MVT::i64)
14990 return false;
14991
14992 if (LD->getExtensionType() != ISD::NON_EXTLOAD ||
14993 LD->isVolatile())
14994 return false;
14995
14996 // We're looking for a sequence like this:
14997 // t13: i64,ch = load<LD8[%ref.tmp]> t0, t6, undef:i64
14998 // t16: i64 = srl t13, Constant:i32<32>
14999 // t17: i32 = truncate t16
15000 // t18: f32 = bitcast t17
15001 // t19: i32 = truncate t13
15002 // t20: f32 = bitcast t19
15003
15004 if (!LD->hasNUsesOfValue(2, 0))
15005 return false;
15006
15007 auto UI = LD->use_begin();
15008 while (UI.getUse().getResNo() != 0) ++UI;
15009 SDNode *Trunc = *UI++;
15010 while (UI.getUse().getResNo() != 0) ++UI;
15011 SDNode *RightShift = *UI;
15012 if (Trunc->getOpcode() != ISD::TRUNCATE)
15013 std::swap(Trunc, RightShift);
15014
15015 if (Trunc->getOpcode() != ISD::TRUNCATE ||
15016 Trunc->getValueType(0) != MVT::i32 ||
15017 !Trunc->hasOneUse())
15018 return false;
15019 if (RightShift->getOpcode() != ISD::SRL ||
15020 !isa<ConstantSDNode>(RightShift->getOperand(1)) ||
15021 RightShift->getConstantOperandVal(1) != 32 ||
15022 !RightShift->hasOneUse())
15023 return false;
15024
15025 SDNode *Trunc2 = *RightShift->use_begin();
15026 if (Trunc2->getOpcode() != ISD::TRUNCATE ||
15027 Trunc2->getValueType(0) != MVT::i32 ||
15028 !Trunc2->hasOneUse())
15029 return false;
15030
15031 SDNode *Bitcast = *Trunc->use_begin();
15032 SDNode *Bitcast2 = *Trunc2->use_begin();
15033
15034 if (Bitcast->getOpcode() != ISD::BITCAST ||
15035 Bitcast->getValueType(0) != MVT::f32)
15036 return false;
15037 if (Bitcast2->getOpcode() != ISD::BITCAST ||
15038 Bitcast2->getValueType(0) != MVT::f32)
15039 return false;
15040
15041 if (Subtarget.isLittleEndian())
15042 std::swap(Bitcast, Bitcast2);
15043
15044 // Bitcast has the second float (in memory-layout order) and Bitcast2
15045 // has the first one.
15046
15047 SDValue BasePtr = LD->getBasePtr();
15048 if (LD->isIndexed()) {
15049 assert(LD->getAddressingMode() == ISD::PRE_INC &&
15050        "Non-pre-inc AM on PPC?");
15051 BasePtr =
15052 DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
15053 LD->getOffset());
15054 }
15055
15056 auto MMOFlags =
15057 LD->getMemOperand()->getFlags() & ~MachineMemOperand::MOVolatile;
15058 SDValue FloatLoad = DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr,
15059 LD->getPointerInfo(), LD->getAlignment(),
15060 MMOFlags, LD->getAAInfo());
15061 SDValue AddPtr =
15062 DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(),
15063 BasePtr, DAG.getIntPtrConstant(4, dl));
15064 SDValue FloatLoad2 = DAG.getLoad(
15065 MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr,
15066 LD->getPointerInfo().getWithOffset(4),
15067 MinAlign(LD->getAlignment(), 4), MMOFlags, LD->getAAInfo());
15068
15069 if (LD->isIndexed()) {
15070 // Note that DAGCombine should re-form any pre-increment load(s) from
15071 // what is produced here if that makes sense.
15072 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), BasePtr);
15073 }
15074
15075 DCI.CombineTo(Bitcast2, FloatLoad);
15076 DCI.CombineTo(Bitcast, FloatLoad2);
15077
15078 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, LD->isIndexed() ? 2 : 1),
15079 SDValue(FloatLoad2.getNode(), 1));
15080 return true;
15081 };
15082
15083 if (ReplaceTwoFloatLoad())
15084 return SDValue(N, 0);
15085
15086 EVT MemVT = LD->getMemoryVT();
15087 Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
15088 Align ABIAlignment = DAG.getDataLayout().getABITypeAlign(Ty);
15089 if (LD->isUnindexed() && VT.isVector() &&
15090 ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) &&
15091 // P8 and later hardware should just use LOAD.
15092 !Subtarget.hasP8Vector() &&
15093 (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
15094 VT == MVT::v4f32))) &&
15095 LD->getAlign() < ABIAlignment) {
15096 // This is a type-legal unaligned Altivec load.
15097 SDValue Chain = LD->getChain();
15098 SDValue Ptr = LD->getBasePtr();
15099 bool isLittleEndian = Subtarget.isLittleEndian();
15100
15101 // This implements the loading of unaligned vectors as described in
15102 // the venerable Apple Velocity Engine overview. Specifically:
15103 // https://developer.apple.com/hardwaredrivers/ve/alignment.html
15104 // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html
15105 //
15106 // The general idea is to expand a sequence of one or more unaligned
15107 // loads into an alignment-based permutation-control instruction (lvsl
15108 // or lvsr), a series of regular vector loads (which always truncate
15109 // their input address to an aligned address), and a series of
15110 // permutations. The results of these permutations are the requested
15111 // loaded values. The trick is that the last "extra" load is not taken
15112 // from the address you might suspect (sizeof(vector) bytes after the
15113 // last requested load), but rather sizeof(vector) - 1 bytes after the
15114 // last requested vector. The point of this is to avoid a page fault if
15115 // the base address happened to be aligned. This works because if the
15116 // base address is aligned, then adding less than a full vector length
15117 // will cause the last vector in the sequence to be (re)loaded.
15118 // Otherwise, the next vector will be fetched as you might suspect was
15119 // necessary.
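// Roughly (an illustrative sketch, eliding the chain and bitcasts; "addr"
// is the unaligned base address):
//   permute = lvsl(addr)      // lvsr with swapped vperm inputs on LE
//   lo      = lvx(addr)
//   hi      = lvx(addr + 15)  // sizeof(vector) - 1, as described above
//   result  = vperm(lo, hi, permute)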
15120
15121 // We might be able to reuse the permutation generation from
15122 // a different base address offset from this one by an aligned amount.
15123 // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this
15124 // optimization later.
15125 Intrinsic::ID Intr, IntrLD, IntrPerm;
15126 MVT PermCntlTy, PermTy, LDTy;
15127 Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr
15128 : Intrinsic::ppc_altivec_lvsl;
15129 IntrLD = Intrinsic::ppc_altivec_lvx;
15130 IntrPerm = Intrinsic::ppc_altivec_vperm;
15131 PermCntlTy = MVT::v16i8;
15132 PermTy = MVT::v4i32;
15133 LDTy = MVT::v4i32;
15134
15135 SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy);
15136
15137 // Create the new MMO for the new base load. It is like the original MMO,
15138 // but represents an area in memory almost twice the vector size centered
15139 // on the original address. If the address is unaligned, we might start
15140 // reading up to (sizeof(vector)-1) bytes below the address of the
15141 // original unaligned load.
15142 MachineFunction &MF = DAG.getMachineFunction();
15143 MachineMemOperand *BaseMMO =
15144 MF.getMachineMemOperand(LD->getMemOperand(),
15145 -(long)MemVT.getStoreSize()+1,
15146 2*MemVT.getStoreSize()-1);
15147
15148 // Create the new base load.
15149 SDValue LDXIntID =
15150 DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout()));
15151 SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr };
15152 SDValue BaseLoad =
15153 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
15154 DAG.getVTList(PermTy, MVT::Other),
15155 BaseLoadOps, LDTy, BaseMMO);
15156
15157 // Note that the value of IncOffset (which is provided to the next
15158 // load's pointer info offset value, and thus used to calculate the
15159 // alignment), and the value of IncValue (which is actually used to
15160 // increment the pointer value) are different! This is because we
15161 // require the next load to appear to be aligned, even though it
15162 // is actually offset from the base pointer by a lesser amount.
15163 int IncOffset = VT.getSizeInBits() / 8;
15164 int IncValue = IncOffset;
15165
15166 // Walk (both up and down) the chain looking for another load at the real
15167 // (aligned) offset (the alignment of the other load does not matter in
15168 // this case). If found, then do not use the offset reduction trick, as
15169 // that will prevent the loads from being later combined (as they would
15170 // otherwise be duplicates).
15171 if (!findConsecutiveLoad(LD, DAG))
15172 --IncValue;
15173
15174 SDValue Increment =
15175 DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout()));
15176 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
15177
15178 MachineMemOperand *ExtraMMO =
15179 MF.getMachineMemOperand(LD->getMemOperand(),
15180 1, 2*MemVT.getStoreSize()-1);
15181 SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr };
15182 SDValue ExtraLoad =
15183 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
15184 DAG.getVTList(PermTy, MVT::Other),
15185 ExtraLoadOps, LDTy, ExtraMMO);
15186
15187 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
15188 BaseLoad.getValue(1), ExtraLoad.getValue(1));
15189
15190 // Because vperm has a big-endian bias, we must reverse the order
15191 // of the input vectors and complement the permute control vector
15192 // when generating little endian code. We have already handled the
15193 // latter by using lvsr instead of lvsl, so just reverse BaseLoad
15194 // and ExtraLoad here.
15195 SDValue Perm;
15196 if (isLittleEndian)
15197 Perm = BuildIntrinsicOp(IntrPerm,
15198 ExtraLoad, BaseLoad, PermCntl, DAG, dl);
15199 else
15200 Perm = BuildIntrinsicOp(IntrPerm,
15201 BaseLoad, ExtraLoad, PermCntl, DAG, dl);
15202
15203 if (VT != PermTy)
15204 Perm = Subtarget.hasAltivec()
15205 ? DAG.getNode(ISD::BITCAST, dl, VT, Perm)
15206 : DAG.getNode(ISD::FP_ROUND, dl, VT, Perm,
15207 DAG.getTargetConstant(1, dl, MVT::i64));
15208                                // The second argument is 1 because this
15209                                // rounding is always exact.
15210
15211 // The output of the permutation is our loaded result, the TokenFactor is
15212 // our new chain.
15213 DCI.CombineTo(N, Perm, TF);
15214 return SDValue(N, 0);
15215 }
15216 }
15217 break;
15218 case ISD::INTRINSIC_WO_CHAIN: {
15219 bool isLittleEndian = Subtarget.isLittleEndian();
15220 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
15221 Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr
15222 : Intrinsic::ppc_altivec_lvsl);
15223 if (IID == Intr && N->getOperand(1)->getOpcode() == ISD::ADD) {
15224 SDValue Add = N->getOperand(1);
15225
15226 int Bits = 4 /* 16 byte alignment */;
15227
15228 if (DAG.MaskedValueIsZero(Add->getOperand(1),
15229 APInt::getAllOnesValue(Bits /* alignment */)
15230 .zext(Add.getScalarValueSizeInBits()))) {
15231 SDNode *BasePtr = Add->getOperand(0).getNode();
15232 for (SDNode::use_iterator UI = BasePtr->use_begin(),
15233 UE = BasePtr->use_end();
15234 UI != UE; ++UI) {
15235 if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
15236 cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() ==
15237 IID) {
15238           // We've found another LVSL/LVSR, and this address differs from
15239           // that one by a multiple of the alignment. The results will be
15240           // the same, so use the one we've just found instead.
15241
15242 return SDValue(*UI, 0);
15243 }
15244 }
15245 }
15246
15247 if (isa<ConstantSDNode>(Add->getOperand(1))) {
15248 SDNode *BasePtr = Add->getOperand(0).getNode();
15249 for (SDNode::use_iterator UI = BasePtr->use_begin(),
15250 UE = BasePtr->use_end(); UI != UE; ++UI) {
15251 if (UI->getOpcode() == ISD::ADD &&
15252 isa<ConstantSDNode>(UI->getOperand(1)) &&
15253 (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() -
15254 cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) %
15255 (1ULL << Bits) == 0) {
15256 SDNode *OtherAdd = *UI;
15257 for (SDNode::use_iterator VI = OtherAdd->use_begin(),
15258 VE = OtherAdd->use_end(); VI != VE; ++VI) {
15259 if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
15260 cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() == IID) {
15261 return SDValue(*VI, 0);
15262 }
15263 }
15264 }
15265 }
15266 }
15267 }
15268
15269     // Combine vmaxsw/h/b(a, negation of a) into abs(a).
15270     // This exposes the vabsduw/h/b opportunity downstream.
15271 if (!DCI.isAfterLegalizeDAG() && Subtarget.hasP9Altivec() &&
15272 (IID == Intrinsic::ppc_altivec_vmaxsw ||
15273 IID == Intrinsic::ppc_altivec_vmaxsh ||
15274 IID == Intrinsic::ppc_altivec_vmaxsb)) {
15275 SDValue V1 = N->getOperand(1);
15276 SDValue V2 = N->getOperand(2);
15277 if ((V1.getSimpleValueType() == MVT::v4i32 ||
15278 V1.getSimpleValueType() == MVT::v8i16 ||
15279 V1.getSimpleValueType() == MVT::v16i8) &&
15280 V1.getSimpleValueType() == V2.getSimpleValueType()) {
15281 // (0-a, a)
15282 if (V1.getOpcode() == ISD::SUB &&
15283 ISD::isBuildVectorAllZeros(V1.getOperand(0).getNode()) &&
15284 V1.getOperand(1) == V2) {
15285 return DAG.getNode(ISD::ABS, dl, V2.getValueType(), V2);
15286 }
15287 // (a, 0-a)
15288 if (V2.getOpcode() == ISD::SUB &&
15289 ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()) &&
15290 V2.getOperand(1) == V1) {
15291 return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
15292 }
15293 // (x-y, y-x)
15294 if (V1.getOpcode() == ISD::SUB && V2.getOpcode() == ISD::SUB &&
15295 V1.getOperand(0) == V2.getOperand(1) &&
15296 V1.getOperand(1) == V2.getOperand(0)) {
15297 return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
15298 }
15299 }
15300 }
15301 }
15302
15303 break;
15304 case ISD::INTRINSIC_W_CHAIN:
15305 // For little endian, VSX loads require generating lxvd2x/xxswapd.
15306 // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
15307 if (Subtarget.needsSwapsForVSXMemOps()) {
15308 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
15309 default:
15310 break;
15311 case Intrinsic::ppc_vsx_lxvw4x:
15312 case Intrinsic::ppc_vsx_lxvd2x:
15313 return expandVSXLoadForLE(N, DCI);
15314 }
15315 }
15316 break;
15317 case ISD::INTRINSIC_VOID:
15318 // For little endian, VSX stores require generating xxswapd/stxvd2x.
15319 // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
15320 if (Subtarget.needsSwapsForVSXMemOps()) {
15321 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
15322 default:
15323 break;
15324 case Intrinsic::ppc_vsx_stxvw4x:
15325 case Intrinsic::ppc_vsx_stxvd2x:
15326 return expandVSXStoreForLE(N, DCI);
15327 }
15328 }
15329 break;
15330 case ISD::BSWAP: {
15331 // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
15332 // For subtargets without LDBRX, we can still do better than the default
15333 // expansion even for 64-bit BSWAP (LOAD).
15334 bool Is64BitBswapOn64BitTgt =
15335 Subtarget.isPPC64() && N->getValueType(0) == MVT::i64;
15336 bool IsSingleUseNormalLd = ISD::isNormalLoad(N->getOperand(0).getNode()) &&
15337 N->getOperand(0).hasOneUse();
15338 if (IsSingleUseNormalLd &&
15339 (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 ||
15340 (Subtarget.hasLDBRX() && Is64BitBswapOn64BitTgt))) {
15341 SDValue Load = N->getOperand(0);
15342 LoadSDNode *LD = cast<LoadSDNode>(Load);
15343 // Create the byte-swapping load.
15344 SDValue Ops[] = {
15345 LD->getChain(), // Chain
15346 LD->getBasePtr(), // Ptr
15347 DAG.getValueType(N->getValueType(0)) // VT
15348 };
15349 SDValue BSLoad =
15350 DAG.getMemIntrinsicNode(PPCISD::LBRX, dl,
15351 DAG.getVTList(N->getValueType(0) == MVT::i64 ?
15352 MVT::i64 : MVT::i32, MVT::Other),
15353 Ops, LD->getMemoryVT(), LD->getMemOperand());
15354
15355 // If this is an i16 load, insert the truncate.
15356 SDValue ResVal = BSLoad;
15357 if (N->getValueType(0) == MVT::i16)
15358 ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);
15359
15360 // First, combine the bswap away. This makes the value produced by the
15361 // load dead.
15362 DCI.CombineTo(N, ResVal);
15363
15364     // Next, combine the load away; we give it a bogus result value but a
15365     // real chain result. The result value is dead because the bswap is dead.
15366 DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));
15367
15368 // Return N so it doesn't get rechecked!
15369 return SDValue(N, 0);
15370 }
15371 // Convert this to two 32-bit bswap loads and a BUILD_PAIR. Do this only
15372 // before legalization so that the BUILD_PAIR is handled correctly.
15373 if (!DCI.isBeforeLegalize() || !Is64BitBswapOn64BitTgt ||
15374 !IsSingleUseNormalLd)
15375 return SDValue();
15376 LoadSDNode *LD = cast<LoadSDNode>(N->getOperand(0));
15377
15378 // Can't split volatile or atomic loads.
15379 if (!LD->isSimple())
15380 return SDValue();
15381 SDValue BasePtr = LD->getBasePtr();
15382 SDValue Lo = DAG.getLoad(MVT::i32, dl, LD->getChain(), BasePtr,
15383 LD->getPointerInfo(), LD->getAlignment());
15384 Lo = DAG.getNode(ISD::BSWAP, dl, MVT::i32, Lo);
15385 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
15386 DAG.getIntPtrConstant(4, dl));
15387 MachineMemOperand *NewMMO = DAG.getMachineFunction().getMachineMemOperand(
15388 LD->getMemOperand(), 4, 4);
15389 SDValue Hi = DAG.getLoad(MVT::i32, dl, LD->getChain(), BasePtr, NewMMO);
15390 Hi = DAG.getNode(ISD::BSWAP, dl, MVT::i32, Hi);
15391 SDValue Res;
15392 if (Subtarget.isLittleEndian())
15393 Res = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Hi, Lo);
15394 else
15395 Res = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
15396 SDValue TF =
15397 DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
15398 Hi.getOperand(0).getValue(1), Lo.getOperand(0).getValue(1));
15399 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), TF);
15400 return Res;
15401 }
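// [Editor's sketch, not part of the source] In C terms, the pre-legalization
// 64-bit split above behaves roughly as follows on a little-endian target
// (the BUILD_PAIR operand order above handles the endian difference):
//   uint64_t bswap_load64_le(const uint32_t *P) {
//     uint32_t Lo = __builtin_bswap32(P[0]);  // load at BasePtr
//     uint32_t Hi = __builtin_bswap32(P[1]);  // load at BasePtr + 4
//     return ((uint64_t)Lo << 32) | Hi;       // Lo supplies the high half
//   }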
15402 case PPCISD::VCMP:
15403 // If a VCMP_rec node already exists with exactly the same operands as this
15404 // node, use its result instead of this node (VCMP_rec computes both a CR6
15405 // and a normal output).
15406 //
15407 if (!N->getOperand(0).hasOneUse() &&
15408 !N->getOperand(1).hasOneUse() &&
15409 !N->getOperand(2).hasOneUse()) {
15410
15411 // Scan all of the users of the LHS, looking for VCMP_rec's that match.
15412 SDNode *VCMPrecNode = nullptr;
15413
15414 SDNode *LHSN = N->getOperand(0).getNode();
15415 for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
15416 UI != E; ++UI)
15417 if (UI->getOpcode() == PPCISD::VCMP_rec &&
15418 UI->getOperand(1) == N->getOperand(1) &&
15419 UI->getOperand(2) == N->getOperand(2) &&
15420 UI->getOperand(0) == N->getOperand(0)) {
15421 VCMPrecNode = *UI;
15422 break;
15423 }
15424
15425 // If there is no VCMP_rec node, or if the flag value has a single use,
15426 // don't transform this.
15427 if (!VCMPrecNode || VCMPrecNode->hasNUsesOfValue(0, 1))
15428 break;
15429
15430 // Look at the (necessarily single) use of the flag value. If it has a
15431 // chain, this transformation is more complex. Note that multiple things
15432 // could use the value result, which we should ignore.
15433 SDNode *FlagUser = nullptr;
15434 for (SDNode::use_iterator UI = VCMPrecNode->use_begin();
15435 FlagUser == nullptr; ++UI) {
15436       assert(UI != VCMPrecNode->use_end() && "Didn't find user!");
15437 SDNode *User = *UI;
15438 for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
15439 if (User->getOperand(i) == SDValue(VCMPrecNode, 1)) {
15440 FlagUser = User;
15441 break;
15442 }
15443 }
15444 }
15445
15446     // If the user is an MFOCRF instruction, we know this is safe.
15447     // Otherwise we give up for now.
15448 if (FlagUser->getOpcode() == PPCISD::MFOCRF)
15449 return SDValue(VCMPrecNode, 0);
15450 }
15451 break;
15452 case ISD::BRCOND: {
15453 SDValue Cond = N->getOperand(1);
15454 SDValue Target = N->getOperand(2);
15455
15456 if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
15457 cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() ==
15458 Intrinsic::loop_decrement) {
15459
15460 // We now need to make the intrinsic dead (it cannot be instruction
15461 // selected).
15462 DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0));
15463       assert(Cond.getNode()->hasOneUse() &&
15464              "Counter decrement has more than one use");
15465
15466 return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other,
15467 N->getOperand(0), Target);
15468 }
15469 }
15470 break;
15471 case ISD::BR_CC: {
15472 // If this is a branch on an altivec predicate comparison, lower this so
15473     // that we don't have to do an MFOCRF: instead, branch directly on CR6. This
15474 // lowering is done pre-legalize, because the legalizer lowers the predicate
15475 // compare down to code that is difficult to reassemble.
15476 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
15477 SDValue LHS = N->getOperand(2), RHS = N->getOperand(3);
15478
15479     // Sometimes the promoted value of the intrinsic is ANDed with some
15480     // non-zero value. If so, look through the AND to get to the intrinsic.
15481 if (LHS.getOpcode() == ISD::AND &&
15482 LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN &&
15483 cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() ==
15484 Intrinsic::loop_decrement &&
15485 isa<ConstantSDNode>(LHS.getOperand(1)) &&
15486 !isNullConstant(LHS.getOperand(1)))
15487 LHS = LHS.getOperand(0);
15488
15489 if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
15490 cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() ==
15491 Intrinsic::loop_decrement &&
15492 isa<ConstantSDNode>(RHS)) {
15493       assert((CC == ISD::SETEQ || CC == ISD::SETNE) &&
15494              "Counter decrement comparison is not EQ or NE");
15495
15496 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
15497 bool isBDNZ = (CC == ISD::SETEQ && Val) ||
15498 (CC == ISD::SETNE && !Val);
15499
15500 // We now need to make the intrinsic dead (it cannot be instruction
15501 // selected).
15502 DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0));
15503       assert(LHS.getNode()->hasOneUse() &&
15504              "Counter decrement has more than one use");
15505
15506 return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other,
15507 N->getOperand(0), N->getOperand(4));
15508 }
15509
15510 int CompareOpc;
15511 bool isDot;
15512
15513 if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
15514 isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
15515 getVectorCompareInfo(LHS, CompareOpc, isDot, Subtarget)) {
15516     assert(isDot && "Can't compare against a vector result!");
15517
15518 // If this is a comparison against something other than 0/1, then we know
15519 // that the condition is never/always true.
15520 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
15521 if (Val != 0 && Val != 1) {
15522 if (CC == ISD::SETEQ) // Cond never true, remove branch.
15523 return N->getOperand(0);
15524 // Always !=, turn it into an unconditional branch.
15525 return DAG.getNode(ISD::BR, dl, MVT::Other,
15526 N->getOperand(0), N->getOperand(4));
15527 }
15528
15529 bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);
15530
15531 // Create the PPCISD altivec 'dot' comparison node.
15532 SDValue Ops[] = {
15533 LHS.getOperand(2), // LHS of compare
15534 LHS.getOperand(3), // RHS of compare
15535 DAG.getConstant(CompareOpc, dl, MVT::i32)
15536 };
15537 EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue };
15538 SDValue CompNode = DAG.getNode(PPCISD::VCMP_rec, dl, VTs, Ops);
15539
15540 // Unpack the result based on how the target uses it.
15541 PPC::Predicate CompOpc;
15542 switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) {
15543 default: // Can't happen, don't crash on invalid number though.
15544 case 0: // Branch on the value of the EQ bit of CR6.
15545 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
15546 break;
15547 case 1: // Branch on the inverted value of the EQ bit of CR6.
15548 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
15549 break;
15550 case 2: // Branch on the value of the LT bit of CR6.
15551 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
15552 break;
15553 case 3: // Branch on the inverted value of the LT bit of CR6.
15554 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
15555 break;
15556 }
15557
15558 return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0),
15559 DAG.getConstant(CompOpc, dl, MVT::i32),
15560 DAG.getRegister(PPC::CR6, MVT::i32),
15561 N->getOperand(4), CompNode.getValue(1));
15562 }
15563 break;
15564 }
15565 case ISD::BUILD_VECTOR:
15566 return DAGCombineBuildVector(N, DCI);
15567 case ISD::ABS:
15568 return combineABS(N, DCI);
15569 case ISD::VSELECT:
15570 return combineVSelect(N, DCI);
15571 }
15572
15573 return SDValue();
15574}
15575
15576SDValue
15577PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
15578 SelectionDAG &DAG,
15579 SmallVectorImpl<SDNode *> &Created) const {
15580 // fold (sdiv X, pow2)
15581 EVT VT = N->getValueType(0);
15582 if (VT == MVT::i64 && !Subtarget.isPPC64())
15583 return SDValue();
15584 if ((VT != MVT::i32 && VT != MVT::i64) ||
15585 !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2()))
15586 return SDValue();
15587
15588 SDLoc DL(N);
15589 SDValue N0 = N->getOperand(0);
15590
15591 bool IsNegPow2 = (-Divisor).isPowerOf2();
15592 unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros();
15593 SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT);
15594
15595 SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt);
15596 Created.push_back(Op.getNode());
15597
15598 if (IsNegPow2) {
15599 Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
15600 Created.push_back(Op.getNode());
15601 }
15602
15603 return Op;
15604}
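// [Editor's worked example, not part of the source] For X sdiv 8, the
// PPCISD::SRA_ADDZE node corresponds to the classic srawi/addze idiom:
//   int32_t sdiv8(int32_t X) {
//     int32_t Sh = X >> 3;                   // arithmetic shift right
//     int32_t CA = (X < 0 && (X & 7) != 0);  // carry bit set by srawi
//     return Sh + CA;                        // addze rounds toward zero
//   }
// For a divisor of -8, the IsNegPow2 path above additionally negates the
// result via (sub 0, Op).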
15605
15606//===----------------------------------------------------------------------===//
15607// Inline Assembly Support
15608//===----------------------------------------------------------------------===//
15609
15610void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
15611 KnownBits &Known,
15612 const APInt &DemandedElts,
15613 const SelectionDAG &DAG,
15614 unsigned Depth) const {
15615 Known.resetAll();
15616 switch (Op.getOpcode()) {
15617 default: break;
15618 case PPCISD::LBRX: {
15619 // lhbrx is known to have the top bits cleared out.
15620 if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16)
15621 Known.Zero = 0xFFFF0000;
15622 break;
15623 }
15624 case ISD::INTRINSIC_WO_CHAIN: {
15625 switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
15626 default: break;
15627 case Intrinsic::ppc_altivec_vcmpbfp_p:
15628 case Intrinsic::ppc_altivec_vcmpeqfp_p:
15629 case Intrinsic::ppc_altivec_vcmpequb_p:
15630 case Intrinsic::ppc_altivec_vcmpequh_p:
15631 case Intrinsic::ppc_altivec_vcmpequw_p:
15632 case Intrinsic::ppc_altivec_vcmpequd_p:
15633 case Intrinsic::ppc_altivec_vcmpequq_p:
15634 case Intrinsic::ppc_altivec_vcmpgefp_p:
15635 case Intrinsic::ppc_altivec_vcmpgtfp_p:
15636 case Intrinsic::ppc_altivec_vcmpgtsb_p:
15637 case Intrinsic::ppc_altivec_vcmpgtsh_p:
15638 case Intrinsic::ppc_altivec_vcmpgtsw_p:
15639 case Intrinsic::ppc_altivec_vcmpgtsd_p:
15640 case Intrinsic::ppc_altivec_vcmpgtsq_p:
15641 case Intrinsic::ppc_altivec_vcmpgtub_p:
15642 case Intrinsic::ppc_altivec_vcmpgtuh_p:
15643 case Intrinsic::ppc_altivec_vcmpgtuw_p:
15644 case Intrinsic::ppc_altivec_vcmpgtud_p:
15645 case Intrinsic::ppc_altivec_vcmpgtuq_p:
15646 Known.Zero = ~1U; // All bits but the low one are known to be zero.
15647 break;
15648 }
15649 }
15650 }
15651}
15652
15653Align PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
15654 switch (Subtarget.getCPUDirective()) {
15655 default: break;
15656 case PPC::DIR_970:
15657 case PPC::DIR_PWR4:
15658 case PPC::DIR_PWR5:
15659 case PPC::DIR_PWR5X:
15660 case PPC::DIR_PWR6:
15661 case PPC::DIR_PWR6X:
15662 case PPC::DIR_PWR7:
15663 case PPC::DIR_PWR8:
15664 case PPC::DIR_PWR9:
15665 case PPC::DIR_PWR10:
15666 case PPC::DIR_PWR_FUTURE: {
15667 if (!ML)
15668 break;
15669
15670 if (!DisableInnermostLoopAlign32) {
15671       // If the nested loop is an innermost loop, prefer a 32-byte alignment
15672       // so that we can decrease cache misses and branch-prediction misses.
15673       // The actual alignment of the loop will depend on the hotness check
15674       // and other logic in alignBlocks.
15675 if (ML->getLoopDepth() > 1 && ML->getSubLoops().empty())
15676 return Align(32);
15677 }
15678
15679 const PPCInstrInfo *TII = Subtarget.getInstrInfo();
15680
15681 // For small loops (between 5 and 8 instructions), align to a 32-byte
15682 // boundary so that the entire loop fits in one instruction-cache line.
15683 uint64_t LoopSize = 0;
15684 for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I)
15685 for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) {
15686 LoopSize += TII->getInstSizeInBytes(*J);
15687 if (LoopSize > 32)
15688 break;
15689 }
15690
15691 if (LoopSize > 16 && LoopSize <= 32)
15692 return Align(32);
15693
15694 break;
15695 }
15696 }
15697
15698 return TargetLowering::getPrefLoopAlignment(ML);
15699}
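// [Editor's worked example, not part of the source] A loop of seven
// fixed-width 4-byte instructions is 28 bytes; since 16 < 28 <= 32 it is
// given Align(32), so the whole body fits in one 32-byte cache-line window.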
15700
15701/// getConstraintType - Given a constraint, return the type of
15702/// constraint it is for this target.
15703PPCTargetLowering::ConstraintType
15704PPCTargetLowering::getConstraintType(StringRef Constraint) const {
15705 if (Constraint.size() == 1) {
15706 switch (Constraint[0]) {
15707 default: break;
15708 case 'b':
15709 case 'r':
15710 case 'f':
15711 case 'd':
15712 case 'v':
15713 case 'y':
15714 return C_RegisterClass;
15715 case 'Z':
15716 // FIXME: While Z does indicate a memory constraint, it specifically
15717 // indicates an r+r address (used in conjunction with the 'y' modifier
15718 // in the replacement string). Currently, we're forcing the base
15719 // register to be r0 in the asm printer (which is interpreted as zero)
15720 // and forming the complete address in the second register. This is
15721 // suboptimal.
15722 return C_Memory;
15723 }
15724 } else if (Constraint == "wc") { // individual CR bits.
15725 return C_RegisterClass;
15726 } else if (Constraint == "wa" || Constraint == "wd" ||
15727 Constraint == "wf" || Constraint == "ws" ||
15728 Constraint == "wi" || Constraint == "ww") {
15729 return C_RegisterClass; // VSX registers.
15730 }
15731 return TargetLowering::getConstraintType(Constraint);
15732}
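// [Editor's examples, not from the source] Illustrative inline-asm uses of
// the constraints classified above; operand modifiers follow documented GCC
// PowerPC conventions, and the variable names are assumptions:
//   asm("add %0, %1, %2"  : "=r"(d) : "r"(a), "r"(b));  // GPR class ('r')
//   asm("fadd %0, %1, %2" : "=f"(x) : "f"(y), "f"(z));  // FPR class ('f')
//   asm("lwbrx %0, %y1"   : "=r"(v) : "Z"(*p));         // r+r memory ('Z')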
15733
15734/// Examine constraint type and operand type and determine a weight value.
15735/// This object must already have been set up with the operand type
15736/// and the current alternative constraint selected.
15737TargetLowering::ConstraintWeight
15738PPCTargetLowering::getSingleConstraintMatchWeight(
15739 AsmOperandInfo &info, const char *constraint) const {
15740 ConstraintWeight weight = CW_Invalid;
15741 Value *CallOperandVal = info.CallOperandVal;
15742 // If we don't have a value, we can't do a match,
15743 // but allow it at the lowest weight.
15744 if (!CallOperandVal)
15745 return CW_Default;
15746 Type *type = CallOperandVal->getType();
15747
15748 // Look at the constraint type.
15749 if (StringRef(constraint) == "wc" && type->isIntegerTy(1))
15750 return CW_Register; // an individual CR bit.
15751 else if ((StringRef(constraint) == "wa" ||
15752 StringRef(constraint) == "wd" ||
15753 StringRef(constraint) == "wf") &&
15754 type->isVectorTy())
15755 return CW_Register;
15756 else if (StringRef(constraint) == "wi" && type->isIntegerTy(64))
15757     return CW_Register; // holds 64-bit integer data only.
15758 else if (StringRef(constraint) == "ws" && type->isDoubleTy())
15759 return CW_Register;
15760 else if (StringRef(constraint) == "ww" && type->isFloatTy())
15761 return CW_Register;
15762
15763 switch (*constraint) {
15764 default:
15765 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
15766 break;
15767 case 'b':
15768 if (type->isIntegerTy())
15769 weight = CW_Register;
15770 break;
15771 case 'f':
15772 if (type->isFloatTy())
15773 weight = CW_Register;
15774 break;
15775 case 'd':
15776 if (type->isDoubleTy())
15777 weight = CW_Register;
15778 break;
15779 case 'v':
15780 if (type->isVectorTy())
15781 weight = CW_Register;
15782 break;
15783 case 'y':
15784 weight = CW_Register;
15785 break;
15786 case 'Z':
15787 weight = CW_Memory;
15788 break;
15789 }
15790 return weight;
15791}
15792
15793std::pair<unsigned, const TargetRegisterClass *>
15794PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
15795 StringRef Constraint,
15796 MVT VT) const {
15797 if (Constraint.size() == 1) {
15798 // GCC RS6000 Constraint Letters
15799 switch (Constraint[0]) {
15800 case 'b': // R1-R31
15801 if (VT == MVT::i64 && Subtarget.isPPC64())
15802 return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
15803 return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
15804 case 'r': // R0-R31
15805 if (VT == MVT::i64 && Subtarget.isPPC64())
15806 return std::make_pair(0U, &PPC::G8RCRegClass);
15807 return std::make_pair(0U, &PPC::GPRCRegClass);
15808     // The 'd' and 'f' constraints are both defined to be "the floating point
15809     // registers", where one is for 32-bit and the other for 64-bit. We don't
15810     // care much here, so just give them all the same register classes.
15811 case 'd':
15812 case 'f':
15813 if (Subtarget.hasSPE()) {
15814 if (VT == MVT::f32 || VT == MVT::i32)
15815 return std::make_pair(0U, &PPC::GPRCRegClass);
15816 if (VT == MVT::f64 || VT == MVT::i64)
15817 return std::make_pair(0U, &PPC::SPERCRegClass);
15818 } else {
15819 if (VT == MVT::f32 || VT == MVT::i32)
15820 return std::make_pair(0U, &PPC::F4RCRegClass);
15821 if (VT == MVT::f64 || VT == MVT::i64)
15822 return std::make_pair(0U, &PPC::F8RCRegClass);
15823 }
15824 break;
15825 case 'v':
15826 if (Subtarget.hasAltivec())
15827 return std::make_pair(0U, &PPC::VRRCRegClass);
15828 break;
15829 case 'y': // crrc
15830 return std::make_pair(0U, &PPC::CRRCRegClass);
15831 }
15832 } else if (Constraint == "wc" && Subtarget.useCRBits()) {
15833 // An individual CR bit.
15834 return std::make_pair(0U, &PPC::CRBITRCRegClass);
15835 } else if ((Constraint == "wa" || Constraint == "wd" ||
15836 Constraint == "wf" || Constraint == "wi") &&
15837 Subtarget.hasVSX()) {
15838 // A VSX register for either a scalar (FP) or vector. There is no
15839 // support for single precision scalars on subtargets prior to Power8.
15840 if (VT.isVector())
15841 return std::make_pair(0U, &PPC::VSRCRegClass);
15842 if (VT == MVT::f32 && Subtarget.hasP8Vector())
15843 return std::make_pair(0U, &PPC::VSSRCRegClass);
15844 return std::make_pair(0U, &PPC::VSFRCRegClass);
15845 } else if ((Constraint == "ws" || Constraint == "ww") && Subtarget.hasVSX()) {
15846 if (VT == MVT::f32 && Subtarget.hasP8Vector())
15847 return std::make_pair(0U, &PPC::VSSRCRegClass);
15848 else
15849 return std::make_pair(0U, &PPC::VSFRCRegClass);
15850 } else if (Constraint == "lr") {
15851 if (VT == MVT::i64)
15852 return std::make_pair(0U, &PPC::LR8RCRegClass);
15853 else
15854 return std::make_pair(0U, &PPC::LRRCRegClass);
15855 }
15856
15857 // Handle special cases of physical registers that are not properly handled
15858 // by the base class.
15859 if (Constraint[0] == '{' && Constraint[Constraint.size() - 1] == '}') {
15860 // If we name a VSX register, we can't defer to the base class because it
15861 // will not recognize the correct register (their names will be VSL{0-31}
15862 // and V{0-31} so they won't match). So we match them here.
15863 if (Constraint.size() > 3 && Constraint[1] == 'v' && Constraint[2] == 's') {
15864 int VSNum = atoi(Constraint.data() + 3);
15865       assert(VSNum >= 0 && VSNum <= 63 &&
15866              "Attempted to access a vsr out of range");
15867 if (VSNum < 32)
15868 return std::make_pair(PPC::VSL0 + VSNum, &PPC::VSRCRegClass);
15869 return std::make_pair(PPC::V0 + VSNum - 32, &PPC::VSRCRegClass);
15870 }
15871
15872 // For float registers, we can't defer to the base class as it will match
15873 // the SPILLTOVSRRC class.
15874 if (Constraint.size() > 3 && Constraint[1] == 'f') {
15875 int RegNum = atoi(Constraint.data() + 2);
15876 if (RegNum > 31 || RegNum < 0)
15877 report_fatal_error("Invalid floating point register number");
15878 if (VT == MVT::f32 || VT == MVT::i32)
15879 return Subtarget.hasSPE()
15880 ? std::make_pair(PPC::R0 + RegNum, &PPC::GPRCRegClass)
15881 : std::make_pair(PPC::F0 + RegNum, &PPC::F4RCRegClass);
15882 if (VT == MVT::f64 || VT == MVT::i64)
15883 return Subtarget.hasSPE()
15884 ? std::make_pair(PPC::S0 + RegNum, &PPC::SPERCRegClass)
15885 : std::make_pair(PPC::F0 + RegNum, &PPC::F8RCRegClass);
15886 }
15887 }
15888
15889 std::pair<unsigned, const TargetRegisterClass *> R =
15890 TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
15891
15892 // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers
15893 // (which we call X[0-9]+). If a 64-bit value has been requested, and a
15894 // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent
15895 // register.
15896 // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use
15897 // the AsmName field from *RegisterInfo.td, then this would not be necessary.
15898 if (R.first && VT == MVT::i64 && Subtarget.isPPC64() &&
15899 PPC::GPRCRegClass.contains(R.first))
15900 return std::make_pair(TRI->getMatchingSuperReg(R.first,
15901 PPC::sub_32, &PPC::G8RCRegClass),
15902 &PPC::G8RCRegClass);
15903
15904 // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same.
15905 if (!R.second && StringRef("{cc}").equals_insensitive(Constraint)) {
15906 R.first = PPC::CR0;
15907 R.second = &PPC::CRRCRegClass;
15908 }
15909 // FIXME: This warning should ideally be emitted in the front end.
15910 const auto &TM = getTargetMachine();
15911 if (Subtarget.isAIXABI() && !TM.getAIXExtendedAltivecABI()) {
15912 if (((R.first >= PPC::V20 && R.first <= PPC::V31) ||
15913 (R.first >= PPC::VF20 && R.first <= PPC::VF31)) &&
15914 (R.second == &PPC::VSRCRegClass || R.second == &PPC::VSFRCRegClass))
15915       errs() << "warning: vector registers 20 to 31 are reserved in the "
15916 "default AIX AltiVec ABI and cannot be used\n";
15917 }
15918
15919 return R;
15920}
15921
15922/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
15923/// vector. If it is invalid, don't add anything to Ops.
15924void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
15925 std::string &Constraint,
15926 std::vector<SDValue>&Ops,
15927 SelectionDAG &DAG) const {
15928 SDValue Result;
15929
15930 // Only support length 1 constraints.
15931 if (Constraint.length() > 1) return;
15932
15933 char Letter = Constraint[0];
15934 switch (Letter) {
15935 default: break;
15936 case 'I':
15937 case 'J':
15938 case 'K':
15939 case 'L':
15940 case 'M':
15941 case 'N':
15942 case 'O':
15943 case 'P': {
15944 ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
15945 if (!CST) return; // Must be an immediate to match.
15946 SDLoc dl(Op);
15947 int64_t Value = CST->getSExtValue();
15948 EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative
15949 // numbers are printed as such.
15950 switch (Letter) {
15951     default: llvm_unreachable("Unknown constraint letter!");
15952 case 'I': // "I" is a signed 16-bit constant.
15953 if (isInt<16>(Value))
15954 Result = DAG.getTargetConstant(Value, dl, TCVT);
15955 break;
15956 case 'J': // "J" is a constant with only the high-order 16 bits nonzero.
15957 if (isShiftedUInt<16, 16>(Value))
15958 Result = DAG.getTargetConstant(Value, dl, TCVT);
15959 break;
15960 case 'L': // "L" is a signed 16-bit constant shifted left 16 bits.
15961 if (isShiftedInt<16, 16>(Value))
15962 Result = DAG.getTargetConstant(Value, dl, TCVT);
15963 break;
15964 case 'K': // "K" is a constant with only the low-order 16 bits nonzero.
15965 if (isUInt<16>(Value))
15966 Result = DAG.getTargetConstant(Value, dl, TCVT);
15967 break;
15968 case 'M': // "M" is a constant that is greater than 31.
15969 if (Value > 31)
15970 Result = DAG.getTargetConstant(Value, dl, TCVT);
15971 break;
15972 case 'N': // "N" is a positive constant that is an exact power of two.
15973 if (Value > 0 && isPowerOf2_64(Value))
15974 Result = DAG.getTargetConstant(Value, dl, TCVT);
15975 break;
15976 case 'O': // "O" is the constant zero.
15977 if (Value == 0)
15978 Result = DAG.getTargetConstant(Value, dl, TCVT);
15979 break;
15980 case 'P': // "P" is a constant whose negation is a signed 16-bit constant.
15981 if (isInt<16>(-Value))
15982 Result = DAG.getTargetConstant(Value, dl, TCVT);
15983 break;
15984 }
15985 break;
15986 }
15987 }
15988
15989 if (Result.getNode()) {
15990 Ops.push_back(Result);
15991 return;
15992 }
15993
15994 // Handle standard constraint letters.
15995 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
15996}
15997
15998// isLegalAddressingMode - Return true if the addressing mode represented
15999// by AM is legal for this target, for a load/store of the specified type.
16000bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
16001 const AddrMode &AM, Type *Ty,
16002 unsigned AS,
16003 Instruction *I) const {
16004   // The vector-type r+i form is supported since Power9 as the DQ form. We
16005   // don't check the offset against the DQ-form requirement (off % 16 == 0)
16006   // because, on PowerPC, the imm form is preferred and the offset can be
16007   // adjusted to use the imm form later in the PPCLoopInstrFormPrep pass.
16008   // Also, since LSR checks addressing-mode legality with the min and max
16009   // offsets of each LSRUse, we accept other offsets for that LSRUse too.
16010 if (Ty->isVectorTy() && AM.BaseOffs != 0 && !Subtarget.hasP9Vector())
16011 return false;
16012
16013 // PPC allows a sign-extended 16-bit immediate field.
16014 if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
16015 return false;
16016
16017 // No global is ever allowed as a base.
16018 if (AM.BaseGV)
16019 return false;
16020
16021   // PPC only supports r+r addressing at most:
16022 switch (AM.Scale) {
16023 case 0: // "r+i" or just "i", depending on HasBaseReg.
16024 break;
16025 case 1:
16026 if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
16027 return false;
16028 // Otherwise we have r+r or r+i.
16029 break;
16030 case 2:
16031 if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
16032 return false;
16033 // Allow 2*r as r+r.
16034 break;
16035 default:
16036 // No other scales are supported.
16037 return false;
16038 }
16039
16040 return true;
16041}
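// [Editor's examples, not from the source] How the checks above classify
// some common addressing modes for a scalar type:
//   [r3]           Scale=0, no offset          -> legal
//   [r3 + 32]      Scale=0, BaseOffs=32        -> legal (r+i form)
//   [r3 + r4]      Scale=1, HasBaseReg         -> legal (r+r, X-form)
//   [r3 + r4 + 8]  Scale=1, base reg + offset  -> rejected
//   [2*r3]         Scale=2, no base, no offset -> legal (treated as r3+r3)
//   [4*r3]         Scale=4                     -> rejected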
16042
16043SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
16044 SelectionDAG &DAG) const {
16045 MachineFunction &MF = DAG.getMachineFunction();
16046 MachineFrameInfo &MFI = MF.getFrameInfo();
16047 MFI.setReturnAddressIsTaken(true);
16048
16049 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
16050 return SDValue();
16051
16052 SDLoc dl(Op);
16053 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
16054
16055 // Make sure the function does not optimize away the store of the RA to
16056 // the stack.
16057 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
16058 FuncInfo->setLRStoreRequired();
16059 bool isPPC64 = Subtarget.isPPC64();
16060 auto PtrVT = getPointerTy(MF.getDataLayout());
16061
16062 if (Depth > 0) {
16063 // The link register (return address) is saved in the caller's frame
16064 // not the callee's stack frame. So we must get the caller's frame
16065 // address and load the return address at the LR offset from there.
16066 SDValue FrameAddr =
16067 DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
16068 LowerFRAMEADDR(Op, DAG), MachinePointerInfo());
16069 SDValue Offset =
16070 DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(), dl,
16071 isPPC64 ? MVT::i64 : MVT::i32);
16072 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
16073 DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
16074 MachinePointerInfo());
16075 }
16076
16077 // Just load the return address off the stack.
16078 SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
16079 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
16080 MachinePointerInfo());
16081}
16082
16083SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
16084 SelectionDAG &DAG) const {
16085 SDLoc dl(Op);
16086 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
16087
16088 MachineFunction &MF = DAG.getMachineFunction();
16089 MachineFrameInfo &MFI = MF.getFrameInfo();
16090 MFI.setFrameAddressIsTaken(true);
16091
16092 EVT PtrVT = getPointerTy(MF.getDataLayout());
16093 bool isPPC64 = PtrVT == MVT::i64;
16094
16095 // Naked functions never have a frame pointer, and so we use r1. For all
16096   // other functions, this decision must be delayed until PEI.
16097 unsigned FrameReg;
16098 if (MF.getFunction().hasFnAttribute(Attribute::Naked))
16099 FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
16100 else
16101 FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;
16102
16103 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
16104 PtrVT);
16105 while (Depth--)
16106 FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
16107 FrameAddr, MachinePointerInfo());
16108 return FrameAddr;
16109}
16110
16111// FIXME? Maybe this could be a TableGen attribute on some registers and
16112// this table could be generated automatically from RegInfo.
16113Register PPCTargetLowering::getRegisterByName(const char* RegName, LLT VT,
16114 const MachineFunction &MF) const {
16115 bool isPPC64 = Subtarget.isPPC64();
16116
16117 bool is64Bit = isPPC64 && VT == LLT::scalar(64);
16118 if (!is64Bit && VT != LLT::scalar(32))
16119 report_fatal_error("Invalid register global variable type");
16120
16121 Register Reg = StringSwitch<Register>(RegName)
16122 .Case("r1", is64Bit ? PPC::X1 : PPC::R1)
16123 .Case("r2", isPPC64 ? Register() : PPC::R2)
16124 .Case("r13", (is64Bit ? PPC::X13 : PPC::R13))
16125 .Default(Register());
16126
16127 if (Reg)
16128 return Reg;
16129 report_fatal_error("Invalid register name global variable");
16130}
16131
16132bool PPCTargetLowering::isAccessedAsGotIndirect(SDValue GA) const {
16133   // The 32-bit SVR4 ABI accesses everything as got-indirect.
16134 if (Subtarget.is32BitELFABI())
16135 return true;
16136
16137 // AIX accesses everything indirectly through the TOC, which is similar to
16138 // the GOT.
16139 if (Subtarget.isAIXABI())
16140 return true;
16141
16142 CodeModel::Model CModel = getTargetMachine().getCodeModel();
16143   // With the small or large code model, module locals are accessed
16144 // indirectly by loading their address from .toc/.got.
16145 if (CModel == CodeModel::Small || CModel == CodeModel::Large)
16146 return true;
16147
16148 // JumpTable and BlockAddress are accessed as got-indirect.
16149 if (isa<JumpTableSDNode>(GA) || isa<BlockAddressSDNode>(GA))
16150 return true;
16151
16152 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(GA))
16153 return Subtarget.isGVIndirectSymbol(G->getGlobal());
16154
16155 return false;
16156}
16157
16158bool
16159PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
16160 // The PowerPC target isn't yet aware of offsets.
16161 return false;
16162}
16163
16164bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
16165 const CallInst &I,
16166 MachineFunction &MF,
16167 unsigned Intrinsic) const {
16168 switch (Intrinsic) {
16169 case Intrinsic::ppc_atomicrmw_xchg_i128:
16170 case Intrinsic::ppc_atomicrmw_add_i128:
16171 case Intrinsic::ppc_atomicrmw_sub_i128:
16172 case Intrinsic::ppc_atomicrmw_nand_i128:
16173 case Intrinsic::ppc_atomicrmw_and_i128:
16174 case Intrinsic::ppc_atomicrmw_or_i128:
16175 case Intrinsic::ppc_atomicrmw_xor_i128:
16176 case Intrinsic::ppc_cmpxchg_i128:
16177 Info.opc = ISD::INTRINSIC_W_CHAIN;
16178 Info.memVT = MVT::i128;
16179 Info.ptrVal = I.getArgOperand(0);
16180 Info.offset = 0;
16181 Info.align = Align(16);
16182 Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
16183 MachineMemOperand::MOVolatile;
16184 return true;
16185 case Intrinsic::ppc_atomic_load_i128:
16186 Info.opc = ISD::INTRINSIC_W_CHAIN;
16187 Info.memVT = MVT::i128;
16188 Info.ptrVal = I.getArgOperand(0);
16189 Info.offset = 0;
16190 Info.align = Align(16);
16191 Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile;
16192 return true;
16193 case Intrinsic::ppc_atomic_store_i128:
16194 Info.opc = ISD::INTRINSIC_VOID;
16195 Info.memVT = MVT::i128;
16196 Info.ptrVal = I.getArgOperand(2);
16197 Info.offset = 0;
16198 Info.align = Align(16);
16199 Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile;
16200 return true;
16201 case Intrinsic::ppc_altivec_lvx:
16202 case Intrinsic::ppc_altivec_lvxl:
16203 case Intrinsic::ppc_altivec_lvebx:
16204 case Intrinsic::ppc_altivec_lvehx:
16205 case Intrinsic::ppc_altivec_lvewx:
16206 case Intrinsic::ppc_vsx_lxvd2x:
16207 case Intrinsic::ppc_vsx_lxvw4x:
16208 case Intrinsic::ppc_vsx_lxvd2x_be:
16209 case Intrinsic::ppc_vsx_lxvw4x_be:
16210 case Intrinsic::ppc_vsx_lxvl:
16211 case Intrinsic::ppc_vsx_lxvll: {
16212 EVT VT;
16213 switch (Intrinsic) {
16214 case Intrinsic::ppc_altivec_lvebx:
16215 VT = MVT::i8;
16216 break;
16217 case Intrinsic::ppc_altivec_lvehx:
16218 VT = MVT::i16;
16219 break;
16220 case Intrinsic::ppc_altivec_lvewx:
16221 VT = MVT::i32;
16222 break;
16223 case Intrinsic::ppc_vsx_lxvd2x:
16224 case Intrinsic::ppc_vsx_lxvd2x_be:
16225 VT = MVT::v2f64;
16226 break;
16227 default:
16228 VT = MVT::v4i32;
16229 break;
16230 }
16231
16232 Info.opc = ISD::INTRINSIC_W_CHAIN;
16233 Info.memVT = VT;
16234 Info.ptrVal = I.getArgOperand(0);
16235 Info.offset = -VT.getStoreSize()+1;
16236 Info.size = 2*VT.getStoreSize()-1;
16237 Info.align = Align(1);
16238 Info.flags = MachineMemOperand::MOLoad;
16239 return true;
16240 }
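// [Editor's note] The offset/size pair above describes the same
// (2 * sizeof(vector) - 1)-byte window used by the unaligned-load expansion
// earlier: for a 16-byte vector, bytes [base - 15, base + 15].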
16241 case Intrinsic::ppc_altivec_stvx:
16242 case Intrinsic::ppc_altivec_stvxl:
16243 case Intrinsic::ppc_altivec_stvebx:
16244 case Intrinsic::ppc_altivec_stvehx:
16245 case Intrinsic::ppc_altivec_stvewx:
16246 case Intrinsic::ppc_vsx_stxvd2x:
16247 case Intrinsic::ppc_vsx_stxvw4x:
16248 case Intrinsic::ppc_vsx_stxvd2x_be:
16249 case Intrinsic::ppc_vsx_stxvw4x_be:
16250 case Intrinsic::ppc_vsx_stxvl:
16251 case Intrinsic::ppc_vsx_stxvll: {
16252 EVT VT;
16253 switch (Intrinsic) {
16254 case Intrinsic::ppc_altivec_stvebx:
16255 VT = MVT::i8;
16256 break;
16257 case Intrinsic::ppc_altivec_stvehx:
16258 VT = MVT::i16;
16259 break;
16260 case Intrinsic::ppc_altivec_stvewx:
16261 VT = MVT::i32;
16262 break;
16263 case Intrinsic::ppc_vsx_stxvd2x:
16264 case Intrinsic::ppc_vsx_stxvd2x_be:
16265 VT = MVT::v2f64;
16266 break;
16267 default:
16268 VT = MVT::v4i32;
16269 break;
16270 }
16271
16272 Info.opc = ISD::INTRINSIC_VOID;
16273 Info.memVT = VT;
16274 Info.ptrVal = I.getArgOperand(1);
16275 Info.offset = -VT.getStoreSize()+1;
16276 Info.size = 2*VT.getStoreSize()-1;
16277 Info.align = Align(1);
16278 Info.flags = MachineMemOperand::MOStore;
16279 return true;
16280 }
16281 default:
16282 break;
16283 }
16284
16285 return false;
16286}
16287
16288/// It returns EVT::Other if the type should be determined using generic
16289/// target-independent logic.
16290EVT PPCTargetLowering::getOptimalMemOpType(
16291 const MemOp &Op, const AttributeList &FuncAttributes) const {
16292 if (getTargetMachine().getOptLevel() != CodeGenOpt::None) {
16293 // We should use Altivec/VSX loads and stores when available. For unaligned
16294 // addresses, unaligned VSX loads are only fast starting with the P8.
16295 if (Subtarget.hasAltivec() && Op.size() >= 16 &&
16296 (Op.isAligned(Align(16)) ||
16297 ((Op.isMemset() && Subtarget.hasVSX()) || Subtarget.hasP8Vector())))
16298 return MVT::v4i32;
16299 }
16300
16301 if (Subtarget.isPPC64()) {
16302 return MVT::i64;
16303 }
16304
16305 return MVT::i32;
16306}
16307
16308/// Returns true if it is beneficial to convert a load of a constant
16309/// to just the constant itself.
16310bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
16311 Type *Ty) const {
16312   assert(Ty->isIntegerTy());
16313
16314 unsigned BitSize = Ty->getPrimitiveSizeInBits();
16315 return !(BitSize == 0 || BitSize > 64);
16316}
16317
16318bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
16319 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
16320 return false;
16321 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
16322 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
16323 return NumBits1 == 64 && NumBits2 == 32;
16324}
16325
16326bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
16327 if (!VT1.isInteger() || !VT2.isInteger())
16328 return false;
16329 unsigned NumBits1 = VT1.getSizeInBits();
16330 unsigned NumBits2 = VT2.getSizeInBits();
16331 return NumBits1 == 64 && NumBits2 == 32;
16332}
16333
16334bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
16335 // Generally speaking, zexts are not free, but they are free when they can be
16336 // folded with other operations.
16337 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) {
16338 EVT MemVT = LD->getMemoryVT();
16339 if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 ||
16340 (Subtarget.isPPC64() && MemVT == MVT::i32)) &&
16341 (LD->getExtensionType() == ISD::NON_EXTLOAD ||
16342 LD->getExtensionType() == ISD::ZEXTLOAD))
16343 return true;
16344 }
16345
16346 // FIXME: Add other cases...
16347 // - 32-bit shifts with a zext to i64
16348 // - zext after ctlz, bswap, etc.
16349 // - zext after and by a constant mask
16350
16351 return TargetLowering::isZExtFree(Val, VT2);
16352}
16353
16354bool PPCTargetLowering::isFPExtFree(EVT DestVT, EVT SrcVT) const {
16355   assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
16356          "invalid fpext types");
16357 // Extending to float128 is not free.
16358 if (DestVT == MVT::f128)
16359 return false;
16360 return true;
16361}
16362
16363bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
16364 return isInt<16>(Imm) || isUInt<16>(Imm);
16365}
16366
16367bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const {
16368 return isInt<16>(Imm) || isUInt<16>(Imm);
16369}
16370
16371bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, unsigned, Align,
16372 MachineMemOperand::Flags,
16373 bool *Fast) const {
16374 if (DisablePPCUnaligned)
16375 return false;
16376
16377 // PowerPC supports unaligned memory access for simple non-vector types.
16378 // Although accessing unaligned addresses is not as efficient as accessing
16379 // aligned addresses, it is generally more efficient than manual expansion,
16380   // and it generally traps into software emulation only when crossing
16381   // page boundaries.
16382
16383 if (!VT.isSimple())
16384 return false;
16385
16386 if (VT.isFloatingPoint() && !VT.isVector() &&
16387 !Subtarget.allowsUnalignedFPAccess())
16388 return false;
16389
16390 if (VT.getSimpleVT().isVector()) {
16391 if (Subtarget.hasVSX()) {
16392 if (VT != MVT::v2f64 && VT != MVT::v2i64 &&
16393 VT != MVT::v4f32 && VT != MVT::v4i32)
16394 return false;
16395 } else {
16396 return false;
16397 }
16398 }
16399
16400 if (VT == MVT::ppcf128)
16401 return false;
16402
16403 if (Fast)
16404 *Fast = true;
16405
16406 return true;
16407}
16408
16409bool PPCTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
16410 SDValue C) const {
16411 // Check integral scalar types.
16412 if (!VT.isScalarInteger())
16413 return false;
16414 if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
16415 if (!ConstNode->getAPIntValue().isSignedIntN(64))
16416 return false;
16417     // This transformation will generate >= 2 operations. But the following
16418     // cases will generate <= 2 instructions during ISel, so exclude them:
16419     // 1. If the constant multiplier fits in 16 bits, it can be handled by
16420     //    one HW instruction, i.e. MULLI.
16421     // 2. If the multiplier fits in 16 bits after shifting, only one more
16422     //    instruction than case 1 is needed, i.e. MULLI and RLDICR.
16423 int64_t Imm = ConstNode->getSExtValue();
16424 unsigned Shift = countTrailingZeros<uint64_t>(Imm);
16425 Imm >>= Shift;
16426 if (isInt<16>(Imm))
16427 return false;
16428 uint64_t UImm = static_cast<uint64_t>(Imm);
16429 if (isPowerOf2_64(UImm + 1) || isPowerOf2_64(UImm - 1) ||
16430 isPowerOf2_64(1 - UImm) || isPowerOf2_64(-1 - UImm))
16431 return true;
16432 }
16433 return false;
16434}
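// [Editor's worked examples, not part of the source]
//   Imm = 4064 = 127 << 5: after stripping the 5 trailing zeros, 127 fits in
//   16 bits, so we return false and ISel emits MULLI + RLDICR.
//   Imm = 65537 = (1 << 16) + 1: UImm - 1 is a power of two, so we return
//   true and the generic combiner expands the multiply to (add (shl X, 16), X).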
16435
16436bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
16437 EVT VT) const {
16438 return isFMAFasterThanFMulAndFAdd(
16439 MF.getFunction(), VT.getTypeForEVT(MF.getFunction().getContext()));
16440}
16441
16442bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const Function &F,
16443 Type *Ty) const {
16444 switch (Ty->getScalarType()->getTypeID()) {
16445 case Type::FloatTyID:
16446 case Type::DoubleTyID:
16447 return true;
16448 case Type::FP128TyID:
16449 return Subtarget.hasP9Vector();
16450 default:
16451 return false;
16452 }
16453}
16454
16455// FIXME: add more patterns which are not profitable to hoist.
16456bool PPCTargetLowering::isProfitableToHoist(Instruction *I) const {
16457 if (!I->hasOneUse())
16458 return true;
16459
16460 Instruction *User = I->user_back();
16461   assert(User && "A single use instruction with no uses.");
16462
16463 switch (I->getOpcode()) {
16464 case Instruction::FMul: {
16465 // Don't break FMA, PowerPC prefers FMA.
16466 if (User->getOpcode() != Instruction::FSub &&
16467 User->getOpcode() != Instruction::FAdd)
16468 return true;
16469
16470 const TargetOptions &Options = getTargetMachine().Options;
16471 const Function *F = I->getFunction();
16472 const DataLayout &DL = F->getParent()->getDataLayout();
16473 Type *Ty = User->getOperand(0)->getType();
16474
16475 return !(
16476 isFMAFasterThanFMulAndFAdd(*F, Ty) &&
16477 isOperationLegalOrCustom(ISD::FMA, getValueType(DL, Ty)) &&
16478 (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath));
16479 }
16480 case Instruction::Load: {
16481     // Don't break the "store (load float*)" pattern; it will be combined
16482     // into "store (load int32)" by a later InstCombine pass (see function
16483     // combineLoadToOperationType). On PowerPC, loading a floating-point
16484     // value takes more cycles than loading a 32-bit integer.
16485     LoadInst *LI = cast<LoadInst>(I);
16486     // For loads that combineLoadToOperationType leaves alone, such as
16487     // ordered loads, it should be profitable to hoist them.
16488     // A swifterror load can only be of pointer-to-pointer type, so the
16489     // later type check gets rid of that case.
16490 if (!LI->isUnordered())
16491 return true;
16492
16493 if (User->getOpcode() != Instruction::Store)
16494 return true;
16495
16496 if (I->getType()->getTypeID() != Type::FloatTyID)
16497 return true;
16498
16499 return false;
16500 }
16501 default:
16502 return true;
16503 }
16504 return true;
16505}
16506
16507const MCPhysReg *
16508PPCTargetLowering::getScratchRegisters(CallingConv::ID) const {
16509 // LR is a callee-save register, but we must treat it as clobbered by any call
16510 // site. Hence we include LR in the scratch registers, which are in turn added
16511 // as implicit-defs for stackmaps and patchpoints. The same reasoning applies
16512 // to CTR, which is used by any indirect call.
16513 static const MCPhysReg ScratchRegs[] = {
16514 PPC::X12, PPC::LR8, PPC::CTR8, 0
16515 };
16516
16517 return ScratchRegs;
16518}
16519
16520Register PPCTargetLowering::getExceptionPointerRegister(
16521 const Constant *PersonalityFn) const {
16522 return Subtarget.isPPC64() ? PPC::X3 : PPC::R3;
16523}
16524
16525Register PPCTargetLowering::getExceptionSelectorRegister(
16526 const Constant *PersonalityFn) const {
16527 return Subtarget.isPPC64() ? PPC::X4 : PPC::R4;
16528}
16529
16530bool
16531PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
16532 EVT VT , unsigned DefinedValues) const {
16533 if (VT == MVT::v2i64)
16534 return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves
16535
16536 if (Subtarget.hasVSX())
16537 return true;
16538
16539 return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
16540}
16541
16542Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
16543 if (DisableILPPref || Subtarget.enableMachineScheduler())
16544 return TargetLowering::getSchedulingPreference(N);
16545
16546 return Sched::ILP;
16547}
16548
16549// Create a fast isel object.
16550FastISel *
16551PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo,
16552 const TargetLibraryInfo *LibInfo) const {
16553 return PPC::createFastISel(FuncInfo, LibInfo);
16554}
16555
16556// 'Inverted' means the FMA opcode after negating one multiplicand.
16557// For example, (fma -a b c) = (fnmsub a b c)
16558static unsigned invertFMAOpcode(unsigned Opc) {
16559 switch (Opc) {
16560 default:
16561     llvm_unreachable("Invalid FMA opcode for PowerPC!");
16562 case ISD::FMA:
16563 return PPCISD::FNMSUB;
16564 case PPCISD::FNMSUB:
16565 return ISD::FMA;
16566 }
16567}
16568
16569SDValue PPCTargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
16570 bool LegalOps, bool OptForSize,
16571 NegatibleCost &Cost,
16572 unsigned Depth) const {
16573 if (Depth > SelectionDAG::MaxRecursionDepth)
16574 return SDValue();
16575
16576 unsigned Opc = Op.getOpcode();
16577 EVT VT = Op.getValueType();
16578 SDNodeFlags Flags = Op.getNode()->getFlags();
16579
16580 switch (Opc) {
16581 case PPCISD::FNMSUB:
16582 if (!Op.hasOneUse() || !isTypeLegal(VT))
16583 break;
16584
16585 const TargetOptions &Options = getTargetMachine().Options;
16586 SDValue N0 = Op.getOperand(0);
16587 SDValue N1 = Op.getOperand(1);
16588 SDValue N2 = Op.getOperand(2);
16589 SDLoc Loc(Op);
16590
16591 NegatibleCost N2Cost = NegatibleCost::Expensive;
16592 SDValue NegN2 =
16593 getNegatedExpression(N2, DAG, LegalOps, OptForSize, N2Cost, Depth + 1);
16594
16595 if (!NegN2)
16596 return SDValue();
16597
16598 // (fneg (fnmsub a b c)) => (fnmsub (fneg a) b (fneg c))
16599 // (fneg (fnmsub a b c)) => (fnmsub a (fneg b) (fneg c))
16600     // These transformations may change the sign of zero. For example,
16601 // -(-ab-(-c))=-0 while -(-(ab-c))=+0 when a=b=c=1.
16602 if (Flags.hasNoSignedZeros() || Options.NoSignedZerosFPMath) {
16603       // Try to choose the cheaper operand to negate.
16604 NegatibleCost N0Cost = NegatibleCost::Expensive;
16605 SDValue NegN0 = getNegatedExpression(N0, DAG, LegalOps, OptForSize,
16606 N0Cost, Depth + 1);
16607
16608 NegatibleCost N1Cost = NegatibleCost::Expensive;
16609 SDValue NegN1 = getNegatedExpression(N1, DAG, LegalOps, OptForSize,
16610 N1Cost, Depth + 1);
16611
16612 if (NegN0 && N0Cost <= N1Cost) {
16613 Cost = std::min(N0Cost, N2Cost);
16614 return DAG.getNode(Opc, Loc, VT, NegN0, N1, NegN2, Flags);
16615 } else if (NegN1) {
16616 Cost = std::min(N1Cost, N2Cost);
16617 return DAG.getNode(Opc, Loc, VT, N0, NegN1, NegN2, Flags);
16618 }
16619 }
16620
16621 // (fneg (fnmsub a b c)) => (fma a b (fneg c))
16622 if (isOperationLegal(ISD::FMA, VT)) {
16623 Cost = N2Cost;
16624 return DAG.getNode(ISD::FMA, Loc, VT, N0, N1, NegN2, Flags);
16625 }
16626
16627 break;
16628 }
16629
16630 return TargetLowering::getNegatedExpression(Op, DAG, LegalOps, OptForSize,
16631 Cost, Depth);
16632}
16633
16634// Override to enable LOAD_STACK_GUARD lowering on Linux.
16635bool PPCTargetLowering::useLoadStackGuardNode() const {
16636 if (!Subtarget.isTargetLinux())
16637 return TargetLowering::useLoadStackGuardNode();
16638 return true;
16639}
16640
16641 // Override to disable global variable loading on Linux and to insert the
16642 // AIX canary word declaration.
16643void PPCTargetLowering::insertSSPDeclarations(Module &M) const {
16644 if (Subtarget.isAIXABI()) {
16645 M.getOrInsertGlobal(AIXSSPCanaryWordName,
16646 Type::getInt8PtrTy(M.getContext()));
16647 return;
16648 }
16649 if (!Subtarget.isTargetLinux())
16650 return TargetLowering::insertSSPDeclarations(M);
16651}
16652
16653Value *PPCTargetLowering::getSDagStackGuard(const Module &M) const {
16654 if (Subtarget.isAIXABI())
16655 return M.getGlobalVariable(AIXSSPCanaryWordName);
16656 return TargetLowering::getSDagStackGuard(M);
16657}
16658
16659bool PPCTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
16660 bool ForCodeSize) const {
16661 if (!VT.isSimple() || !Subtarget.hasVSX())
16662 return false;
16663
16664 switch(VT.getSimpleVT().SimpleTy) {
16665 default:
16666 // For FP types that are currently not supported by PPC backend, return
16667 // false. Examples: f16, f80.
16668 return false;
16669 case MVT::f32:
16670 case MVT::f64:
16671 if (Subtarget.hasPrefixInstrs()) {
16672       // We can materialize all immediates via XXSPLTI32DX and XXSPLTIDP.
16673 return true;
16674 }
16675     LLVM_FALLTHROUGH;
16676 case MVT::ppcf128:
16677 return Imm.isPosZero();
16678 }
16679}
16680
16681// For vector shift operation op, fold
16682// (op x, (and y, ((1 << numbits(x)) - 1))) -> (target op x, y)
16683static SDValue stripModuloOnShift(const TargetLowering &TLI, SDNode *N,
16684 SelectionDAG &DAG) {
16685 SDValue N0 = N->getOperand(0);
16686 SDValue N1 = N->getOperand(1);
16687 EVT VT = N0.getValueType();
16688 unsigned OpSizeInBits = VT.getScalarSizeInBits();
16689 unsigned Opcode = N->getOpcode();
16690 unsigned TargetOpcode;
16691
16692 switch (Opcode) {
16693 default:
16694    llvm_unreachable("Unexpected shift operation");
16695 case ISD::SHL:
16696 TargetOpcode = PPCISD::SHL;
16697 break;
16698 case ISD::SRL:
16699 TargetOpcode = PPCISD::SRL;
16700 break;
16701 case ISD::SRA:
16702 TargetOpcode = PPCISD::SRA;
16703 break;
16704 }
16705
16706 if (VT.isVector() && TLI.isOperationLegal(Opcode, VT) &&
16707 N1->getOpcode() == ISD::AND)
16708 if (ConstantSDNode *Mask = isConstOrConstSplat(N1->getOperand(1)))
16709 if (Mask->getZExtValue() == OpSizeInBits - 1)
16710 return DAG.getNode(TargetOpcode, SDLoc(N), VT, N0, N1->getOperand(0));
16711
16712 return SDValue();
16713}
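// Illustrative sketch, not from the original source: the fold above presumes
// that the target vector shifts already interpret the amount modulo the
// element width, so the explicit mask is redundant. With v4i32 elements:
//   (shl v4i32 %x, (and v4i32 %y, splat(31)))  -->  (PPCISD::SHL %x, %y)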
16714
16715SDValue PPCTargetLowering::combineSHL(SDNode *N, DAGCombinerInfo &DCI) const {
16716 if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
16717 return Value;
16718
16719 SDValue N0 = N->getOperand(0);
16720 ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1));
16721 if (!Subtarget.isISA3_0() || !Subtarget.isPPC64() ||
16722 N0.getOpcode() != ISD::SIGN_EXTEND ||
16723 N0.getOperand(0).getValueType() != MVT::i32 || CN1 == nullptr ||
16724 N->getValueType(0) != MVT::i64)
16725 return SDValue();
16726
16727 // We can't save an operation here if the value is already extended, and
16728 // the existing shift is easier to combine.
16729 SDValue ExtsSrc = N0.getOperand(0);
16730 if (ExtsSrc.getOpcode() == ISD::TRUNCATE &&
16731 ExtsSrc.getOperand(0).getOpcode() == ISD::AssertSext)
16732 return SDValue();
16733
16734 SDLoc DL(N0);
16735 SDValue ShiftBy = SDValue(CN1, 0);
16736  // We want the shift amount to be i32 on the extswsli, but the shift
16737  // amount coming in could be i64.
16738 if (ShiftBy.getValueType() == MVT::i64)
16739 ShiftBy = DCI.DAG.getConstant(CN1->getZExtValue(), DL, MVT::i32);
16740
16741 return DCI.DAG.getNode(PPCISD::EXTSWSLI, DL, MVT::i64, N0->getOperand(0),
16742 ShiftBy);
16743}
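// Illustrative sketch (hypothetical values): on ISA 3.0 PPC64, the combine
// above folds the sign-extend and shift into one EXTSWSLI node, e.g.
//   (shl i64 (sign_extend i32 %a), 3)  -->  (PPCISD::EXTSWSLI %a, 3)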
16744
16745SDValue PPCTargetLowering::combineSRA(SDNode *N, DAGCombinerInfo &DCI) const {
16746 if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
16747 return Value;
16748
16749 return SDValue();
16750}
16751
16752SDValue PPCTargetLowering::combineSRL(SDNode *N, DAGCombinerInfo &DCI) const {
16753 if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
16754 return Value;
16755
16756 return SDValue();
16757}
16758
16759// Transform (add X, (zext(setne Z, C))) -> (addze X, (addic (addi Z, -C), -1))
16760// Transform (add X, (zext(sete Z, C))) -> (addze X, (subfic (addi Z, -C), 0))
16761// When C is zero, the expression (addi Z, -C) simplifies to Z.
16762// Requirement: -C in [-32768, 32767], X and Z are MVT::i64 types
16763static SDValue combineADDToADDZE(SDNode *N, SelectionDAG &DAG,
16764 const PPCSubtarget &Subtarget) {
16765 if (!Subtarget.isPPC64())
16766 return SDValue();
16767
16768 SDValue LHS = N->getOperand(0);
16769 SDValue RHS = N->getOperand(1);
16770
16771 auto isZextOfCompareWithConstant = [](SDValue Op) {
16772 if (Op.getOpcode() != ISD::ZERO_EXTEND || !Op.hasOneUse() ||
16773 Op.getValueType() != MVT::i64)
16774 return false;
16775
16776 SDValue Cmp = Op.getOperand(0);
16777 if (Cmp.getOpcode() != ISD::SETCC || !Cmp.hasOneUse() ||
16778 Cmp.getOperand(0).getValueType() != MVT::i64)
16779 return false;
16780
16781 if (auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1))) {
16782 int64_t NegConstant = 0 - Constant->getSExtValue();
16783      // Due to the limitations of the addi instruction,
16784      // -C is required to be in [-32768, 32767].
16785 return isInt<16>(NegConstant);
16786 }
16787
16788 return false;
16789 };
16790
16791 bool LHSHasPattern = isZextOfCompareWithConstant(LHS);
16792 bool RHSHasPattern = isZextOfCompareWithConstant(RHS);
16793
16794 // If there is a pattern, canonicalize a zext operand to the RHS.
16795 if (LHSHasPattern && !RHSHasPattern)
16796 std::swap(LHS, RHS);
16797 else if (!LHSHasPattern && !RHSHasPattern)
16798 return SDValue();
16799
16800 SDLoc DL(N);
16801 SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Glue);
16802 SDValue Cmp = RHS.getOperand(0);
16803 SDValue Z = Cmp.getOperand(0);
16804 auto *Constant = cast<ConstantSDNode>(Cmp.getOperand(1));
16805 int64_t NegConstant = 0 - Constant->getSExtValue();
16806
16807 switch(cast<CondCodeSDNode>(Cmp.getOperand(2))->get()) {
16808 default: break;
16809 case ISD::SETNE: {
16810 // when C == 0
16811 // --> addze X, (addic Z, -1).carry
16812 // /
16813 // add X, (zext(setne Z, C))--
16814 // \ when -32768 <= -C <= 32767 && C != 0
16815 // --> addze X, (addic (addi Z, -C), -1).carry
16816 SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
16817 DAG.getConstant(NegConstant, DL, MVT::i64));
16818 SDValue AddOrZ = NegConstant != 0 ? Add : Z;
16819 SDValue Addc = DAG.getNode(ISD::ADDC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
16820 AddOrZ, DAG.getConstant(-1ULL, DL, MVT::i64));
16821 return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
16822 SDValue(Addc.getNode(), 1));
16823 }
16824 case ISD::SETEQ: {
16825 // when C == 0
16826 // --> addze X, (subfic Z, 0).carry
16827 // /
16828 // add X, (zext(sete Z, C))--
16829 // \ when -32768 <= -C <= 32767 && C != 0
16830 // --> addze X, (subfic (addi Z, -C), 0).carry
16831 SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
16832 DAG.getConstant(NegConstant, DL, MVT::i64));
16833 SDValue AddOrZ = NegConstant != 0 ? Add : Z;
16834 SDValue Subc = DAG.getNode(ISD::SUBC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
16835 DAG.getConstant(0, DL, MVT::i64), AddOrZ);
16836 return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
16837 SDValue(Subc.getNode(), 1));
16838 }
16839 }
16840
16841 return SDValue();
16842}
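// Illustrative sketch of the final sequence for the SETNE case with C == 0,
// assuming the usual addic/addze carry semantics:
//   addic r0, rZ, -1   ; carry = (Z != 0), the sum itself is unused
//   addze rD, rX       ; rD = X + carry = X + (Z != 0)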
16843
16844// Transform
16845// (add C1, (MAT_PCREL_ADDR GlobalAddr+C2)) to
16846// (MAT_PCREL_ADDR GlobalAddr+(C1+C2))
16847// In this case both C1 and C2 must be known constants.
16848// C1+C2 must fit into a 34-bit signed integer.
16849static SDValue combineADDToMAT_PCREL_ADDR(SDNode *N, SelectionDAG &DAG,
16850 const PPCSubtarget &Subtarget) {
16851 if (!Subtarget.isUsingPCRelativeCalls())
16852 return SDValue();
16853
16854  // Check both Operand 0 and Operand 1 of the ADD node for the PCRel node.
16855  // If we find that node, try to cast the Global Address and the Constant.
16856 SDValue LHS = N->getOperand(0);
16857 SDValue RHS = N->getOperand(1);
16858
16859 if (LHS.getOpcode() != PPCISD::MAT_PCREL_ADDR)
16860 std::swap(LHS, RHS);
16861
16862 if (LHS.getOpcode() != PPCISD::MAT_PCREL_ADDR)
16863 return SDValue();
16864
16865 // Operand zero of PPCISD::MAT_PCREL_ADDR is the GA node.
16866 GlobalAddressSDNode *GSDN = dyn_cast<GlobalAddressSDNode>(LHS.getOperand(0));
16867 ConstantSDNode* ConstNode = dyn_cast<ConstantSDNode>(RHS);
16868
16869 // Check that both casts succeeded.
16870 if (!GSDN || !ConstNode)
16871 return SDValue();
16872
16873 int64_t NewOffset = GSDN->getOffset() + ConstNode->getSExtValue();
16874 SDLoc DL(GSDN);
16875
16876 // The signed int offset needs to fit in 34 bits.
16877 if (!isInt<34>(NewOffset))
16878 return SDValue();
16879
16880 // The new global address is a copy of the old global address except
16881 // that it has the updated Offset.
16882 SDValue GA =
16883 DAG.getTargetGlobalAddress(GSDN->getGlobal(), DL, GSDN->getValueType(0),
16884 NewOffset, GSDN->getTargetFlags());
16885 SDValue MatPCRel =
16886 DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, GSDN->getValueType(0), GA);
16887 return MatPCRel;
16888}
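// Illustrative example (hypothetical constants): with C1 = 16 and an existing
// global offset C2 = 8, the combine above produces
//   (add 16, (MAT_PCREL_ADDR @g + 8))  -->  (MAT_PCREL_ADDR @g + 24)
// which is valid because 24 fits in a signed 34-bit immediate.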
16889
16890SDValue PPCTargetLowering::combineADD(SDNode *N, DAGCombinerInfo &DCI) const {
16891 if (auto Value = combineADDToADDZE(N, DCI.DAG, Subtarget))
16892 return Value;
16893
16894 if (auto Value = combineADDToMAT_PCREL_ADDR(N, DCI.DAG, Subtarget))
16895 return Value;
16896
16897 return SDValue();
16898}
16899
16900// Detect TRUNCATE operations on bitcasts of float128 values.
16901// What we are looking for here is the situation where we extract a subset
16902// of bits from a 128-bit float.
16903// This can be of two forms:
16904// 1) BITCAST of f128 feeding TRUNCATE
16905// 2) BITCAST of f128 feeding SRL (a shift) feeding TRUNCATE
16906// The reason this is required is because we do not have a legal i128 type
16907// and so we want to prevent having to store the f128 and then reload part
16908// of it.
16909SDValue PPCTargetLowering::combineTRUNCATE(SDNode *N,
16910 DAGCombinerInfo &DCI) const {
16911 // If we are using CRBits then try that first.
16912 if (Subtarget.useCRBits()) {
16913 // Check if CRBits did anything and return that if it did.
16914 if (SDValue CRTruncValue = DAGCombineTruncBoolExt(N, DCI))
16915 return CRTruncValue;
16916 }
16917
16918 SDLoc dl(N);
16919 SDValue Op0 = N->getOperand(0);
16920
16921 // fold (truncate (abs (sub (zext a), (zext b)))) -> (vabsd a, b)
16922 if (Subtarget.hasP9Altivec() && Op0.getOpcode() == ISD::ABS) {
16923 EVT VT = N->getValueType(0);
16924 if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
16925 return SDValue();
16926 SDValue Sub = Op0.getOperand(0);
16927 if (Sub.getOpcode() == ISD::SUB) {
16928 SDValue SubOp0 = Sub.getOperand(0);
16929 SDValue SubOp1 = Sub.getOperand(1);
16930 if ((SubOp0.getOpcode() == ISD::ZERO_EXTEND) &&
16931 (SubOp1.getOpcode() == ISD::ZERO_EXTEND)) {
16932 return DCI.DAG.getNode(PPCISD::VABSD, dl, VT, SubOp0.getOperand(0),
16933 SubOp1.getOperand(0),
16934 DCI.DAG.getTargetConstant(0, dl, MVT::i32));
16935 }
16936 }
16937 }
16938
16939 // Looking for a truncate of i128 to i64.
16940 if (Op0.getValueType() != MVT::i128 || N->getValueType(0) != MVT::i64)
16941 return SDValue();
16942
16943 int EltToExtract = DCI.DAG.getDataLayout().isBigEndian() ? 1 : 0;
16944
16945 // SRL feeding TRUNCATE.
16946 if (Op0.getOpcode() == ISD::SRL) {
16947 ConstantSDNode *ConstNode = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
16948 // The right shift has to be by 64 bits.
16949 if (!ConstNode || ConstNode->getZExtValue() != 64)
16950 return SDValue();
16951
16952 // Switch the element number to extract.
16953 EltToExtract = EltToExtract ? 0 : 1;
16954 // Update Op0 past the SRL.
16955 Op0 = Op0.getOperand(0);
16956 }
16957
16958 // BITCAST feeding a TRUNCATE possibly via SRL.
16959 if (Op0.getOpcode() == ISD::BITCAST &&
16960 Op0.getValueType() == MVT::i128 &&
16961 Op0.getOperand(0).getValueType() == MVT::f128) {
16962 SDValue Bitcast = DCI.DAG.getBitcast(MVT::v2i64, Op0.getOperand(0));
16963 return DCI.DAG.getNode(
16964 ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Bitcast,
16965 DCI.DAG.getTargetConstant(EltToExtract, dl, MVT::i32));
16966 }
16967 return SDValue();
16968}
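// Illustrative sketch (little-endian element numbering assumed): extracting
// the high 64 bits of an f128 without a store/reload becomes
//   (trunc i64 (srl (bitcast f128 %v to i128), 64))
//     -->  (extract_vector_elt (bitcast %v to v2i64), 1)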
16969
16970SDValue PPCTargetLowering::combineMUL(SDNode *N, DAGCombinerInfo &DCI) const {
16971 SelectionDAG &DAG = DCI.DAG;
16972
16973 ConstantSDNode *ConstOpOrElement = isConstOrConstSplat(N->getOperand(1));
16974 if (!ConstOpOrElement)
16975 return SDValue();
16976
16977  // An imul is usually smaller than the alternative sequence for a legal type.
16978 if (DAG.getMachineFunction().getFunction().hasMinSize() &&
16979 isOperationLegal(ISD::MUL, N->getValueType(0)))
16980 return SDValue();
16981
16982 auto IsProfitable = [this](bool IsNeg, bool IsAddOne, EVT VT) -> bool {
16983 switch (this->Subtarget.getCPUDirective()) {
16984 default:
16985 // TODO: enhance the condition for subtarget before pwr8
16986 return false;
16987 case PPC::DIR_PWR8:
16988 // type mul add shl
16989 // scalar 4 1 1
16990 // vector 7 2 2
16991 return true;
16992 case PPC::DIR_PWR9:
16993 case PPC::DIR_PWR10:
16994 case PPC::DIR_PWR_FUTURE:
16995 // type mul add shl
16996 // scalar 5 2 2
16997 // vector 7 2 2
16998
16999      // The cycle ratios of the related operations are shown in the table
17000      // above. Because mul is 5 (scalar) / 7 (vector) while add/sub/shl are
17001      // all 2 for both scalar and vector types, the 2-instruction patterns
17002      // (add/sub + shl, 4 cycles total) are always profitable, whereas the
17003      // 3-instruction pattern (mul x, -(2^N + 1)) => -(add (shl x, N), x)
17004      // (sub + add + shl, 6 cycles total) is only profitable for vector types.
17005 return IsAddOne && IsNeg ? VT.isVector() : true;
17006 }
17007 };
17008
17009 EVT VT = N->getValueType(0);
17010 SDLoc DL(N);
17011
17012 const APInt &MulAmt = ConstOpOrElement->getAPIntValue();
17013 bool IsNeg = MulAmt.isNegative();
17014 APInt MulAmtAbs = MulAmt.abs();
17015
17016 if ((MulAmtAbs - 1).isPowerOf2()) {
17017 // (mul x, 2^N + 1) => (add (shl x, N), x)
17018 // (mul x, -(2^N + 1)) => -(add (shl x, N), x)
17019
17020 if (!IsProfitable(IsNeg, true, VT))
17021 return SDValue();
17022
17023 SDValue Op0 = N->getOperand(0);
17024 SDValue Op1 =
17025 DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
17026 DAG.getConstant((MulAmtAbs - 1).logBase2(), DL, VT));
17027 SDValue Res = DAG.getNode(ISD::ADD, DL, VT, Op0, Op1);
17028
17029 if (!IsNeg)
17030 return Res;
17031
17032 return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Res);
17033 } else if ((MulAmtAbs + 1).isPowerOf2()) {
17034 // (mul x, 2^N - 1) => (sub (shl x, N), x)
17035 // (mul x, -(2^N - 1)) => (sub x, (shl x, N))
17036
17037 if (!IsProfitable(IsNeg, false, VT))
17038 return SDValue();
17039
17040 SDValue Op0 = N->getOperand(0);
17041 SDValue Op1 =
17042 DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
17043 DAG.getConstant((MulAmtAbs + 1).logBase2(), DL, VT));
17044
17045 if (!IsNeg)
17046 return DAG.getNode(ISD::SUB, DL, VT, Op1, Op0);
17047 else
17048 return DAG.getNode(ISD::SUB, DL, VT, Op0, Op1);
17049
17050 } else {
17051 return SDValue();
17052 }
17053}
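// Worked examples of the decompositions above (scalar i64, hypothetical
// constants):
//   mul x, 5   =>  add (shl x, 2), x    ; 5  = 2^2 + 1
//   mul x, 7   =>  sub (shl x, 3), x    ; 7  = 2^3 - 1
//   mul x, -7  =>  sub x, (shl x, 3)    ; -7 = -(2^3 - 1)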
17054
17055// Combine an fma-like op (such as fnmsub) with fnegs to the appropriate op.
17056// Do this in the combiner since we need to check SD flags and other subtarget features.
17057SDValue PPCTargetLowering::combineFMALike(SDNode *N,
17058 DAGCombinerInfo &DCI) const {
17059 SDValue N0 = N->getOperand(0);
17060 SDValue N1 = N->getOperand(1);
17061 SDValue N2 = N->getOperand(2);
17062 SDNodeFlags Flags = N->getFlags();
17063 EVT VT = N->getValueType(0);
17064 SelectionDAG &DAG = DCI.DAG;
17065 const TargetOptions &Options = getTargetMachine().Options;
17066 unsigned Opc = N->getOpcode();
17067 bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
17068 bool LegalOps = !DCI.isBeforeLegalizeOps();
17069 SDLoc Loc(N);
17070
17071 if (!isOperationLegal(ISD::FMA, VT))
17072 return SDValue();
17073
17074 // Allowing transformation to FNMSUB may change sign of zeroes when ab-c=0
17075 // since (fnmsub a b c)=-0 while c-ab=+0.
17076 if (!Flags.hasNoSignedZeros() && !Options.NoSignedZerosFPMath)
17077 return SDValue();
17078
17079 // (fma (fneg a) b c) => (fnmsub a b c)
17080 // (fnmsub (fneg a) b c) => (fma a b c)
17081 if (SDValue NegN0 = getCheaperNegatedExpression(N0, DAG, LegalOps, CodeSize))
17082 return DAG.getNode(invertFMAOpcode(Opc), Loc, VT, NegN0, N1, N2, Flags);
17083
17084 // (fma a (fneg b) c) => (fnmsub a b c)
17085 // (fnmsub a (fneg b) c) => (fma a b c)
17086 if (SDValue NegN1 = getCheaperNegatedExpression(N1, DAG, LegalOps, CodeSize))
17087 return DAG.getNode(invertFMAOpcode(Opc), Loc, VT, N0, NegN1, N2, Flags);
17088
17089 return SDValue();
17090}
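// Illustrative identities behind the inversion above (exact arithmetic;
// signed zeros are excluded by the flag check):
//   fma(-a, b, c)    = (-a)*b + c    = -(a*b - c) = fnmsub(a, b, c)
//   fnmsub(-a, b, c) = -((-a)*b - c) = a*b + c    = fma(a, b, c)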
17091
17092bool PPCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
17093  // Only duplicate to increase tail-calls for the 64-bit SysV ABIs.
17094 if (!Subtarget.is64BitELFABI())
17095 return false;
17096
17097 // If not a tail call then no need to proceed.
17098 if (!CI->isTailCall())
17099 return false;
17100
17101  // If sibling calls have been disabled and tail-calls aren't guaranteed,
17102  // there is no reason to duplicate.
17103 auto &TM = getTargetMachine();
17104 if (!TM.Options.GuaranteedTailCallOpt && DisableSCO)
17105 return false;
17106
17107 // Can't tail call a function called indirectly, or if it has variadic args.
17108 const Function *Callee = CI->getCalledFunction();
17109 if (!Callee || Callee->isVarArg())
17110 return false;
17111
17112 // Make sure the callee and caller calling conventions are eligible for tco.
17113 const Function *Caller = CI->getParent()->getParent();
17114 if (!areCallingConvEligibleForTCO_64SVR4(Caller->getCallingConv(),
17115 CI->getCallingConv()))
17116 return false;
17117
17118  // If the function is local then we have a good chance of tail-calling it.
17119 return getTargetMachine().shouldAssumeDSOLocal(*Caller->getParent(), Callee);
17120}
17121
17122bool PPCTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
17123 if (!Subtarget.hasVSX())
17124 return false;
17125 if (Subtarget.hasP9Vector() && VT == MVT::f128)
17126 return true;
17127 return VT == MVT::f32 || VT == MVT::f64 ||
17128 VT == MVT::v4f32 || VT == MVT::v2f64;
17129}
17130
17131bool PPCTargetLowering::
17132isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
17133 const Value *Mask = AndI.getOperand(1);
17134 // If the mask is suitable for andi. or andis. we should sink the and.
17135 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Mask)) {
17136 // Can't handle constants wider than 64-bits.
17137 if (CI->getBitWidth() > 64)
17138 return false;
17139 int64_t ConstVal = CI->getZExtValue();
17140 return isUInt<16>(ConstVal) ||
17141 (isUInt<16>(ConstVal >> 16) && !(ConstVal & 0xFFFF));
17142 }
17143
17144 // For non-constant masks, we can always use the record-form and.
17145 return true;
17146}
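// Illustrative mask values for the check above (hypothetical constants):
//   0x0000FFFF  -> sink the and (encodable as a single andi.)
//   0xFFFF0000  -> sink the and (encodable as a single andis.)
//   0x00FF00FF  -> do not sink (not encodable as one andi./andis.)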
17147
17148// Transform (abs (sub (zext a), (zext b))) to (vabsd a b 0)
17149// Transform (abs (sub (zext a), (zext_invec b))) to (vabsd a b 0)
17150// Transform (abs (sub (zext_invec a), (zext_invec b))) to (vabsd a b 0)
17151// Transform (abs (sub (zext_invec a), (zext b))) to (vabsd a b 0)
17152// Transform (abs (sub a, b)) to (vabsd a b 1) if a and b are of type v4i32
17153SDValue PPCTargetLowering::combineABS(SDNode *N, DAGCombinerInfo &DCI) const {
17154  assert((N->getOpcode() == ISD::ABS) && "Need ABS node here");
17155  assert(Subtarget.hasP9Altivec() &&
17156         "Only combine this when P9 altivec supported!");
17157 EVT VT = N->getValueType(0);
17158 if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
17159 return SDValue();
17160
17161 SelectionDAG &DAG = DCI.DAG;
17162 SDLoc dl(N);
17163 if (N->getOperand(0).getOpcode() == ISD::SUB) {
17164    // Even for signed integers, vabsd applies here: the inputs are known
17165    // to be positive (as signed integers) because they are zero-extended.
17166 unsigned SubOpcd0 = N->getOperand(0)->getOperand(0).getOpcode();
17167 unsigned SubOpcd1 = N->getOperand(0)->getOperand(1).getOpcode();
17168 if ((SubOpcd0 == ISD::ZERO_EXTEND ||
17169 SubOpcd0 == ISD::ZERO_EXTEND_VECTOR_INREG) &&
17170 (SubOpcd1 == ISD::ZERO_EXTEND ||
17171 SubOpcd1 == ISD::ZERO_EXTEND_VECTOR_INREG)) {
17172 return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
17173 N->getOperand(0)->getOperand(0),
17174 N->getOperand(0)->getOperand(1),
17175 DAG.getTargetConstant(0, dl, MVT::i32));
17176 }
17177
17178 // For type v4i32, it can be optimized with xvnegsp + vabsduw
17179 if (N->getOperand(0).getValueType() == MVT::v4i32 &&
17180 N->getOperand(0).hasOneUse()) {
17181 return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
17182 N->getOperand(0)->getOperand(0),
17183 N->getOperand(0)->getOperand(1),
17184 DAG.getTargetConstant(1, dl, MVT::i32));
17185 }
17186 }
17187
17188 return SDValue();
17189}
17190
17191// For type v4i32/v8i16/v16i8, transform
17192// from (vselect (setcc a, b, setugt), (sub a, b), (sub b, a)) to (vabsd a, b)
17193// from (vselect (setcc a, b, setuge), (sub a, b), (sub b, a)) to (vabsd a, b)
17194// from (vselect (setcc a, b, setult), (sub b, a), (sub a, b)) to (vabsd a, b)
17195// from (vselect (setcc a, b, setule), (sub b, a), (sub a, b)) to (vabsd a, b)
17196SDValue PPCTargetLowering::combineVSelect(SDNode *N,
17197 DAGCombinerInfo &DCI) const {
17198  assert((N->getOpcode() == ISD::VSELECT) && "Need VSELECT node here");
17199  assert(Subtarget.hasP9Altivec() &&
17200         "Only combine this when P9 altivec supported!");
17201
17202 SelectionDAG &DAG = DCI.DAG;
17203 SDLoc dl(N);
17204 SDValue Cond = N->getOperand(0);
17205 SDValue TrueOpnd = N->getOperand(1);
17206 SDValue FalseOpnd = N->getOperand(2);
17207 EVT VT = N->getOperand(1).getValueType();
17208
17209 if (Cond.getOpcode() != ISD::SETCC || TrueOpnd.getOpcode() != ISD::SUB ||
17210 FalseOpnd.getOpcode() != ISD::SUB)
17211 return SDValue();
17212
17213 // ABSD only available for type v4i32/v8i16/v16i8
17214 if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
17215 return SDValue();
17216
17217  // Require at least one single-use operand, so that the combine saves at
17218  // least one dependent computation.
17218 if (!(Cond.hasOneUse() || TrueOpnd.hasOneUse() || FalseOpnd.hasOneUse()))
17219 return SDValue();
17220
17221 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
17222
17223 // Can only handle unsigned comparison here
17224 switch (CC) {
17225 default:
17226 return SDValue();
17227 case ISD::SETUGT:
17228 case ISD::SETUGE:
17229 break;
17230 case ISD::SETULT:
17231 case ISD::SETULE:
17232 std::swap(TrueOpnd, FalseOpnd);
17233 break;
17234 }
17235
17236 SDValue CmpOpnd1 = Cond.getOperand(0);
17237 SDValue CmpOpnd2 = Cond.getOperand(1);
17238
17239 // SETCC CmpOpnd1 CmpOpnd2 cond
17240 // TrueOpnd = CmpOpnd1 - CmpOpnd2
17241 // FalseOpnd = CmpOpnd2 - CmpOpnd1
17242 if (TrueOpnd.getOperand(0) == CmpOpnd1 &&
17243 TrueOpnd.getOperand(1) == CmpOpnd2 &&
17244 FalseOpnd.getOperand(0) == CmpOpnd2 &&
17245 FalseOpnd.getOperand(1) == CmpOpnd1) {
17246 return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(1).getValueType(),
17247 CmpOpnd1, CmpOpnd2,
17248 DAG.getTargetConstant(0, dl, MVT::i32));
17249 }
17250
17251 return SDValue();
17252}
17253
17254/// getAddrModeForFlags - Based on the set of address flags, select the
17255/// optimal instruction format to match by.
17256PPC::AddrMode PPCTargetLowering::getAddrModeForFlags(unsigned Flags) const {
17257 // This is not a node we should be handling here.
17258 if (Flags == PPC::MOF_None)
17259 return PPC::AM_None;
17260 // Unaligned D-Forms are tried first, followed by the aligned D-Forms.
17261 for (auto FlagSet : AddrModesMap.at(PPC::AM_DForm))
17262 if ((Flags & FlagSet) == FlagSet)
17263 return PPC::AM_DForm;
17264 for (auto FlagSet : AddrModesMap.at(PPC::AM_DSForm))
17265 if ((Flags & FlagSet) == FlagSet)
17266 return PPC::AM_DSForm;
17267 for (auto FlagSet : AddrModesMap.at(PPC::AM_DQForm))
17268 if ((Flags & FlagSet) == FlagSet)
17269 return PPC::AM_DQForm;
17270 // If no other forms are selected, return an X-Form as it is the most
17271 // general addressing mode.
17272 return PPC::AM_XForm;
17273}
17274
17275/// Set alignment flags based on whether or not the Frame Index is aligned.
17276/// Utilized when computing flags for address computation during the
17277/// selection of load and store instructions.
17278static void setAlignFlagsForFI(SDValue N, unsigned &FlagSet,
17279 SelectionDAG &DAG) {
17280 bool IsAdd = ((N.getOpcode() == ISD::ADD) || (N.getOpcode() == ISD::OR));
17281 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(IsAdd ? N.getOperand(0) : N);
17282 if (!FI)
17283 return;
17284 const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
17285 unsigned FrameIndexAlign = MFI.getObjectAlign(FI->getIndex()).value();
17286 // If this is (add $FI, $S16Imm), the alignment flags are already set
17287 // based on the immediate. We just need to clear the alignment flags
17288 // if the FI alignment is weaker.
17289 if ((FrameIndexAlign % 4) != 0)
17290 FlagSet &= ~PPC::MOF_RPlusSImm16Mult4;
17291 if ((FrameIndexAlign % 16) != 0)
17292 FlagSet &= ~PPC::MOF_RPlusSImm16Mult16;
17293 // If the address is a plain FrameIndex, set alignment flags based on
17294 // FI alignment.
17295 if (!IsAdd) {
17296 if ((FrameIndexAlign % 4) == 0)
17297 FlagSet |= PPC::MOF_RPlusSImm16Mult4;
17298 if ((FrameIndexAlign % 16) == 0)
17299 FlagSet |= PPC::MOF_RPlusSImm16Mult16;
17300 }
17301}
17302
17303/// Given a node, compute flags that are used for address computation when
17304/// selecting load and store instructions. The flags computed are stored in
17305/// FlagSet. This function takes into account whether the node is a constant,
17306/// an ADD or OR, or neither, and computes the address flags accordingly.
17307static void computeFlagsForAddressComputation(SDValue N, unsigned &FlagSet,
17308 SelectionDAG &DAG) {
17309  // Set the alignment flags for the node depending on whether the node is
17310 // 4-byte or 16-byte aligned.
17311 auto SetAlignFlagsForImm = [&](uint64_t Imm) {
17312 if ((Imm & 0x3) == 0)
17313 FlagSet |= PPC::MOF_RPlusSImm16Mult4;
17314 if ((Imm & 0xf) == 0)
17315 FlagSet |= PPC::MOF_RPlusSImm16Mult16;
17316 };
17317
17318 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
17319 // All 32-bit constants can be computed as LIS + Disp.
17320 const APInt &ConstImm = CN->getAPIntValue();
17321 if (ConstImm.isSignedIntN(32)) { // Flag to handle 32-bit constants.
17322 FlagSet |= PPC::MOF_AddrIsSImm32;
17323 SetAlignFlagsForImm(ConstImm.getZExtValue());
17324 setAlignFlagsForFI(N, FlagSet, DAG);
17325 }
17326 if (ConstImm.isSignedIntN(34)) // Flag to handle 34-bit constants.
17327 FlagSet |= PPC::MOF_RPlusSImm34;
17328 else // Let constant materialization handle large constants.
17329 FlagSet |= PPC::MOF_NotAddNorCst;
17330 } else if (N.getOpcode() == ISD::ADD || provablyDisjointOr(DAG, N)) {
17331 // This address can be represented as an addition of:
17332 // - Register + Imm16 (possibly a multiple of 4/16)
17333 // - Register + Imm34
17334 // - Register + PPCISD::Lo
17335 // - Register + Register
17336 // In any case, we won't have to match this as Base + Zero.
17337 SDValue RHS = N.getOperand(1);
17338 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(RHS)) {
17339 const APInt &ConstImm = CN->getAPIntValue();
17340 if (ConstImm.isSignedIntN(16)) {
17341 FlagSet |= PPC::MOF_RPlusSImm16; // Signed 16-bit immediates.
17342 SetAlignFlagsForImm(ConstImm.getZExtValue());
17343 setAlignFlagsForFI(N, FlagSet, DAG);
17344 }
17345 if (ConstImm.isSignedIntN(34))
17346 FlagSet |= PPC::MOF_RPlusSImm34; // Signed 34-bit immediates.
17347 else
17348 FlagSet |= PPC::MOF_RPlusR; // Register.
17349 } else if (RHS.getOpcode() == PPCISD::Lo &&
17350 !cast<ConstantSDNode>(RHS.getOperand(1))->getZExtValue())
17351 FlagSet |= PPC::MOF_RPlusLo; // PPCISD::Lo.
17352 else
17353 FlagSet |= PPC::MOF_RPlusR;
17354 } else { // The address computation is not a constant or an addition.
17355 setAlignFlagsForFI(N, FlagSet, DAG);
17356 FlagSet |= PPC::MOF_NotAddNorCst;
17357 }
17358}
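// Illustrative flag computation (hypothetical node): for N = (add FrameIndex,
// 48), the constant 48 is a signed 16-bit (and thus 34-bit) immediate and a
// multiple of both 4 and 16, so the code above sets MOF_RPlusSImm16,
// MOF_RPlusSImm34, MOF_RPlusSImm16Mult4 and MOF_RPlusSImm16Mult16;
// setAlignFlagsForFI may then clear the Mult4/Mult16 bits if the frame
// object itself is less aligned.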
17359
17360/// computeMOFlags - Given a node N and its Parent (a MemSDNode), compute
17361/// the address flags of the load/store instruction that is to be matched.
17362unsigned PPCTargetLowering::computeMOFlags(const SDNode *Parent, SDValue N,
17363 SelectionDAG &DAG) const {
17364 unsigned FlagSet = PPC::MOF_None;
17365
17366 // Compute subtarget flags.
17367 if (!Subtarget.hasP9Vector())
2: Assuming the condition is true
3: Taking true branch
17368 FlagSet |= PPC::MOF_SubtargetBeforeP9;
17369 else {
17370 FlagSet |= PPC::MOF_SubtargetP9;
17371 if (Subtarget.hasPrefixInstrs())
17372 FlagSet |= PPC::MOF_SubtargetP10;
17373 }
17374 if (Subtarget.hasSPE())
4: Assuming the condition is false
5: Taking false branch
17375 FlagSet |= PPC::MOF_SubtargetSPE;
17376
17377 // Mark this as something we don't want to handle here if it is atomic
17378  // or a pre-increment instruction.
17379  if (const LSBaseSDNode *LSB = dyn_cast<LSBaseSDNode>(Parent))
6: Assuming 'Parent' is not a 'LSBaseSDNode'
6.1: 'LSB' is null
7: Taking false branch
17380 if (LSB->isIndexed())
17381 return PPC::MOF_None;
17382
17383 // Compute in-memory type flags. This is based on if there are scalars,
17384 // floats or vectors.
17385 const MemSDNode *MN = dyn_cast<MemSDNode>(Parent);
8: Assuming 'Parent' is not a 'MemSDNode'
9: 'MN' initialized to a null pointer value
17386  assert(MN && "Parent should be a MemSDNode!"); // Expands to (static_cast<void>(0)) in this NDEBUG build, so the null MN is never checked at runtime.
17387 EVT MemVT = MN->getMemoryVT();
10: Called C++ object pointer is null
17388 unsigned Size = MemVT.getSizeInBits();
17389 if (MemVT.isScalarInteger()) {
17390    assert(Size <= 128 &&
17391           "Not expecting scalar integers larger than 16 bytes!");
17392 if (Size < 32)
17393 FlagSet |= PPC::MOF_SubWordInt;
17394 else if (Size == 32)
17395 FlagSet |= PPC::MOF_WordInt;
17396 else
17397 FlagSet |= PPC::MOF_DoubleWordInt;
17398 } else if (MemVT.isVector() && !MemVT.isFloatingPoint()) { // Integer vectors.
17399 if (Size == 128)
17400 FlagSet |= PPC::MOF_Vector;
17401 else if (Size == 256)
17402 FlagSet |= PPC::MOF_Vector256;
17403 else
17404      llvm_unreachable("Not expecting illegal vectors!");
17405 } else { // Floating point type: can be scalar, f128 or vector types.
17406 if (Size == 32 || Size == 64)
17407 FlagSet |= PPC::MOF_ScalarFloat;
17408 else if (MemVT == MVT::f128 || MemVT.isVector())
17409 FlagSet |= PPC::MOF_Vector;
17410 else
17411      llvm_unreachable("Not expecting illegal scalar floats!");
17412 }
17413
17414 // Compute flags for address computation.
17415 computeFlagsForAddressComputation(N, FlagSet, DAG);
17416
17417 // Compute type extension flags.
17418 if (const LoadSDNode *LN = dyn_cast<LoadSDNode>(Parent)) {
17419 switch (LN->getExtensionType()) {
17420 case ISD::SEXTLOAD:
17421 FlagSet |= PPC::MOF_SExt;
17422 break;
17423 case ISD::EXTLOAD:
17424 case ISD::ZEXTLOAD:
17425 FlagSet |= PPC::MOF_ZExt;
17426 break;
17427 case ISD::NON_EXTLOAD:
17428 FlagSet |= PPC::MOF_NoExt;
17429 break;
17430 }
17431 } else
17432 FlagSet |= PPC::MOF_NoExt;
17433
17434 // For integers, no extension is the same as zero extension.
17435 // We set the extension mode to zero extension so we don't have
17436 // to add separate entries in AddrModesMap for loads and stores.
17437 if (MemVT.isScalarInteger() && (FlagSet & PPC::MOF_NoExt)) {
17438 FlagSet |= PPC::MOF_ZExt;
17439 FlagSet &= ~PPC::MOF_NoExt;
17440 }
17441
17442 // If we don't have prefixed instructions, 34-bit constants should be
17443 // treated as PPC::MOF_NotAddNorCst so they can match D-Forms.
17444 bool IsNonP1034BitConst =
17445 ((PPC::MOF_RPlusSImm34 | PPC::MOF_AddrIsSImm32 | PPC::MOF_SubtargetP10) &
17446 FlagSet) == PPC::MOF_RPlusSImm34;
17447 if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::OR &&
17448 IsNonP1034BitConst)
17449 FlagSet |= PPC::MOF_NotAddNorCst;
17450
17451 return FlagSet;
17452}
17453
17454/// SelectForceXFormMode - Given the specified address, force it to be
17455/// represented as an indexed [r+r] operation (an XForm instruction).
17456PPC::AddrMode PPCTargetLowering::SelectForceXFormMode(SDValue N, SDValue &Disp,
17457 SDValue &Base,
17458 SelectionDAG &DAG) const {
17459
17460 PPC::AddrMode Mode = PPC::AM_XForm;
17461 int16_t ForceXFormImm = 0;
17462 if (provablyDisjointOr(DAG, N) &&
17463 !isIntS16Immediate(N.getOperand(1), ForceXFormImm)) {
17464 Disp = N.getOperand(0);
17465 Base = N.getOperand(1);
17466 return Mode;
17467 }
17468
17469 // If the address is the result of an add, we will utilize the fact that the
17470 // address calculation includes an implicit add. However, we can reduce
17471 // register pressure if we do not materialize a constant just for use as the
17472  // index register. We only get rid of the add if it is not an add of a
17473  // value and a 16-bit signed constant where both operands have a single use.
17474 if (N.getOpcode() == ISD::ADD &&
17475 (!isIntS16Immediate(N.getOperand(1), ForceXFormImm) ||
17476 !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) {
17477 Disp = N.getOperand(0);
17478 Base = N.getOperand(1);
17479 return Mode;
17480 }
17481
17482 // Otherwise, use R0 as the base register.
17483 Disp = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
17484 N.getValueType());
17485 Base = N;
17486
17487 return Mode;
17488}
17489
17490// If we happen to match to an aligned D-Form, check if the Frame Index is
17491// adequately aligned. If it is not, reset the mode to match to X-Form.
17492static void setXFormForUnalignedFI(SDValue N, unsigned Flags,
17493 PPC::AddrMode &Mode) {
17494 if (!isa<FrameIndexSDNode>(N))
17495 return;
17496 if ((Mode == PPC::AM_DSForm && !(Flags & PPC::MOF_RPlusSImm16Mult4)) ||
17497 (Mode == PPC::AM_DQForm && !(Flags & PPC::MOF_RPlusSImm16Mult16)))
17498 Mode = PPC::AM_XForm;
17499}
17500
17501/// SelectOptimalAddrMode - Based on a node N and its Parent (a MemSDNode),
17502/// compute the address flags of the node, get the optimal address mode based
17503/// on the flags, and set the Base and Disp based on the address mode.
17504PPC::AddrMode PPCTargetLowering::SelectOptimalAddrMode(const SDNode *Parent,
17505 SDValue N, SDValue &Disp,
17506 SDValue &Base,
17507 SelectionDAG &DAG,
17508 MaybeAlign Align) const {
17509 SDLoc DL(Parent);
17510
17511 // Compute the address flags.
17512 unsigned Flags = computeMOFlags(Parent, N, DAG);
1: Calling 'PPCTargetLowering::computeMOFlags'
17513
17514 // Get the optimal address mode based on the Flags.
17515 PPC::AddrMode Mode = getAddrModeForFlags(Flags);
17516
17517 // If the address mode is DS-Form or DQ-Form, check if the FI is aligned.
17518 // Select an X-Form load if it is not.
17519 setXFormForUnalignedFI(N, Flags, Mode);
17520
17521 // Set Base and Disp accordingly depending on the address mode.
17522 switch (Mode) {
17523 case PPC::AM_DForm:
17524 case PPC::AM_DSForm:
17525 case PPC::AM_DQForm: {
17526 // This is a register plus a 16-bit immediate. The base will be the
17527    // register and the displacement will be the immediate, unless the
17528    // immediate is not sufficiently aligned.
17529 if (Flags & PPC::MOF_RPlusSImm16) {
17530 SDValue Op0 = N.getOperand(0);
17531 SDValue Op1 = N.getOperand(1);
17532 int16_t Imm = cast<ConstantSDNode>(Op1)->getAPIntValue().getZExtValue();
17533 if (!Align || isAligned(*Align, Imm)) {
17534 Disp = DAG.getTargetConstant(Imm, DL, N.getValueType());
17535 Base = Op0;
17536 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op0)) {
17537 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
17538 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
17539 }
17540 break;
17541 }
17542 }
17543 // This is a register plus the @lo relocation. The base is the register
17544 // and the displacement is the global address.
17545 else if (Flags & PPC::MOF_RPlusLo) {
17546 Disp = N.getOperand(1).getOperand(0); // The global address.
17547      assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
17548             Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
17549             Disp.getOpcode() == ISD::TargetConstantPool ||
17550             Disp.getOpcode() == ISD::TargetJumpTable);
17551 Base = N.getOperand(0);
17552 break;
17553 }
17554    // This is a constant address of at most 32 bits. The base will be
17555 // zero or load-immediate-shifted and the displacement will be
17556 // the low 16 bits of the address.
17557 else if (Flags & PPC::MOF_AddrIsSImm32) {
17558 auto *CN = cast<ConstantSDNode>(N);
17559 EVT CNType = CN->getValueType(0);
17560 uint64_t CNImm = CN->getZExtValue();
17561 // If this address fits entirely in a 16-bit sext immediate field, codegen
17562 // this as "d, 0".
17563 int16_t Imm;
17564 if (isIntS16Immediate(CN, Imm) && (!Align || isAligned(*Align, Imm))) {
17565 Disp = DAG.getTargetConstant(Imm, DL, CNType);
17566 Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
17567 CNType);
17568 break;
17569 }
17570 // Handle 32-bit sext immediate with LIS + Addr mode.
17571 if ((CNType == MVT::i32 || isInt<32>(CNImm)) &&
17572 (!Align || isAligned(*Align, CNImm))) {
17573 int32_t Addr = (int32_t)CNImm;
17574 // Otherwise, break this down into LIS + Disp.
17575 Disp = DAG.getTargetConstant((int16_t)Addr, DL, MVT::i32);
17576 Base =
17577 DAG.getTargetConstant((Addr - (int16_t)Addr) >> 16, DL, MVT::i32);
17578 uint32_t LIS = CNType == MVT::i32 ? PPC::LIS : PPC::LIS8;
17579 Base = SDValue(DAG.getMachineNode(LIS, DL, CNType, Base), 0);
17580 break;
17581 }
17582 }
17583    // Otherwise, the PPC::MOF_NotAddNorCst flag is set. The load/store is non-foldable.
17584 Disp = DAG.getTargetConstant(0, DL, getPointerTy(DAG.getDataLayout()));
17585 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
17586 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
17587 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
17588 } else
17589 Base = N;
17590 break;
17591 }
17592 case PPC::AM_None:
17593 break;
17594 default: { // By default, X-Form is always available to be selected.
17595 // When a frame index is not aligned, we also match by XForm.
17596 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N);
17597 Base = FI ? N : N.getOperand(1);
17598 Disp = FI ? DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
17599 N.getValueType())
17600 : N.getOperand(0);
17601 break;
17602 }
17603 }
17604 return Mode;
17605}
17606
17607CCAssignFn *PPCTargetLowering::ccAssignFnForCall(CallingConv::ID CC,
17608 bool Return,
17609 bool IsVarArg) const {
17610 switch (CC) {
17611 case CallingConv::Cold:
17612 return (Return ? RetCC_PPC_Cold : CC_PPC64_ELF_FIS);
17613 default:
17614 return CC_PPC64_ELF_FIS;
17615 }
17616}
17617
17618TargetLowering::AtomicExpansionKind
17619PPCTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
17620 unsigned Size = AI->getType()->getPrimitiveSizeInBits();
17621 if (EnableQuadwordAtomics && Subtarget.hasQuadwordAtomics() && Size == 128)
17622 return AtomicExpansionKind::MaskedIntrinsic;
17623 return TargetLowering::shouldExpandAtomicRMWInIR(AI);
17624}
17625
17626TargetLowering::AtomicExpansionKind
17627PPCTargetLowering::shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
17628 unsigned Size = AI->getPointerOperand()
17629 ->getType()
17630 ->getPointerElementType()
17631 ->getPrimitiveSizeInBits();
17632 if (EnableQuadwordAtomics && Subtarget.hasQuadwordAtomics() && Size == 128)
17633 return AtomicExpansionKind::MaskedIntrinsic;
17634 return TargetLowering::shouldExpandAtomicCmpXchgInIR(AI);
17635}
17636
17637static Intrinsic::ID
17638getIntrinsicForAtomicRMWBinOp128(AtomicRMWInst::BinOp BinOp) {
17639 switch (BinOp) {
17640 default:
17641    llvm_unreachable("Unexpected AtomicRMW BinOp");
17642 case AtomicRMWInst::Xchg:
17643 return Intrinsic::ppc_atomicrmw_xchg_i128;
17644 case AtomicRMWInst::Add:
17645 return Intrinsic::ppc_atomicrmw_add_i128;
17646 case AtomicRMWInst::Sub:
17647 return Intrinsic::ppc_atomicrmw_sub_i128;
17648 case AtomicRMWInst::And:
17649 return Intrinsic::ppc_atomicrmw_and_i128;
17650 case AtomicRMWInst::Or:
17651 return Intrinsic::ppc_atomicrmw_or_i128;
17652 case AtomicRMWInst::Xor:
17653 return Intrinsic::ppc_atomicrmw_xor_i128;
17654 case AtomicRMWInst::Nand:
17655 return Intrinsic::ppc_atomicrmw_nand_i128;
17656 }
17657}
17658
17659Value *PPCTargetLowering::emitMaskedAtomicRMWIntrinsic(
17660 IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
17661 Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
17662  assert(EnableQuadwordAtomics && Subtarget.hasQuadwordAtomics() &&
17663         "Only support quadword now");
17664 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
17665 Type *ValTy = cast<PointerType>(AlignedAddr->getType())->getElementType();
17666  assert(ValTy->getPrimitiveSizeInBits() == 128);
17667 Function *RMW = Intrinsic::getDeclaration(
17668 M, getIntrinsicForAtomicRMWBinOp128(AI->getOperation()));
17669 Type *Int64Ty = Type::getInt64Ty(M->getContext());
17670 Value *IncrLo = Builder.CreateTrunc(Incr, Int64Ty, "incr_lo");
17671 Value *IncrHi =
17672 Builder.CreateTrunc(Builder.CreateLShr(Incr, 64), Int64Ty, "incr_hi");
17673 Value *Addr =
17674 Builder.CreateBitCast(AlignedAddr, Type::getInt8PtrTy(M->getContext()));
17675 Value *LoHi = Builder.CreateCall(RMW, {Addr, IncrLo, IncrHi});
17676 Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
17677 Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi");
17678 Lo = Builder.CreateZExt(Lo, ValTy, "lo64");
17679 Hi = Builder.CreateZExt(Hi, ValTy, "hi64");
17680 return Builder.CreateOr(
17681 Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 64)), "val64");
17682}
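// Illustrative IR shape produced by the helper above for a 128-bit 'add'
// (a sketch; the { i64, i64 } return type is inferred from the extractvalue
// calls on LoHi):
//   %incr_lo = trunc i128 %incr to i64
//   %shr     = lshr i128 %incr, 64
//   %incr_hi = trunc i128 %shr to i64
//   %lohi    = call { i64, i64 } @llvm.ppc.atomicrmw.add.i128(
//                  i8* %addr, i64 %incr_lo, i64 %incr_hi)
//   ; result = zext(lo) | (zext(hi) << 64), reassembled as an i128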
17683
17684Value *PPCTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
17685 IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
17686 Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
17687  assert(EnableQuadwordAtomics && Subtarget.hasQuadwordAtomics() &&
17688         "Only support quadword now");
17689 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
17690 Type *ValTy = cast<PointerType>(AlignedAddr->getType())->getElementType();
17691  assert(ValTy->getPrimitiveSizeInBits() == 128);
17692 Function *IntCmpXchg =
17693 Intrinsic::getDeclaration(M, Intrinsic::ppc_cmpxchg_i128);
17694 Type *Int64Ty = Type::getInt64Ty(M->getContext());
17695 Value *CmpLo = Builder.CreateTrunc(CmpVal, Int64Ty, "cmp_lo");
17696 Value *CmpHi =
17697 Builder.CreateTrunc(Builder.CreateLShr(CmpVal, 64), Int64Ty, "cmp_hi");
17698 Value *NewLo = Builder.CreateTrunc(NewVal, Int64Ty, "new_lo");
17699 Value *NewHi =
17700 Builder.CreateTrunc(Builder.CreateLShr(NewVal, 64), Int64Ty, "new_hi");
17701 Value *Addr =
17702 Builder.CreateBitCast(AlignedAddr, Type::getInt8PtrTy(M->getContext()));
17703 emitLeadingFence(Builder, CI, Ord);
17704 Value *LoHi =
17705 Builder.CreateCall(IntCmpXchg, {Addr, CmpLo, CmpHi, NewLo, NewHi});
17706 emitTrailingFence(Builder, CI, Ord);
17707 Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
17708 Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi");
17709 Lo = Builder.CreateZExt(Lo, ValTy, "lo64");
17710 Hi = Builder.CreateZExt(Hi, ValTy, "hi64");
17711 return Builder.CreateOr(
17712 Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 64)), "val64");
17713}